1//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// Implements semantic analysis for C++ expressions.
11///
12//===----------------------------------------------------------------------===//
13
14#include "TreeTransform.h"
15#include "TypeLocBuilder.h"
16#include "clang/AST/ASTContext.h"
17#include "clang/AST/ASTLambda.h"
18#include "clang/AST/CXXInheritance.h"
19#include "clang/AST/CharUnits.h"
20#include "clang/AST/DeclCXX.h"
21#include "clang/AST/DeclObjC.h"
22#include "clang/AST/DynamicRecursiveASTVisitor.h"
23#include "clang/AST/ExprCXX.h"
24#include "clang/AST/ExprConcepts.h"
25#include "clang/AST/ExprObjC.h"
26#include "clang/AST/Type.h"
27#include "clang/AST/TypeLoc.h"
28#include "clang/Basic/AlignedAllocation.h"
29#include "clang/Basic/DiagnosticSema.h"
30#include "clang/Basic/PartialDiagnostic.h"
31#include "clang/Basic/TargetInfo.h"
32#include "clang/Basic/TokenKinds.h"
33#include "clang/Lex/Preprocessor.h"
34#include "clang/Sema/DeclSpec.h"
35#include "clang/Sema/EnterExpressionEvaluationContext.h"
36#include "clang/Sema/Initialization.h"
37#include "clang/Sema/Lookup.h"
38#include "clang/Sema/ParsedTemplate.h"
39#include "clang/Sema/Scope.h"
40#include "clang/Sema/ScopeInfo.h"
41#include "clang/Sema/SemaCUDA.h"
42#include "clang/Sema/SemaHLSL.h"
43#include "clang/Sema/SemaLambda.h"
44#include "clang/Sema/SemaObjC.h"
45#include "clang/Sema/SemaPPC.h"
46#include "clang/Sema/Template.h"
47#include "clang/Sema/TemplateDeduction.h"
48#include "llvm/ADT/APInt.h"
49#include "llvm/ADT/STLExtras.h"
50#include "llvm/ADT/StringExtras.h"
51#include "llvm/Support/ErrorHandling.h"
52#include "llvm/Support/TypeSize.h"
53#include <optional>
54using namespace clang;
55using namespace sema;
56
57ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
58 SourceLocation NameLoc,
59 const IdentifierInfo &Name) {
60 NestedNameSpecifier NNS = SS.getScopeRep();
61 QualType Type(NNS.getAsType(), 0);
62 if ([[maybe_unused]] const auto *DNT = dyn_cast<DependentNameType>(Val&: Type))
63 assert(DNT->getIdentifier() == &Name && "not a constructor name");
64
65 // This reference to the type is located entirely at the location of the
66 // final identifier in the qualified-id.
67 return CreateParsedType(T: Type,
68 TInfo: Context.getTrivialTypeSourceInfo(T: Type, Loc: NameLoc));
69}
70
ParsedType Sema::getConstructorName(const IdentifierInfo &II,
                                    SourceLocation NameLoc, Scope *S,
                                    CXXScopeSpec &SS, bool EnteringContext) {
  // Resolve a class name used in constructor position (C::C) to the type it
  // constructs. Returns a null ParsedType on error.
  CXXRecordDecl *CurClass = getCurrentClass(S, SS: &SS);
  assert(CurClass && &II == CurClass->getIdentifier() &&
         "not a constructor name");

  // When naming a constructor as a member of a dependent context (eg, in a
  // friend declaration or an inherited constructor declaration), form an
  // unresolved "typename" type.
  if (CurClass->isDependentContext() && !EnteringContext && SS.getScopeRep()) {
    QualType T = Context.getDependentNameType(Keyword: ElaboratedTypeKeyword::None,
                                              NNS: SS.getScopeRep(), Name: &II);
    return ParsedType::make(P: T);
  }

  // A qualified constructor name requires a complete class to look into.
  if (SS.isNotEmpty() && RequireCompleteDeclContext(SS, DC: CurClass))
    return ParsedType();

  // Find the injected-class-name declaration. Note that we make no attempt to
  // diagnose cases where the injected-class-name is shadowed: the only
  // declaration that can validly shadow the injected-class-name is a
  // non-static data member, and if the class contains both a non-static data
  // member and a constructor then it is ill-formed (we check that in
  // CheckCompletedCXXClass).
  CXXRecordDecl *InjectedClassName = nullptr;
  for (NamedDecl *ND : CurClass->lookup(Name: &II)) {
    auto *RD = dyn_cast<CXXRecordDecl>(Val: ND);
    if (RD && RD->isInjectedClassName()) {
      InjectedClassName = RD;
      break;
    }
  }
  if (!InjectedClassName) {
    if (!CurClass->isInvalidDecl()) {
      // FIXME: RequireCompleteDeclContext doesn't check dependent contexts
      // properly. Work around it here for now.
      Diag(Loc: SS.getLastQualifierNameLoc(),
           DiagID: diag::err_incomplete_nested_name_spec) << CurClass << SS.getRange();
    }
    return ParsedType();
  }

  // Form a tag type naming the injected-class-name, qualified as written.
  QualType T = Context.getTagType(Keyword: ElaboratedTypeKeyword::None, Qualifier: SS.getScopeRep(),
                                  TD: InjectedClassName, /*OwnsTag=*/false);
  return ParsedType::make(P: T);
}
118
ParsedType Sema::getDestructorName(const IdentifierInfo &II,
                                   SourceLocation NameLoc, Scope *S,
                                   CXXScopeSpec &SS, ParsedType ObjectTypePtr,
                                   bool EnteringContext) {
  // Resolve the type-name following '~' in a destructor name, searching the
  // scope, the nested-name-specifier (and its prefix), and the object type.
  // Determine where to perform name lookup.

  // FIXME: This area of the standard is very messy, and the current
  // wording is rather unclear about which scopes we search for the
  // destructor name; see core issues 399 and 555. Issue 399 in
  // particular shows where the current description of destructor name
  // lookup is completely out of line with existing practice, e.g.,
  // this appears to be ill-formed:
  //
  //   namespace N {
  //     template <typename T> struct S {
  //       ~S();
  //     };
  //   }
  //
  //   void f(N::S<int>* s) {
  //     s->N::S<int>::~S();
  //   }
  //
  // See also PR6358 and PR6359.
  //
  // For now, we accept all the cases in which the name given could plausibly
  // be interpreted as a correct destructor name, issuing off-by-default
  // extension diagnostics on the cases that don't strictly conform to the
  // C++20 rules. This basically means we always consider looking in the
  // nested-name-specifier prefix, the complete nested-name-specifier, and
  // the scope, and accept if we find the expected type in any of the three
  // places.

  if (SS.isInvalid())
    return nullptr;

  // Whether we've failed with a diagnostic already.
  bool Failed = false;

  // All candidate declarations found, in lookup order, for the "did you
  // mean" notes emitted when no acceptable type is found.
  llvm::SmallVector<NamedDecl*, 8> FoundDecls;
  // De-duplication set for FoundDecls (canonical decls).
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 8> FoundDeclSet;

  // If we have an object type, it's because we are in a
  // pseudo-destructor-expression or a member access expression, and
  // we know what type we're looking for.
  QualType SearchType =
      ObjectTypePtr ? GetTypeFromParser(Ty: ObjectTypePtr) : QualType();

  // Inspect one lookup result set: return the named type if it acceptably
  // matches SearchType (or there is no search type), recording every
  // candidate into FoundDecls along the way for later diagnostics.
  auto CheckLookupResult = [&](LookupResult &Found) -> ParsedType {
    // A result is acceptable if it is a type that matches the search type
    // (ignoring qualifiers), or if there is nothing to match against.
    auto IsAcceptableResult = [&](NamedDecl *D) -> bool {
      auto *Type = dyn_cast<TypeDecl>(Val: D->getUnderlyingDecl());
      if (!Type)
        return false;

      if (SearchType.isNull() || SearchType->isDependentType())
        return true;

      CanQualType T = Context.getCanonicalTypeDeclType(TD: Type);
      return Context.hasSameUnqualifiedType(T1: T, T2: SearchType);
    };

    unsigned NumAcceptableResults = 0;
    for (NamedDecl *D : Found) {
      if (IsAcceptableResult(D))
        ++NumAcceptableResults;

      // Don't list a class twice in the lookup failure diagnostic if it's
      // found by both its injected-class-name and by the name in the enclosing
      // scope.
      if (auto *RD = dyn_cast<CXXRecordDecl>(Val: D))
        if (RD->isInjectedClassName())
          D = cast<NamedDecl>(Val: RD->getParent());

      if (FoundDeclSet.insert(Ptr: D).second)
        FoundDecls.push_back(Elt: D);
    }

    // As an extension, attempt to "fix" an ambiguity by erasing all non-type
    // results, and all non-matching results if we have a search type. It's not
    // clear what the right behavior is if destructor lookup hits an ambiguity,
    // but other compilers do generally accept at least some kinds of
    // ambiguity.
    if (Found.isAmbiguous() && NumAcceptableResults == 1) {
      Diag(Loc: NameLoc, DiagID: diag::ext_dtor_name_ambiguous);
      LookupResult::Filter F = Found.makeFilter();
      while (F.hasNext()) {
        NamedDecl *D = F.next();
        if (auto *TD = dyn_cast<TypeDecl>(Val: D->getUnderlyingDecl()))
          Diag(Loc: D->getLocation(), DiagID: diag::note_destructor_type_here)
              << Context.getTypeDeclType(Keyword: ElaboratedTypeKeyword::None,
                                         /*Qualifier=*/std::nullopt, Decl: TD);
        else
          Diag(Loc: D->getLocation(), DiagID: diag::note_destructor_nontype_here);

        if (!IsAcceptableResult(D))
          F.erase();
      }
      F.done();
    }

    // A still-ambiguous lookup is a hard failure; LookupResult has already
    // diagnosed it.
    if (Found.isAmbiguous())
      Failed = true;

    if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
      if (IsAcceptableResult(Type)) {
        QualType T = Context.getTypeDeclType(Keyword: ElaboratedTypeKeyword::None,
                                             /*Qualifier=*/std::nullopt, Decl: Type);
        MarkAnyDeclReferenced(Loc: Type->getLocation(), D: Type, /*OdrUse=*/MightBeOdrUse: false);
        return CreateParsedType(T,
                                TInfo: Context.getTrivialTypeSourceInfo(T, Loc: NameLoc));
      }
    }

    return nullptr;
  };

  // Set if any of the scopes we looked in was dependent; in that case a
  // failed lookup still produces a dependent typename type below.
  bool IsDependent = false;

  // Look up the name in the class of the object expression (if any).
  auto LookupInObjectType = [&]() -> ParsedType {
    if (Failed || SearchType.isNull())
      return nullptr;

    IsDependent |= SearchType->isDependentType();

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    DeclContext *LookupCtx = computeDeclContext(T: SearchType);
    if (!LookupCtx)
      return nullptr;
    LookupQualifiedName(R&: Found, LookupCtx);
    return CheckLookupResult(Found);
  };

  // Look up the name in the context denoted by a nested-name-specifier.
  auto LookupInNestedNameSpec = [&](CXXScopeSpec &LookupSS) -> ParsedType {
    if (Failed)
      return nullptr;

    IsDependent |= isDependentScopeSpecifier(SS: LookupSS);
    DeclContext *LookupCtx = computeDeclContext(SS: LookupSS, EnteringContext);
    if (!LookupCtx)
      return nullptr;

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    if (RequireCompleteDeclContext(SS&: LookupSS, DC: LookupCtx)) {
      Failed = true;
      return nullptr;
    }
    LookupQualifiedName(R&: Found, LookupCtx);
    return CheckLookupResult(Found);
  };

  // Look up the name as an unqualified name in the lexical scope.
  auto LookupInScope = [&]() -> ParsedType {
    if (Failed || !S)
      return nullptr;

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    LookupName(R&: Found, S);
    return CheckLookupResult(Found);
  };

  // C++2a [basic.lookup.qual]p6:
  //   In a qualified-id of the form
  //
  //     nested-name-specifier[opt] type-name :: ~ type-name
  //
  //   the second type-name is looked up in the same scope as the first.
  //
  // We interpret this as meaning that if you do a dual-scope lookup for the
  // first name, you also do a dual-scope lookup for the second name, per
  // C++ [basic.lookup.classref]p4:
  //
  //   If the id-expression in a class member access is a qualified-id of the
  //   form
  //
  //     class-name-or-namespace-name :: ...
  //
  //   the class-name-or-namespace-name following the . or -> is first looked
  //   up in the class of the object expression and the name, if found, is used.
  //   Otherwise, it is looked up in the context of the entire
  //   postfix-expression.
  //
  // This looks in the same scopes as for an unqualified destructor name:
  //
  // C++ [basic.lookup.classref]p3:
  //   If the unqualified-id is ~ type-name, the type-name is looked up
  //   in the context of the entire postfix-expression. If the type T
  //   of the object expression is of a class type C, the type-name is
  //   also looked up in the scope of class C. At least one of the
  //   lookups shall find a name that refers to cv T.
  //
  // FIXME: The intent is unclear here. Should type-name::~type-name look in
  // the scope anyway if it finds a non-matching name declared in the class?
  // If both lookups succeed and find a dependent result, which result should
  // we retain? (Same question for p->~type-name().)

  // The prefix of the nested-name-specifier, i.e. everything before the
  // final 'type-name ::' component (empty if there is no such prefix).
  auto Prefix = [&]() -> NestedNameSpecifierLoc {
    NestedNameSpecifierLoc NNS = SS.getWithLocInContext(Context);
    if (!NNS)
      return NestedNameSpecifierLoc();
    if (auto TL = NNS.getAsTypeLoc())
      return TL.getPrefix();
    return NNS.getAsNamespaceAndPrefix().Prefix;
  }();

  if (Prefix) {
    // This is
    //
    //   nested-name-specifier type-name :: ~ type-name
    //
    // Look for the second type-name in the nested-name-specifier.
    CXXScopeSpec PrefixSS;
    PrefixSS.Adopt(Other: Prefix);
    if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
      return T;
  } else {
    // This is one of
    //
    //   type-name :: ~ type-name
    //   ~ type-name
    //
    // Look in the scope and (if any) the object type.
    if (ParsedType T = LookupInScope())
      return T;
    if (ParsedType T = LookupInObjectType())
      return T;
  }

  if (Failed)
    return nullptr;

  if (IsDependent) {
    // We didn't find our type, but that's OK: it's dependent anyway.

    // FIXME: What if we have no nested-name-specifier?
    TypeSourceInfo *TSI = nullptr;
    QualType T =
        CheckTypenameType(Keyword: ElaboratedTypeKeyword::None, KeywordLoc: SourceLocation(),
                          QualifierLoc: SS.getWithLocInContext(Context), II, IILoc: NameLoc, TSI: &TSI,
                          /*DeducedTSTContext=*/true);
    if (T.isNull())
      return ParsedType();
    return CreateParsedType(T, TInfo: TSI);
  }

  // The remaining cases are all non-standard extensions imitating the behavior
  // of various other compilers. Remember how many decls the conforming
  // lookups found so the extension lookups below don't pollute diagnostics.
  unsigned NumNonExtensionDecls = FoundDecls.size();

  if (SS.isSet()) {
    // For compatibility with older broken C++ rules and existing code,
    //
    //   nested-name-specifier :: ~ type-name
    //
    // also looks for type-name within the nested-name-specifier.
    if (ParsedType T = LookupInNestedNameSpec(SS)) {
      Diag(Loc: SS.getEndLoc(), DiagID: diag::ext_dtor_named_in_wrong_scope)
          << SS.getRange()
          << FixItHint::CreateInsertion(InsertionLoc: SS.getEndLoc(),
                                        Code: ("::" + II.getName()).str());
      return T;
    }

    // For compatibility with other compilers and older versions of Clang,
    //
    //   nested-name-specifier type-name :: ~ type-name
    //
    // also looks for type-name in the scope. Unfortunately, we can't
    // reasonably apply this fallback for dependent nested-name-specifiers.
    if (Prefix) {
      if (ParsedType T = LookupInScope()) {
        Diag(Loc: SS.getEndLoc(), DiagID: diag::ext_qualified_dtor_named_in_lexical_scope)
            << FixItHint::CreateRemoval(RemoveRange: SS.getRange());
        Diag(Loc: FoundDecls.back()->getLocation(), DiagID: diag::note_destructor_type_here)
            << GetTypeFromParser(Ty: T);
        return T;
      }
    }
  }

  // We didn't find anything matching; tell the user what we did find (if
  // anything).

  // Don't tell the user about declarations we shouldn't have found.
  FoundDecls.resize(N: NumNonExtensionDecls);

  // List types before non-types.
  llvm::stable_sort(Range&: FoundDecls, C: [](NamedDecl *A, NamedDecl *B) {
    return isa<TypeDecl>(Val: A->getUnderlyingDecl()) >
           isa<TypeDecl>(Val: B->getUnderlyingDecl());
  });

  // Suggest a fixit to properly name the destroyed type.
  auto MakeFixItHint = [&]{
    const CXXRecordDecl *Destroyed = nullptr;
    // FIXME: If we have a scope specifier, suggest its last component?
    if (!SearchType.isNull())
      Destroyed = SearchType->getAsCXXRecordDecl();
    else if (S)
      Destroyed = dyn_cast_or_null<CXXRecordDecl>(Val: S->getEntity());
    if (Destroyed)
      return FixItHint::CreateReplacement(RemoveRange: SourceRange(NameLoc),
                                          Code: Destroyed->getNameAsString());
    return FixItHint();
  };

  if (FoundDecls.empty()) {
    // FIXME: Attempt typo-correction?
    Diag(Loc: NameLoc, DiagID: diag::err_undeclared_destructor_name)
      << &II << MakeFixItHint();
  } else if (!SearchType.isNull() && FoundDecls.size() == 1) {
    if (auto *TD = dyn_cast<TypeDecl>(Val: FoundDecls[0]->getUnderlyingDecl())) {
      assert(!SearchType.isNull() &&
             "should only reject a type result if we have a search type");
      Diag(Loc: NameLoc, DiagID: diag::err_destructor_expr_type_mismatch)
          << Context.getTypeDeclType(Keyword: ElaboratedTypeKeyword::None,
                                     /*Qualifier=*/std::nullopt, Decl: TD)
          << SearchType << MakeFixItHint();
    } else {
      Diag(Loc: NameLoc, DiagID: diag::err_destructor_expr_nontype)
          << &II << MakeFixItHint();
    }
  } else {
    Diag(Loc: NameLoc, DiagID: SearchType.isNull() ? diag::err_destructor_name_nontype
                                   : diag::err_destructor_expr_mismatch)
        << &II << SearchType << MakeFixItHint();
  }

  // Note every candidate we rejected, types first.
  for (NamedDecl *FoundD : FoundDecls) {
    if (auto *TD = dyn_cast<TypeDecl>(Val: FoundD->getUnderlyingDecl()))
      Diag(Loc: FoundD->getLocation(), DiagID: diag::note_destructor_type_here)
          << Context.getTypeDeclType(Keyword: ElaboratedTypeKeyword::None,
                                     /*Qualifier=*/std::nullopt, Decl: TD);
    else
      Diag(Loc: FoundD->getLocation(), DiagID: diag::note_destructor_nontype_here)
          << FoundD;
  }

  return nullptr;
}
457
458ParsedType Sema::getDestructorTypeForDecltype(const DeclSpec &DS,
459 ParsedType ObjectType) {
460 if (DS.getTypeSpecType() == DeclSpec::TST_error)
461 return nullptr;
462
463 if (DS.getTypeSpecType() == DeclSpec::TST_decltype_auto) {
464 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_decltype_auto_invalid);
465 return nullptr;
466 }
467
468 assert(DS.getTypeSpecType() == DeclSpec::TST_decltype &&
469 "unexpected type in getDestructorType");
470 QualType T = BuildDecltypeType(E: DS.getRepAsExpr());
471
472 // If we know the type of the object, check that the correct destructor
473 // type was named now; we can give better diagnostics this way.
474 QualType SearchType = GetTypeFromParser(Ty: ObjectType);
475 if (!SearchType.isNull() && !SearchType->isDependentType() &&
476 !Context.hasSameUnqualifiedType(T1: T, T2: SearchType)) {
477 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_destructor_expr_type_mismatch)
478 << T << SearchType;
479 return nullptr;
480 }
481
482 return ParsedType::make(P: T);
483}
484
bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
                                  const UnqualifiedId &Name, bool IsUDSuffix) {
  // Validate the name of a literal operator. Returns true on error.
  assert(Name.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId);
  if (!IsUDSuffix) {
    // [over.literal] p8
    //
    // double operator""_Bq(long double);  // OK: not a reserved identifier
    // double operator"" _Bq(long double); // ill-formed, no diagnostic required
    const IdentifierInfo *II = Name.Identifier;
    ReservedIdentifierStatus Status = II->isReserved(LangOpts: PP.getLangOpts());
    SourceLocation Loc = Name.getEndLoc();

    // Fix-it: glue the suffix onto the quotes so it becomes a ud-suffix.
    auto Hint = FixItHint::CreateReplacement(
        RemoveRange: Name.getSourceRange(),
        Code: (StringRef("operator\"\"") + II->getName()).str());

    // Only emit this diagnostic if we start with an underscore, else the
    // diagnostic for C++11 requiring a space between the quotes and the
    // identifier conflicts with this and gets confusing. The diagnostic stating
    // this is a reserved name should force the underscore, which gets this
    // back.
    if (II->isReservedLiteralSuffixId() !=
        ReservedLiteralSuffixIdStatus::NotStartsWithUnderscore)
      Diag(Loc, DiagID: diag::warn_deprecated_literal_operator_id) << II << Hint;

    if (isReservedInAllContexts(Status))
      Diag(Loc, DiagID: diag::warn_reserved_extern_symbol)
          << II << static_cast<int>(Status) << Hint;
  }

  switch (SS.getScopeRep().getKind()) {
  case NestedNameSpecifier::Kind::Type:
    // Per C++11 [over.literal]p2, literal operators can only be declared at
    // namespace scope. Therefore, this unqualified-id cannot name anything.
    // Reject it early, because we have no AST representation for this in the
    // case where the scope is dependent.
    Diag(Loc: Name.getBeginLoc(), DiagID: diag::err_literal_operator_id_outside_namespace)
        << SS.getScopeRep();
    return true;

  case NestedNameSpecifier::Kind::Null:
  case NestedNameSpecifier::Kind::Global:
  case NestedNameSpecifier::Kind::MicrosoftSuper:
  case NestedNameSpecifier::Kind::Namespace:
    // These qualifiers can all denote (or contain) namespace scope, so the
    // name is plausible; further checking happens at declaration time.
    return false;
  }

  llvm_unreachable("unknown nested name specifier kind");
}
534
535ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
536 SourceLocation TypeidLoc,
537 TypeSourceInfo *Operand,
538 SourceLocation RParenLoc) {
539 // C++ [expr.typeid]p4:
540 // The top-level cv-qualifiers of the lvalue expression or the type-id
541 // that is the operand of typeid are always ignored.
542 // If the type of the type-id is a class type or a reference to a class
543 // type, the class shall be completely-defined.
544 Qualifiers Quals;
545 QualType T
546 = Context.getUnqualifiedArrayType(T: Operand->getType().getNonReferenceType(),
547 Quals);
548 if (T->isRecordType() &&
549 RequireCompleteType(Loc: TypeidLoc, T, DiagID: diag::err_incomplete_typeid))
550 return ExprError();
551
552 if (T->isVariablyModifiedType())
553 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_variably_modified_typeid) << T);
554
555 if (CheckQualifiedFunctionForTypeId(T, Loc: TypeidLoc))
556 return ExprError();
557
558 return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand,
559 SourceRange(TypeidLoc, RParenLoc));
560}
561
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
                                SourceLocation TypeidLoc,
                                Expr *E,
                                SourceLocation RParenLoc) {
  // Build typeid(expression). Tracks whether the operand ends up being
  // (potentially) evaluated, which only happens for glvalues of polymorphic
  // class type.
  bool WasEvaluated = false;
  if (E && !E->isTypeDependent()) {
    // Resolve placeholder types (e.g. overloaded function names) first.
    if (E->hasPlaceholderType()) {
      ExprResult result = CheckPlaceholderExpr(E);
      if (result.isInvalid()) return ExprError();
      E = result.get();
    }

    QualType T = E->getType();
    if (auto *RecordD = T->getAsCXXRecordDecl()) {
      // C++ [expr.typeid]p3:
      //   [...] If the type of the expression is a class type, the class
      //   shall be completely-defined.
      if (RequireCompleteType(Loc: TypeidLoc, T, DiagID: diag::err_incomplete_typeid))
        return ExprError();

      // C++ [expr.typeid]p3:
      //   When typeid is applied to an expression other than an glvalue of a
      //   polymorphic class type [...] [the] expression is an unevaluated
      //   operand. [...]
      if (RecordD->isPolymorphic() && E->isGLValue()) {
        if (isUnevaluatedContext()) {
          // The operand was processed in unevaluated context, switch the
          // context and recheck the subexpression.
          ExprResult Result = TransformToPotentiallyEvaluated(E);
          if (Result.isInvalid())
            return ExprError();
          E = Result.get();
        }

        // We require a vtable to query the type at run time.
        MarkVTableUsed(Loc: TypeidLoc, Class: RecordD);
        WasEvaluated = true;
      }
    }

    ExprResult Result = CheckUnevaluatedOperand(E);
    if (Result.isInvalid())
      return ExprError();
    E = Result.get();

    // C++ [expr.typeid]p4:
    //   [...] If the type of the type-id is a reference to a possibly
    //   cv-qualified type, the result of the typeid expression refers to a
    //   std::type_info object representing the cv-unqualified referenced
    //   type.
    Qualifiers Quals;
    QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
    if (!Context.hasSameType(T1: T, T2: UnqualT)) {
      // Strip qualifiers with a no-op cast so downstream sees the
      // cv-unqualified type.
      T = UnqualT;
      E = ImpCastExprToType(E, Type: UnqualT, CK: CK_NoOp, VK: E->getValueKind()).get();
    }
  }

  if (E->getType()->isVariablyModifiedType())
    return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_variably_modified_typeid)
                     << E->getType());
  else if (!inTemplateInstantiation() &&
           E->HasSideEffects(Ctx: Context, IncludePossibleEffects: WasEvaluated)) {
    // The expression operand for typeid is in an unevaluated expression
    // context, so side effects could result in unintended consequences.
    Diag(Loc: E->getExprLoc(), DiagID: WasEvaluated
                              ? diag::warn_side_effects_typeid
                              : diag::warn_side_effects_unevaluated_context);
  }

  return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E,
                                     SourceRange(TypeidLoc, RParenLoc));
}
635
/// ActOnCXXTypeidOfType - Parse typeid( type-id ) or typeid (expression);
ExprResult
Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                     bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
  // typeid is not supported in OpenCL.
  if (getLangOpts().OpenCLCPlusPlus) {
    return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_openclcxx_not_supported)
                     << "typeid");
  }

  // Find the std::type_info type.
  if (!getStdNamespace()) {
    return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_need_header_before_typeid)
                     << (getLangOpts().CPlusPlus20 ? 1 : 0));
  }

  // Lazily look up and cache std::type_info (CXXTypeInfoDecl) the first time
  // typeid is used.
  if (!CXXTypeInfoDecl) {
    IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get(Name: "type_info");
    LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
    LookupQualifiedName(R, LookupCtx: getStdNamespace());
    CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
    // Microsoft's typeinfo doesn't have type_info in std but in the global
    // namespace if _HAS_EXCEPTIONS is defined to 0. See PR13153.
    if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) {
      LookupQualifiedName(R, LookupCtx: Context.getTranslationUnitDecl());
      CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
    }
    if (!CXXTypeInfoDecl)
      return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_need_header_before_typeid)
                       << (getLangOpts().CPlusPlus20 ? 1 : 0));
  }

  // typeid requires RTTI to be enabled.
  if (!getLangOpts().RTTI) {
    return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_no_typeid_with_fno_rtti));
  }

  CanQualType TypeInfoType = Context.getCanonicalTagType(TD: CXXTypeInfoDecl);

  if (isType) {
    // The operand is a type; handle it as such.
    TypeSourceInfo *TInfo = nullptr;
    QualType T = GetTypeFromParser(Ty: ParsedType::getFromOpaquePtr(P: TyOrExpr),
                                   TInfo: &TInfo);
    if (T.isNull())
      return ExprError();

    if (!TInfo)
      TInfo = Context.getTrivialTypeSourceInfo(T, Loc: OpLoc);

    return BuildCXXTypeId(TypeInfoType, TypeidLoc: OpLoc, Operand: TInfo, RParenLoc);
  }

  // The operand is an expression.
  ExprResult Result =
      BuildCXXTypeId(TypeInfoType, TypeidLoc: OpLoc, E: (Expr *)TyOrExpr, RParenLoc);

  // With -fno-rtti-data, an evaluated typeid that needs the most-derived type
  // cannot work at run time; warn about it (MSVC-compatible behavior).
  if (!getLangOpts().RTTIData && !Result.isInvalid())
    if (auto *CTE = dyn_cast<CXXTypeidExpr>(Val: Result.get()))
      if (CTE->isPotentiallyEvaluated() && !CTE->isMostDerived(Context))
        Diag(Loc: OpLoc, DiagID: diag::warn_no_typeid_with_rtti_disabled)
            << (getDiagnostics().getDiagnosticOptions().getFormat() ==
                DiagnosticOptions::MSVC);
  return Result;
}
700
701/// Grabs __declspec(uuid()) off a type, or returns 0 if we cannot resolve to
702/// a single GUID.
703static void
704getUuidAttrOfType(Sema &SemaRef, QualType QT,
705 llvm::SmallSetVector<const UuidAttr *, 1> &UuidAttrs) {
706 // Optionally remove one level of pointer, reference or array indirection.
707 const Type *Ty = QT.getTypePtr();
708 if (QT->isPointerOrReferenceType())
709 Ty = QT->getPointeeType().getTypePtr();
710 else if (QT->isArrayType())
711 Ty = Ty->getBaseElementTypeUnsafe();
712
713 const auto *TD = Ty->getAsTagDecl();
714 if (!TD)
715 return;
716
717 if (const auto *Uuid = TD->getMostRecentDecl()->getAttr<UuidAttr>()) {
718 UuidAttrs.insert(X: Uuid);
719 return;
720 }
721
722 // __uuidof can grab UUIDs from template arguments.
723 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: TD)) {
724 const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
725 for (const TemplateArgument &TA : TAL.asArray()) {
726 const UuidAttr *UuidForTA = nullptr;
727 if (TA.getKind() == TemplateArgument::Type)
728 getUuidAttrOfType(SemaRef, QT: TA.getAsType(), UuidAttrs);
729 else if (TA.getKind() == TemplateArgument::Declaration)
730 getUuidAttrOfType(SemaRef, QT: TA.getAsDecl()->getType(), UuidAttrs);
731
732 if (UuidForTA)
733 UuidAttrs.insert(X: UuidForTA);
734 }
735 }
736}
737
738ExprResult Sema::BuildCXXUuidof(QualType Type,
739 SourceLocation TypeidLoc,
740 TypeSourceInfo *Operand,
741 SourceLocation RParenLoc) {
742 MSGuidDecl *Guid = nullptr;
743 if (!Operand->getType()->isDependentType()) {
744 llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
745 getUuidAttrOfType(SemaRef&: *this, QT: Operand->getType(), UuidAttrs);
746 if (UuidAttrs.empty())
747 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_without_guid));
748 if (UuidAttrs.size() > 1)
749 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_with_multiple_guids));
750 Guid = UuidAttrs.back()->getGuidDecl();
751 }
752
753 return new (Context)
754 CXXUuidofExpr(Type, Operand, Guid, SourceRange(TypeidLoc, RParenLoc));
755}
756
757ExprResult Sema::BuildCXXUuidof(QualType Type, SourceLocation TypeidLoc,
758 Expr *E, SourceLocation RParenLoc) {
759 MSGuidDecl *Guid = nullptr;
760 if (!E->getType()->isDependentType()) {
761 if (E->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
762 // A null pointer results in {00000000-0000-0000-0000-000000000000}.
763 Guid = Context.getMSGuidDecl(Parts: MSGuidDecl::Parts{});
764 } else {
765 llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
766 getUuidAttrOfType(SemaRef&: *this, QT: E->getType(), UuidAttrs);
767 if (UuidAttrs.empty())
768 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_without_guid));
769 if (UuidAttrs.size() > 1)
770 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_with_multiple_guids));
771 Guid = UuidAttrs.back()->getGuidDecl();
772 }
773 }
774
775 return new (Context)
776 CXXUuidofExpr(Type, E, Guid, SourceRange(TypeidLoc, RParenLoc));
777}
778
779/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
780ExprResult
781Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
782 bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
783 QualType GuidType = Context.getMSGuidType();
784 GuidType.addConst();
785
786 if (isType) {
787 // The operand is a type; handle it as such.
788 TypeSourceInfo *TInfo = nullptr;
789 QualType T = GetTypeFromParser(Ty: ParsedType::getFromOpaquePtr(P: TyOrExpr),
790 TInfo: &TInfo);
791 if (T.isNull())
792 return ExprError();
793
794 if (!TInfo)
795 TInfo = Context.getTrivialTypeSourceInfo(T, Loc: OpLoc);
796
797 return BuildCXXUuidof(Type: GuidType, TypeidLoc: OpLoc, Operand: TInfo, RParenLoc);
798 }
799
800 // The operand is an expression.
801 return BuildCXXUuidof(Type: GuidType, TypeidLoc: OpLoc, E: (Expr*)TyOrExpr, RParenLoc);
802}
803
804ExprResult
805Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
806 assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
807 "Unknown C++ Boolean value!");
808 return new (Context)
809 CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc);
810}
811
812ExprResult
813Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
814 return new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
815}
816
ExprResult
Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
  // Determine whether the thrown operand names a local variable whose scope
  // does not extend past the innermost enclosing try-block; if so, the
  // copy/move into the exception object may be elided (NRVO-like).
  bool IsThrownVarInScope = false;
  if (Ex) {
    // C++0x [class.copymove]p31:
    //   When certain criteria are met, an implementation is allowed to omit the
    //   copy/move construction of a class object [...]
    //
    //     - in a throw-expression, when the operand is the name of a
    //       non-volatile automatic object (other than a function or catch-
    //       clause parameter) whose scope does not extend beyond the end of the
    //       innermost enclosing try-block (if there is one), the copy/move
    //       operation from the operand to the exception object (15.1) can be
    //       omitted by constructing the automatic object directly into the
    //       exception object
    if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Ex->IgnoreParens()))
      if (const auto *Var = dyn_cast<VarDecl>(Val: DRE->getDecl());
          Var && Var->hasLocalStorage() &&
          !Var->getType().isVolatileQualified()) {
        // Walk outward from the current scope looking for the scope that
        // declares the variable; stop at function/try/etc. boundaries.
        for (; S; S = S->getParent()) {
          if (S->isDeclScope(D: Var)) {
            IsThrownVarInScope = true;
            break;
          }

          // FIXME: Many of the scope checks here seem incorrect.
          if (S->getFlags() &
              (Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
               Scope::ObjCMethodScope | Scope::TryScope))
            break;
        }
      }
  }

  return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
}
853
// Build a CXXThrowExpr for 'throw Ex' (or a rethrow when Ex is null).
// Diagnoses contexts where exceptions are unavailable (GPU offload targets,
// CUDA device code, OpenMP simd regions, OpenACC compute constructs) and
// initializes the exception object from the operand.
ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                               bool IsThrownVarInScope) {
  const llvm::Triple &T = Context.getTargetInfo().getTriple();
  const bool IsOpenMPGPUTarget =
      getLangOpts().OpenMPIsTargetDevice && T.isGPU();

  // General "exceptions are disabled/unsupported here" diagnostic.
  DiagnoseExceptionUse(Loc: OpLoc, /* IsTry= */ false);

  // In OpenMP target regions, we replace 'throw' with a trap on GPU targets.
  if (IsOpenMPGPUTarget)
    targetDiag(Loc: OpLoc, DiagID: diag::warn_throw_not_valid_on_target) << T.str();

  // Exceptions aren't allowed in CUDA device code.
  if (getLangOpts().CUDA)
    CUDA().DiagIfDeviceCode(Loc: OpLoc, DiagID: diag::err_cuda_device_exceptions)
        << "throw" << CUDA().CurrentTarget();

  // 'throw' may not appear inside an OpenMP simd region.
  if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
    Diag(Loc: OpLoc, DiagID: diag::err_omp_simd_region_cannot_use_stmt) << "throw";

  // Exceptions that escape a compute construct are ill-formed.
  if (getLangOpts().OpenACC && getCurScope() &&
      getCurScope()->isInOpenACCComputeConstructScope(Flags: Scope::TryScope))
    Diag(Loc: OpLoc, DiagID: diag::err_acc_branch_in_out_compute_construct)
        << /*throw*/ 2 << /*out of*/ 0;

  if (Ex && !Ex->isTypeDependent()) {
    // Initialize the exception result. This implicitly weeds out
    // abstract types or types with inaccessible copy constructors.

    // C++0x [class.copymove]p31:
    //   When certain criteria are met, an implementation is allowed to omit the
    //   copy/move construction of a class object [...]
    //
    //     - in a throw-expression, when the operand is the name of a
    //       non-volatile automatic object (other than a function or
    //       catch-clause
    //       parameter) whose scope does not extend beyond the end of the
    //       innermost enclosing try-block (if there is one), the copy/move
    //       operation from the operand to the exception object (15.1) can be
    //       omitted by constructing the automatic object directly into the
    //       exception object
    // Only consider NRVO-style initialization when ActOnCXXThrow verified
    // the operand's scope; otherwise use an empty NamedReturnInfo.
    NamedReturnInfo NRInfo =
        IsThrownVarInScope ? getNamedReturnInfo(E&: Ex) : NamedReturnInfo();

    // The exception object's type is the adjusted operand type.
    QualType ExceptionObjectTy = Context.getExceptionObjectType(T: Ex->getType());
    if (CheckCXXThrowOperand(ThrowLoc: OpLoc, ThrowTy: ExceptionObjectTy, E: Ex))
      return ExprError();

    InitializedEntity Entity =
        InitializedEntity::InitializeException(ThrowLoc: OpLoc, Type: ExceptionObjectTy);
    ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRInfo, Value: Ex);
    if (Res.isInvalid())
      return ExprError();
    Ex = Res.get();
  }

  // PPC MMA non-pointer types are not allowed as throw expr types.
  if (Ex && Context.getTargetInfo().getTriple().isPPC64())
    PPC().CheckPPCMMAType(Type: Ex->getType(), TypeLoc: Ex->getBeginLoc());

  return new (Context)
      CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
}
918
919static void
920collectPublicBases(CXXRecordDecl *RD,
921 llvm::DenseMap<CXXRecordDecl *, unsigned> &SubobjectsSeen,
922 llvm::SmallPtrSetImpl<CXXRecordDecl *> &VBases,
923 llvm::SetVector<CXXRecordDecl *> &PublicSubobjectsSeen,
924 bool ParentIsPublic) {
925 for (const CXXBaseSpecifier &BS : RD->bases()) {
926 CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
927 bool NewSubobject;
928 // Virtual bases constitute the same subobject. Non-virtual bases are
929 // always distinct subobjects.
930 if (BS.isVirtual())
931 NewSubobject = VBases.insert(Ptr: BaseDecl).second;
932 else
933 NewSubobject = true;
934
935 if (NewSubobject)
936 ++SubobjectsSeen[BaseDecl];
937
938 // Only add subobjects which have public access throughout the entire chain.
939 bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public;
940 if (PublicPath)
941 PublicSubobjectsSeen.insert(X: BaseDecl);
942
943 // Recurse on to each base subobject.
944 collectPublicBases(RD: BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen,
945 ParentIsPublic: PublicPath);
946 }
947}
948
949static void getUnambiguousPublicSubobjects(
950 CXXRecordDecl *RD, llvm::SmallVectorImpl<CXXRecordDecl *> &Objects) {
951 llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen;
952 llvm::SmallPtrSet<CXXRecordDecl *, 2> VBases;
953 llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen;
954 SubobjectsSeen[RD] = 1;
955 PublicSubobjectsSeen.insert(X: RD);
956 collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen,
957 /*ParentIsPublic=*/true);
958
959 for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) {
960 // Skip ambiguous objects.
961 if (SubobjectsSeen[PublicSubobject] > 1)
962 continue;
963
964 Objects.push_back(Elt: PublicSubobject);
965 }
966}
967
// Perform the semantic checks on a throw operand given the exception
// object's type: completeness, non-abstractness, destructor/copy-constructor
// usability, ABI-specific bookkeeping, and alignment. Returns true on error.
bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
                                QualType ExceptionObjectTy, Expr *E) {
  // If the type of the exception would be an incomplete type or a pointer
  // to an incomplete type other than (cv) void the program is ill-formed.
  QualType Ty = ExceptionObjectTy;
  bool isPointer = false;
  if (const PointerType* Ptr = Ty->getAs<PointerType>()) {
    // For a thrown pointer, the completeness rules apply to the pointee.
    Ty = Ptr->getPointeeType();
    isPointer = true;
  }

  // Cannot throw WebAssembly reference type.
  if (Ty.isWebAssemblyReferenceType()) {
    Diag(Loc: ThrowLoc, DiagID: diag::err_wasm_reftype_tc) << 0 << E->getSourceRange();
    return true;
  }

  // Cannot throw WebAssembly table.
  // NOTE(review): this branch looks unreachable -- the check above already
  // returns for any WebAssembly reference type regardless of isPointer.
  // Confirm whether the first check was meant to exclude pointer operands.
  if (isPointer && Ty.isWebAssemblyReferenceType()) {
    Diag(Loc: ThrowLoc, DiagID: diag::err_wasm_table_art) << 2 << E->getSourceRange();
    return true;
  }

  // 'throw (void*)p' with p of type 'cv void*' is permitted even though the
  // pointee is incomplete; everything else must be a complete type.
  if (!isPointer || !Ty->isVoidType()) {
    if (RequireCompleteType(Loc: ThrowLoc, T: Ty,
                            DiagID: isPointer ? diag::err_throw_incomplete_ptr
                                      : diag::err_throw_incomplete,
                            Args: E->getSourceRange()))
      return true;

    // Sizeless types (e.g. SVE vectors) cannot be copied into the exception
    // object.
    if (!isPointer && Ty->isSizelessType()) {
      Diag(Loc: ThrowLoc, DiagID: diag::err_throw_sizeless) << Ty << E->getSourceRange();
      return true;
    }

    if (RequireNonAbstractType(Loc: ThrowLoc, T: ExceptionObjectTy,
                               DiagID: diag::err_throw_abstract_type, Args: E))
      return true;
  }

  // If the exception has class type, we need additional handling.
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If we are throwing a polymorphic class type or pointer thereof,
  // exception handling will make use of the vtable.
  MarkVTableUsed(Loc: ThrowLoc, Class: RD);

  // If a pointer is thrown, the referenced object will not be destroyed.
  if (isPointer)
    return false;

  // If the class has a destructor, we must be able to call it.
  if (!RD->hasIrrelevantDestructor()) {
    if (CXXDestructorDecl *Destructor = LookupDestructor(Class: RD)) {
      // The runtime destroys the exception object, so the destructor must be
      // referenced, accessible, and not deleted/unavailable.
      MarkFunctionReferenced(Loc: E->getExprLoc(), Func: Destructor);
      CheckDestructorAccess(Loc: E->getExprLoc(), Dtor: Destructor,
                            PDiag: PDiag(DiagID: diag::err_access_dtor_exception) << Ty);
      if (DiagnoseUseOfDecl(D: Destructor, Locs: E->getExprLoc()))
        return true;
    }
  }

  // The MSVC ABI creates a list of all types which can catch the exception
  // object. This list also references the appropriate copy constructor to call
  // if the object is caught by value and has a non-trivial copy constructor.
  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
    // We are only interested in the public, unambiguous bases contained within
    // the exception object. Bases which are ambiguous or otherwise
    // inaccessible are not catchable types.
    llvm::SmallVector<CXXRecordDecl *, 2> UnambiguousPublicSubobjects;
    getUnambiguousPublicSubobjects(RD, Objects&: UnambiguousPublicSubobjects);

    for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) {
      // Attempt to lookup the copy constructor. Various pieces of machinery
      // will spring into action, like template instantiation, which means this
      // cannot be a simple walk of the class's decls. Instead, we must perform
      // lookup and overload resolution.
      CXXConstructorDecl *CD = LookupCopyingConstructor(Class: Subobject, Quals: 0);
      if (!CD || CD->isDeleted())
        continue;

      // Mark the constructor referenced as it is used by this throw expression.
      MarkFunctionReferenced(Loc: E->getExprLoc(), Func: CD);

      // Skip this copy constructor if it is trivial, we don't need to record it
      // in the catchable type data.
      if (CD->isTrivial())
        continue;

      // The copy constructor is non-trivial, create a mapping from this class
      // type to this constructor.
      // N.B.  The selection of copy constructor is not sensitive to this
      // particular throw-site. Lookup will be performed at the catch-site to
      // ensure that the copy constructor is, in fact, accessible (via
      // friendship or any other means).
      Context.addCopyConstructorForExceptionObject(RD: Subobject, CD);

      // We don't keep the instantiated default argument expressions around so
      // we must rebuild them here.
      // Index starts at 1: parameter 0 is the object being copied; only the
      // trailing defaulted parameters need their default args instantiated.
      for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) {
        if (CheckCXXDefaultArgExpr(CallLoc: ThrowLoc, FD: CD, Param: CD->getParamDecl(i: I)))
          return true;
      }
    }
  }

  // Under the Itanium C++ ABI, memory for the exception object is allocated by
  // the runtime with no ability for the compiler to request additional
  // alignment. Warn if the exception type requires alignment beyond the minimum
  // guaranteed by the target C++ runtime.
  if (Context.getTargetInfo().getCXXABI().isItaniumFamily()) {
    CharUnits TypeAlign = Context.getTypeAlignInChars(T: Ty);
    CharUnits ExnObjAlign = Context.getExnObjectAlignment();
    if (ExnObjAlign < TypeAlign) {
      Diag(Loc: ThrowLoc, DiagID: diag::warn_throw_underaligned_obj);
      Diag(Loc: ThrowLoc, DiagID: diag::note_throw_underaligned_obj)
          << Ty << (unsigned)TypeAlign.getQuantity()
          << (unsigned)ExnObjAlign.getQuantity();
    }
  }
  // Under -fassume-nothrow-exception-dtor, the thrown object's destructor
  // must be known not to throw; otherwise the assumption would be unsound.
  if (!isPointer && getLangOpts().AssumeNothrowExceptionDtor) {
    if (CXXDestructorDecl *Dtor = RD->getDestructor()) {
      auto Ty = Dtor->getType();
      if (auto *FT = Ty.getTypePtr()->getAs<FunctionProtoType>()) {
        // An unresolved exception spec (e.g. not yet instantiated) is not
        // diagnosed here.
        if (!isUnresolvedExceptionSpec(ESpecType: FT->getExceptionSpecType()) &&
            !FT->isNothrow())
          Diag(Loc: ThrowLoc, DiagID: diag::err_throw_object_throwing_dtor) << RD;
      }
    }
  }

  return false;
}
1103
// Given the type of 'this' (\p ThisTy, a pointer type) inside a lambda call
// operator, add 'const' to the pointee if any enclosing lambda captures
// '*this' by copy through a const call operator. Returns the (possibly
// adjusted) pointer type, or \p ThisTy unchanged.
static QualType adjustCVQualifiersForCXXThisWithinLambda(
    ArrayRef<FunctionScopeInfo *> FunctionScopes, QualType ThisTy,
    DeclContext *CurSemaContext, ASTContext &ASTCtx) {

  QualType ClassType = ThisTy->getPointeeType();
  LambdaScopeInfo *CurLSI = nullptr;
  DeclContext *CurDC = CurSemaContext;

  // Iterate through the stack of lambdas starting from the innermost lambda to
  // the outermost lambda, checking if '*this' is ever captured by copy - since
  // that could change the cv-qualifiers of the '*this' object.
  // The object referred to by '*this' starts out with the cv-qualifiers of its
  // member function.  We then start with the innermost lambda and iterate
  // outward checking to see if any lambda performs a by-copy capture of '*this'
  // - and if so, any nested lambda must respect the 'constness' of that
  // capturing lambda's call operator.
  //

  // Since the FunctionScopeInfo stack is representative of the lexical
  // nesting of the lambda expressions during initial parsing (and is the best
  // place for querying information about captures about lambdas that are
  // partially processed) and perhaps during instantiation of function templates
  // that contain lambda expressions that need to be transformed BUT not
  // necessarily during instantiation of a nested generic lambda's function call
  // operator (which might even be instantiated at the end of the TU) - at which
  // time the DeclContext tree is mature enough to query capture information
  // reliably - we use a two pronged approach to walk through all the lexically
  // enclosing lambda expressions:
  //
  //  1) Climb down the FunctionScopeInfo stack as long as each item represents
  //  a Lambda (i.e. LambdaScopeInfo) AND each LSI's 'closure-type' is lexically
  //  enclosed by the call-operator of the LSI below it on the stack (while
  //  tracking the enclosing DC for step 2 if needed).  Note the topmost LSI on
  //  the stack represents the innermost lambda.
  //
  //  2) If we run out of enclosing LSI's, check if the enclosing DeclContext
  //  represents a lambda's call operator.  If it does, we must be instantiating
  //  a generic lambda's call operator (represented by the Current LSI, and
  //  should be the only scenario where an inconsistency between the LSI and the
  //  DeclContext should occur), so climb out the DeclContexts if they
  //  represent lambdas, while querying the corresponding closure types
  //  regarding capture information.

  // 1) Climb down the function scope info stack.
  for (int I = FunctionScopes.size();
       I-- && isa<LambdaScopeInfo>(Val: FunctionScopes[I]) &&
       (!CurLSI || !CurLSI->Lambda || CurLSI->Lambda->getDeclContext() ==
                       cast<LambdaScopeInfo>(Val: FunctionScopes[I])->CallOperator);
       CurDC = getLambdaAwareParentOfDeclContext(DC: CurDC)) {
    CurLSI = cast<LambdaScopeInfo>(Val: FunctionScopes[I]);

    if (!CurLSI->isCXXThisCaptured())
      continue;

    auto C = CurLSI->getCXXThisCapture();

    // A by-copy capture of '*this' fixes the qualifiers here: const iff the
    // capturing lambda's call operator is const.
    if (C.isCopyCapture()) {
      if (CurLSI->lambdaCaptureShouldBeConst())
        ClassType.addConst();
      return ASTCtx.getPointerType(T: ClassType);
    }
  }

  // 2) We've run out of ScopeInfos but check 1. if CurDC is a lambda (which
  // can happen during instantiation of its nested generic lambda call
  // operator); 2. if we're in a lambda scope (lambda body).
  if (CurLSI && isLambdaCallOperator(DC: CurDC)) {
    assert(isGenericLambdaCallOperatorSpecialization(CurLSI->CallOperator) &&
           "While computing 'this' capture-type for a generic lambda, when we "
           "run out of enclosing LSI's, yet the enclosing DC is a "
           "lambda-call-operator we must be (i.e. Current LSI) in a generic "
           "lambda call oeprator");
    assert(CurDC == getLambdaAwareParentOfDeclContext(CurLSI->CallOperator));

    // Query the closure type directly (the DeclContext tree is mature here):
    // does it capture 'this'/'*this', and if so, by copy and const-ly?
    auto IsThisCaptured =
        [](CXXRecordDecl *Closure, bool &IsByCopy, bool &IsConst) {
          IsConst = false;
          IsByCopy = false;
          for (auto &&C : Closure->captures()) {
            if (C.capturesThis()) {
              if (C.getCaptureKind() == LCK_StarThis)
                IsByCopy = true;
              if (Closure->getLambdaCallOperator()->isConst())
                IsConst = true;
              return true;
            }
          }
          return false;
        };

    bool IsByCopyCapture = false;
    bool IsConstCapture = false;
    // Climb outward through enclosing closure types as long as each captures
    // 'this', stopping at the first by-copy capture.
    CXXRecordDecl *Closure = cast<CXXRecordDecl>(Val: CurDC->getParent());
    while (Closure &&
           IsThisCaptured(Closure, IsByCopyCapture, IsConstCapture)) {
      if (IsByCopyCapture) {
        if (IsConstCapture)
          ClassType.addConst();
        return ASTCtx.getPointerType(T: ClassType);
      }
      Closure = isLambdaCallOperator(DC: Closure->getParent())
                    ? cast<CXXRecordDecl>(Val: Closure->getParent()->getParent())
                    : nullptr;
    }
  }
  // No by-copy capture of '*this' anywhere: qualifiers are unchanged.
  return ThisTy;
}
1211
// Compute the type of 'this' at the current point of translation, or a null
// QualType when 'this' is unavailable here.
QualType Sema::getCurrentThisType() {
  DeclContext *DC = getFunctionLevelDeclContext();
  // Start from any explicit override installed by CXXThisScopeRAII (used
  // when processing declarators outside a member function body).
  QualType ThisTy = CXXThisTypeOverride;

  // Inside an implicit-object member function, 'this' has the method's
  // 'this' type.
  if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(Val: DC)) {
    if (method && method->isImplicitObjectMemberFunction())
      ThisTy = method->getThisType().getNonReferenceType();
  }

  if (ThisTy.isNull() && isLambdaCallWithImplicitObjectParameter(DC: CurContext) &&
      inTemplateInstantiation() && isa<CXXRecordDecl>(Val: DC)) {

    // This is a lambda call operator that is being instantiated as a default
    // initializer. DC must point to the enclosing class type, so we can recover
    // the 'this' type from it.
    CanQualType ClassTy = Context.getCanonicalTagType(TD: cast<CXXRecordDecl>(Val: DC));
    // There are no cv-qualifiers for 'this' within default initializers,
    // per [expr.prim.general]p4.
    ThisTy = Context.getPointerType(T: ClassTy);
  }

  // If we are within a lambda's call operator, the cv-qualifiers of 'this'
  // might need to be adjusted if the lambda or any of its enclosing lambda's
  // captures '*this' by copy.
  if (!ThisTy.isNull() && isLambdaCallOperator(DC: CurContext))
    return adjustCVQualifiersForCXXThisWithinLambda(FunctionScopes, ThisTy,
                                                    CurSemaContext: CurContext, ASTCtx&: Context);
  return ThisTy;
}
1241
1242Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
1243 Decl *ContextDecl,
1244 Qualifiers CXXThisTypeQuals,
1245 bool Enabled)
1246 : S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
1247{
1248 if (!Enabled || !ContextDecl)
1249 return;
1250
1251 CXXRecordDecl *Record = nullptr;
1252 if (ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(Val: ContextDecl))
1253 Record = Template->getTemplatedDecl();
1254 else
1255 Record = cast<CXXRecordDecl>(Val: ContextDecl);
1256
1257 // 'this' never refers to the lambda class itself.
1258 if (Record->isLambda())
1259 return;
1260
1261 QualType T = S.Context.getCanonicalTagType(TD: Record);
1262 T = S.getASTContext().getQualifiedType(T, Qs: CXXThisTypeQuals);
1263
1264 S.CXXThisTypeOverride =
1265 S.Context.getLangOpts().HLSL ? T : S.Context.getPointerType(T);
1266
1267 this->Enabled = true;
1268}
1269
1270
1271Sema::CXXThisScopeRAII::~CXXThisScopeRAII() {
1272 if (Enabled) {
1273 S.CXXThisTypeOverride = OldCXXThisTypeOverride;
1274 }
1275}
1276
1277static void buildLambdaThisCaptureFixit(Sema &Sema, LambdaScopeInfo *LSI) {
1278 SourceLocation DiagLoc = LSI->IntroducerRange.getEnd();
1279 assert(!LSI->isCXXThisCaptured());
1280 // [=, this] {}; // until C++20: Error: this when = is the default
1281 if (LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval &&
1282 !Sema.getLangOpts().CPlusPlus20)
1283 return;
1284 Sema.Diag(Loc: DiagLoc, DiagID: diag::note_lambda_this_capture_fixit)
1285 << FixItHint::CreateInsertion(
1286 InsertionLoc: DiagLoc, Code: LSI->NumExplicitCaptures > 0 ? ", this" : "this");
1287}
1288
// Check whether the enclosing object ('*this') can be captured by the
// closure at FunctionScopeIndexToStopAt (or the innermost capturing scope
// when null), and - if BuildAndDiagnose - actually record the capture in
// that closure and every intervening one. Returns true if the capture is
// not possible; when BuildAndDiagnose is false this is a pure query and no
// captures are recorded (and ideally no diagnostics are emitted).
bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
    bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt,
    const bool ByCopy) {
  // We don't need to capture this in an unevaluated context.
  if (isUnevaluatedContext() && !Explicit)
    return true;

  assert((!ByCopy || Explicit) && "cannot implicitly capture *this by value");

  const int MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
                                         ? *FunctionScopeIndexToStopAt
                                         : FunctionScopes.size() - 1;

  // Check that we can capture the *enclosing object* (referred to by '*this')
  // by the capturing-entity/closure (lambda/block/etc) at
  // MaxFunctionScopesIndex-deep on the FunctionScopes stack.

  // Note: The *enclosing object* can only be captured by-value by a
  // closure that is a lambda, using the explicit notation:
  //    [*this] { ... }.
  // Every other capture of the *enclosing object* results in its by-reference
  // capture.

  // For a closure 'L' (at MaxFunctionScopesIndex in the FunctionScopes
  // stack), we can capture the *enclosing object* only if:
  // - 'L' has an explicit byref or byval capture of the *enclosing object*
  // -  or, 'L' has an implicit capture.
  // AND
  //   -- there is no enclosing closure
  //   -- or, there is some enclosing closure 'E' that has already captured the
  //      *enclosing object*, and every intervening closure (if any) between 'E'
  //      and 'L' can implicitly capture the *enclosing object*.
  //   -- or, every enclosing closure can implicitly capture the
  //      *enclosing object*


  // First pass: walk outward from the requesting closure, verifying that
  // every intervening closure may capture '*this' and counting how many
  // closures still need a capture recorded.
  unsigned NumCapturingClosures = 0;
  for (int idx = MaxFunctionScopesIndex; idx >= 0; idx--) {
    if (CapturingScopeInfo *CSI =
            dyn_cast<CapturingScopeInfo>(Val: FunctionScopes[idx])) {
      if (CSI->CXXThisCaptureIndex != 0) {
        // 'this' is already being captured; there isn't anything more to do.
        CSI->Captures[CSI->CXXThisCaptureIndex - 1].markUsed(IsODRUse: BuildAndDiagnose);
        break;
      }
      LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(Val: CSI);
      if (LSI && isGenericLambdaCallOperatorSpecialization(MD: LSI->CallOperator)) {
        // This context can't implicitly capture 'this'; fail out.
        // (A generic lambda call operator specialization cannot introduce
        // new captures during instantiation.)
        if (BuildAndDiagnose) {
          LSI->CallOperator->setInvalidDecl();
          Diag(Loc, DiagID: diag::err_this_capture)
              << (Explicit && idx == MaxFunctionScopesIndex);
          if (!Explicit)
            buildLambdaThisCaptureFixit(Sema&: *this, LSI);
        }
        return true;
      }
      if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
          CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval ||
          CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block ||
          CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_CapturedRegion ||
          (Explicit && idx == MaxFunctionScopesIndex)) {
        // Regarding (Explicit && idx == MaxFunctionScopesIndex): only the first
        // iteration through can be an explicit capture, all enclosing closures,
        // if any, must perform implicit captures.

        // This closure can capture 'this'; continue looking upwards.
        NumCapturingClosures++;
        continue;
      }
      // This context can't implicitly capture 'this'; fail out.
      // NOTE(review): LSI may be null here if this scope is not a lambda;
      // presumably only a lambda without a capture-default can reach this
      // point, but LSI is dereferenced unconditionally below -- confirm.
      if (BuildAndDiagnose) {
        LSI->CallOperator->setInvalidDecl();
        Diag(Loc, DiagID: diag::err_this_capture)
            << (Explicit && idx == MaxFunctionScopesIndex);
      }
      // NOTE(review): unlike the generic-lambda branch above, this fix-it
      // note is emitted even when BuildAndDiagnose is false -- verify the
      // asymmetry is intentional.
      if (!Explicit)
        buildLambdaThisCaptureFixit(Sema&: *this, LSI);
      return true;
    }
    break;
  }
  if (!BuildAndDiagnose) return false;

  // If we got here, then the closure at MaxFunctionScopesIndex on the
  // FunctionScopes stack, can capture the *enclosing object*, so capture it
  // (including implicit by-reference captures in any enclosing closures).

  // In the loop below, respect the ByCopy flag only for the closure requesting
  // the capture (i.e. first iteration through the loop below).  Ignore it for
  // all enclosing closure's up to NumCapturingClosures (since they must be
  // implicitly capturing the *enclosing  object* by reference (see loop
  // above)).
  assert((!ByCopy ||
          isa<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
         "Only a lambda can capture the enclosing object (referred to by "
         "*this) by copy");
  QualType ThisTy = getCurrentThisType();
  for (int idx = MaxFunctionScopesIndex; NumCapturingClosures;
       --idx, --NumCapturingClosures) {
    CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(Val: FunctionScopes[idx]);

    // The type of the corresponding data member (not a 'this' pointer if 'by
    // copy').
    QualType CaptureType = ByCopy ? ThisTy->getPointeeType() : ThisTy;

    bool isNested = NumCapturingClosures > 1;
    CSI->addThisCapture(isNested, Loc, CaptureType, ByCopy);
  }
  return false;
}
1400
1401ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
1402 // C++20 [expr.prim.this]p1:
1403 // The keyword this names a pointer to the object for which an
1404 // implicit object member function is invoked or a non-static
1405 // data member's initializer is evaluated.
1406 QualType ThisTy = getCurrentThisType();
1407
1408 if (CheckCXXThisType(Loc, Type: ThisTy))
1409 return ExprError();
1410
1411 return BuildCXXThisExpr(Loc, Type: ThisTy, /*IsImplicit=*/false);
1412}
1413
1414bool Sema::CheckCXXThisType(SourceLocation Loc, QualType Type) {
1415 if (!Type.isNull())
1416 return false;
1417
1418 // C++20 [expr.prim.this]p3:
1419 // If a declaration declares a member function or member function template
1420 // of a class X, the expression this is a prvalue of type
1421 // "pointer to cv-qualifier-seq X" wherever X is the current class between
1422 // the optional cv-qualifier-seq and the end of the function-definition,
1423 // member-declarator, or declarator. It shall not appear within the
1424 // declaration of either a static member function or an explicit object
1425 // member function of the current class (although its type and value
1426 // category are defined within such member functions as they are within
1427 // an implicit object member function).
1428 DeclContext *DC = getFunctionLevelDeclContext();
1429 const auto *Method = dyn_cast<CXXMethodDecl>(Val: DC);
1430 if (Method && Method->isExplicitObjectMemberFunction()) {
1431 Diag(Loc, DiagID: diag::err_invalid_this_use) << 1;
1432 } else if (Method && isLambdaCallWithExplicitObjectParameter(DC: CurContext)) {
1433 Diag(Loc, DiagID: diag::err_invalid_this_use) << 1;
1434 } else {
1435 Diag(Loc, DiagID: diag::err_invalid_this_use) << 0;
1436 }
1437 return true;
1438}
1439
1440Expr *Sema::BuildCXXThisExpr(SourceLocation Loc, QualType Type,
1441 bool IsImplicit) {
1442 auto *This = CXXThisExpr::Create(Ctx: Context, L: Loc, Ty: Type, IsImplicit);
1443 MarkThisReferenced(This);
1444 return This;
1445}
1446
// Record a use of a CXXThisExpr: trigger the capture of 'this' where
// required and compute whether the expression must additionally be treated
// as type-dependent because it is captured by value in a lambda whose
// explicit object parameter has a dependent type.
void Sema::MarkThisReferenced(CXXThisExpr *This) {
  CheckCXXThisCapture(Loc: This->getExprLoc());
  if (This->isTypeDependent())
    return;

  // Check if 'this' is captured by value in a lambda with a dependent explicit
  // object parameter, and mark it as type-dependent as well if so.
  auto IsDependent = [&]() {
    // Walk innermost-to-outermost over the enclosing lambda scopes.
    for (auto *Scope : llvm::reverse(C&: FunctionScopes)) {
      auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Val: Scope);
      if (!LSI)
        continue;

      // Stop at a fully-parsed lambda that does not lexically enclose the
      // current context; its captures cannot affect this expression.
      if (LSI->Lambda && !LSI->Lambda->Encloses(DC: CurContext) &&
          LSI->AfterParameterList)
        return false;

      // If this lambda captures 'this' by value, then 'this' is dependent iff
      // this lambda has a dependent explicit object parameter. If we can't
      // determine whether it does (e.g. because the CXXMethodDecl's type is
      // null), assume it doesn't.
      if (LSI->isCXXThisCaptured()) {
        if (!LSI->getCXXThisCapture().isCopyCapture())
          continue;

        const auto *MD = LSI->CallOperator;
        if (MD->getType().isNull())
          return false;

        const auto *Ty = MD->getType()->getAs<FunctionProtoType>();
        return Ty && MD->isExplicitObjectMemberFunction() &&
               Ty->getParamType(i: 0)->isDependentType();
      }
    }
    return false;
  }();

  This->setCapturedByCopyInLambdaWithExplicitObjectParameter(IsDependent);
}
1486
1487bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
1488 // If we're outside the body of a member function, then we'll have a specified
1489 // type for 'this'.
1490 if (CXXThisTypeOverride.isNull())
1491 return false;
1492
1493 // Determine whether we're looking into a class that's currently being
1494 // defined.
1495 CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl();
1496 return Class && Class->isBeingDefined();
1497}
1498
1499ExprResult
1500Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
1501 SourceLocation LParenOrBraceLoc,
1502 MultiExprArg exprs,
1503 SourceLocation RParenOrBraceLoc,
1504 bool ListInitialization) {
1505 if (!TypeRep)
1506 return ExprError();
1507
1508 TypeSourceInfo *TInfo;
1509 QualType Ty = GetTypeFromParser(Ty: TypeRep, TInfo: &TInfo);
1510 if (!TInfo)
1511 TInfo = Context.getTrivialTypeSourceInfo(T: Ty, Loc: SourceLocation());
1512
1513 auto Result = BuildCXXTypeConstructExpr(Type: TInfo, LParenLoc: LParenOrBraceLoc, Exprs: exprs,
1514 RParenLoc: RParenOrBraceLoc, ListInitialization);
1515 if (Result.isInvalid())
1516 Result = CreateRecoveryExpr(Begin: TInfo->getTypeLoc().getBeginLoc(),
1517 End: RParenOrBraceLoc, SubExprs: exprs, T: Ty);
1518 return Result;
1519}
1520
1521ExprResult
1522Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
1523 SourceLocation LParenOrBraceLoc,
1524 MultiExprArg Exprs,
1525 SourceLocation RParenOrBraceLoc,
1526 bool ListInitialization) {
1527 QualType Ty = TInfo->getType();
1528 SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
1529 SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
1530
1531 InitializedEntity Entity =
1532 InitializedEntity::InitializeTemporary(Context, TypeInfo: TInfo);
1533 InitializationKind Kind =
1534 Exprs.size()
1535 ? ListInitialization
1536 ? InitializationKind::CreateDirectList(
1537 InitLoc: TyBeginLoc, LBraceLoc: LParenOrBraceLoc, RBraceLoc: RParenOrBraceLoc)
1538 : InitializationKind::CreateDirect(InitLoc: TyBeginLoc, LParenLoc: LParenOrBraceLoc,
1539 RParenLoc: RParenOrBraceLoc)
1540 : InitializationKind::CreateValue(InitLoc: TyBeginLoc, LParenLoc: LParenOrBraceLoc,
1541 RParenLoc: RParenOrBraceLoc);
1542
1543 // C++17 [expr.type.conv]p1:
1544 // If the type is a placeholder for a deduced class type, [...perform class
1545 // template argument deduction...]
1546 // C++23:
1547 // Otherwise, if the type contains a placeholder type, it is replaced by the
1548 // type determined by placeholder type deduction.
1549 DeducedType *Deduced = Ty->getContainedDeducedType();
1550 if (Deduced && !Deduced->isDeduced() &&
1551 isa<DeducedTemplateSpecializationType>(Val: Deduced)) {
1552 Ty = DeduceTemplateSpecializationFromInitializer(TInfo, Entity,
1553 Kind, Init: Exprs);
1554 if (Ty.isNull())
1555 return ExprError();
1556 Entity = InitializedEntity::InitializeTemporary(TypeInfo: TInfo, Type: Ty);
1557 } else if (Deduced && !Deduced->isDeduced()) {
1558 MultiExprArg Inits = Exprs;
1559 if (ListInitialization) {
1560 auto *ILE = cast<InitListExpr>(Val: Exprs[0]);
1561 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
1562 }
1563
1564 if (Inits.empty())
1565 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_auto_expr_init_no_expression)
1566 << Ty << FullRange);
1567 if (Inits.size() > 1) {
1568 Expr *FirstBad = Inits[1];
1569 return ExprError(Diag(Loc: FirstBad->getBeginLoc(),
1570 DiagID: diag::err_auto_expr_init_multiple_expressions)
1571 << Ty << FullRange);
1572 }
1573 if (getLangOpts().CPlusPlus23) {
1574 if (Ty->getAs<AutoType>())
1575 Diag(Loc: TyBeginLoc, DiagID: diag::warn_cxx20_compat_auto_expr) << FullRange;
1576 }
1577 Expr *Deduce = Inits[0];
1578 if (isa<InitListExpr>(Val: Deduce))
1579 return ExprError(
1580 Diag(Loc: Deduce->getBeginLoc(), DiagID: diag::err_auto_expr_init_paren_braces)
1581 << ListInitialization << Ty << FullRange);
1582 QualType DeducedType;
1583 TemplateDeductionInfo Info(Deduce->getExprLoc());
1584 TemplateDeductionResult Result =
1585 DeduceAutoType(AutoTypeLoc: TInfo->getTypeLoc(), Initializer: Deduce, Result&: DeducedType, Info);
1586 if (Result != TemplateDeductionResult::Success &&
1587 Result != TemplateDeductionResult::AlreadyDiagnosed)
1588 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_auto_expr_deduction_failure)
1589 << Ty << Deduce->getType() << FullRange
1590 << Deduce->getSourceRange());
1591 if (DeducedType.isNull()) {
1592 assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
1593 return ExprError();
1594 }
1595
1596 Ty = DeducedType;
1597 Entity = InitializedEntity::InitializeTemporary(TypeInfo: TInfo, Type: Ty);
1598 }
1599
1600 if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs))
1601 return CXXUnresolvedConstructExpr::Create(
1602 Context, T: Ty.getNonReferenceType(), TSI: TInfo, LParenLoc: LParenOrBraceLoc, Args: Exprs,
1603 RParenLoc: RParenOrBraceLoc, IsListInit: ListInitialization);
1604
1605 // C++ [expr.type.conv]p1:
1606 // If the expression list is a parenthesized single expression, the type
1607 // conversion expression is equivalent (in definedness, and if defined in
1608 // meaning) to the corresponding cast expression.
1609 if (Exprs.size() == 1 && !ListInitialization &&
1610 !isa<InitListExpr>(Val: Exprs[0])) {
1611 Expr *Arg = Exprs[0];
1612 return BuildCXXFunctionalCastExpr(TInfo, Type: Ty, LParenLoc: LParenOrBraceLoc, CastExpr: Arg,
1613 RParenLoc: RParenOrBraceLoc);
1614 }
1615
1616 // For an expression of the form T(), T shall not be an array type.
1617 QualType ElemTy = Ty;
1618 if (Ty->isArrayType()) {
1619 if (!ListInitialization)
1620 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_value_init_for_array_type)
1621 << FullRange);
1622 ElemTy = Context.getBaseElementType(QT: Ty);
1623 }
1624
1625 // Only construct objects with object types.
1626 // The standard doesn't explicitly forbid function types here, but that's an
1627 // obvious oversight, as there's no way to dynamically construct a function
1628 // in general.
1629 if (Ty->isFunctionType())
1630 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_init_for_function_type)
1631 << Ty << FullRange);
1632
1633 // C++17 [expr.type.conv]p2, per DR2351:
1634 // If the type is cv void and the initializer is () or {}, the expression is
1635 // a prvalue of the specified type that performs no initialization.
1636 if (Ty->isVoidType()) {
1637 if (Exprs.empty())
1638 return new (Context) CXXScalarValueInitExpr(
1639 Ty.getUnqualifiedType(), TInfo, Kind.getRange().getEnd());
1640 if (ListInitialization &&
1641 cast<InitListExpr>(Val: Exprs[0])->getNumInits() == 0) {
1642 return CXXFunctionalCastExpr::Create(
1643 Context, T: Ty.getUnqualifiedType(), VK: VK_PRValue, Written: TInfo, Kind: CK_ToVoid,
1644 Op: Exprs[0], /*Path=*/nullptr, FPO: CurFPFeatureOverrides(),
1645 LPLoc: Exprs[0]->getBeginLoc(), RPLoc: Exprs[0]->getEndLoc());
1646 }
1647 } else if (RequireCompleteType(Loc: TyBeginLoc, T: ElemTy,
1648 DiagID: diag::err_invalid_incomplete_type_use,
1649 Args: FullRange))
1650 return ExprError();
1651
1652 // Otherwise, the expression is a prvalue of the specified type whose
1653 // result object is direct-initialized (11.6) with the initializer.
1654 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
1655 ExprResult Result = InitSeq.Perform(S&: *this, Entity, Kind, Args: Exprs);
1656
1657 if (Result.isInvalid())
1658 return Result;
1659
1660 Expr *Inner = Result.get();
1661 if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Val: Inner))
1662 Inner = BTE->getSubExpr();
1663 if (auto *CE = dyn_cast<ConstantExpr>(Val: Inner);
1664 CE && CE->isImmediateInvocation())
1665 Inner = CE->getSubExpr();
1666 if (!isa<CXXTemporaryObjectExpr>(Val: Inner) &&
1667 !isa<CXXScalarValueInitExpr>(Val: Inner)) {
1668 // If we created a CXXTemporaryObjectExpr, that node also represents the
1669 // functional cast. Otherwise, create an explicit cast to represent
1670 // the syntactic form of a functional-style cast that was used here.
1671 //
1672 // FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr
1673 // would give a more consistent AST representation than using a
1674 // CXXTemporaryObjectExpr. It's also weird that the functional cast
1675 // is sometimes handled by initialization and sometimes not.
1676 QualType ResultType = Result.get()->getType();
1677 SourceRange Locs = ListInitialization
1678 ? SourceRange()
1679 : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
1680 Result = CXXFunctionalCastExpr::Create(
1681 Context, T: ResultType, VK: Expr::getValueKindForType(T: Ty), Written: TInfo, Kind: CK_NoOp,
1682 Op: Result.get(), /*Path=*/nullptr, FPO: CurFPFeatureOverrides(),
1683 LPLoc: Locs.getBegin(), RPLoc: Locs.getEnd());
1684 }
1685
1686 return Result;
1687}
1688
1689bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
1690 // [CUDA] Ignore this function, if we can't call it.
1691 const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
1692 if (getLangOpts().CUDA) {
1693 auto CallPreference = CUDA().IdentifyPreference(Caller, Callee: Method);
1694 // If it's not callable at all, it's not the right function.
1695 if (CallPreference < SemaCUDA::CFP_WrongSide)
1696 return false;
1697 if (CallPreference == SemaCUDA::CFP_WrongSide) {
1698 // Maybe. We have to check if there are better alternatives.
1699 DeclContext::lookup_result R =
1700 Method->getDeclContext()->lookup(Name: Method->getDeclName());
1701 for (const auto *D : R) {
1702 if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
1703 if (CUDA().IdentifyPreference(Caller, Callee: FD) > SemaCUDA::CFP_WrongSide)
1704 return false;
1705 }
1706 }
1707 // We've found no better variants.
1708 }
1709 }
1710
1711 SmallVector<const FunctionDecl*, 4> PreventedBy;
1712 bool Result = Method->isUsualDeallocationFunction(PreventedBy);
1713
1714 if (Result || !getLangOpts().CUDA || PreventedBy.empty())
1715 return Result;
1716
1717 // In case of CUDA, return true if none of the 1-argument deallocator
1718 // functions are actually callable.
1719 return llvm::none_of(Range&: PreventedBy, P: [&](const FunctionDecl *FD) {
1720 assert(FD->getNumParams() == 1 &&
1721 "Only single-operand functions should be in PreventedBy");
1722 return CUDA().IdentifyPreference(Caller, Callee: FD) >= SemaCUDA::CFP_HostDevice;
1723 });
1724}
1725
1726/// Determine whether the given function is a non-placement
1727/// deallocation function.
1728static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
1729 if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Val: FD))
1730 return S.isUsualDeallocationFunction(Method);
1731
1732 if (!FD->getDeclName().isAnyOperatorDelete())
1733 return false;
1734
1735 if (FD->isTypeAwareOperatorNewOrDelete())
1736 return FunctionDecl::RequiredTypeAwareDeleteParameterCount ==
1737 FD->getNumParams();
1738
1739 unsigned UsualParams = 1;
1740 if (S.getLangOpts().SizedDeallocation && UsualParams < FD->getNumParams() &&
1741 S.Context.hasSameUnqualifiedType(
1742 T1: FD->getParamDecl(i: UsualParams)->getType(),
1743 T2: S.Context.getSizeType()))
1744 ++UsualParams;
1745
1746 if (S.getLangOpts().AlignedAllocation && UsualParams < FD->getNumParams() &&
1747 S.Context.hasSameUnqualifiedType(
1748 T1: FD->getParamDecl(i: UsualParams)->getType(),
1749 T2: S.Context.getCanonicalTagType(TD: S.getStdAlignValT())))
1750 ++UsualParams;
1751
1752 return UsualParams == FD->getNumParams();
1753}
1754
namespace {
  /// Describes one candidate "usual" deallocation function: the declaration
  /// that lookup found, the implicit parameters it accepts (type-identity,
  /// size, alignment), whether it is a destroying operator delete, and how
  /// preferable it is under CUDA rules.
  struct UsualDeallocFnInfo {
    /// Builds an invalid entry; FD stays null so `operator bool` is false.
    /// NOTE(review): Destroying and CUDAPref are left uninitialized here —
    /// presumably they are only read after checking validity; confirm before
    /// relying on them for invalid entries.
    UsualDeallocFnInfo()
        : Found(), FD(nullptr),
          IDP(AlignedAllocationMode::No, SizedDeallocationMode::No) {}
    /// Classifies the declaration in \p Found as a deallocation candidate
    /// for \p AllocType. \p AllocType may be null when the deallocated type
    /// is unknown, which disqualifies type-aware candidates.
    UsualDeallocFnInfo(Sema &S, DeclAccessPair Found, QualType AllocType,
                       SourceLocation Loc)
        : Found(Found), FD(dyn_cast<FunctionDecl>(Val: Found->getUnderlyingDecl())),
          Destroying(false),
          IDP({AllocType, TypeAwareAllocationMode::No,
               AlignedAllocationMode::No, SizedDeallocationMode::No}),
          CUDAPref(SemaCUDA::CFP_Native) {
      // A function template declaration is only a usual deallocation function
      // if it is a typed delete.
      if (!FD) {
        if (AllocType.isNull())
          return;
        auto *FTD = dyn_cast<FunctionTemplateDecl>(Val: Found->getUnderlyingDecl());
        if (!FTD)
          return;
        // Instantiate the type-aware usual delete for AllocType; on failure
        // the entry remains invalid (FD == nullptr).
        FunctionDecl *InstantiatedDecl =
            S.BuildTypeAwareUsualDelete(FnDecl: FTD, AllocType, Loc);
        if (!InstantiatedDecl)
          return;
        FD = InstantiatedDecl;
      }
      // Number of leading "fixed" parameters (the pointer operand, plus the
      // type-identity tag and/or the destroying-delete tag when present)
      // that precede any optional size and alignment parameters.
      unsigned NumBaseParams = 1;
      if (FD->isTypeAwareOperatorNewOrDelete()) {
        // If this is a type aware operator delete we instantiate an appropriate
        // specialization of std::type_identity<>. If we do not know the
        // type being deallocated, or if the type-identity parameter of the
        // deallocation function does not match the constructed type_identity
        // specialization we reject the declaration.
        if (AllocType.isNull()) {
          FD = nullptr;
          return;
        }
        QualType TypeIdentityTag = FD->getParamDecl(i: 0)->getType();
        QualType ExpectedTypeIdentityTag =
            S.tryBuildStdTypeIdentity(Type: AllocType, Loc);
        if (ExpectedTypeIdentityTag.isNull()) {
          FD = nullptr;
          return;
        }
        if (!S.Context.hasSameType(T1: TypeIdentityTag, T2: ExpectedTypeIdentityTag)) {
          FD = nullptr;
          return;
        }
        IDP.PassTypeIdentity = TypeAwareAllocationMode::Yes;
        ++NumBaseParams;
      }

      if (FD->isDestroyingOperatorDelete()) {
        Destroying = true;
        ++NumBaseParams;
      }

      // An optional size_t parameter marks a sized deallocation function.
      if (NumBaseParams < FD->getNumParams() &&
          S.Context.hasSameUnqualifiedType(
              T1: FD->getParamDecl(i: NumBaseParams)->getType(),
              T2: S.Context.getSizeType())) {
        ++NumBaseParams;
        IDP.PassSize = SizedDeallocationMode::Yes;
      }

      // An optional std::align_val_t parameter marks an aligned
      // deallocation function.
      if (NumBaseParams < FD->getNumParams() &&
          FD->getParamDecl(i: NumBaseParams)->getType()->isAlignValT()) {
        ++NumBaseParams;
        IDP.PassAlignment = AlignedAllocationMode::Yes;
      }

      // In CUDA, determine how much we'd like / dislike to call this.
      if (S.getLangOpts().CUDA)
        CUDAPref = S.CUDA().IdentifyPreference(
            Caller: S.getCurFunctionDecl(/*AllowLambda=*/true), Callee: FD);
    }

    /// True when this entry refers to a viable deallocation function.
    explicit operator bool() const { return FD; }

    /// Ranks *this against \p Other given the parameters the caller would
    /// like to pass (\p TargetIDP). Returns 1 when *this is the better
    /// candidate, -1 when \p Other is better, and 0 for a tie.
    int Compare(Sema &S, const UsualDeallocFnInfo &Other,
                ImplicitDeallocationParameters TargetIDP) const {
      assert(!TargetIDP.Type.isNull() ||
             !isTypeAwareAllocation(Other.IDP.PassTypeIdentity));

      // C++ P0722:
      // A destroying operator delete is preferred over a non-destroying
      // operator delete.
      if (Destroying != Other.Destroying)
        return Destroying ? 1 : -1;

      const ImplicitDeallocationParameters &OtherIDP = Other.IDP;
      // Selection for type awareness has priority over alignment and size
      if (IDP.PassTypeIdentity != OtherIDP.PassTypeIdentity)
        return IDP.PassTypeIdentity == TargetIDP.PassTypeIdentity ? 1 : -1;

      // C++17 [expr.delete]p10:
      // If the type has new-extended alignment, a function with a parameter
      // of type std::align_val_t is preferred; otherwise a function without
      // such a parameter is preferred
      if (IDP.PassAlignment != OtherIDP.PassAlignment)
        return IDP.PassAlignment == TargetIDP.PassAlignment ? 1 : -1;

      // Likewise, prefer the candidate whose sized-ness matches the request.
      if (IDP.PassSize != OtherIDP.PassSize)
        return IDP.PassSize == TargetIDP.PassSize ? 1 : -1;

      if (isTypeAwareAllocation(Mode: IDP.PassTypeIdentity)) {
        // Type aware allocation involves templates so we need to choose
        // the best type
        FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate();
        FunctionTemplateDecl *OtherPrimaryTemplate =
            Other.FD->getPrimaryTemplate();
        // A non-template candidate beats a template candidate.
        if ((!PrimaryTemplate) != (!OtherPrimaryTemplate))
          return OtherPrimaryTemplate ? 1 : -1;

        if (PrimaryTemplate && OtherPrimaryTemplate) {
          // Both are templates: use partial ordering for a call to pick the
          // more specialized one.
          const auto *DC = dyn_cast<CXXRecordDecl>(Val: Found->getDeclContext());
          const auto *OtherDC =
              dyn_cast<CXXRecordDecl>(Val: Other.Found->getDeclContext());
          unsigned ImplicitArgCount = Destroying + IDP.getNumImplicitArgs();
          if (FunctionTemplateDecl *Best = S.getMoreSpecializedTemplate(
                  FT1: PrimaryTemplate, FT2: OtherPrimaryTemplate, Loc: SourceLocation(),
                  TPOC: TPOC_Call, NumCallArguments1: ImplicitArgCount,
                  RawObj1Ty: DC ? S.Context.getCanonicalTagType(TD: DC) : QualType{},
                  RawObj2Ty: OtherDC ? S.Context.getCanonicalTagType(TD: OtherDC) : QualType{},
                  Reversed: false)) {
            return Best == PrimaryTemplate ? 1 : -1;
          }
        }
      }

      // Use CUDA call preference as a tiebreaker.
      if (CUDAPref > Other.CUDAPref)
        return 1;
      if (CUDAPref == Other.CUDAPref)
        return 0;
      return -1;
    }

    DeclAccessPair Found; ///< The lookup result this candidate came from.
    FunctionDecl *FD;     ///< The viable function, or null when invalid.
    bool Destroying;      ///< True for a destroying operator delete.
    ImplicitDeallocationParameters IDP; ///< Implicit parameters FD accepts.
    SemaCUDA::CUDAFunctionPreference CUDAPref; ///< CUDA callability rank.
  };
}
1900
1901/// Determine whether a type has new-extended alignment. This may be called when
1902/// the type is incomplete (for a delete-expression with an incomplete pointee
1903/// type), in which case it will conservatively return false if the alignment is
1904/// not known.
1905static bool hasNewExtendedAlignment(Sema &S, QualType AllocType) {
1906 return S.getLangOpts().AlignedAllocation &&
1907 S.getASTContext().getTypeAlignIfKnown(T: AllocType) >
1908 S.getASTContext().getTargetInfo().getNewAlign();
1909}
1910
1911static bool CheckDeleteOperator(Sema &S, SourceLocation StartLoc,
1912 SourceRange Range, bool Diagnose,
1913 CXXRecordDecl *NamingClass, DeclAccessPair Decl,
1914 FunctionDecl *Operator) {
1915 if (Operator->isTypeAwareOperatorNewOrDelete()) {
1916 QualType SelectedTypeIdentityParameter =
1917 Operator->getParamDecl(i: 0)->getType();
1918 if (S.RequireCompleteType(Loc: StartLoc, T: SelectedTypeIdentityParameter,
1919 DiagID: diag::err_incomplete_type))
1920 return true;
1921 }
1922
1923 // FIXME: DiagnoseUseOfDecl?
1924 if (Operator->isDeleted()) {
1925 if (Diagnose) {
1926 StringLiteral *Msg = Operator->getDeletedMessage();
1927 S.Diag(Loc: StartLoc, DiagID: diag::err_deleted_function_use)
1928 << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef());
1929 S.NoteDeletedFunction(FD: Operator);
1930 }
1931 return true;
1932 }
1933 Sema::AccessResult Accessible =
1934 S.CheckAllocationAccess(OperatorLoc: StartLoc, PlacementRange: Range, NamingClass, FoundDecl: Decl, Diagnose);
1935 return Accessible == Sema::AR_inaccessible;
1936}
1937
/// Select the correct "usual" deallocation function to use from a selection of
/// deallocation functions (either global or class-scope).
///
/// \param IDP The implicit deallocation parameters the caller would like to
///        pass (type-identity / alignment / size) plus the allocated type.
/// \param BestFns If non-null, receives every candidate that ties with the
///        best one (used by callers for ambiguity diagnostics).
/// \returns The best candidate, or an invalid UsualDeallocFnInfo if none.
static UsualDeallocFnInfo resolveDeallocationOverload(
    Sema &S, LookupResult &R, const ImplicitDeallocationParameters &IDP,
    SourceLocation Loc,
    llvm::SmallVectorImpl<UsualDeallocFnInfo> *BestFns = nullptr) {

  UsualDeallocFnInfo Best;
  for (auto I = R.begin(), E = R.end(); I != E; ++I) {
    UsualDeallocFnInfo Info(S, I.getPair(), IDP.Type, Loc);
    // Skip non-candidates: invalid entries, placement forms, and functions
    // CUDA says we can never call.
    if (!Info || !isNonPlacementDeallocationFunction(S, FD: Info.FD) ||
        Info.CUDAPref == SemaCUDA::CFP_Never)
      continue;

    // A type-aware candidate is only usable when the caller asked for
    // type-aware deallocation.
    if (!isTypeAwareAllocation(Mode: IDP.PassTypeIdentity) &&
        isTypeAwareAllocation(Mode: Info.IDP.PassTypeIdentity))
      continue;
    // The first viable candidate becomes the provisional best.
    if (!Best) {
      Best = Info;
      if (BestFns)
        BestFns->push_back(Elt: Info);
      continue;
    }
    int ComparisonResult = Best.Compare(S, Other: Info, TargetIDP: IDP);
    // The existing best is strictly better; keep it.
    if (ComparisonResult > 0)
      continue;

    // If more than one preferred function is found, all non-preferred
    // functions are eliminated from further consideration.
    if (BestFns && ComparisonResult < 0)
      BestFns->clear();

    // On a tie (0) Info is appended alongside the previous best; on a win
    // (<0) it replaces the previously collected set.
    Best = Info;
    if (BestFns)
      BestFns->push_back(Elt: Info);
  }

  return Best;
}
1977
/// Determine whether a given type is a class for which 'delete[]' would call
/// a member 'operator delete[]' with a 'size_t' parameter. This implies that
/// we need to store the array size (even if the type is
/// trivially-destructible).
static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
                                         TypeAwareAllocationMode PassType,
                                         QualType allocType) {
  // Only class types can declare a member operator delete[]; look through
  // arrays to the underlying element type.
  const auto *record =
      allocType->getBaseElementTypeUnsafe()->getAsCanonical<RecordType>();
  if (!record) return false;

  // Try to find an operator delete[] in class scope.

  DeclarationName deleteName =
      S.Context.DeclarationNames.getCXXOperatorName(Op: OO_Array_Delete);
  LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
  S.LookupQualifiedName(R&: ops, LookupCtx: record->getDecl()->getDefinitionOrSelf());

  // We're just doing this for information.
  ops.suppressDiagnostics();

  // Very likely: there's no operator delete[].
  if (ops.empty()) return false;

  // If it's ambiguous, it should be illegal to call operator delete[]
  // on this thing, so it doesn't matter if we allocate extra space or not.
  if (ops.isAmbiguous()) return false;

  // C++17 [expr.delete]p10:
  //   If the deallocation functions have class scope, the one without a
  //   parameter of type std::size_t is selected.
  // Ask the overload resolver for an unsized deallocation; if the best
  // candidate nevertheless takes a size_t, the class forces sized delete.
  ImplicitDeallocationParameters IDP = {
      allocType, PassType,
      alignedAllocationModeFromBool(IsAligned: hasNewExtendedAlignment(S, AllocType: allocType)),
      SizedDeallocationMode::No};
  auto Best = resolveDeallocationOverload(S, R&: ops, IDP, Loc: loc);
  return Best && isSizedDeallocation(Mode: Best.IDP.PassSize);
}
2016
ExprResult
Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                  SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
                  SourceLocation PlacementRParen, SourceRange TypeIdParens,
                  Declarator &D, Expr *Initializer) {
  // Outermost array bound of the new-type-id, if any. Engaged-but-null
  // means the bound was omitted (e.g. "new int[]{...}") and must be
  // deduced from the initializer.
  std::optional<Expr *> ArraySize;
  // If the specified type is an array, unwrap it and save the expression.
  if (D.getNumTypeObjects() > 0 &&
      D.getTypeObject(i: 0).Kind == DeclaratorChunk::Array) {
    DeclaratorChunk &Chunk = D.getTypeObject(i: 0);
    // 'new' of an array of 'auto' is not permitted.
    if (D.getDeclSpec().hasAutoTypeSpec())
      return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_new_array_of_auto)
                       << D.getSourceRange());
    if (Chunk.Arr.hasStatic)
      return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_static_illegal_in_new)
                       << D.getSourceRange());
    // A missing bound is only acceptable when an initializer can supply it.
    if (!Chunk.Arr.NumElts && !Initializer)
      return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_array_new_needs_size)
                       << D.getSourceRange());

    ArraySize = Chunk.Arr.NumElts;
    D.DropFirstTypeObject();
  }

  // Every dimension shall be of constant size.
  // (The first dimension, saved above, may be a runtime expression; the
  // remaining inner dimensions must be constant.)
  if (ArraySize) {
    for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) {
      if (D.getTypeObject(i: I).Kind != DeclaratorChunk::Array)
        break;

      DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(i: I).Arr;
      if (Expr *NumElts = Array.NumElts) {
        if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
          // FIXME: GCC permits constant folding here. We should either do so consistently
          // or not do so at all, rather than changing behavior in C++14 onwards.
          if (getLangOpts().CPlusPlus14) {
            // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
            // shall be a converted constant expression (5.19) of type std::size_t
            // and shall evaluate to a strictly positive value.
            llvm::APSInt Value(Context.getIntWidth(T: Context.getSizeType()));
            Array.NumElts =
                CheckConvertedConstantExpression(From: NumElts, T: Context.getSizeType(),
                                                 Value, CCE: CCEKind::ArrayBound)
                    .get();
          } else {
            // Pre-C++14: require an integer constant expression (allowing
            // folding, with a diagnostic on non-constant values).
            Array.NumElts = VerifyIntegerConstantExpression(
                                E: NumElts, Result: nullptr, DiagID: diag::err_new_array_nonconst,
                                CanFold: AllowFoldKind::Allow)
                                .get();
          }
          // A null result means the check above already diagnosed.
          if (!Array.NumElts)
            return ExprError();
        }
      }
    }
  }

  TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
  QualType AllocType = TInfo->getType();
  if (D.isInvalidType())
    return ExprError();

  // Record the range of a parenthesized direct-initializer for use in
  // diagnostics and initialization-kind selection by BuildCXXNew.
  SourceRange DirectInitRange;
  if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Val: Initializer))
    DirectInitRange = List->getSourceRange();

  return BuildCXXNew(Range: SourceRange(StartLoc, D.getEndLoc()), UseGlobal,
                     PlacementLParen, PlacementArgs, PlacementRParen,
                     TypeIdParens, AllocType, AllocTypeInfo: TInfo, ArraySize, DirectInitRange,
                     Initializer);
}
2088
2089static bool isLegalArrayNewInitializer(CXXNewInitializationStyle Style,
2090 Expr *Init, bool IsCPlusPlus20) {
2091 if (!Init)
2092 return true;
2093 if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Val: Init))
2094 return IsCPlusPlus20 || PLE->getNumExprs() == 0;
2095 if (isa<ImplicitValueInitExpr>(Val: Init))
2096 return true;
2097 else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Val: Init))
2098 return !CCE->isListInitialization() &&
2099 CCE->getConstructor()->isDefaultConstructor();
2100 else if (Style == CXXNewInitializationStyle::Braces) {
2101 assert(isa<InitListExpr>(Init) &&
2102 "Shouldn't create list CXXConstructExprs for arrays.");
2103 return true;
2104 }
2105 return false;
2106}
2107
2108bool
2109Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
2110 if (!getLangOpts().AlignedAllocationUnavailable)
2111 return false;
2112 if (FD.isDefined())
2113 return false;
2114 UnsignedOrNone AlignmentParam = std::nullopt;
2115 if (FD.isReplaceableGlobalAllocationFunction(AlignmentParam: &AlignmentParam) &&
2116 AlignmentParam)
2117 return true;
2118 return false;
2119}
2120
2121// Emit a diagnostic if an aligned allocation/deallocation function that is not
2122// implemented in the standard library is selected.
2123void Sema::diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
2124 SourceLocation Loc) {
2125 if (isUnavailableAlignedAllocationFunction(FD)) {
2126 const llvm::Triple &T = getASTContext().getTargetInfo().getTriple();
2127 StringRef OSName = AvailabilityAttr::getPlatformNameSourceSpelling(
2128 Platform: getASTContext().getTargetInfo().getPlatformName());
2129 VersionTuple OSVersion = alignedAllocMinVersion(OS: T.getOS());
2130
2131 bool IsDelete = FD.getDeclName().isAnyOperatorDelete();
2132 Diag(Loc, DiagID: diag::err_aligned_allocation_unavailable)
2133 << IsDelete << FD.getType().getAsString() << OSName
2134 << OSVersion.getAsString() << OSVersion.empty();
2135 Diag(Loc, DiagID: diag::note_silence_aligned_allocation_unavailable);
2136 }
2137}
2138
2139ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
2140 SourceLocation PlacementLParen,
2141 MultiExprArg PlacementArgs,
2142 SourceLocation PlacementRParen,
2143 SourceRange TypeIdParens, QualType AllocType,
2144 TypeSourceInfo *AllocTypeInfo,
2145 std::optional<Expr *> ArraySize,
2146 SourceRange DirectInitRange, Expr *Initializer) {
2147 SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
2148 SourceLocation StartLoc = Range.getBegin();
2149
2150 CXXNewInitializationStyle InitStyle;
2151 if (DirectInitRange.isValid()) {
2152 assert(Initializer && "Have parens but no initializer.");
2153 InitStyle = CXXNewInitializationStyle::Parens;
2154 } else if (isa_and_nonnull<InitListExpr>(Val: Initializer))
2155 InitStyle = CXXNewInitializationStyle::Braces;
2156 else {
2157 assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
2158 isa<CXXConstructExpr>(Initializer)) &&
2159 "Initializer expression that cannot have been implicitly created.");
2160 InitStyle = CXXNewInitializationStyle::None;
2161 }
2162
2163 MultiExprArg Exprs(&Initializer, Initializer ? 1 : 0);
2164 if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Val: Initializer)) {
2165 assert(InitStyle == CXXNewInitializationStyle::Parens &&
2166 "paren init for non-call init");
2167 Exprs = MultiExprArg(List->getExprs(), List->getNumExprs());
2168 } else if (auto *List = dyn_cast_or_null<CXXParenListInitExpr>(Val: Initializer)) {
2169 assert(InitStyle == CXXNewInitializationStyle::Parens &&
2170 "paren init for non-call init");
2171 Exprs = List->getInitExprs();
2172 }
2173
2174 // C++11 [expr.new]p15:
2175 // A new-expression that creates an object of type T initializes that
2176 // object as follows:
2177 InitializationKind Kind = [&] {
2178 switch (InitStyle) {
2179 // - If the new-initializer is omitted, the object is default-
2180 // initialized (8.5); if no initialization is performed,
2181 // the object has indeterminate value
2182 case CXXNewInitializationStyle::None:
2183 return InitializationKind::CreateDefault(InitLoc: TypeRange.getBegin());
2184 // - Otherwise, the new-initializer is interpreted according to the
2185 // initialization rules of 8.5 for direct-initialization.
2186 case CXXNewInitializationStyle::Parens:
2187 return InitializationKind::CreateDirect(InitLoc: TypeRange.getBegin(),
2188 LParenLoc: DirectInitRange.getBegin(),
2189 RParenLoc: DirectInitRange.getEnd());
2190 case CXXNewInitializationStyle::Braces:
2191 return InitializationKind::CreateDirectList(InitLoc: TypeRange.getBegin(),
2192 LBraceLoc: Initializer->getBeginLoc(),
2193 RBraceLoc: Initializer->getEndLoc());
2194 }
2195 llvm_unreachable("Unknown initialization kind");
2196 }();
2197
2198 // C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
2199 auto *Deduced = AllocType->getContainedDeducedType();
2200 if (Deduced && !Deduced->isDeduced() &&
2201 isa<DeducedTemplateSpecializationType>(Val: Deduced)) {
2202 if (ArraySize)
2203 return ExprError(
2204 Diag(Loc: *ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
2205 DiagID: diag::err_deduced_class_template_compound_type)
2206 << /*array*/ 2
2207 << (*ArraySize ? (*ArraySize)->getSourceRange() : TypeRange));
2208
2209 InitializedEntity Entity
2210 = InitializedEntity::InitializeNew(NewLoc: StartLoc, Type: AllocType);
2211 AllocType = DeduceTemplateSpecializationFromInitializer(
2212 TInfo: AllocTypeInfo, Entity, Kind, Init: Exprs);
2213 if (AllocType.isNull())
2214 return ExprError();
2215 } else if (Deduced && !Deduced->isDeduced()) {
2216 MultiExprArg Inits = Exprs;
2217 bool Braced = (InitStyle == CXXNewInitializationStyle::Braces);
2218 if (Braced) {
2219 auto *ILE = cast<InitListExpr>(Val: Exprs[0]);
2220 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
2221 }
2222
2223 if (InitStyle == CXXNewInitializationStyle::None || Inits.empty())
2224 return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_auto_new_requires_ctor_arg)
2225 << AllocType << TypeRange);
2226 if (Inits.size() > 1) {
2227 Expr *FirstBad = Inits[1];
2228 return ExprError(Diag(Loc: FirstBad->getBeginLoc(),
2229 DiagID: diag::err_auto_new_ctor_multiple_expressions)
2230 << AllocType << TypeRange);
2231 }
2232 if (Braced && !getLangOpts().CPlusPlus17)
2233 Diag(Loc: Initializer->getBeginLoc(), DiagID: diag::ext_auto_new_list_init)
2234 << AllocType << TypeRange;
2235 Expr *Deduce = Inits[0];
2236 if (isa<InitListExpr>(Val: Deduce))
2237 return ExprError(
2238 Diag(Loc: Deduce->getBeginLoc(), DiagID: diag::err_auto_expr_init_paren_braces)
2239 << Braced << AllocType << TypeRange);
2240 QualType DeducedType;
2241 TemplateDeductionInfo Info(Deduce->getExprLoc());
2242 TemplateDeductionResult Result =
2243 DeduceAutoType(AutoTypeLoc: AllocTypeInfo->getTypeLoc(), Initializer: Deduce, Result&: DeducedType, Info);
2244 if (Result != TemplateDeductionResult::Success &&
2245 Result != TemplateDeductionResult::AlreadyDiagnosed)
2246 return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_auto_new_deduction_failure)
2247 << AllocType << Deduce->getType() << TypeRange
2248 << Deduce->getSourceRange());
2249 if (DeducedType.isNull()) {
2250 assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
2251 return ExprError();
2252 }
2253 AllocType = DeducedType;
2254 }
2255
2256 // Per C++0x [expr.new]p5, the type being constructed may be a
2257 // typedef of an array type.
2258 // Dependent case will be handled separately.
2259 if (!ArraySize && !AllocType->isDependentType()) {
2260 if (const ConstantArrayType *Array
2261 = Context.getAsConstantArrayType(T: AllocType)) {
2262 ArraySize = IntegerLiteral::Create(C: Context, V: Array->getSize(),
2263 type: Context.getSizeType(),
2264 l: TypeRange.getEnd());
2265 AllocType = Array->getElementType();
2266 }
2267 }
2268
2269 if (CheckAllocatedType(AllocType, Loc: TypeRange.getBegin(), R: TypeRange))
2270 return ExprError();
2271
2272 if (ArraySize && !checkArrayElementAlignment(EltTy: AllocType, Loc: TypeRange.getBegin()))
2273 return ExprError();
2274
2275 // In ARC, infer 'retaining' for the allocated
2276 if (getLangOpts().ObjCAutoRefCount &&
2277 AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2278 AllocType->isObjCLifetimeType()) {
2279 AllocType = Context.getLifetimeQualifiedType(type: AllocType,
2280 lifetime: AllocType->getObjCARCImplicitLifetime());
2281 }
2282
2283 QualType ResultType = Context.getPointerType(T: AllocType);
2284
2285 if (ArraySize && *ArraySize &&
2286 (*ArraySize)->getType()->isNonOverloadPlaceholderType()) {
2287 ExprResult result = CheckPlaceholderExpr(E: *ArraySize);
2288 if (result.isInvalid()) return ExprError();
2289 ArraySize = result.get();
2290 }
2291 // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have
2292 // integral or enumeration type with a non-negative value."
2293 // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped
2294 // enumeration type, or a class type for which a single non-explicit
2295 // conversion function to integral or unscoped enumeration type exists.
2296 // C++1y [expr.new]p6: The expression [...] is implicitly converted to
2297 // std::size_t.
2298 std::optional<uint64_t> KnownArraySize;
2299 if (ArraySize && *ArraySize && !(*ArraySize)->isTypeDependent()) {
2300 ExprResult ConvertedSize;
2301 if (getLangOpts().CPlusPlus14) {
2302 assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
2303
2304 ConvertedSize = PerformImplicitConversion(
2305 From: *ArraySize, ToType: Context.getSizeType(), Action: AssignmentAction::Converting);
2306
2307 if (!ConvertedSize.isInvalid() && (*ArraySize)->getType()->isRecordType())
2308 // Diagnose the compatibility of this conversion.
2309 Diag(Loc: StartLoc, DiagID: diag::warn_cxx98_compat_array_size_conversion)
2310 << (*ArraySize)->getType() << 0 << "'size_t'";
2311 } else {
2312 class SizeConvertDiagnoser : public ICEConvertDiagnoser {
2313 protected:
2314 Expr *ArraySize;
2315
2316 public:
2317 SizeConvertDiagnoser(Expr *ArraySize)
2318 : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false),
2319 ArraySize(ArraySize) {}
2320
2321 SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
2322 QualType T) override {
2323 return S.Diag(Loc, DiagID: diag::err_array_size_not_integral)
2324 << S.getLangOpts().CPlusPlus11 << T;
2325 }
2326
2327 SemaDiagnosticBuilder diagnoseIncomplete(
2328 Sema &S, SourceLocation Loc, QualType T) override {
2329 return S.Diag(Loc, DiagID: diag::err_array_size_incomplete_type)
2330 << T << ArraySize->getSourceRange();
2331 }
2332
2333 SemaDiagnosticBuilder diagnoseExplicitConv(
2334 Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
2335 return S.Diag(Loc, DiagID: diag::err_array_size_explicit_conversion) << T << ConvTy;
2336 }
2337
2338 SemaDiagnosticBuilder noteExplicitConv(
2339 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2340 return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_array_size_conversion)
2341 << ConvTy->isEnumeralType() << ConvTy;
2342 }
2343
2344 SemaDiagnosticBuilder diagnoseAmbiguous(
2345 Sema &S, SourceLocation Loc, QualType T) override {
2346 return S.Diag(Loc, DiagID: diag::err_array_size_ambiguous_conversion) << T;
2347 }
2348
2349 SemaDiagnosticBuilder noteAmbiguous(
2350 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2351 return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_array_size_conversion)
2352 << ConvTy->isEnumeralType() << ConvTy;
2353 }
2354
2355 SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
2356 QualType T,
2357 QualType ConvTy) override {
2358 return S.Diag(Loc,
2359 DiagID: S.getLangOpts().CPlusPlus11
2360 ? diag::warn_cxx98_compat_array_size_conversion
2361 : diag::ext_array_size_conversion)
2362 << T << ConvTy->isEnumeralType() << ConvTy;
2363 }
2364 } SizeDiagnoser(*ArraySize);
2365
2366 ConvertedSize = PerformContextualImplicitConversion(Loc: StartLoc, FromE: *ArraySize,
2367 Converter&: SizeDiagnoser);
2368 }
2369 if (ConvertedSize.isInvalid())
2370 return ExprError();
2371
2372 ArraySize = ConvertedSize.get();
2373 QualType SizeType = (*ArraySize)->getType();
2374
2375 if (!SizeType->isIntegralOrUnscopedEnumerationType())
2376 return ExprError();
2377
2378 // C++98 [expr.new]p7:
2379 // The expression in a direct-new-declarator shall have integral type
2380 // with a non-negative value.
2381 //
2382 // Let's see if this is a constant < 0. If so, we reject it out of hand,
2383 // per CWG1464. Otherwise, if it's not a constant, we must have an
2384 // unparenthesized array type.
2385
2386 // We've already performed any required implicit conversion to integer or
2387 // unscoped enumeration type.
2388 // FIXME: Per CWG1464, we are required to check the value prior to
2389 // converting to size_t. This will never find a negative array size in
2390 // C++14 onwards, because Value is always unsigned here!
2391 if (std::optional<llvm::APSInt> Value =
2392 (*ArraySize)->getIntegerConstantExpr(Ctx: Context)) {
2393 if (Value->isSigned() && Value->isNegative()) {
2394 return ExprError(Diag(Loc: (*ArraySize)->getBeginLoc(),
2395 DiagID: diag::err_typecheck_negative_array_size)
2396 << (*ArraySize)->getSourceRange());
2397 }
2398
2399 if (!AllocType->isDependentType()) {
2400 unsigned ActiveSizeBits =
2401 ConstantArrayType::getNumAddressingBits(Context, ElementType: AllocType, NumElements: *Value);
2402 if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
2403 return ExprError(
2404 Diag(Loc: (*ArraySize)->getBeginLoc(), DiagID: diag::err_array_too_large)
2405 << toString(I: *Value, Radix: 10, Signed: Value->isSigned(),
2406 /*formatAsCLiteral=*/false, /*UpperCase=*/false,
2407 /*InsertSeparators=*/true)
2408 << (*ArraySize)->getSourceRange());
2409 }
2410
2411 KnownArraySize = Value->getZExtValue();
2412 } else if (TypeIdParens.isValid()) {
2413 // Can't have dynamic array size when the type-id is in parentheses.
2414 Diag(Loc: (*ArraySize)->getBeginLoc(), DiagID: diag::ext_new_paren_array_nonconst)
2415 << (*ArraySize)->getSourceRange()
2416 << FixItHint::CreateRemoval(RemoveRange: TypeIdParens.getBegin())
2417 << FixItHint::CreateRemoval(RemoveRange: TypeIdParens.getEnd());
2418
2419 TypeIdParens = SourceRange();
2420 }
2421
2422 // Note that we do *not* convert the argument in any way. It can
2423 // be signed, larger than size_t, whatever.
2424 }
2425
2426 FunctionDecl *OperatorNew = nullptr;
2427 FunctionDecl *OperatorDelete = nullptr;
2428 unsigned Alignment =
2429 AllocType->isDependentType() ? 0 : Context.getTypeAlign(T: AllocType);
2430 unsigned NewAlignment = Context.getTargetInfo().getNewAlign();
2431 ImplicitAllocationParameters IAP = {
2432 AllocType, ShouldUseTypeAwareOperatorNewOrDelete(),
2433 alignedAllocationModeFromBool(IsAligned: getLangOpts().AlignedAllocation &&
2434 Alignment > NewAlignment)};
2435
2436 if (CheckArgsForPlaceholders(args: PlacementArgs))
2437 return ExprError();
2438
2439 AllocationFunctionScope Scope = UseGlobal ? AllocationFunctionScope::Global
2440 : AllocationFunctionScope::Both;
2441 SourceRange AllocationParameterRange = Range;
2442 if (PlacementLParen.isValid() && PlacementRParen.isValid())
2443 AllocationParameterRange = SourceRange(PlacementLParen, PlacementRParen);
2444 if (!AllocType->isDependentType() &&
2445 !Expr::hasAnyTypeDependentArguments(Exprs: PlacementArgs) &&
2446 FindAllocationFunctions(StartLoc, Range: AllocationParameterRange, NewScope: Scope, DeleteScope: Scope,
2447 AllocType, IsArray: ArraySize.has_value(), IAP,
2448 PlaceArgs: PlacementArgs, OperatorNew, OperatorDelete))
2449 return ExprError();
2450
2451 // If this is an array allocation, compute whether the usual array
2452 // deallocation function for the type has a size_t parameter.
2453 bool UsualArrayDeleteWantsSize = false;
2454 if (ArraySize && !AllocType->isDependentType())
2455 UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(
2456 S&: *this, loc: StartLoc, PassType: IAP.PassTypeIdentity, allocType: AllocType);
2457
2458 SmallVector<Expr *, 8> AllPlaceArgs;
2459 if (OperatorNew) {
2460 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
2461 VariadicCallType CallType = Proto->isVariadic()
2462 ? VariadicCallType::Function
2463 : VariadicCallType::DoesNotApply;
2464
2465 // We've already converted the placement args, just fill in any default
2466 // arguments. Skip the first parameter because we don't have a corresponding
2467 // argument. Skip the second parameter too if we're passing in the
2468 // alignment; we've already filled it in.
2469 unsigned NumImplicitArgs = 1;
2470 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
2471 assert(OperatorNew->isTypeAwareOperatorNewOrDelete());
2472 NumImplicitArgs++;
2473 }
2474 if (isAlignedAllocation(Mode: IAP.PassAlignment))
2475 NumImplicitArgs++;
2476 if (GatherArgumentsForCall(CallLoc: AllocationParameterRange.getBegin(), FDecl: OperatorNew,
2477 Proto, FirstParam: NumImplicitArgs, Args: PlacementArgs,
2478 AllArgs&: AllPlaceArgs, CallType))
2479 return ExprError();
2480
2481 if (!AllPlaceArgs.empty())
2482 PlacementArgs = AllPlaceArgs;
2483
2484 // We would like to perform some checking on the given `operator new` call,
2485 // but the PlacementArgs does not contain the implicit arguments,
2486 // namely allocation size and maybe allocation alignment,
2487 // so we need to conjure them.
2488
2489 QualType SizeTy = Context.getSizeType();
2490 unsigned SizeTyWidth = Context.getTypeSize(T: SizeTy);
2491
2492 llvm::APInt SingleEltSize(
2493 SizeTyWidth, Context.getTypeSizeInChars(T: AllocType).getQuantity());
2494
2495 // How many bytes do we want to allocate here?
2496 std::optional<llvm::APInt> AllocationSize;
2497 if (!ArraySize && !AllocType->isDependentType()) {
2498 // For non-array operator new, we only want to allocate one element.
2499 AllocationSize = SingleEltSize;
2500 } else if (KnownArraySize && !AllocType->isDependentType()) {
2501 // For array operator new, only deal with static array size case.
2502 bool Overflow;
2503 AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
2504 .umul_ov(RHS: SingleEltSize, Overflow);
2505 (void)Overflow;
2506 assert(
2507 !Overflow &&
2508 "Expected that all the overflows would have been handled already.");
2509 }
2510
2511 IntegerLiteral AllocationSizeLiteral(
2512 Context, AllocationSize.value_or(u: llvm::APInt::getZero(numBits: SizeTyWidth)),
2513 SizeTy, StartLoc);
2514 // Otherwise, if we failed to constant-fold the allocation size, we'll
2515 // just give up and pass-in something opaque, that isn't a null pointer.
2516 OpaqueValueExpr OpaqueAllocationSize(StartLoc, SizeTy, VK_PRValue,
2517 OK_Ordinary, /*SourceExpr=*/nullptr);
2518
2519 // Let's synthesize the alignment argument in case we will need it.
2520 // Since we *really* want to allocate these on stack, this is slightly ugly
2521 // because there might not be a `std::align_val_t` type.
2522 EnumDecl *StdAlignValT = getStdAlignValT();
2523 QualType AlignValT =
2524 StdAlignValT ? Context.getCanonicalTagType(TD: StdAlignValT) : SizeTy;
2525 IntegerLiteral AlignmentLiteral(
2526 Context,
2527 llvm::APInt(Context.getTypeSize(T: SizeTy),
2528 Alignment / Context.getCharWidth()),
2529 SizeTy, StartLoc);
2530 ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
2531 CK_IntegralCast, &AlignmentLiteral,
2532 VK_PRValue, FPOptionsOverride());
2533
2534 // Adjust placement args by prepending conjured size and alignment exprs.
2535 llvm::SmallVector<Expr *, 8> CallArgs;
2536 CallArgs.reserve(N: NumImplicitArgs + PlacementArgs.size());
2537 CallArgs.emplace_back(Args: AllocationSize
2538 ? static_cast<Expr *>(&AllocationSizeLiteral)
2539 : &OpaqueAllocationSize);
2540 if (isAlignedAllocation(Mode: IAP.PassAlignment))
2541 CallArgs.emplace_back(Args: &DesiredAlignment);
2542 llvm::append_range(C&: CallArgs, R&: PlacementArgs);
2543
2544 DiagnoseSentinelCalls(D: OperatorNew, Loc: PlacementLParen, Args: CallArgs);
2545
2546 checkCall(FDecl: OperatorNew, Proto, /*ThisArg=*/nullptr, Args: CallArgs,
2547 /*IsMemberFunction=*/false, Loc: StartLoc, Range, CallType);
2548
2549 // Warn if the type is over-aligned and is being allocated by (unaligned)
2550 // global operator new.
2551 if (PlacementArgs.empty() && !isAlignedAllocation(Mode: IAP.PassAlignment) &&
2552 (OperatorNew->isImplicit() ||
2553 (OperatorNew->getBeginLoc().isValid() &&
2554 getSourceManager().isInSystemHeader(Loc: OperatorNew->getBeginLoc())))) {
2555 if (Alignment > NewAlignment)
2556 Diag(Loc: StartLoc, DiagID: diag::warn_overaligned_type)
2557 << AllocType
2558 << unsigned(Alignment / Context.getCharWidth())
2559 << unsigned(NewAlignment / Context.getCharWidth());
2560 }
2561 }
2562
2563 // Array 'new' can't have any initializers except empty parentheses.
2564 // Initializer lists are also allowed, in C++11. Rely on the parser for the
2565 // dialect distinction.
2566 if (ArraySize && !isLegalArrayNewInitializer(Style: InitStyle, Init: Initializer,
2567 IsCPlusPlus20: getLangOpts().CPlusPlus20)) {
2568 SourceRange InitRange(Exprs.front()->getBeginLoc(),
2569 Exprs.back()->getEndLoc());
2570 Diag(Loc: StartLoc, DiagID: diag::err_new_array_init_args) << InitRange;
2571 return ExprError();
2572 }
2573
2574 // If we can perform the initialization, and we've not already done so,
2575 // do it now.
2576 if (!AllocType->isDependentType() &&
2577 !Expr::hasAnyTypeDependentArguments(Exprs)) {
2578 // The type we initialize is the complete type, including the array bound.
2579 QualType InitType;
2580 if (KnownArraySize)
2581 InitType = Context.getConstantArrayType(
2582 EltTy: AllocType,
2583 ArySize: llvm::APInt(Context.getTypeSize(T: Context.getSizeType()),
2584 *KnownArraySize),
2585 SizeExpr: *ArraySize, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
2586 else if (ArraySize)
2587 InitType = Context.getIncompleteArrayType(EltTy: AllocType,
2588 ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
2589 else
2590 InitType = AllocType;
2591
2592 InitializedEntity Entity
2593 = InitializedEntity::InitializeNew(NewLoc: StartLoc, Type: InitType);
2594 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
2595 ExprResult FullInit = InitSeq.Perform(S&: *this, Entity, Kind, Args: Exprs);
2596 if (FullInit.isInvalid())
2597 return ExprError();
2598
2599 // FullInit is our initializer; strip off CXXBindTemporaryExprs, because
2600 // we don't want the initialized object to be destructed.
2601 // FIXME: We should not create these in the first place.
2602 if (CXXBindTemporaryExpr *Binder =
2603 dyn_cast_or_null<CXXBindTemporaryExpr>(Val: FullInit.get()))
2604 FullInit = Binder->getSubExpr();
2605
2606 Initializer = FullInit.get();
2607
2608 // FIXME: If we have a KnownArraySize, check that the array bound of the
2609 // initializer is no greater than that constant value.
2610
2611 if (ArraySize && !*ArraySize) {
2612 auto *CAT = Context.getAsConstantArrayType(T: Initializer->getType());
2613 if (CAT) {
2614 // FIXME: Track that the array size was inferred rather than explicitly
2615 // specified.
2616 ArraySize = IntegerLiteral::Create(
2617 C: Context, V: CAT->getSize(), type: Context.getSizeType(), l: TypeRange.getEnd());
2618 } else {
2619 Diag(Loc: TypeRange.getEnd(), DiagID: diag::err_new_array_size_unknown_from_init)
2620 << Initializer->getSourceRange();
2621 }
2622 }
2623 }
2624
2625 // Mark the new and delete operators as referenced.
2626 if (OperatorNew) {
2627 if (DiagnoseUseOfDecl(D: OperatorNew, Locs: StartLoc))
2628 return ExprError();
2629 MarkFunctionReferenced(Loc: StartLoc, Func: OperatorNew);
2630 }
2631 if (OperatorDelete) {
2632 if (DiagnoseUseOfDecl(D: OperatorDelete, Locs: StartLoc))
2633 return ExprError();
2634 MarkFunctionReferenced(Loc: StartLoc, Func: OperatorDelete);
2635 }
2636
2637 // For MSVC vector deleting destructors support we record that for the class
2638 // new[] was called. We try to optimize the code size and only emit vector
2639 // deleting destructors when they are required. Vector deleting destructors
2640 // are required for delete[] call but MSVC triggers emission of them
2641 // whenever new[] is called for an object of the class and we do the same
2642 // for compatibility.
2643 if (const CXXConstructExpr *CCE =
2644 dyn_cast_or_null<CXXConstructExpr>(Val: Initializer);
2645 CCE && ArraySize) {
2646 Context.setClassNeedsVectorDeletingDestructor(
2647 CCE->getConstructor()->getParent());
2648 }
2649
2650 return CXXNewExpr::Create(Ctx: Context, IsGlobalNew: UseGlobal, OperatorNew, OperatorDelete,
2651 IAP, UsualArrayDeleteWantsSize, PlacementArgs,
2652 TypeIdParens, ArraySize, InitializationStyle: InitStyle, Initializer,
2653 Ty: ResultType, AllocatedTypeInfo: AllocTypeInfo, Range, DirectInitRange);
2654}
2655
2656bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
2657 SourceRange R) {
2658 // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
2659 // abstract class type or array thereof.
2660 if (AllocType->isFunctionType())
2661 return Diag(Loc, DiagID: diag::err_bad_new_type)
2662 << AllocType << 0 << R;
2663 else if (AllocType->isReferenceType())
2664 return Diag(Loc, DiagID: diag::err_bad_new_type)
2665 << AllocType << 1 << R;
2666 else if (!AllocType->isDependentType() &&
2667 RequireCompleteSizedType(
2668 Loc, T: AllocType, DiagID: diag::err_new_incomplete_or_sizeless_type, Args: R))
2669 return true;
2670 else if (RequireNonAbstractType(Loc, T: AllocType,
2671 DiagID: diag::err_allocation_of_abstract_type))
2672 return true;
2673 else if (AllocType->isVariablyModifiedType())
2674 return Diag(Loc, DiagID: diag::err_variably_modified_new_type)
2675 << AllocType;
2676 else if (AllocType.getAddressSpace() != LangAS::Default &&
2677 !getLangOpts().OpenCLCPlusPlus)
2678 return Diag(Loc, DiagID: diag::err_address_space_qualified_new)
2679 << AllocType.getUnqualifiedType()
2680 << Qualifiers::getAddrSpaceAsString(AS: AllocType.getAddressSpace());
2681
2682 else if (getLangOpts().ObjCAutoRefCount) {
2683 if (const ArrayType *AT = Context.getAsArrayType(T: AllocType)) {
2684 QualType BaseAllocType = Context.getBaseElementType(VAT: AT);
2685 if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2686 BaseAllocType->isObjCLifetimeType())
2687 return Diag(Loc, DiagID: diag::err_arc_new_array_without_ownership)
2688 << BaseAllocType;
2689 }
2690 }
2691
2692 return false;
2693}
2694
2695enum class ResolveMode { Typed, Untyped };
/// Run one round of overload resolution for an allocation function
/// (operator new / operator new[]) over the lookup results in \p R with the
/// argument list \p Args.
///
/// \param Mode selects whether only type-aware or only ordinary candidates
///        are considered (mismatched candidates are skipped).
/// \param PassAlignment in/out: cleared to AlignedAllocationMode::No if the
///        alignment argument has to be dropped to find a viable candidate.
/// \param Operator out: the selected function on success.
/// \param AlignedCandidates on a recursive retry without the alignment
///        argument, the candidate set from the earlier aligned attempt, so
///        both sets can be reported in diagnostics; null on the first call.
/// \param AlignArg the alignment expression removed before a retry.
/// \returns true on error (diagnosed if \p Diagnose); false otherwise. Note
///          that in Typed mode finding no candidate is NOT an error.
static bool resolveAllocationOverloadInterior(
    Sema &S, LookupResult &R, SourceRange Range, ResolveMode Mode,
    SmallVectorImpl<Expr *> &Args, AlignedAllocationMode &PassAlignment,
    FunctionDecl *&Operator, OverloadCandidateSet *AlignedCandidates,
    Expr *AlignArg, bool Diagnose) {
  // In Typed mode the argument list is prefixed with the type-identity tag,
  // so the positions of the size/alignment arguments shift by one.
  unsigned NonTypeArgumentOffset = 0;
  if (Mode == ResolveMode::Typed) {
    ++NonTypeArgumentOffset;
  }

  OverloadCandidateSet Candidates(R.getNameLoc(),
                                  OverloadCandidateSet::CSK_Normal);
  for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
       Alloc != AllocEnd; ++Alloc) {
    // Even member operator new/delete are implicitly treated as
    // static, so don't use AddMemberCandidate.
    NamedDecl *D = (*Alloc)->getUnderlyingDecl();
    // Only consider candidates of the flavor requested by Mode: type-aware
    // operators in Typed mode, ordinary operators otherwise.
    bool IsTypeAware = D->getAsFunction()->isTypeAwareOperatorNewOrDelete();
    if (IsTypeAware == (Mode != ResolveMode::Typed))
      continue;

    if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(Val: D)) {
      S.AddTemplateOverloadCandidate(FunctionTemplate: FnTemplate, FoundDecl: Alloc.getPair(),
                                     /*ExplicitTemplateArgs=*/nullptr, Args,
                                     CandidateSet&: Candidates,
                                     /*SuppressUserConversions=*/false);
      continue;
    }

    FunctionDecl *Fn = cast<FunctionDecl>(Val: D);
    S.AddOverloadCandidate(Function: Fn, FoundDecl: Alloc.getPair(), Args, CandidateSet&: Candidates,
                           /*SuppressUserConversions=*/false);
  }

  // Do the resolution.
  OverloadCandidateSet::iterator Best;
  switch (Candidates.BestViableFunction(S, Loc: R.getNameLoc(), Best)) {
  case OR_Success: {
    // Got one!
    FunctionDecl *FnDecl = Best->Function;
    if (S.CheckAllocationAccess(OperatorLoc: R.getNameLoc(), PlacementRange: Range, NamingClass: R.getNamingClass(),
                                FoundDecl: Best->FoundDecl) == Sema::AR_inaccessible)
      return true;

    Operator = FnDecl;
    return false;
  }

  case OR_No_Viable_Function:
    // C++17 [expr.new]p13:
    //   If no matching function is found and the allocated object type has
    //   new-extended alignment, the alignment argument is removed from the
    //   argument list, and overload resolution is performed again.
    if (isAlignedAllocation(Mode: PassAlignment)) {
      // Remove the alignment argument (the one after the size) and retry,
      // remembering this candidate set for diagnostics.
      PassAlignment = AlignedAllocationMode::No;
      AlignArg = Args[NonTypeArgumentOffset + 1];
      Args.erase(CI: Args.begin() + NonTypeArgumentOffset + 1);
      return resolveAllocationOverloadInterior(S, R, Range, Mode, Args,
                                               PassAlignment, Operator,
                                               AlignedCandidates: &Candidates, AlignArg, Diagnose);
    }

    // MSVC will fall back on trying to find a matching global operator new
    // if operator new[] cannot be found. Also, MSVC will leak by not
    // generating a call to operator delete or operator delete[], but we
    // will not replicate that bug.
    // FIXME: Find out how this interacts with the std::align_val_t fallback
    // once MSVC implements it.
    if (R.getLookupName().getCXXOverloadedOperator() == OO_Array_New &&
        S.Context.getLangOpts().MSVCCompat && Mode != ResolveMode::Typed) {
      R.clear();
      R.setLookupName(S.Context.DeclarationNames.getCXXOperatorName(Op: OO_New));
      S.LookupQualifiedName(R, LookupCtx: S.Context.getTranslationUnitDecl());
      // FIXME: This will give bad diagnostics pointing at the wrong functions.
      return resolveAllocationOverloadInterior(S, R, Range, Mode, Args,
                                               PassAlignment, Operator,
                                               /*Candidates=*/AlignedCandidates: nullptr,
                                               /*AlignArg=*/nullptr, Diagnose);
    }
    if (Mode == ResolveMode::Typed) {
      // If we can't find a matching type aware operator we don't consider this
      // a failure.
      Operator = nullptr;
      return false;
    }
    if (Diagnose) {
      // If this is an allocation of the form 'new (p) X' for some object
      // pointer p (or an expression that will decay to such a pointer),
      // diagnose the reason for the error.
      if (!R.isClassLookup() && Args.size() == 2 &&
          (Args[1]->getType()->isObjectPointerType() ||
           Args[1]->getType()->isArrayType())) {
        // Placement new into const storage gets its own diagnostic.
        const QualType Arg1Type = Args[1]->getType();
        QualType UnderlyingType = S.Context.getBaseElementType(QT: Arg1Type);
        if (UnderlyingType->isPointerType())
          UnderlyingType = UnderlyingType->getPointeeType();
        if (UnderlyingType.isConstQualified()) {
          S.Diag(Loc: Args[1]->getExprLoc(),
                 DiagID: diag::err_placement_new_into_const_qualified_storage)
              << Arg1Type << Args[1]->getSourceRange();
          return true;
        }
        // Otherwise, the user most likely forgot to include <new>.
        S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_need_header_before_placement_new)
            << R.getLookupName() << Range;
        // Listing the candidates is unlikely to be useful; skip it.
        return true;
      }

      // Finish checking all candidates before we note any. This checking can
      // produce additional diagnostics so can't be interleaved with our
      // emission of notes.
      //
      // For an aligned allocation, separately check the aligned and unaligned
      // candidates with their respective argument lists.
      SmallVector<OverloadCandidate*, 32> Cands;
      SmallVector<OverloadCandidate*, 32> AlignedCands;
      llvm::SmallVector<Expr*, 4> AlignedArgs;
      if (AlignedCandidates) {
        // A candidate is "aligned" if the parameter after the size has type
        // std::align_val_t.
        auto IsAligned = [NonTypeArgumentOffset](OverloadCandidate &C) {
          auto AlignArgOffset = NonTypeArgumentOffset + 1;
          return C.Function->getNumParams() > AlignArgOffset &&
                 C.Function->getParamDecl(i: AlignArgOffset)
                     ->getType()
                     ->isAlignValT();
        };
        auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };

        // Rebuild the argument list the aligned candidates were tried with:
        // the leading implicit arguments, the saved alignment argument, then
        // the remaining arguments.
        AlignedArgs.reserve(N: Args.size() + NonTypeArgumentOffset + 1);
        for (unsigned Idx = 0; Idx < NonTypeArgumentOffset + 1; ++Idx)
          AlignedArgs.push_back(Elt: Args[Idx]);
        AlignedArgs.push_back(Elt: AlignArg);
        AlignedArgs.append(in_start: Args.begin() + NonTypeArgumentOffset + 1,
                           in_end: Args.end());
        AlignedCands = AlignedCandidates->CompleteCandidates(
            S, OCD: OCD_AllCandidates, Args: AlignedArgs, OpLoc: R.getNameLoc(), Filter: IsAligned);

        Cands = Candidates.CompleteCandidates(S, OCD: OCD_AllCandidates, Args,
                                              OpLoc: R.getNameLoc(), Filter: IsUnaligned);
      } else {
        Cands = Candidates.CompleteCandidates(S, OCD: OCD_AllCandidates, Args,
                                              OpLoc: R.getNameLoc());
      }

      S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_ovl_no_viable_function_in_call)
          << R.getLookupName() << Range;
      if (AlignedCandidates)
        AlignedCandidates->NoteCandidates(S, Args: AlignedArgs, Cands: AlignedCands, Opc: "",
                                          OpLoc: R.getNameLoc());
      Candidates.NoteCandidates(S, Args, Cands, Opc: "", OpLoc: R.getNameLoc());
    }
    return true;

  case OR_Ambiguous:
    if (Diagnose) {
      Candidates.NoteCandidates(
          PA: PartialDiagnosticAt(R.getNameLoc(),
                               S.PDiag(DiagID: diag::err_ovl_ambiguous_call)
                                   << R.getLookupName() << Range),
          S, OCD: OCD_AmbiguousCandidates, Args);
    }
    return true;

  case OR_Deleted: {
    if (Diagnose)
      S.DiagnoseUseOfDeletedFunction(Loc: R.getNameLoc(), Range, Name: R.getLookupName(),
                                     CandidateSet&: Candidates, Fn: Best->Function, Args);
    return true;
  }
  }
  llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
2867
2868enum class DeallocLookupMode { Untyped, OptionallyTyped };
2869
2870static void LookupGlobalDeallocationFunctions(Sema &S, SourceLocation Loc,
2871 LookupResult &FoundDelete,
2872 DeallocLookupMode Mode,
2873 DeclarationName Name) {
2874 S.LookupQualifiedName(R&: FoundDelete, LookupCtx: S.Context.getTranslationUnitDecl());
2875 if (Mode != DeallocLookupMode::OptionallyTyped) {
2876 // We're going to remove either the typed or the non-typed
2877 bool RemoveTypedDecl = Mode == DeallocLookupMode::Untyped;
2878 LookupResult::Filter Filter = FoundDelete.makeFilter();
2879 while (Filter.hasNext()) {
2880 FunctionDecl *FD = Filter.next()->getUnderlyingDecl()->getAsFunction();
2881 if (FD->isTypeAwareOperatorNewOrDelete() == RemoveTypedDecl)
2882 Filter.erase();
2883 }
2884 Filter.done();
2885 }
2886}
2887
/// Resolve the allocation function to call, first trying the type-aware
/// flavor (when IAP requests it) and then falling back to the ordinary one.
///
/// In the type-aware attempt, Args is laid out as
/// [type-identity, size, (alignment), placement-args...]; the fallback uses
/// the same list with the type-identity tag removed. On fallback, IAP's
/// PassTypeIdentity/PassAlignment flags are restored accordingly.
///
/// \returns true on (diagnosed) error; false otherwise, with Operator set to
///          the selected function or null.
static bool resolveAllocationOverload(
    Sema &S, LookupResult &R, SourceRange Range, SmallVectorImpl<Expr *> &Args,
    ImplicitAllocationParameters &IAP, FunctionDecl *&Operator,
    OverloadCandidateSet *AlignedCandidates, Expr *AlignArg, bool Diagnose) {
  Operator = nullptr;
  if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
    assert(S.isStdTypeIdentity(Args[0]->getType(), nullptr));
    // The internal overload resolution work mutates the argument list
    // in accordance with the spec. We may want to change that in future,
    // but for now we deal with this by making a copy of the non-type-identity
    // arguments.
    SmallVector<Expr *> UntypedParameters;
    UntypedParameters.reserve(N: Args.size() - 1);
    // Args[1] is the size argument; it is always part of the untyped list.
    UntypedParameters.push_back(Elt: Args[1]);
    // Type aware allocation implicitly includes the alignment parameter so
    // only include it in the untyped parameter list if alignment was explicitly
    // requested
    if (isAlignedAllocation(Mode: IAP.PassAlignment))
      UntypedParameters.push_back(Elt: Args[2]);
    // The remaining arguments (after type-identity, size, alignment) are the
    // placement arguments.
    UntypedParameters.append(in_start: Args.begin() + 3, in_end: Args.end());

    // Type-aware resolution always passes the alignment argument; remember
    // the caller's mode so it can be restored if we fall back.
    AlignedAllocationMode InitialAlignmentMode = IAP.PassAlignment;
    IAP.PassAlignment = AlignedAllocationMode::Yes;
    if (resolveAllocationOverloadInterior(
            S, R, Range, Mode: ResolveMode::Typed, Args, PassAlignment&: IAP.PassAlignment, Operator,
            AlignedCandidates, AlignArg, Diagnose))
      return true;
    if (Operator)
      return false;

    // If we got to this point we could not find a matching typed operator
    // so we update the IAP flags, and revert to our stored copy of the
    // type-identity-less argument list.
    IAP.PassTypeIdentity = TypeAwareAllocationMode::No;
    IAP.PassAlignment = InitialAlignmentMode;
    Args = std::move(UntypedParameters);
  }
  // By now Args must not start with a type-identity tag.
  assert(!S.isStdTypeIdentity(Args[0]->getType(), nullptr));
  return resolveAllocationOverloadInterior(
      S, R, Range, Mode: ResolveMode::Untyped, Args, PassAlignment&: IAP.PassAlignment, Operator,
      AlignedCandidates, AlignArg, Diagnose);
}
2930
2931bool Sema::FindAllocationFunctions(
2932 SourceLocation StartLoc, SourceRange Range,
2933 AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope,
2934 QualType AllocType, bool IsArray, ImplicitAllocationParameters &IAP,
2935 MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew,
2936 FunctionDecl *&OperatorDelete, bool Diagnose) {
2937 // --- Choosing an allocation function ---
2938 // C++ 5.3.4p8 - 14 & 18
2939 // 1) If looking in AllocationFunctionScope::Global scope for allocation
2940 // functions, only look in
2941 // the global scope. Else, if AllocationFunctionScope::Class, only look in
2942 // the scope of the allocated class. If AllocationFunctionScope::Both, look
2943 // in both.
2944 // 2) If an array size is given, look for operator new[], else look for
2945 // operator new.
2946 // 3) The first argument is always size_t. Append the arguments from the
2947 // placement form.
2948
2949 SmallVector<Expr*, 8> AllocArgs;
2950 AllocArgs.reserve(N: IAP.getNumImplicitArgs() + PlaceArgs.size());
2951
2952 // C++ [expr.new]p8:
2953 // If the allocated type is a non-array type, the allocation
2954 // function's name is operator new and the deallocation function's
2955 // name is operator delete. If the allocated type is an array
2956 // type, the allocation function's name is operator new[] and the
2957 // deallocation function's name is operator delete[].
2958 DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
2959 Op: IsArray ? OO_Array_New : OO_New);
2960
2961 QualType AllocElemType = Context.getBaseElementType(QT: AllocType);
2962
2963 // We don't care about the actual value of these arguments.
2964 // FIXME: Should the Sema create the expression and embed it in the syntax
2965 // tree? Or should the consumer just recalculate the value?
2966 // FIXME: Using a dummy value will interact poorly with attribute enable_if.
2967
2968 // We use size_t as a stand in so that we can construct the init
2969 // expr on the stack
2970 QualType TypeIdentity = Context.getSizeType();
2971 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
2972 QualType SpecializedTypeIdentity =
2973 tryBuildStdTypeIdentity(Type: IAP.Type, Loc: StartLoc);
2974 if (!SpecializedTypeIdentity.isNull()) {
2975 TypeIdentity = SpecializedTypeIdentity;
2976 if (RequireCompleteType(Loc: StartLoc, T: TypeIdentity,
2977 DiagID: diag::err_incomplete_type))
2978 return true;
2979 } else
2980 IAP.PassTypeIdentity = TypeAwareAllocationMode::No;
2981 }
2982 TypeAwareAllocationMode OriginalTypeAwareState = IAP.PassTypeIdentity;
2983
2984 CXXScalarValueInitExpr TypeIdentityParam(TypeIdentity, nullptr, StartLoc);
2985 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity))
2986 AllocArgs.push_back(Elt: &TypeIdentityParam);
2987
2988 QualType SizeTy = Context.getSizeType();
2989 unsigned SizeTyWidth = Context.getTypeSize(T: SizeTy);
2990 IntegerLiteral Size(Context, llvm::APInt::getZero(numBits: SizeTyWidth), SizeTy,
2991 SourceLocation());
2992 AllocArgs.push_back(Elt: &Size);
2993
2994 QualType AlignValT = Context.VoidTy;
2995 bool IncludeAlignParam = isAlignedAllocation(Mode: IAP.PassAlignment) ||
2996 isTypeAwareAllocation(Mode: IAP.PassTypeIdentity);
2997 if (IncludeAlignParam) {
2998 DeclareGlobalNewDelete();
2999 AlignValT = Context.getCanonicalTagType(TD: getStdAlignValT());
3000 }
3001 CXXScalarValueInitExpr Align(AlignValT, nullptr, SourceLocation());
3002 if (IncludeAlignParam)
3003 AllocArgs.push_back(Elt: &Align);
3004
3005 llvm::append_range(C&: AllocArgs, R&: PlaceArgs);
3006
3007 // Find the allocation function.
3008 {
3009 LookupResult R(*this, NewName, StartLoc, LookupOrdinaryName);
3010
3011 // C++1z [expr.new]p9:
3012 // If the new-expression begins with a unary :: operator, the allocation
3013 // function's name is looked up in the global scope. Otherwise, if the
3014 // allocated type is a class type T or array thereof, the allocation
3015 // function's name is looked up in the scope of T.
3016 if (AllocElemType->isRecordType() &&
3017 NewScope != AllocationFunctionScope::Global)
3018 LookupQualifiedName(R, LookupCtx: AllocElemType->getAsCXXRecordDecl());
3019
3020 // We can see ambiguity here if the allocation function is found in
3021 // multiple base classes.
3022 if (R.isAmbiguous())
3023 return true;
3024
3025 // If this lookup fails to find the name, or if the allocated type is not
3026 // a class type, the allocation function's name is looked up in the
3027 // global scope.
3028 if (R.empty()) {
3029 if (NewScope == AllocationFunctionScope::Class)
3030 return true;
3031
3032 LookupQualifiedName(R, LookupCtx: Context.getTranslationUnitDecl());
3033 }
3034
3035 if (getLangOpts().OpenCLCPlusPlus && R.empty()) {
3036 if (PlaceArgs.empty()) {
3037 Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_not_supported) << "default new";
3038 } else {
3039 Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_placement_new);
3040 }
3041 return true;
3042 }
3043
3044 assert(!R.empty() && "implicitly declared allocation functions not found");
3045 assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
3046
3047 // We do our own custom access checks below.
3048 R.suppressDiagnostics();
3049
3050 if (resolveAllocationOverload(S&: *this, R, Range, Args&: AllocArgs, IAP, Operator&: OperatorNew,
3051 /*Candidates=*/AlignedCandidates: nullptr,
3052 /*AlignArg=*/nullptr, Diagnose))
3053 return true;
3054 }
3055
3056 // We don't need an operator delete if we're running under -fno-exceptions.
3057 if (!getLangOpts().Exceptions) {
3058 OperatorDelete = nullptr;
3059 return false;
3060 }
3061
3062 // Note, the name of OperatorNew might have been changed from array to
3063 // non-array by resolveAllocationOverload.
3064 DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
3065 Op: OperatorNew->getDeclName().getCXXOverloadedOperator() == OO_Array_New
3066 ? OO_Array_Delete
3067 : OO_Delete);
3068
3069 // C++ [expr.new]p19:
3070 //
3071 // If the new-expression begins with a unary :: operator, the
3072 // deallocation function's name is looked up in the global
3073 // scope. Otherwise, if the allocated type is a class type T or an
3074 // array thereof, the deallocation function's name is looked up in
3075 // the scope of T. If this lookup fails to find the name, or if
3076 // the allocated type is not a class type or array thereof, the
3077 // deallocation function's name is looked up in the global scope.
3078 LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
3079 if (AllocElemType->isRecordType() &&
3080 DeleteScope != AllocationFunctionScope::Global) {
3081 auto *RD = AllocElemType->castAsCXXRecordDecl();
3082 LookupQualifiedName(R&: FoundDelete, LookupCtx: RD);
3083 }
3084 if (FoundDelete.isAmbiguous())
3085 return true; // FIXME: clean up expressions?
3086
3087 // Filter out any destroying operator deletes. We can't possibly call such a
3088 // function in this context, because we're handling the case where the object
3089 // was not successfully constructed.
3090 // FIXME: This is not covered by the language rules yet.
3091 {
3092 LookupResult::Filter Filter = FoundDelete.makeFilter();
3093 while (Filter.hasNext()) {
3094 auto *FD = dyn_cast<FunctionDecl>(Val: Filter.next()->getUnderlyingDecl());
3095 if (FD && FD->isDestroyingOperatorDelete())
3096 Filter.erase();
3097 }
3098 Filter.done();
3099 }
3100
3101 auto GetRedeclContext = [](Decl *D) {
3102 return D->getDeclContext()->getRedeclContext();
3103 };
3104
3105 DeclContext *OperatorNewContext = GetRedeclContext(OperatorNew);
3106
3107 bool FoundGlobalDelete = FoundDelete.empty();
3108 bool IsClassScopedTypeAwareNew =
3109 isTypeAwareAllocation(Mode: IAP.PassTypeIdentity) &&
3110 OperatorNewContext->isRecord();
3111 auto DiagnoseMissingTypeAwareCleanupOperator = [&](bool IsPlacementOperator) {
3112 assert(isTypeAwareAllocation(IAP.PassTypeIdentity));
3113 if (Diagnose) {
3114 Diag(Loc: StartLoc, DiagID: diag::err_mismatching_type_aware_cleanup_deallocator)
3115 << OperatorNew->getDeclName() << IsPlacementOperator << DeleteName;
3116 Diag(Loc: OperatorNew->getLocation(), DiagID: diag::note_type_aware_operator_declared)
3117 << OperatorNew->isTypeAwareOperatorNewOrDelete()
3118 << OperatorNew->getDeclName() << OperatorNewContext;
3119 }
3120 };
3121 if (IsClassScopedTypeAwareNew && FoundDelete.empty()) {
3122 DiagnoseMissingTypeAwareCleanupOperator(/*isPlacementNew=*/false);
3123 return true;
3124 }
3125 if (FoundDelete.empty()) {
3126 FoundDelete.clear(Kind: LookupOrdinaryName);
3127
3128 if (DeleteScope == AllocationFunctionScope::Class)
3129 return true;
3130
3131 DeclareGlobalNewDelete();
3132 DeallocLookupMode LookupMode = isTypeAwareAllocation(Mode: OriginalTypeAwareState)
3133 ? DeallocLookupMode::OptionallyTyped
3134 : DeallocLookupMode::Untyped;
3135 LookupGlobalDeallocationFunctions(S&: *this, Loc: StartLoc, FoundDelete, Mode: LookupMode,
3136 Name: DeleteName);
3137 }
3138
3139 FoundDelete.suppressDiagnostics();
3140
3141 SmallVector<std::pair<DeclAccessPair,FunctionDecl*>, 2> Matches;
3142
3143 // Whether we're looking for a placement operator delete is dictated
3144 // by whether we selected a placement operator new, not by whether
3145 // we had explicit placement arguments. This matters for things like
3146 // struct A { void *operator new(size_t, int = 0); ... };
3147 // A *a = new A()
3148 //
3149 // We don't have any definition for what a "placement allocation function"
3150 // is, but we assume it's any allocation function whose
3151 // parameter-declaration-clause is anything other than (size_t).
3152 //
3153 // FIXME: Should (size_t, std::align_val_t) also be considered non-placement?
3154 // This affects whether an exception from the constructor of an overaligned
3155 // type uses the sized or non-sized form of aligned operator delete.
3156
3157 unsigned NonPlacementNewArgCount = 1; // size parameter
3158 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity))
3159 NonPlacementNewArgCount =
3160 /* type-identity */ 1 + /* size */ 1 + /* alignment */ 1;
3161 bool isPlacementNew = !PlaceArgs.empty() ||
3162 OperatorNew->param_size() != NonPlacementNewArgCount ||
3163 OperatorNew->isVariadic();
3164
3165 if (isPlacementNew) {
3166 // C++ [expr.new]p20:
3167 // A declaration of a placement deallocation function matches the
3168 // declaration of a placement allocation function if it has the
3169 // same number of parameters and, after parameter transformations
3170 // (8.3.5), all parameter types except the first are
3171 // identical. [...]
3172 //
3173 // To perform this comparison, we compute the function type that
3174 // the deallocation function should have, and use that type both
3175 // for template argument deduction and for comparison purposes.
3176 QualType ExpectedFunctionType;
3177 {
3178 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
3179
3180 SmallVector<QualType, 6> ArgTypes;
3181 int InitialParamOffset = 0;
3182 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3183 ArgTypes.push_back(Elt: TypeIdentity);
3184 InitialParamOffset = 1;
3185 }
3186 ArgTypes.push_back(Elt: Context.VoidPtrTy);
3187 for (unsigned I = ArgTypes.size() - InitialParamOffset,
3188 N = Proto->getNumParams();
3189 I < N; ++I)
3190 ArgTypes.push_back(Elt: Proto->getParamType(i: I));
3191
3192 FunctionProtoType::ExtProtoInfo EPI;
3193 // FIXME: This is not part of the standard's rule.
3194 EPI.Variadic = Proto->isVariadic();
3195
3196 ExpectedFunctionType
3197 = Context.getFunctionType(ResultTy: Context.VoidTy, Args: ArgTypes, EPI);
3198 }
3199
3200 for (LookupResult::iterator D = FoundDelete.begin(),
3201 DEnd = FoundDelete.end();
3202 D != DEnd; ++D) {
3203 FunctionDecl *Fn = nullptr;
3204 if (FunctionTemplateDecl *FnTmpl =
3205 dyn_cast<FunctionTemplateDecl>(Val: (*D)->getUnderlyingDecl())) {
3206 // Perform template argument deduction to try to match the
3207 // expected function type.
3208 TemplateDeductionInfo Info(StartLoc);
3209 if (DeduceTemplateArguments(FunctionTemplate: FnTmpl, ExplicitTemplateArgs: nullptr, ArgFunctionType: ExpectedFunctionType, Specialization&: Fn,
3210 Info) != TemplateDeductionResult::Success)
3211 continue;
3212 } else
3213 Fn = cast<FunctionDecl>(Val: (*D)->getUnderlyingDecl());
3214
3215 if (Context.hasSameType(T1: adjustCCAndNoReturn(ArgFunctionType: Fn->getType(),
3216 FunctionType: ExpectedFunctionType,
3217 /*AdjustExcpetionSpec*/AdjustExceptionSpec: true),
3218 T2: ExpectedFunctionType))
3219 Matches.push_back(Elt: std::make_pair(x: D.getPair(), y&: Fn));
3220 }
3221
3222 if (getLangOpts().CUDA)
3223 CUDA().EraseUnwantedMatches(Caller: getCurFunctionDecl(/*AllowLambda=*/true),
3224 Matches);
3225 if (Matches.empty() && isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3226 DiagnoseMissingTypeAwareCleanupOperator(isPlacementNew);
3227 return true;
3228 }
3229 } else {
3230 // C++1y [expr.new]p22:
3231 // For a non-placement allocation function, the normal deallocation
3232 // function lookup is used
3233 //
3234 // Per [expr.delete]p10, this lookup prefers a member operator delete
3235 // without a size_t argument, but prefers a non-member operator delete
3236 // with a size_t where possible (which it always is in this case).
3237 llvm::SmallVector<UsualDeallocFnInfo, 4> BestDeallocFns;
3238 ImplicitDeallocationParameters IDP = {
3239 AllocElemType, OriginalTypeAwareState,
3240 alignedAllocationModeFromBool(
3241 IsAligned: hasNewExtendedAlignment(S&: *this, AllocType: AllocElemType)),
3242 sizedDeallocationModeFromBool(IsSized: FoundGlobalDelete)};
3243 UsualDeallocFnInfo Selected = resolveDeallocationOverload(
3244 S&: *this, R&: FoundDelete, IDP, Loc: StartLoc, BestFns: &BestDeallocFns);
3245 if (Selected && BestDeallocFns.empty())
3246 Matches.push_back(Elt: std::make_pair(x&: Selected.Found, y&: Selected.FD));
3247 else {
3248 // If we failed to select an operator, all remaining functions are viable
3249 // but ambiguous.
3250 for (auto Fn : BestDeallocFns)
3251 Matches.push_back(Elt: std::make_pair(x&: Fn.Found, y&: Fn.FD));
3252 }
3253 }
3254
3255 // C++ [expr.new]p20:
3256 // [...] If the lookup finds a single matching deallocation
3257 // function, that function will be called; otherwise, no
3258 // deallocation function will be called.
3259 if (Matches.size() == 1) {
3260 OperatorDelete = Matches[0].second;
3261 DeclContext *OperatorDeleteContext = GetRedeclContext(OperatorDelete);
3262 bool FoundTypeAwareOperator =
3263 OperatorDelete->isTypeAwareOperatorNewOrDelete() ||
3264 OperatorNew->isTypeAwareOperatorNewOrDelete();
3265 if (Diagnose && FoundTypeAwareOperator) {
3266 bool MismatchedTypeAwareness =
3267 OperatorDelete->isTypeAwareOperatorNewOrDelete() !=
3268 OperatorNew->isTypeAwareOperatorNewOrDelete();
3269 bool MismatchedContext = OperatorDeleteContext != OperatorNewContext;
3270 if (MismatchedTypeAwareness || MismatchedContext) {
3271 FunctionDecl *Operators[] = {OperatorDelete, OperatorNew};
3272 bool TypeAwareOperatorIndex =
3273 OperatorNew->isTypeAwareOperatorNewOrDelete();
3274 Diag(Loc: StartLoc, DiagID: diag::err_mismatching_type_aware_cleanup_deallocator)
3275 << Operators[TypeAwareOperatorIndex]->getDeclName()
3276 << isPlacementNew
3277 << Operators[!TypeAwareOperatorIndex]->getDeclName()
3278 << GetRedeclContext(Operators[TypeAwareOperatorIndex]);
3279 Diag(Loc: OperatorNew->getLocation(),
3280 DiagID: diag::note_type_aware_operator_declared)
3281 << OperatorNew->isTypeAwareOperatorNewOrDelete()
3282 << OperatorNew->getDeclName() << OperatorNewContext;
3283 Diag(Loc: OperatorDelete->getLocation(),
3284 DiagID: diag::note_type_aware_operator_declared)
3285 << OperatorDelete->isTypeAwareOperatorNewOrDelete()
3286 << OperatorDelete->getDeclName() << OperatorDeleteContext;
3287 }
3288 }
3289
3290 // C++1z [expr.new]p23:
3291 // If the lookup finds a usual deallocation function (3.7.4.2)
3292 // with a parameter of type std::size_t and that function, considered
3293 // as a placement deallocation function, would have been
3294 // selected as a match for the allocation function, the program
3295 // is ill-formed.
3296 if (getLangOpts().CPlusPlus11 && isPlacementNew &&
3297 isNonPlacementDeallocationFunction(S&: *this, FD: OperatorDelete)) {
3298 UsualDeallocFnInfo Info(*this,
3299 DeclAccessPair::make(D: OperatorDelete, AS: AS_public),
3300 AllocElemType, StartLoc);
3301 // Core issue, per mail to core reflector, 2016-10-09:
3302 // If this is a member operator delete, and there is a corresponding
3303 // non-sized member operator delete, this isn't /really/ a sized
3304 // deallocation function, it just happens to have a size_t parameter.
3305 bool IsSizedDelete = isSizedDeallocation(Mode: Info.IDP.PassSize);
3306 if (IsSizedDelete && !FoundGlobalDelete) {
3307 ImplicitDeallocationParameters SizeTestingIDP = {
3308 AllocElemType, Info.IDP.PassTypeIdentity, Info.IDP.PassAlignment,
3309 SizedDeallocationMode::No};
3310 auto NonSizedDelete = resolveDeallocationOverload(
3311 S&: *this, R&: FoundDelete, IDP: SizeTestingIDP, Loc: StartLoc);
3312 if (NonSizedDelete &&
3313 !isSizedDeallocation(Mode: NonSizedDelete.IDP.PassSize) &&
3314 NonSizedDelete.IDP.PassAlignment == Info.IDP.PassAlignment)
3315 IsSizedDelete = false;
3316 }
3317
3318 if (IsSizedDelete && !isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3319 SourceRange R = PlaceArgs.empty()
3320 ? SourceRange()
3321 : SourceRange(PlaceArgs.front()->getBeginLoc(),
3322 PlaceArgs.back()->getEndLoc());
3323 Diag(Loc: StartLoc, DiagID: diag::err_placement_new_non_placement_delete) << R;
3324 if (!OperatorDelete->isImplicit())
3325 Diag(Loc: OperatorDelete->getLocation(), DiagID: diag::note_previous_decl)
3326 << DeleteName;
3327 }
3328 }
3329 if (CheckDeleteOperator(S&: *this, StartLoc, Range, Diagnose,
3330 NamingClass: FoundDelete.getNamingClass(), Decl: Matches[0].first,
3331 Operator: Matches[0].second))
3332 return true;
3333
3334 } else if (!Matches.empty()) {
3335 // We found multiple suitable operators. Per [expr.new]p20, that means we
3336 // call no 'operator delete' function, but we should at least warn the user.
3337 // FIXME: Suppress this warning if the construction cannot throw.
3338 Diag(Loc: StartLoc, DiagID: diag::warn_ambiguous_suitable_delete_function_found)
3339 << DeleteName << AllocElemType;
3340
3341 for (auto &Match : Matches)
3342 Diag(Loc: Match.second->getLocation(),
3343 DiagID: diag::note_member_declared_here) << DeleteName;
3344 }
3345
3346 return false;
3347}
3348
void Sema::DeclareGlobalNewDelete() {
  // The implicit declarations are created at most once per translation unit.
  if (GlobalNewDeleteDeclared)
    return;

  // The implicitly declared new and delete operators
  // are not supported in OpenCL.
  if (getLangOpts().OpenCLCPlusPlus)
    return;

  // C++ [basic.stc.dynamic.general]p2:
  //   The library provides default definitions for the global allocation
  //   and deallocation functions. Some global allocation and deallocation
  //   functions are replaceable ([new.delete]); these are attached to the
  //   global module ([module.unit]).
  //
  // Enter the global module fragment so the declarations made below are
  // attached to it; popped again at the end of this function.
  if (getLangOpts().CPlusPlusModules && getCurrentModule())
    PushGlobalModuleFragment(BeginLoc: SourceLocation());

  // C++ [basic.std.dynamic]p2:
  //   [...] The following allocation and deallocation functions (18.4) are
  //   implicitly declared in global scope in each translation unit of a
  //   program
  //
  //     C++03:
  //     void* operator new(std::size_t) throw(std::bad_alloc);
  //     void* operator new[](std::size_t) throw(std::bad_alloc);
  //     void operator delete(void*) throw();
  //     void operator delete[](void*) throw();
  //     C++11:
  //     void* operator new(std::size_t);
  //     void* operator new[](std::size_t);
  //     void operator delete(void*) noexcept;
  //     void operator delete[](void*) noexcept;
  //     C++1y:
  //     void* operator new(std::size_t);
  //     void* operator new[](std::size_t);
  //     void operator delete(void*) noexcept;
  //     void operator delete[](void*) noexcept;
  //     void operator delete(void*, std::size_t) noexcept;
  //     void operator delete[](void*, std::size_t) noexcept;
  //
  //   These implicit declarations introduce only the function names operator
  //   new, operator new[], operator delete, operator delete[].
  //
  // Here, we need to refer to std::bad_alloc, so we will implicitly declare
  // "std" or "bad_alloc" as necessary to form the exception specification.
  // However, we do not make these implicit declarations visible to name
  // lookup.
  // (Only C++03 operator new carries a 'throw(std::bad_alloc)' spec, so in
  // C++11 and later std::bad_alloc is not needed here.)
  if (!StdBadAlloc && !getLangOpts().CPlusPlus11) {
    // The "std::bad_alloc" class has not yet been declared, so build it
    // implicitly.
    StdBadAlloc = CXXRecordDecl::Create(
        C: Context, TK: TagTypeKind::Class, DC: getOrCreateStdNamespace(),
        StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &PP.getIdentifierTable().get(Name: "bad_alloc"), PrevDecl: nullptr);
    getStdBadAlloc()->setImplicit(true);

    // The implicitly declared "std::bad_alloc" should live in global module
    // fragment.
    if (TheGlobalModuleFragment) {
      getStdBadAlloc()->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      getStdBadAlloc()->setLocalOwningModule(TheGlobalModuleFragment);
    }
  }
  if (!StdAlignValT && getLangOpts().AlignedAllocation) {
    // The "std::align_val_t" enum class has not yet been declared, so build it
    // implicitly.
    auto *AlignValT = EnumDecl::Create(
        C&: Context, DC: getOrCreateStdNamespace(), StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &PP.getIdentifierTable().get(Name: "align_val_t"), PrevDecl: nullptr, IsScoped: true, IsScopedUsingClassTag: true, IsFixed: true);

    // The implicitly declared "std::align_val_t" should live in global module
    // fragment.
    if (TheGlobalModuleFragment) {
      AlignValT->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      AlignValT->setLocalOwningModule(TheGlobalModuleFragment);
    }

    // std::align_val_t has underlying type std::size_t.
    AlignValT->setIntegerType(Context.getSizeType());
    AlignValT->setPromotionType(Context.getSizeType());
    AlignValT->setImplicit(true);

    StdAlignValT = AlignValT;
  }

  // Record that the implicit global allocation functions are declared; done
  // before creating them below.
  GlobalNewDeleteDeclared = true;

  QualType VoidPtr = Context.getPointerType(T: Context.VoidTy);
  QualType SizeT = Context.getSizeType();

  // Declares one operator in all of its implicit variants: plain, sized
  // (delete only, under -fsized-deallocation), aligned (under aligned
  // allocation), and sized+aligned when both apply.
  auto DeclareGlobalAllocationFunctions = [&](OverloadedOperatorKind Kind,
                                              QualType Return, QualType Param) {
    llvm::SmallVector<QualType, 3> Params;
    Params.push_back(Elt: Param);

    // Create up to four variants of the function (sized/aligned).
    bool HasSizedVariant = getLangOpts().SizedDeallocation &&
                           (Kind == OO_Delete || Kind == OO_Array_Delete);
    bool HasAlignedVariant = getLangOpts().AlignedAllocation;

    int NumSizeVariants = (HasSizedVariant ? 2 : 1);
    int NumAlignVariants = (HasAlignedVariant ? 2 : 1);
    for (int Sized = 0; Sized < NumSizeVariants; ++Sized) {
      // The size parameter, once added, stays for both align iterations.
      if (Sized)
        Params.push_back(Elt: SizeT);

      for (int Aligned = 0; Aligned < NumAlignVariants; ++Aligned) {
        // The alignment parameter is always the last one; pushed and popped
        // around the declaration.
        if (Aligned)
          Params.push_back(Elt: Context.getCanonicalTagType(TD: getStdAlignValT()));

        DeclareGlobalAllocationFunction(
            Name: Context.DeclarationNames.getCXXOperatorName(Op: Kind), Return, Params);

        if (Aligned)
          Params.pop_back();
      }
    }
  };

  DeclareGlobalAllocationFunctions(OO_New, VoidPtr, SizeT);
  DeclareGlobalAllocationFunctions(OO_Array_New, VoidPtr, SizeT);
  DeclareGlobalAllocationFunctions(OO_Delete, Context.VoidTy, VoidPtr);
  DeclareGlobalAllocationFunctions(OO_Array_Delete, Context.VoidTy, VoidPtr);

  // Leave the global module fragment entered above.
  if (getLangOpts().CPlusPlusModules && getCurrentModule())
    PopGlobalModuleFragment();
}
3477
/// DeclareGlobalAllocationFunction - Declares a single implicit global
/// allocation function if it doesn't already exist.
///
/// \param Name the operator new/new[]/delete/delete[] name to declare.
/// \param Return the return type (void* for new, void for delete).
/// \param Params the parameter types of the function being declared.
void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
                                           QualType Return,
                                           ArrayRef<QualType> Params) {
  DeclContext *GlobalCtx = Context.getTranslationUnitDecl();

  // Check if this function is already declared.
  DeclContext::lookup_result R = GlobalCtx->lookup(Name);
  for (DeclContext::lookup_iterator Alloc = R.begin(), AllocEnd = R.end();
       Alloc != AllocEnd; ++Alloc) {
    // Only look at non-template functions, as it is the predefined,
    // non-templated allocation function we are trying to declare here.
    if (FunctionDecl *Func = dyn_cast<FunctionDecl>(Val: *Alloc)) {
      if (Func->getNumParams() == Params.size()) {
        // Compare parameter types pairwise, ignoring qualifiers.
        if (std::equal(first1: Func->param_begin(), last1: Func->param_end(), first2: Params.begin(),
                       last2: Params.end(), binary_pred: [&](ParmVarDecl *D, QualType RT) {
                         return Context.hasSameUnqualifiedType(T1: D->getType(),
                                                               T2: RT);
                       })) {
          // Make the function visible to name lookup, even if we found it in
          // an unimported module. It either is an implicitly-declared global
          // allocation function, or is suppressing that function.
          Func->setVisibleDespiteOwningModule();
          return;
        }
      }
    }
  }

  FunctionProtoType::ExtProtoInfo EPI(
      Context.getTargetInfo().getDefaultCallingConv());

  // Build the exception specification: C++03 operator new gets
  // 'throw(std::bad_alloc)'; C++11 operator new gets no spec (it may throw);
  // operator delete gets 'noexcept' (C++11) or 'throw()' (C++03).
  QualType BadAllocType;
  bool HasBadAllocExceptionSpec = Name.isAnyOperatorNew();
  if (HasBadAllocExceptionSpec) {
    if (!getLangOpts().CPlusPlus11) {
      BadAllocType = Context.getCanonicalTagType(TD: getStdBadAlloc());
      assert(StdBadAlloc && "Must have std::bad_alloc declared");
      EPI.ExceptionSpec.Type = EST_Dynamic;
      EPI.ExceptionSpec.Exceptions = llvm::ArrayRef(BadAllocType);
    }
    if (getLangOpts().NewInfallible) {
      // -fnew-infallible: operator new is declared non-throwing.
      EPI.ExceptionSpec.Type = EST_DynamicNone;
    }
  } else {
    EPI.ExceptionSpec =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
  }

  // Builds and registers one declaration of the allocation function,
  // optionally tagged with ExtraAttr (used below for CUDA host/device).
  auto CreateAllocationFunctionDecl = [&](Attr *ExtraAttr) {
    // The MSVC STL has explicit cdecl on its (host-side) allocation function
    // specializations for the allocation, so in order to prevent a CC clash
    // we use the host's CC, if available, or CC_C as a fallback, for the
    // host-side implicit decls, knowing these do not get emitted when compiling
    // for device.
    if (getLangOpts().CUDAIsDevice && ExtraAttr &&
        isa<CUDAHostAttr>(Val: ExtraAttr) &&
        Context.getTargetInfo().getTriple().isSPIRV()) {
      if (auto *ATI = Context.getAuxTargetInfo())
        EPI.ExtInfo = EPI.ExtInfo.withCallingConv(cc: ATI->getDefaultCallingConv());
      else
        EPI.ExtInfo = EPI.ExtInfo.withCallingConv(cc: CallingConv::CC_C);
    }
    QualType FnType = Context.getFunctionType(ResultTy: Return, Args: Params, EPI);
    FunctionDecl *Alloc = FunctionDecl::Create(
        C&: Context, DC: GlobalCtx, StartLoc: SourceLocation(), NLoc: SourceLocation(), N: Name, T: FnType,
        /*TInfo=*/nullptr, SC: SC_None, UsesFPIntrin: getCurFPFeatures().isFPConstrained(), isInlineSpecified: false,
        hasWrittenPrototype: true);
    Alloc->setImplicit();
    // Global allocation functions should always be visible.
    Alloc->setVisibleDespiteOwningModule();

    if (HasBadAllocExceptionSpec && getLangOpts().NewInfallible &&
        !getLangOpts().CheckNew)
      Alloc->addAttr(
          A: ReturnsNonNullAttr::CreateImplicit(Ctx&: Context, Range: Alloc->getLocation()));

    // C++ [basic.stc.dynamic.general]p2:
    //   The library provides default definitions for the global allocation
    //   and deallocation functions. Some global allocation and deallocation
    //   functions are replaceable ([new.delete]); these are attached to the
    //   global module ([module.unit]).
    //
    // In the language wording, these functions are attched to the global
    // module all the time. But in the implementation, the global module
    // is only meaningful when we're in a module unit. So here we attach
    // these allocation functions to global module conditionally.
    if (TheGlobalModuleFragment) {
      Alloc->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      Alloc->setLocalOwningModule(TheGlobalModuleFragment);
    }

    // Honor -fvisibility-global-new-delete={default|hidden|protected}.
    if (LangOpts.hasGlobalAllocationFunctionVisibility())
      Alloc->addAttr(A: VisibilityAttr::CreateImplicit(
          Ctx&: Context, Visibility: LangOpts.hasHiddenGlobalAllocationFunctionVisibility()
                        ? VisibilityAttr::Hidden
                    : LangOpts.hasProtectedGlobalAllocationFunctionVisibility()
                        ? VisibilityAttr::Protected
                        : VisibilityAttr::Default));

    // Create implicit, unnamed parameter declarations matching Params.
    llvm::SmallVector<ParmVarDecl *, 3> ParamDecls;
    for (QualType T : Params) {
      ParamDecls.push_back(Elt: ParmVarDecl::Create(
          C&: Context, DC: Alloc, StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: nullptr, T,
          /*TInfo=*/nullptr, S: SC_None, DefArg: nullptr));
      ParamDecls.back()->setImplicit();
    }
    Alloc->setParams(ParamDecls);
    if (ExtraAttr)
      Alloc->addAttr(A: ExtraAttr);
    AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(FD: Alloc);
    Context.getTranslationUnitDecl()->addDecl(D: Alloc);
    IdResolver.tryAddTopLevelDecl(D: Alloc, Name);
  };

  if (!LangOpts.CUDA)
    CreateAllocationFunctionDecl(nullptr);
  else {
    // Host and device get their own declaration so each can be
    // defined or re-declared independently.
    CreateAllocationFunctionDecl(CUDAHostAttr::CreateImplicit(Ctx&: Context));
    CreateAllocationFunctionDecl(CUDADeviceAttr::CreateImplicit(Ctx&: Context));
  }
}
3604
3605FunctionDecl *
3606Sema::FindUsualDeallocationFunction(SourceLocation StartLoc,
3607 ImplicitDeallocationParameters IDP,
3608 DeclarationName Name, bool Diagnose) {
3609 DeclareGlobalNewDelete();
3610
3611 LookupResult FoundDelete(*this, Name, StartLoc, LookupOrdinaryName);
3612 LookupGlobalDeallocationFunctions(S&: *this, Loc: StartLoc, FoundDelete,
3613 Mode: DeallocLookupMode::OptionallyTyped, Name);
3614
3615 // FIXME: It's possible for this to result in ambiguity, through a
3616 // user-declared variadic operator delete or the enable_if attribute. We
3617 // should probably not consider those cases to be usual deallocation
3618 // functions. But for now we just make an arbitrary choice in that case.
3619 auto Result = resolveDeallocationOverload(S&: *this, R&: FoundDelete, IDP, Loc: StartLoc);
3620 if (!Result)
3621 return nullptr;
3622
3623 if (CheckDeleteOperator(S&: *this, StartLoc, Range: StartLoc, Diagnose,
3624 NamingClass: FoundDelete.getNamingClass(), Decl: Result.Found,
3625 Operator: Result.FD))
3626 return nullptr;
3627
3628 assert(Result.FD && "operator delete missing from global scope?");
3629 return Result.FD;
3630}
3631
3632FunctionDecl *Sema::FindDeallocationFunctionForDestructor(
3633 SourceLocation Loc, CXXRecordDecl *RD, bool Diagnose, bool LookForGlobal,
3634 DeclarationName Name) {
3635
3636 FunctionDecl *OperatorDelete = nullptr;
3637 CanQualType DeallocType = Context.getCanonicalTagType(TD: RD);
3638 ImplicitDeallocationParameters IDP = {
3639 DeallocType, ShouldUseTypeAwareOperatorNewOrDelete(),
3640 AlignedAllocationMode::No, SizedDeallocationMode::No};
3641
3642 if (!LookForGlobal) {
3643 if (FindDeallocationFunction(StartLoc: Loc, RD, Name, Operator&: OperatorDelete, IDP, Diagnose))
3644 return nullptr;
3645
3646 if (OperatorDelete)
3647 return OperatorDelete;
3648 }
3649
3650 // If there's no class-specific operator delete, look up the global
3651 // non-array delete.
3652 IDP.PassAlignment = alignedAllocationModeFromBool(
3653 IsAligned: hasNewExtendedAlignment(S&: *this, AllocType: DeallocType));
3654 IDP.PassSize = SizedDeallocationMode::Yes;
3655 return FindUsualDeallocationFunction(StartLoc: Loc, IDP, Name, Diagnose);
3656}
3657
3658bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
3659 DeclarationName Name,
3660 FunctionDecl *&Operator,
3661 ImplicitDeallocationParameters IDP,
3662 bool Diagnose) {
3663 LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
3664 // Try to find operator delete/operator delete[] in class scope.
3665 LookupQualifiedName(R&: Found, LookupCtx: RD);
3666
3667 if (Found.isAmbiguous()) {
3668 if (!Diagnose)
3669 Found.suppressDiagnostics();
3670 return true;
3671 }
3672
3673 Found.suppressDiagnostics();
3674
3675 if (!isAlignedAllocation(Mode: IDP.PassAlignment) &&
3676 hasNewExtendedAlignment(S&: *this, AllocType: Context.getCanonicalTagType(TD: RD)))
3677 IDP.PassAlignment = AlignedAllocationMode::Yes;
3678
3679 // C++17 [expr.delete]p10:
3680 // If the deallocation functions have class scope, the one without a
3681 // parameter of type std::size_t is selected.
3682 llvm::SmallVector<UsualDeallocFnInfo, 4> Matches;
3683 resolveDeallocationOverload(S&: *this, R&: Found, IDP, Loc: StartLoc, BestFns: &Matches);
3684
3685 // If we could find an overload, use it.
3686 if (Matches.size() == 1) {
3687 Operator = cast<CXXMethodDecl>(Val: Matches[0].FD);
3688 return CheckDeleteOperator(S&: *this, StartLoc, Range: StartLoc, Diagnose,
3689 NamingClass: Found.getNamingClass(), Decl: Matches[0].Found,
3690 Operator);
3691 }
3692
3693 // We found multiple suitable operators; complain about the ambiguity.
3694 // FIXME: The standard doesn't say to do this; it appears that the intent
3695 // is that this should never happen.
3696 if (!Matches.empty()) {
3697 if (Diagnose) {
3698 Diag(Loc: StartLoc, DiagID: diag::err_ambiguous_suitable_delete_member_function_found)
3699 << Name << RD;
3700 for (auto &Match : Matches)
3701 Diag(Loc: Match.FD->getLocation(), DiagID: diag::note_member_declared_here) << Name;
3702 }
3703 return true;
3704 }
3705
3706 // We did find operator delete/operator delete[] declarations, but
3707 // none of them were suitable.
3708 if (!Found.empty()) {
3709 if (Diagnose) {
3710 Diag(Loc: StartLoc, DiagID: diag::err_no_suitable_delete_member_function_found)
3711 << Name << RD;
3712
3713 for (NamedDecl *D : Found)
3714 Diag(Loc: D->getUnderlyingDecl()->getLocation(),
3715 DiagID: diag::note_member_declared_here) << Name;
3716 }
3717 return true;
3718 }
3719
3720 Operator = nullptr;
3721 return false;
3722}
3723
namespace {
/// Checks whether delete-expression, and new-expression used for
/// initializing deletee have the same array form.
class MismatchingNewDeleteDetector {
public:
  enum MismatchResult {
    /// Indicates that there is no mismatch or a mismatch cannot be proven.
    NoMismatch,
    /// Indicates that variable is initialized with mismatching form of \a new.
    VarInitMismatches,
    /// Indicates that member is initialized with mismatching form of \a new.
    MemberInitMismatches,
    /// Indicates that 1 or more constructors' definitions could not been
    /// analyzed, and they will be checked again at the end of translation unit.
    AnalyzeLater
  };

  /// \param EndOfTU True, if this is the final analysis at the end of
  /// translation unit. False, if this is the initial analysis at the point
  /// delete-expression was encountered.
  explicit MismatchingNewDeleteDetector(bool EndOfTU)
      : Field(nullptr), IsArrayForm(false), EndOfTU(EndOfTU),
        HasUndefinedConstructors(false) {}

  /// Checks whether pointee of a delete-expression is initialized with
  /// matching form of new-expression.
  ///
  /// If return value is \c VarInitMismatches or \c MemberInitMismatches at the
  /// point where delete-expression is encountered, then a warning will be
  /// issued immediately. If return value is \c AnalyzeLater at the point where
  /// delete-expression is seen, then member will be analyzed at the end of
  /// translation unit. \c AnalyzeLater is returned iff at least one constructor
  /// couldn't be analyzed. If at least one constructor initializes the member
  /// with matching type of new, the return value is \c NoMismatch.
  MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE);
  /// Analyzes a class member.
  /// \param Field Class member to analyze.
  /// \param DeleteWasArrayForm Array form-ness of the delete-expression used
  /// for deleting the \p Field.
  MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm);
  /// The class member currently under analysis; null when analyzing a
  /// variable rather than a member.
  FieldDecl *Field;
  /// List of mismatching new-expressions used for initialization of the pointee
  llvm::SmallVector<const CXXNewExpr *, 4> NewExprs;
  /// Indicates whether delete-expression was in array form.
  bool IsArrayForm;

private:
  const bool EndOfTU;
  /// Indicates that there is at least one constructor without body.
  bool HasUndefinedConstructors;
  /// Returns \c CXXNewExpr from given initialization expression.
  /// \param E Expression used for initializing pointee in delete-expression.
  /// E can be a single-element \c InitListExpr consisting of new-expression.
  const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
  /// Returns whether member is initialized with mismatching form of
  /// \c new either by the member initializer or in-class initialization.
  ///
  /// If bodies of all constructors are not visible at the end of translation
  /// unit or at least one constructor initializes member with the matching
  /// form of \c new, mismatch cannot be proven, and this function will return
  /// \c NoMismatch.
  MismatchResult analyzeMemberExpr(const MemberExpr *ME);
  /// Returns whether variable is initialized with mismatching form of
  /// \c new.
  ///
  /// If variable is initialized with matching form of \c new or variable is not
  /// initialized with a \c new expression, this function will return true.
  /// If variable is initialized with mismatching form of \c new, returns false.
  /// \param D Variable to analyze.
  bool hasMatchingVarInit(const DeclRefExpr *D);
  /// Checks whether the constructor initializes pointee with mismatching
  /// form of \c new.
  ///
  /// Returns true, if member is initialized with matching form of \c new in
  /// member initializer list. Returns false, if member is initialized with the
  /// matching form of \c new in this constructor's initializer or given
  /// constructor isn't defined at the point where delete-expression is seen, or
  /// member isn't initialized by the constructor.
  bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
  /// Checks whether member is initialized with matching form of
  /// \c new in member initializer list.
  bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
  /// Checks whether member is initialized with mismatching form of \c new by
  /// in-class initializer.
  MismatchResult analyzeInClassInitializer();
};
} // namespace
3811
3812MismatchingNewDeleteDetector::MismatchResult
3813MismatchingNewDeleteDetector::analyzeDeleteExpr(const CXXDeleteExpr *DE) {
3814 NewExprs.clear();
3815 assert(DE && "Expected delete-expression");
3816 IsArrayForm = DE->isArrayForm();
3817 const Expr *E = DE->getArgument()->IgnoreParenImpCasts();
3818 if (const MemberExpr *ME = dyn_cast<const MemberExpr>(Val: E)) {
3819 return analyzeMemberExpr(ME);
3820 } else if (const DeclRefExpr *D = dyn_cast<const DeclRefExpr>(Val: E)) {
3821 if (!hasMatchingVarInit(D))
3822 return VarInitMismatches;
3823 }
3824 return NoMismatch;
3825}
3826
3827const CXXNewExpr *
3828MismatchingNewDeleteDetector::getNewExprFromInitListOrExpr(const Expr *E) {
3829 assert(E != nullptr && "Expected a valid initializer expression");
3830 E = E->IgnoreParenImpCasts();
3831 if (const InitListExpr *ILE = dyn_cast<const InitListExpr>(Val: E)) {
3832 if (ILE->getNumInits() == 1)
3833 E = dyn_cast<const CXXNewExpr>(Val: ILE->getInit(Init: 0)->IgnoreParenImpCasts());
3834 }
3835
3836 return dyn_cast_or_null<const CXXNewExpr>(Val: E);
3837}
3838
3839bool MismatchingNewDeleteDetector::hasMatchingNewInCtorInit(
3840 const CXXCtorInitializer *CI) {
3841 const CXXNewExpr *NE = nullptr;
3842 if (Field == CI->getMember() &&
3843 (NE = getNewExprFromInitListOrExpr(E: CI->getInit()))) {
3844 if (NE->isArray() == IsArrayForm)
3845 return true;
3846 else
3847 NewExprs.push_back(Elt: NE);
3848 }
3849 return false;
3850}
3851
3852bool MismatchingNewDeleteDetector::hasMatchingNewInCtor(
3853 const CXXConstructorDecl *CD) {
3854 if (CD->isImplicit())
3855 return false;
3856 const FunctionDecl *Definition = CD;
3857 if (!CD->isThisDeclarationADefinition() && !CD->isDefined(Definition)) {
3858 HasUndefinedConstructors = true;
3859 return EndOfTU;
3860 }
3861 for (const auto *CI : cast<const CXXConstructorDecl>(Val: Definition)->inits()) {
3862 if (hasMatchingNewInCtorInit(CI))
3863 return true;
3864 }
3865 return false;
3866}
3867
3868MismatchingNewDeleteDetector::MismatchResult
3869MismatchingNewDeleteDetector::analyzeInClassInitializer() {
3870 assert(Field != nullptr && "This should be called only for members");
3871 const Expr *InitExpr = Field->getInClassInitializer();
3872 if (!InitExpr)
3873 return EndOfTU ? NoMismatch : AnalyzeLater;
3874 if (const CXXNewExpr *NE = getNewExprFromInitListOrExpr(E: InitExpr)) {
3875 if (NE->isArray() != IsArrayForm) {
3876 NewExprs.push_back(Elt: NE);
3877 return MemberInitMismatches;
3878 }
3879 }
3880 return NoMismatch;
3881}
3882
3883MismatchingNewDeleteDetector::MismatchResult
3884MismatchingNewDeleteDetector::analyzeField(FieldDecl *Field,
3885 bool DeleteWasArrayForm) {
3886 assert(Field != nullptr && "Analysis requires a valid class member.");
3887 this->Field = Field;
3888 IsArrayForm = DeleteWasArrayForm;
3889 const CXXRecordDecl *RD = cast<const CXXRecordDecl>(Val: Field->getParent());
3890 for (const auto *CD : RD->ctors()) {
3891 if (hasMatchingNewInCtor(CD))
3892 return NoMismatch;
3893 }
3894 if (HasUndefinedConstructors)
3895 return EndOfTU ? NoMismatch : AnalyzeLater;
3896 if (!NewExprs.empty())
3897 return MemberInitMismatches;
3898 return Field->hasInClassInitializer() ? analyzeInClassInitializer()
3899 : NoMismatch;
3900}
3901
3902MismatchingNewDeleteDetector::MismatchResult
3903MismatchingNewDeleteDetector::analyzeMemberExpr(const MemberExpr *ME) {
3904 assert(ME != nullptr && "Expected a member expression");
3905 if (FieldDecl *F = dyn_cast<FieldDecl>(Val: ME->getMemberDecl()))
3906 return analyzeField(Field: F, DeleteWasArrayForm: IsArrayForm);
3907 return NoMismatch;
3908}
3909
3910bool MismatchingNewDeleteDetector::hasMatchingVarInit(const DeclRefExpr *D) {
3911 const CXXNewExpr *NE = nullptr;
3912 if (const VarDecl *VD = dyn_cast<const VarDecl>(Val: D->getDecl())) {
3913 if (VD->hasInit() && (NE = getNewExprFromInitListOrExpr(E: VD->getInit())) &&
3914 NE->isArray() != IsArrayForm) {
3915 NewExprs.push_back(Elt: NE);
3916 }
3917 }
3918 return NewExprs.empty();
3919}
3920
3921static void
3922DiagnoseMismatchedNewDelete(Sema &SemaRef, SourceLocation DeleteLoc,
3923 const MismatchingNewDeleteDetector &Detector) {
3924 SourceLocation EndOfDelete = SemaRef.getLocForEndOfToken(Loc: DeleteLoc);
3925 FixItHint H;
3926 if (!Detector.IsArrayForm)
3927 H = FixItHint::CreateInsertion(InsertionLoc: EndOfDelete, Code: "[]");
3928 else {
3929 SourceLocation RSquare = Lexer::findLocationAfterToken(
3930 loc: DeleteLoc, TKind: tok::l_square, SM: SemaRef.getSourceManager(),
3931 LangOpts: SemaRef.getLangOpts(), SkipTrailingWhitespaceAndNewLine: true);
3932 if (RSquare.isValid())
3933 H = FixItHint::CreateRemoval(RemoveRange: SourceRange(EndOfDelete, RSquare));
3934 }
3935 SemaRef.Diag(Loc: DeleteLoc, DiagID: diag::warn_mismatched_delete_new)
3936 << Detector.IsArrayForm << H;
3937
3938 for (const auto *NE : Detector.NewExprs)
3939 SemaRef.Diag(Loc: NE->getExprLoc(), DiagID: diag::note_allocated_here)
3940 << Detector.IsArrayForm;
3941}
3942
3943void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) {
3944 if (Diags.isIgnored(DiagID: diag::warn_mismatched_delete_new, Loc: SourceLocation()))
3945 return;
3946 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/false);
3947 switch (Detector.analyzeDeleteExpr(DE)) {
3948 case MismatchingNewDeleteDetector::VarInitMismatches:
3949 case MismatchingNewDeleteDetector::MemberInitMismatches: {
3950 DiagnoseMismatchedNewDelete(SemaRef&: *this, DeleteLoc: DE->getBeginLoc(), Detector);
3951 break;
3952 }
3953 case MismatchingNewDeleteDetector::AnalyzeLater: {
3954 DeleteExprs[Detector.Field].push_back(
3955 Elt: std::make_pair(x: DE->getBeginLoc(), y: DE->isArrayForm()));
3956 break;
3957 }
3958 case MismatchingNewDeleteDetector::NoMismatch:
3959 break;
3960 }
3961}
3962
3963void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
3964 bool DeleteWasArrayForm) {
3965 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/true);
3966 switch (Detector.analyzeField(Field, DeleteWasArrayForm)) {
3967 case MismatchingNewDeleteDetector::VarInitMismatches:
3968 llvm_unreachable("This analysis should have been done for class members.");
3969 case MismatchingNewDeleteDetector::AnalyzeLater:
3970 llvm_unreachable("Analysis cannot be postponed any point beyond end of "
3971 "translation unit.");
3972 case MismatchingNewDeleteDetector::MemberInitMismatches:
3973 DiagnoseMismatchedNewDelete(SemaRef&: *this, DeleteLoc, Detector);
3974 break;
3975 case MismatchingNewDeleteDetector::NoMismatch:
3976 break;
3977 }
3978}
3979
// Semantic analysis for a delete-expression: contextually converts the
// operand to a pointer, diagnoses invalid pointee types, selects the
// operator delete to call, and references/access-checks the destructor.
ExprResult
Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                     bool ArrayForm, Expr *ExE) {
  // C++ [expr.delete]p1:
  //  The operand shall have a pointer type, or a class type having a single
  //  non-explicit conversion function to a pointer type. The result has type
  //  void.
  //
  // DR599 amends "pointer type" to "pointer to object type" in both cases.

  ExprResult Ex = ExE;
  FunctionDecl *OperatorDelete = nullptr;
  // Remember the form the user wrote; ArrayForm may be corrected below
  // when a scalar delete is applied to a pointer-to-array.
  bool ArrayFormAsWritten = ArrayForm;
  bool UsualArrayDeleteWantsSize = false;

  if (!Ex.get()->isTypeDependent()) {
    // Perform lvalue-to-rvalue cast, if needed.
    Ex = DefaultLvalueConversion(E: Ex.get());
    if (Ex.isInvalid())
      return ExprError();

    QualType Type = Ex.get()->getType();

    // Converter used to contextually convert a class-type operand to a
    // pointer via a single non-explicit conversion function.
    class DeleteConverter : public ContextualImplicitConverter {
    public:
      DeleteConverter() : ContextualImplicitConverter(false, true) {}

      bool match(QualType ConvType) override {
        // FIXME: If we have an operator T* and an operator void*, we must pick
        // the operator T*.
        if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
          if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType())
            return true;
        return false;
      }

      SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                            QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_delete_operand) << T;
      }

      SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
                                               QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_delete_incomplete_class_type) << T;
      }

      SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
                                                 QualType T,
                                                 QualType ConvTy) override {
        return S.Diag(Loc, DiagID: diag::err_delete_explicit_conversion) << T << ConvTy;
      }

      SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
                                             QualType ConvTy) override {
        return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_delete_conversion)
               << ConvTy;
      }

      SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                              QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_ambiguous_delete_operand) << T;
      }

      SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
                                          QualType ConvTy) override {
        return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_delete_conversion)
               << ConvTy;
      }

      SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
                                               QualType T,
                                               QualType ConvTy) override {
        llvm_unreachable("conversion functions are permitted");
      }
    } Converter;

    Ex = PerformContextualImplicitConversion(Loc: StartLoc, FromE: Ex.get(), Converter);
    if (Ex.isInvalid())
      return ExprError();
    Type = Ex.get()->getType();
    if (!Converter.match(ConvType: Type))
      // FIXME: PerformContextualImplicitConversion should return ExprError
      // itself in this case.
      return ExprError();

    QualType Pointee = Type->castAs<PointerType>()->getPointeeType();
    QualType PointeeElem = Context.getBaseElementType(QT: Pointee);

    // Deleting through a non-default address space is only meaningful in
    // OpenCL C++.
    if (Pointee.getAddressSpace() != LangAS::Default &&
        !getLangOpts().OpenCLCPlusPlus)
      return Diag(Loc: Ex.get()->getBeginLoc(),
                  DiagID: diag::err_address_space_qualified_delete)
             << Pointee.getUnqualifiedType()
             << Qualifiers::getAddrSpaceAsString(AS: Pointee.getAddressSpace());

    CXXRecordDecl *PointeeRD = nullptr;
    if (Pointee->isVoidType() && !isSFINAEContext()) {
      // The C++ standard bans deleting a pointer to a non-object type, which
      // effectively bans deletion of "void*". However, most compilers support
      // this, so we treat it as a warning unless we're in a SFINAE context.
      // But we still prohibit this since C++26.
      Diag(Loc: StartLoc, DiagID: LangOpts.CPlusPlus26 ? diag::err_delete_incomplete
                                        : diag::ext_delete_void_ptr_operand)
          << (LangOpts.CPlusPlus26 ? Pointee : Type)
          << Ex.get()->getSourceRange();
    } else if (Pointee->isFunctionType() || Pointee->isVoidType() ||
               Pointee->isSizelessType()) {
      return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_delete_operand)
                       << Type << Ex.get()->getSourceRange());
    } else if (!Pointee->isDependentType()) {
      // FIXME: This can result in errors if the definition was imported from a
      // module but is hidden.
      if (Pointee->isEnumeralType() ||
          !RequireCompleteType(Loc: StartLoc, T: Pointee,
                               DiagID: LangOpts.CPlusPlus26
                                   ? diag::err_delete_incomplete
                                   : diag::warn_delete_incomplete,
                               Args: Ex.get())) {
        PointeeRD = PointeeElem->getAsCXXRecordDecl();
      }
    }

    // Scalar delete of a pointer-to-array: warn and treat it as delete[].
    if (Pointee->isArrayType() && !ArrayForm) {
      Diag(Loc: StartLoc, DiagID: diag::warn_delete_array_type)
          << Type << Ex.get()->getSourceRange()
          << FixItHint::CreateInsertion(InsertionLoc: getLocForEndOfToken(Loc: StartLoc), Code: "[]");
      ArrayForm = true;
    }

    DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
        Op: ArrayForm ? OO_Array_Delete : OO_Delete);

    if (PointeeRD) {
      // Class-type pointee: look for a class-specific operator delete
      // unless ::delete was written.
      ImplicitDeallocationParameters IDP = {
          Pointee, ShouldUseTypeAwareOperatorNewOrDelete(),
          AlignedAllocationMode::No, SizedDeallocationMode::No};
      if (!UseGlobal &&
          FindDeallocationFunction(StartLoc, RD: PointeeRD, Name: DeleteName,
                                   Operator&: OperatorDelete, IDP))
        return ExprError();

      // If we're allocating an array of records, check whether the
      // usual operator delete[] has a size_t parameter.
      if (ArrayForm) {
        // If the user specifically asked to use the global allocator,
        // we'll need to do the lookup into the class.
        if (UseGlobal)
          UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(
              S&: *this, loc: StartLoc, PassType: IDP.PassTypeIdentity, allocType: PointeeElem);

        // Otherwise, the usual operator delete[] should be the
        // function we just found.
        else if (isa_and_nonnull<CXXMethodDecl>(Val: OperatorDelete)) {
          UsualDeallocFnInfo UDFI(
              *this, DeclAccessPair::make(D: OperatorDelete, AS: AS_public), Pointee,
              StartLoc);
          UsualArrayDeleteWantsSize = isSizedDeallocation(Mode: UDFI.IDP.PassSize);
        }
      }

      // Reference the destructor (and check it is usable) when the
      // selected operator delete will invoke it.
      if (!PointeeRD->hasIrrelevantDestructor()) {
        if (CXXDestructorDecl *Dtor = LookupDestructor(Class: PointeeRD)) {
          if (Dtor->isCalledByDelete(OpDel: OperatorDelete)) {
            MarkFunctionReferenced(Loc: StartLoc, Func: Dtor);
            if (DiagnoseUseOfDecl(D: Dtor, Locs: StartLoc))
              return ExprError();
          }
        }
      }

      CheckVirtualDtorCall(dtor: PointeeRD->getDestructor(), Loc: StartLoc,
                           /*IsDelete=*/true, /*CallCanBeVirtual=*/true,
                           /*WarnOnNonAbstractTypes=*/!ArrayForm,
                           DtorLoc: SourceLocation());
    }

    if (!OperatorDelete) {
      if (getLangOpts().OpenCLCPlusPlus) {
        Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_not_supported) << "default delete";
        return ExprError();
      }

      // Decide whether the global operator delete may be passed a size:
      // only for complete types, and for arrays only when the usual
      // delete[] wants one or the elements need destruction.
      bool IsComplete = isCompleteType(Loc: StartLoc, T: Pointee);
      bool CanProvideSize =
          IsComplete && (!ArrayForm || UsualArrayDeleteWantsSize ||
                         Pointee.isDestructedType());
      bool Overaligned = hasNewExtendedAlignment(S&: *this, AllocType: Pointee);

      // Look for a global declaration.
      ImplicitDeallocationParameters IDP = {
          Pointee, ShouldUseTypeAwareOperatorNewOrDelete(),
          alignedAllocationModeFromBool(IsAligned: Overaligned),
          sizedDeallocationModeFromBool(IsSized: CanProvideSize)};
      OperatorDelete = FindUsualDeallocationFunction(StartLoc, IDP, Name: DeleteName);
      if (!OperatorDelete)
        return ExprError();
    }

    if (OperatorDelete->isInvalidDecl())
      return ExprError();

    MarkFunctionReferenced(Loc: StartLoc, Func: OperatorDelete);

    // Check access and ambiguity of destructor if we're going to call it.
    // Note that this is required even for a virtual delete.
    bool IsVirtualDelete = false;
    if (PointeeRD) {
      if (CXXDestructorDecl *Dtor = LookupDestructor(Class: PointeeRD)) {
        if (Dtor->isCalledByDelete(OpDel: OperatorDelete))
          CheckDestructorAccess(Loc: Ex.get()->getExprLoc(), Dtor,
                                PDiag: PDiag(DiagID: diag::err_access_dtor) << PointeeElem);
        IsVirtualDelete = Dtor->isVirtual();
      }
    }

    DiagnoseUseOfDecl(D: OperatorDelete, Locs: StartLoc);

    // A type-aware operator delete takes the type-identity tag as its
    // first parameter; the address is then parameter 1.
    unsigned AddressParamIdx = 0;
    if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
      QualType TypeIdentity = OperatorDelete->getParamDecl(i: 0)->getType();
      if (RequireCompleteType(Loc: StartLoc, T: TypeIdentity,
                              DiagID: diag::err_incomplete_type))
        return ExprError();
      AddressParamIdx = 1;
    }

    // Convert the operand to the type of the first parameter of operator
    // delete. This is only necessary if we selected a destroying operator
    // delete that we are going to call (non-virtually); converting to void*
    // is trivial and left to AST consumers to handle.
    QualType ParamType =
        OperatorDelete->getParamDecl(i: AddressParamIdx)->getType();
    if (!IsVirtualDelete && !ParamType->getPointeeType()->isVoidType()) {
      Qualifiers Qs = Pointee.getQualifiers();
      if (Qs.hasCVRQualifiers()) {
        // Qualifiers are irrelevant to this conversion; we're only looking
        // for access and ambiguity.
        Qs.removeCVRQualifiers();
        QualType Unqual = Context.getPointerType(
            T: Context.getQualifiedType(T: Pointee.getUnqualifiedType(), Qs));
        Ex = ImpCastExprToType(E: Ex.get(), Type: Unqual, CK: CK_NoOp);
      }
      Ex = PerformImplicitConversion(From: Ex.get(), ToType: ParamType,
                                     Action: AssignmentAction::Passing);
      if (Ex.isInvalid())
        return ExprError();
    }
  }

  CXXDeleteExpr *Result = new (Context) CXXDeleteExpr(
      Context.VoidTy, UseGlobal, ArrayForm, ArrayFormAsWritten,
      UsualArrayDeleteWantsSize, OperatorDelete, Ex.get(), StartLoc);
  // Warn if this delete's form mismatches the new that allocated the value.
  AnalyzeDeleteExprMismatch(DE: Result);
  return Result;
}
4235
// Resolve which global operator new/delete a __builtin_operator_new /
// __builtin_operator_delete call refers to, by running ordinary overload
// resolution over the global allocation functions. Returns true on error;
// on success, Operator is set to the selected function (which must be a
// replaceable, "usual" global allocation function).
static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
                                            bool IsDelete,
                                            FunctionDecl *&Operator) {

  DeclarationName NewName = S.Context.DeclarationNames.getCXXOperatorName(
      Op: IsDelete ? OO_Delete : OO_New);

  // Lookup is performed at translation-unit scope: the builtins never
  // consider class-specific allocation functions.
  LookupResult R(S, NewName, TheCall->getBeginLoc(), Sema::LookupOrdinaryName);
  S.LookupQualifiedName(R, LookupCtx: S.Context.getTranslationUnitDecl());
  assert(!R.empty() && "implicitly declared allocation functions not found");
  assert(!R.isAmbiguous() && "global allocation functions are ambiguous");

  // We do our own custom access checks below.
  R.suppressDiagnostics();

  SmallVector<Expr *, 8> Args(TheCall->arguments());
  OverloadCandidateSet Candidates(R.getNameLoc(),
                                  OverloadCandidateSet::CSK_Normal);
  for (LookupResult::iterator FnOvl = R.begin(), FnOvlEnd = R.end();
       FnOvl != FnOvlEnd; ++FnOvl) {
    // Even member operator new/delete are implicitly treated as
    // static, so don't use AddMemberCandidate.
    NamedDecl *D = (*FnOvl)->getUnderlyingDecl();

    if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(Val: D)) {
      S.AddTemplateOverloadCandidate(FunctionTemplate: FnTemplate, FoundDecl: FnOvl.getPair(),
                                     /*ExplicitTemplateArgs=*/nullptr, Args,
                                     CandidateSet&: Candidates,
                                     /*SuppressUserConversions=*/false);
      continue;
    }

    FunctionDecl *Fn = cast<FunctionDecl>(Val: D);
    S.AddOverloadCandidate(Function: Fn, FoundDecl: FnOvl.getPair(), Args, CandidateSet&: Candidates,
                           /*SuppressUserConversions=*/false);
  }

  SourceRange Range = TheCall->getSourceRange();

  // Do the resolution.
  OverloadCandidateSet::iterator Best;
  switch (Candidates.BestViableFunction(S, Loc: R.getNameLoc(), Best)) {
  case OR_Success: {
    // Got one!
    FunctionDecl *FnDecl = Best->Function;
    assert(R.getNamingClass() == nullptr &&
           "class members should not be considered");

    // The builtin forms may only dispatch to usual (replaceable) global
    // allocation functions.
    if (!FnDecl->isReplaceableGlobalAllocationFunction()) {
      S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_builtin_operator_new_delete_not_usual)
          << (IsDelete ? 1 : 0) << Range;
      S.Diag(Loc: FnDecl->getLocation(), DiagID: diag::note_non_usual_function_declared_here)
          << R.getLookupName() << FnDecl->getSourceRange();
      return true;
    }

    Operator = FnDecl;
    return false;
  }

  case OR_No_Viable_Function:
    Candidates.NoteCandidates(
        PA: PartialDiagnosticAt(R.getNameLoc(),
                             S.PDiag(DiagID: diag::err_ovl_no_viable_function_in_call)
                                 << R.getLookupName() << Range),
        S, OCD: OCD_AllCandidates, Args);
    return true;

  case OR_Ambiguous:
    Candidates.NoteCandidates(
        PA: PartialDiagnosticAt(R.getNameLoc(),
                             S.PDiag(DiagID: diag::err_ovl_ambiguous_call)
                                 << R.getLookupName() << Range),
        S, OCD: OCD_AmbiguousCandidates, Args);
    return true;

  case OR_Deleted:
    S.DiagnoseUseOfDeletedFunction(Loc: R.getNameLoc(), Range, Name: R.getLookupName(),
                                   CandidateSet&: Candidates, Fn: Best->Function, Args);
    return true;
  }
  llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
4319
// Finish semantic analysis of a __builtin_operator_new/__builtin_operator_delete
// call: resolve the underlying global allocation function, then rewrite the
// call in place (result type, converted arguments, callee type) to match it.
ExprResult Sema::BuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
                                                    bool IsDelete) {
  CallExpr *TheCall = cast<CallExpr>(Val: TheCallResult.get());
  // These builtins are C++-only.
  if (!getLangOpts().CPlusPlus) {
    Diag(Loc: TheCall->getExprLoc(), DiagID: diag::err_builtin_requires_language)
        << (IsDelete ? "__builtin_operator_delete" : "__builtin_operator_new")
        << "C++";
    return ExprError();
  }
  // CodeGen assumes it can find the global new and delete to call,
  // so ensure that they are declared.
  DeclareGlobalNewDelete();

  FunctionDecl *OperatorNewOrDelete = nullptr;
  if (resolveBuiltinNewDeleteOverload(S&: *this, TheCall, IsDelete,
                                      Operator&: OperatorNewOrDelete))
    return ExprError();
  assert(OperatorNewOrDelete && "should be found");

  DiagnoseUseOfDecl(D: OperatorNewOrDelete, Locs: TheCall->getExprLoc());
  MarkFunctionReferenced(Loc: TheCall->getExprLoc(), Func: OperatorNewOrDelete);

  // Convert each argument to the corresponding parameter type of the
  // selected allocation function, updating the call in place.
  TheCall->setType(OperatorNewOrDelete->getReturnType());
  for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
    QualType ParamTy = OperatorNewOrDelete->getParamDecl(i)->getType();
    InitializedEntity Entity =
        InitializedEntity::InitializeParameter(Context, Type: ParamTy, Consumed: false);
    ExprResult Arg = PerformCopyInitialization(
        Entity, EqualLoc: TheCall->getArg(Arg: i)->getBeginLoc(), Init: TheCall->getArg(Arg: i));
    if (Arg.isInvalid())
      return ExprError();
    TheCall->setArg(Arg: i, ArgExpr: Arg.get());
  }
  // Retype the builtin callee to the resolved function's type.
  auto Callee = dyn_cast<ImplicitCastExpr>(Val: TheCall->getCallee());
  assert(Callee && Callee->getCastKind() == CK_BuiltinFnToFnPtr &&
         "Callee expected to be implicit cast to a builtin function pointer");
  Callee->setType(OperatorNewOrDelete->getType());

  return TheCallResult;
}
4360
4361void Sema::CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
4362 bool IsDelete, bool CallCanBeVirtual,
4363 bool WarnOnNonAbstractTypes,
4364 SourceLocation DtorLoc) {
4365 if (!dtor || dtor->isVirtual() || !CallCanBeVirtual || isUnevaluatedContext())
4366 return;
4367
4368 // C++ [expr.delete]p3:
4369 // In the first alternative (delete object), if the static type of the
4370 // object to be deleted is different from its dynamic type, the static
4371 // type shall be a base class of the dynamic type of the object to be
4372 // deleted and the static type shall have a virtual destructor or the
4373 // behavior is undefined.
4374 //
4375 const CXXRecordDecl *PointeeRD = dtor->getParent();
4376 // Note: a final class cannot be derived from, no issue there
4377 if (!PointeeRD->isPolymorphic() || PointeeRD->hasAttr<FinalAttr>())
4378 return;
4379
4380 // If the superclass is in a system header, there's nothing that can be done.
4381 // The `delete` (where we emit the warning) can be in a system header,
4382 // what matters for this warning is where the deleted type is defined.
4383 if (getSourceManager().isInSystemHeader(Loc: PointeeRD->getLocation()))
4384 return;
4385
4386 QualType ClassType = dtor->getFunctionObjectParameterType();
4387 if (PointeeRD->isAbstract()) {
4388 // If the class is abstract, we warn by default, because we're
4389 // sure the code has undefined behavior.
4390 Diag(Loc, DiagID: diag::warn_delete_abstract_non_virtual_dtor) << (IsDelete ? 0 : 1)
4391 << ClassType;
4392 } else if (WarnOnNonAbstractTypes) {
4393 // Otherwise, if this is not an array delete, it's a bit suspect,
4394 // but not necessarily wrong.
4395 Diag(Loc, DiagID: diag::warn_delete_non_virtual_dtor) << (IsDelete ? 0 : 1)
4396 << ClassType;
4397 }
4398 if (!IsDelete) {
4399 std::string TypeStr;
4400 ClassType.getAsStringInternal(Str&: TypeStr, Policy: getPrintingPolicy());
4401 Diag(Loc: DtorLoc, DiagID: diag::note_delete_non_virtual)
4402 << FixItHint::CreateInsertion(InsertionLoc: DtorLoc, Code: TypeStr + "::");
4403 }
4404}
4405
4406Sema::ConditionResult Sema::ActOnConditionVariable(Decl *ConditionVar,
4407 SourceLocation StmtLoc,
4408 ConditionKind CK) {
4409 ExprResult E =
4410 CheckConditionVariable(ConditionVar: cast<VarDecl>(Val: ConditionVar), StmtLoc, CK);
4411 if (E.isInvalid())
4412 return ConditionError();
4413 E = ActOnFinishFullExpr(Expr: E.get(), /*DiscardedValue*/ false);
4414 return ConditionResult(*this, ConditionVar, E,
4415 CK == ConditionKind::ConstexprIf);
4416}
4417
4418ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
4419 SourceLocation StmtLoc,
4420 ConditionKind CK) {
4421 if (ConditionVar->isInvalidDecl())
4422 return ExprError();
4423
4424 QualType T = ConditionVar->getType();
4425
4426 // C++ [stmt.select]p2:
4427 // The declarator shall not specify a function or an array.
4428 if (T->isFunctionType())
4429 return ExprError(Diag(Loc: ConditionVar->getLocation(),
4430 DiagID: diag::err_invalid_use_of_function_type)
4431 << ConditionVar->getSourceRange());
4432 else if (T->isArrayType())
4433 return ExprError(Diag(Loc: ConditionVar->getLocation(),
4434 DiagID: diag::err_invalid_use_of_array_type)
4435 << ConditionVar->getSourceRange());
4436
4437 ExprResult Condition = BuildDeclRefExpr(
4438 D: ConditionVar, Ty: ConditionVar->getType().getNonReferenceType(), VK: VK_LValue,
4439 Loc: ConditionVar->getLocation());
4440
4441 switch (CK) {
4442 case ConditionKind::Boolean:
4443 return CheckBooleanCondition(Loc: StmtLoc, E: Condition.get());
4444
4445 case ConditionKind::ConstexprIf:
4446 return CheckBooleanCondition(Loc: StmtLoc, E: Condition.get(), IsConstexpr: true);
4447
4448 case ConditionKind::Switch:
4449 return CheckSwitchCondition(SwitchLoc: StmtLoc, Cond: Condition.get());
4450 }
4451
4452 llvm_unreachable("unexpected condition kind");
4453}
4454
4455ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
4456 // C++11 6.4p4:
4457 // The value of a condition that is an initialized declaration in a statement
4458 // other than a switch statement is the value of the declared variable
4459 // implicitly converted to type bool. If that conversion is ill-formed, the
4460 // program is ill-formed.
4461 // The value of a condition that is an expression is the value of the
4462 // expression, implicitly converted to bool.
4463 //
4464 // C++23 8.5.2p2
4465 // If the if statement is of the form if constexpr, the value of the condition
4466 // is contextually converted to bool and the converted expression shall be
4467 // a constant expression.
4468 //
4469
4470 ExprResult E = PerformContextuallyConvertToBool(From: CondExpr);
4471 if (!IsConstexpr || E.isInvalid() || E.get()->isValueDependent())
4472 return E;
4473
4474 E = ActOnFinishFullExpr(Expr: E.get(), CC: E.get()->getExprLoc(),
4475 /*DiscardedValue*/ false,
4476 /*IsConstexpr*/ true);
4477 if (E.isInvalid())
4478 return E;
4479
4480 // FIXME: Return this value to the caller so they don't need to recompute it.
4481 llvm::APSInt Cond;
4482 E = VerifyIntegerConstantExpression(
4483 E: E.get(), Result: &Cond,
4484 DiagID: diag::err_constexpr_if_condition_expression_is_not_constant);
4485 return E;
4486}
4487
4488bool
4489Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
4490 // Look inside the implicit cast, if it exists.
4491 if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Val: From))
4492 From = Cast->getSubExpr();
4493
4494 // A string literal (2.13.4) that is not a wide string literal can
4495 // be converted to an rvalue of type "pointer to char"; a wide
4496 // string literal can be converted to an rvalue of type "pointer
4497 // to wchar_t" (C++ 4.2p2).
4498 if (StringLiteral *StrLit = dyn_cast<StringLiteral>(Val: From->IgnoreParens()))
4499 if (const PointerType *ToPtrType = ToType->getAs<PointerType>())
4500 if (const BuiltinType *ToPointeeType
4501 = ToPtrType->getPointeeType()->getAs<BuiltinType>()) {
4502 // This conversion is considered only when there is an
4503 // explicit appropriate pointer target type (C++ 4.2p2).
4504 if (!ToPtrType->getPointeeType().hasQualifiers()) {
4505 switch (StrLit->getKind()) {
4506 case StringLiteralKind::UTF8:
4507 case StringLiteralKind::UTF16:
4508 case StringLiteralKind::UTF32:
4509 // We don't allow UTF literals to be implicitly converted
4510 break;
4511 case StringLiteralKind::Ordinary:
4512 case StringLiteralKind::Binary:
4513 return (ToPointeeType->getKind() == BuiltinType::Char_U ||
4514 ToPointeeType->getKind() == BuiltinType::Char_S);
4515 case StringLiteralKind::Wide:
4516 return Context.typesAreCompatible(T1: Context.getWideCharType(),
4517 T2: QualType(ToPointeeType, 0));
4518 case StringLiteralKind::Unevaluated:
4519 assert(false && "Unevaluated string literal in expression");
4520 break;
4521 }
4522 }
4523 }
4524
4525 return false;
4526}
4527
// Build the expression that performs a user-defined conversion of From to
// Ty via the given conversion function or converting constructor,
// performing the required access and use-of-decl checks along the way.
static ExprResult BuildCXXCastArgument(Sema &S,
                                       SourceLocation CastLoc,
                                       QualType Ty,
                                       CastKind Kind,
                                       CXXMethodDecl *Method,
                                       DeclAccessPair FoundDecl,
                                       bool HadMultipleCandidates,
                                       Expr *From) {
  switch (Kind) {
  default: llvm_unreachable("Unhandled cast kind!");
  case CK_ConstructorConversion: {
    // Conversion via a converting constructor of Ty.
    CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Val: Method);
    SmallVector<Expr*, 8> ConstructorArgs;

    if (S.RequireNonAbstractType(Loc: CastLoc, T: Ty,
                                 DiagID: diag::err_allocation_of_abstract_type))
      return ExprError();

    // Convert the single source expression into the constructor's argument
    // list (handles default arguments, etc.).
    if (S.CompleteConstructorCall(Constructor, DeclInitType: Ty, ArgsPtr: From, Loc: CastLoc,
                                  ConvertedArgs&: ConstructorArgs))
      return ExprError();

    S.CheckConstructorAccess(Loc: CastLoc, D: Constructor, FoundDecl,
                             Entity: InitializedEntity::InitializeTemporary(Type: Ty));
    if (S.DiagnoseUseOfDecl(D: Method, Locs: CastLoc))
      return ExprError();

    ExprResult Result = S.BuildCXXConstructExpr(
        ConstructLoc: CastLoc, DeclInitType: Ty, FoundDecl, Constructor: cast<CXXConstructorDecl>(Val: Method),
        Exprs: ConstructorArgs, HadMultipleCandidates,
        /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
        ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
    if (Result.isInvalid())
      return ExprError();

    // The constructed temporary may need its destructor run at the end of
    // the full-expression.
    return S.MaybeBindToTemporary(E: Result.getAs<Expr>());
  }

  case CK_UserDefinedConversion: {
    // Conversion via a conversion function (operator T()) on From's class.
    assert(!From->getType()->isPointerType() && "Arg can't have pointer type!");

    S.CheckMemberOperatorAccess(Loc: CastLoc, ObjectExpr: From, /*arg*/ ArgExpr: nullptr, FoundDecl);
    if (S.DiagnoseUseOfDecl(D: Method, Locs: CastLoc))
      return ExprError();

    // Create an implicit call expr that calls it.
    CXXConversionDecl *Conv = cast<CXXConversionDecl>(Val: Method);
    ExprResult Result = S.BuildCXXMemberCallExpr(Exp: From, FoundDecl, Method: Conv,
                                                 HadMultipleCandidates);
    if (Result.isInvalid())
      return ExprError();
    // Record usage of conversion in an implicit cast.
    Result = ImplicitCastExpr::Create(Context: S.Context, T: Result.get()->getType(),
                                      Kind: CK_UserDefinedConversion, Operand: Result.get(),
                                      BasePath: nullptr, Cat: Result.get()->getValueKind(),
                                      FPO: S.CurFPFeatureOverrides());

    return S.MaybeBindToTemporary(E: Result.get());
  }
  }
}
4589
/// Perform an implicit conversion of \p From to \p ToType following the
/// implicit conversion sequence \p ICS.
///
/// Standard conversion sequences are delegated to the
/// StandardConversionSequence overload. User-defined conversion sequences
/// apply the "before" standard conversion, invoke the conversion function or
/// converting constructor, and then (unless suppressed for builtin operator
/// candidates) apply the "after" standard conversion. Ambiguous and bad
/// sequences are diagnosed here and yield ExprError().
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
                                const ImplicitConversionSequence &ICS,
                                AssignmentAction Action,
                                CheckedConversionKind CCK) {
  // C++ [over.match.oper]p7: [...] operands of class type are converted [...]
  if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp &&
      !From->getType()->isRecordType())
    return From;

  switch (ICS.getKind()) {
  case ImplicitConversionSequence::StandardConversion: {
    ExprResult Res = PerformImplicitConversion(From, ToType, SCS: ICS.Standard,
                                               Action, CCK);
    if (Res.isInvalid())
      return ExprError();
    From = Res.get();
    break;
  }

  case ImplicitConversionSequence::UserDefinedConversion: {

    FunctionDecl *FD = ICS.UserDefined.ConversionFunction;
    CastKind CastKind;
    QualType BeforeToType;
    assert(FD && "no conversion function for user-defined conversion seq");
    if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(Val: FD)) {
      CastKind = CK_UserDefinedConversion;

      // If the user-defined conversion is specified by a conversion function,
      // the initial standard conversion sequence converts the source type to
      // the implicit object parameter of the conversion function.
      BeforeToType = Context.getCanonicalTagType(TD: Conv->getParent());
    } else {
      const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(Val: FD);
      CastKind = CK_ConstructorConversion;
      // Do no conversion if dealing with ... for the first conversion.
      if (!ICS.UserDefined.EllipsisConversion) {
        // If the user-defined conversion is specified by a constructor, the
        // initial standard conversion sequence converts the source type to
        // the type required by the argument of the constructor
        BeforeToType = Ctor->getParamDecl(i: 0)->getType().getNonReferenceType();
      }
    }
    // Watch out for ellipsis conversion.
    if (!ICS.UserDefined.EllipsisConversion) {
      ExprResult Res = PerformImplicitConversion(
          From, ToType: BeforeToType, SCS: ICS.UserDefined.Before,
          Action: AssignmentAction::Converting, CCK);
      if (Res.isInvalid())
        return ExprError();
      From = Res.get();
    }

    // Build the call to the conversion function or converting constructor.
    ExprResult CastArg = BuildCXXCastArgument(
        S&: *this, CastLoc: From->getBeginLoc(), Ty: ToType.getNonReferenceType(), Kind: CastKind,
        Method: cast<CXXMethodDecl>(Val: FD), FoundDecl: ICS.UserDefined.FoundConversionFunction,
        HadMultipleCandidates: ICS.UserDefined.HadMultipleCandidates, From);

    if (CastArg.isInvalid())
      return ExprError();

    From = CastArg.get();

    // C++ [over.match.oper]p7:
    //   [...] the second standard conversion sequence of a user-defined
    //   conversion sequence is not applied.
    if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp)
      return From;

    return PerformImplicitConversion(From, ToType, SCS: ICS.UserDefined.After,
                                     Action: AssignmentAction::Converting, CCK);
  }

  case ImplicitConversionSequence::AmbiguousConversion:
    ICS.DiagnoseAmbiguousConversion(S&: *this, CaretLoc: From->getExprLoc(),
                                    PDiag: PDiag(DiagID: diag::err_typecheck_ambiguous_condition)
                                        << From->getSourceRange());
    return ExprError();

  case ImplicitConversionSequence::EllipsisConversion:
  case ImplicitConversionSequence::StaticObjectArgumentConversion:
    llvm_unreachable("bad conversion");

  case ImplicitConversionSequence::BadConversion:
    // The sequence failed. Emit an assignment-mismatch diagnostic; if the
    // types happen to look "compatible" to the C checker, force an
    // incompatible-type diagnostic so we never fail silently.
    AssignConvertType ConvTy =
        CheckAssignmentConstraints(Loc: From->getExprLoc(), LHSType: ToType, RHSType: From->getType());
    bool Diagnosed = DiagnoseAssignmentResult(
        ConvTy: ConvTy == AssignConvertType::Compatible
            ? AssignConvertType::Incompatible
            : ConvTy,
        Loc: From->getExprLoc(), DstType: ToType, SrcType: From->getType(), SrcExpr: From, Action);
    assert(Diagnosed && "failed to diagnose bad conversion"); (void)Diagnosed;
    return ExprError();
  }

  // Everything went well.
  return From;
}
4689
4690// adjustVectorOrConstantMatrixType - Compute the intermediate cast type casting
4691// elements of the from type to the elements of the to type without resizing the
4692// vector or matrix.
4693static QualType adjustVectorOrConstantMatrixType(ASTContext &Context,
4694 QualType FromTy,
4695 QualType ToType,
4696 QualType *ElTy = nullptr) {
4697 QualType ElType = ToType;
4698 if (auto *ToVec = ToType->getAs<VectorType>())
4699 ElType = ToVec->getElementType();
4700 else if (auto *ToMat = ToType->getAs<ConstantMatrixType>())
4701 ElType = ToMat->getElementType();
4702
4703 if (ElTy)
4704 *ElTy = ElType;
4705 if (FromTy->isVectorType()) {
4706 auto *FromVec = FromTy->castAs<VectorType>();
4707 return Context.getExtVectorType(VectorType: ElType, NumElts: FromVec->getNumElements());
4708 }
4709 if (FromTy->isConstantMatrixType()) {
4710 auto *FromMat = FromTy->castAs<ConstantMatrixType>();
4711 return Context.getConstantMatrixType(ElementType: ElType, NumRows: FromMat->getNumRows(),
4712 NumColumns: FromMat->getNumColumns());
4713 }
4714 return ElType;
4715}
4716
4717/// Check if an integral conversion involves incompatible overflow behavior
4718/// types. Returns true if the conversion is invalid.
4719static bool checkIncompatibleOBTConversion(Sema &S, QualType FromType,
4720 QualType ToType, Expr *From) {
4721 const auto *FromOBT = FromType->getAs<OverflowBehaviorType>();
4722 const auto *ToOBT = ToType->getAs<OverflowBehaviorType>();
4723
4724 if (FromOBT && ToOBT &&
4725 FromOBT->getBehaviorKind() != ToOBT->getBehaviorKind()) {
4726 S.Diag(Loc: From->getExprLoc(), DiagID: diag::err_incompatible_obt_kinds_assignment)
4727 << ToType << FromType
4728 << (ToOBT->getBehaviorKind() ==
4729 OverflowBehaviorType::OverflowBehaviorKind::Trap
4730 ? "__ob_trap"
4731 : "__ob_wrap")
4732 << (FromOBT->getBehaviorKind() ==
4733 OverflowBehaviorType::OverflowBehaviorKind::Trap
4734 ? "__ob_trap"
4735 : "__ob_wrap");
4736 return true;
4737 }
4738 return false;
4739}
4740
/// Perform the implicit conversion of \p From to \p ToType described by the
/// standard conversion sequence \p SCS, rewriting the expression with the
/// implicit casts (and constructor calls) the sequence requires.
///
/// The sequence is applied in up to three phases (SCS.First, SCS.Second,
/// SCS.Third), plus an HLSL dimension adjustment (SCS.Dimension), an atomic
/// re-wrapping step, and a final temporary materialization when converting
/// to a reference type. Returns the converted expression, or ExprError() if
/// any step fails (the failure has already been diagnosed).
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
                                const StandardConversionSequence& SCS,
                                AssignmentAction Action,
                                CheckedConversionKind CCK) {
  bool CStyle = (CCK == CheckedConversionKind::CStyleCast ||
                 CCK == CheckedConversionKind::FunctionalCast);

  // Overall FIXME: we are recomputing too many types here and doing far too
  // much extra work. What this means is that we need to keep track of more
  // information that is computed when we try the implicit conversion initially,
  // so that we don't need to recompute anything here.
  QualType FromType = From->getType();

  // If the sequence selected a copy/converting constructor, build the
  // constructor call directly rather than emitting cast nodes.
  if (SCS.CopyConstructor) {
    // FIXME: When can ToType be a reference type?
    assert(!ToType->isReferenceType());
    if (SCS.Second == ICK_Derived_To_Base) {
      SmallVector<Expr*, 8> ConstructorArgs;
      if (CompleteConstructorCall(
              Constructor: cast<CXXConstructorDecl>(Val: SCS.CopyConstructor), DeclInitType: ToType, ArgsPtr: From,
              /*FIXME:ConstructLoc*/ Loc: SourceLocation(), ConvertedArgs&: ConstructorArgs))
        return ExprError();
      return BuildCXXConstructExpr(
          /*FIXME:ConstructLoc*/ ConstructLoc: SourceLocation(), DeclInitType: ToType,
          FoundDecl: SCS.FoundCopyConstructor, Constructor: SCS.CopyConstructor, Exprs: ConstructorArgs,
          /*HadMultipleCandidates*/ false,
          /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
          ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
    }
    return BuildCXXConstructExpr(
        /*FIXME:ConstructLoc*/ ConstructLoc: SourceLocation(), DeclInitType: ToType,
        FoundDecl: SCS.FoundCopyConstructor, Constructor: SCS.CopyConstructor, Exprs: From,
        /*HadMultipleCandidates*/ false,
        /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
        ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
  }

  // Resolve overloaded function references.
  if (Context.hasSameType(T1: FromType, T2: Context.OverloadTy)) {
    DeclAccessPair Found;
    FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(AddressOfExpr: From, TargetType: ToType,
                                                          Complain: true, Found);
    if (!Fn)
      return ExprError();

    if (DiagnoseUseOfDecl(D: Fn, Locs: From->getBeginLoc()))
      return ExprError();

    ExprResult Res = FixOverloadedFunctionReference(E: From, FoundDecl: Found, Fn);
    if (Res.isInvalid())
      return ExprError();

    // We might get back another placeholder expression if we resolved to a
    // builtin.
    Res = CheckPlaceholderExpr(E: Res.get());
    if (Res.isInvalid())
      return ExprError();

    From = Res.get();
    FromType = From->getType();
  }

  // If we're converting to an atomic type, first convert to the corresponding
  // non-atomic type.
  QualType ToAtomicType;
  if (const AtomicType *ToAtomic = ToType->getAs<AtomicType>()) {
    ToAtomicType = ToType;
    ToType = ToAtomic->getValueType();
  }

  // Remember the original source type for the diagnostics emitted after the
  // conversion chain below.
  QualType InitialFromType = FromType;
  // Perform the first implicit conversion.
  switch (SCS.First) {
  case ICK_Identity:
    if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) {
      FromType = FromAtomic->getValueType().getUnqualifiedType();
      From = ImplicitCastExpr::Create(Context, T: FromType, Kind: CK_AtomicToNonAtomic,
                                      Operand: From, /*BasePath=*/nullptr, Cat: VK_PRValue,
                                      FPO: FPOptionsOverride());
    }
    break;

  case ICK_Lvalue_To_Rvalue: {
    assert(From->getObjectKind() != OK_ObjCProperty);
    ExprResult FromRes = DefaultLvalueConversion(E: From);
    if (FromRes.isInvalid())
      return ExprError();

    From = FromRes.get();
    FromType = From->getType();
    break;
  }

  case ICK_Array_To_Pointer:
    FromType = Context.getArrayDecayedType(T: FromType);
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_ArrayToPointerDecay, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_HLSL_Array_RValue:
    if (ToType->isArrayParameterType()) {
      FromType = Context.getArrayParameterType(Ty: FromType);
    } else if (FromType->isArrayParameterType()) {
      const ArrayParameterType *APT = cast<ArrayParameterType>(Val&: FromType);
      FromType = APT->getConstantArrayType(Ctx: Context);
    }
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_HLSLArrayRValue, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Function_To_Pointer:
    FromType = Context.getPointerType(T: FromType);
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_FunctionToPointerDecay,
                             VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
               .get();
    break;

  default:
    llvm_unreachable("Improper first standard conversion");
  }

  // Perform the second implicit conversion
  switch (SCS.Second) {
  case ICK_Identity:
    // C++ [except.spec]p5:
    //   [For] assignment to and initialization of pointers to functions,
    //   pointers to member functions, and references to functions: the
    //   target entity shall allow at least the exceptions allowed by the
    //   source value in the assignment or initialization.
    switch (Action) {
    case AssignmentAction::Assigning:
    case AssignmentAction::Initializing:
      // Note, function argument passing and returning are initialization.
    case AssignmentAction::Passing:
    case AssignmentAction::Returning:
    case AssignmentAction::Sending:
    case AssignmentAction::Passing_CFAudited:
      if (CheckExceptionSpecCompatibility(From, ToType))
        return ExprError();
      break;

    case AssignmentAction::Casting:
    case AssignmentAction::Converting:
      // Casts and implicit conversions are not initialization, so are not
      // checked for exception specification mismatches.
      break;
    }
    // Nothing else to do.
    break;

  case ICK_Integral_Promotion:
  case ICK_Integral_Conversion: {
    QualType ElTy = ToType;
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType() ||
        FromType->isConstantMatrixType() || ToType->isConstantMatrixType())
      StepTy =
          adjustVectorOrConstantMatrixType(Context, FromTy: FromType, ToType, ElTy: &ElTy);

    // Check for incompatible OBT kinds before converting
    if (checkIncompatibleOBTConversion(S&: *this, FromType, ToType: StepTy, From))
      return ExprError();

    if (ElTy->isBooleanType()) {
      assert(FromType->castAsEnumDecl()->isFixed() &&
             SCS.Second == ICK_Integral_Promotion &&
             "only enums with fixed underlying type can promote to bool");
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralToBoolean, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    } else {
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralCast, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    }
    break;
  }

  case ICK_Floating_Promotion:
  case ICK_Floating_Conversion: {
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType() ||
        FromType->isConstantMatrixType() || ToType->isConstantMatrixType())
      StepTy = adjustVectorOrConstantMatrixType(Context, FromTy: FromType, ToType);
    From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_FloatingCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Complex_Promotion:
  case ICK_Complex_Conversion: {
    // Pick the cast kind from the (real/integral) element types of the two
    // complex types.
    QualType FromEl = From->getType()->castAs<ComplexType>()->getElementType();
    QualType ToEl = ToType->castAs<ComplexType>()->getElementType();
    CastKind CK;
    if (FromEl->isRealFloatingType()) {
      if (ToEl->isRealFloatingType())
        CK = CK_FloatingComplexCast;
      else
        CK = CK_FloatingComplexToIntegralComplex;
    } else if (ToEl->isRealFloatingType()) {
      CK = CK_IntegralComplexToFloatingComplex;
    } else {
      CK = CK_IntegralComplexCast;
    }
    From = ImpCastExprToType(E: From, Type: ToType, CK, VK: VK_PRValue, /*BasePath=*/nullptr,
                             CCK)
               .get();
    break;
  }

  case ICK_Floating_Integral: {
    QualType ElTy = ToType;
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType() ||
        FromType->isConstantMatrixType() || ToType->isConstantMatrixType())
      StepTy =
          adjustVectorOrConstantMatrixType(Context, FromTy: FromType, ToType, ElTy: &ElTy);
    if (ElTy->isRealFloatingType())
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralToFloating, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    else
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_FloatingToIntegral, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    break;
  }

  case ICK_Fixed_Point_Conversion:
    assert((FromType->isFixedPointType() || ToType->isFixedPointType()) &&
           "Attempting implicit fixed point conversion without a fixed "
           "point operand");
    if (FromType->isFloatingType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FloatingToFixedPoint,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isFloatingType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToFloating,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (FromType->isIntegralType(Ctx: Context))
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_IntegralToFixedPoint,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isIntegralType(Ctx: Context))
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToIntegral,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isBooleanType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToBoolean,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointCast,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    break;

  case ICK_Compatible_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_NoOp, VK: From->getValueKind(),
                             /*BasePath=*/nullptr, CCK).get();
    break;

  case ICK_Writeback_Conversion:
  case ICK_Pointer_Conversion: {
    if (SCS.IncompatibleObjC && Action != AssignmentAction::Casting) {
      // Diagnose incompatible Objective-C conversions
      if (Action == AssignmentAction::Initializing ||
          Action == AssignmentAction::Assigning)
        Diag(Loc: From->getBeginLoc(),
             DiagID: diag::ext_typecheck_convert_incompatible_pointer)
            << ToType << From->getType() << Action << From->getSourceRange()
            << 0;
      else
        Diag(Loc: From->getBeginLoc(),
             DiagID: diag::ext_typecheck_convert_incompatible_pointer)
            << From->getType() << ToType << Action << From->getSourceRange()
            << 0;

      if (From->getType()->isObjCObjectPointerType() &&
          ToType->isObjCObjectPointerType())
        ObjC().EmitRelatedResultTypeNote(E: From);
    } else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
               !ObjC().CheckObjCARCUnavailableWeakConversion(castType: ToType,
                                                             ExprType: From->getType())) {
      if (Action == AssignmentAction::Initializing)
        Diag(Loc: From->getBeginLoc(), DiagID: diag::err_arc_weak_unavailable_assign);
      else
        Diag(Loc: From->getBeginLoc(), DiagID: diag::err_arc_convesion_of_weak_unavailable)
            << (Action == AssignmentAction::Casting) << From->getType()
            << ToType << From->getSourceRange();
    }

    // Defer address space conversion to the third conversion.
    QualType FromPteeType = From->getType()->getPointeeType();
    QualType ToPteeType = ToType->getPointeeType();
    QualType NewToType = ToType;
    if (!FromPteeType.isNull() && !ToPteeType.isNull() &&
        FromPteeType.getAddressSpace() != ToPteeType.getAddressSpace()) {
      NewToType = Context.removeAddrSpaceQualType(T: ToPteeType);
      NewToType = Context.getAddrSpaceQualType(T: NewToType,
                                               AddressSpace: FromPteeType.getAddressSpace());
      if (ToType->isObjCObjectPointerType())
        NewToType = Context.getObjCObjectPointerType(OIT: NewToType);
      else if (ToType->isBlockPointerType())
        NewToType = Context.getBlockPointerType(T: NewToType);
      else
        NewToType = Context.getPointerType(T: NewToType);
    }

    CastKind Kind;
    CXXCastPath BasePath;
    if (CheckPointerConversion(From, ToType: NewToType, Kind, BasePath, IgnoreBaseAccess: CStyle))
      return ExprError();

    // Make sure we extend blocks if necessary.
    // FIXME: doing this here is really ugly.
    if (Kind == CK_BlockPointerToObjCPointerCast) {
      ExprResult E = From;
      (void)ObjC().PrepareCastToObjCObjectPointer(E);
      From = E.get();
    }
    if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
      ObjC().CheckObjCConversion(castRange: SourceRange(), castType: NewToType, op&: From, CCK);
    From = ImpCastExprToType(E: From, Type: NewToType, CK: Kind, VK: VK_PRValue, BasePath: &BasePath, CCK)
               .get();
    break;
  }

  case ICK_Pointer_Member: {
    CastKind Kind;
    CXXCastPath BasePath;
    switch (CheckMemberPointerConversion(
        FromType: From->getType(), ToPtrType: ToType->castAs<MemberPointerType>(), Kind, BasePath,
        CheckLoc: From->getExprLoc(), OpRange: From->getSourceRange(), IgnoreBaseAccess: CStyle,
        Direction: MemberPointerConversionDirection::Downcast)) {
    case MemberPointerConversionResult::Success:
      assert((Kind != CK_NullToMemberPointer ||
              From->isNullPointerConstant(Context,
                                          Expr::NPC_ValueDependentIsNull)) &&
             "Expr must be null pointer constant!");
      break;
    case MemberPointerConversionResult::Inaccessible:
      break;
    case MemberPointerConversionResult::DifferentPointee:
      llvm_unreachable("unexpected result");
    case MemberPointerConversionResult::NotDerived:
      llvm_unreachable("Should not have been called if derivation isn't OK.");
    case MemberPointerConversionResult::Ambiguous:
    case MemberPointerConversionResult::Virtual:
      return ExprError();
    }
    if (CheckExceptionSpecCompatibility(From, ToType))
      return ExprError();

    From =
        ImpCastExprToType(E: From, Type: ToType, CK: Kind, VK: VK_PRValue, BasePath: &BasePath, CCK).get();
    break;
  }

  case ICK_Boolean_Conversion: {
    // Perform half-to-boolean conversion via float.
    if (From->getType()->isHalfType()) {
      From = ImpCastExprToType(E: From, Type: Context.FloatTy, CK: CK_FloatingCast).get();
      FromType = Context.FloatTy;
    }
    QualType ElTy = FromType;
    QualType StepTy = ToType;
    if (FromType->isVectorType())
      ElTy = FromType->castAs<VectorType>()->getElementType();
    else if (FromType->isConstantMatrixType())
      ElTy = FromType->castAs<ConstantMatrixType>()->getElementType();
    if (getLangOpts().HLSL) {
      if (FromType->isVectorType() || ToType->isVectorType() ||
          FromType->isConstantMatrixType() || ToType->isConstantMatrixType())
        StepTy = adjustVectorOrConstantMatrixType(Context, FromTy: FromType, ToType);
    }

    From = ImpCastExprToType(E: From, Type: StepTy, CK: ScalarTypeToBooleanCastKind(ScalarTy: ElTy),
                             VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Derived_To_Base: {
    CXXCastPath BasePath;
    if (CheckDerivedToBaseConversion(
            Derived: From->getType(), Base: ToType.getNonReferenceType(), Loc: From->getBeginLoc(),
            Range: From->getSourceRange(), BasePath: &BasePath, IgnoreAccess: CStyle))
      return ExprError();

    From = ImpCastExprToType(E: From, Type: ToType.getNonReferenceType(),
                             CK: CK_DerivedToBase, VK: From->getValueKind(),
                             BasePath: &BasePath, CCK).get();
    break;
  }

  case ICK_Vector_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_BitCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_SVE_Vector_Conversion:
  case ICK_RVV_Vector_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_BitCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Vector_Splat: {
    // Vector splat from any arithmetic type to a vector.
    Expr *Elem = prepareVectorSplat(VectorTy: ToType, SplattedExpr: From).get();
    From = ImpCastExprToType(E: Elem, Type: ToType, CK: CK_VectorSplat, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Complex_Real:
    // Case 1. x -> _Complex y
    if (const ComplexType *ToComplex = ToType->getAs<ComplexType>()) {
      QualType ElType = ToComplex->getElementType();
      bool isFloatingComplex = ElType->isRealFloatingType();

      // x -> y
      if (Context.hasSameUnqualifiedType(T1: ElType, T2: From->getType())) {
        // do nothing
      } else if (From->getType()->isRealFloatingType()) {
        From = ImpCastExprToType(E: From, Type: ElType,
                CK: isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral).get();
      } else {
        assert(From->getType()->isIntegerType());
        From = ImpCastExprToType(E: From, Type: ElType,
                CK: isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast).get();
      }
      // y -> _Complex y
      From = ImpCastExprToType(E: From, Type: ToType,
                   CK: isFloatingComplex ? CK_FloatingRealToComplex
                                     : CK_IntegralRealToComplex).get();

      // Case 2. _Complex x -> y
    } else {
      auto *FromComplex = From->getType()->castAs<ComplexType>();
      QualType ElType = FromComplex->getElementType();
      bool isFloatingComplex = ElType->isRealFloatingType();

      // _Complex x -> x
      From = ImpCastExprToType(E: From, Type: ElType,
                               CK: isFloatingComplex ? CK_FloatingComplexToReal
                                                 : CK_IntegralComplexToReal,
                               VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                 .get();

      // x -> y
      if (Context.hasSameUnqualifiedType(T1: ElType, T2: ToType)) {
        // do nothing
      } else if (ToType->isRealFloatingType()) {
        From = ImpCastExprToType(E: From, Type: ToType,
                                 CK: isFloatingComplex ? CK_FloatingCast
                                                   : CK_IntegralToFloating,
                                 VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                   .get();
      } else {
        assert(ToType->isIntegerType());
        From = ImpCastExprToType(E: From, Type: ToType,
                                 CK: isFloatingComplex ? CK_FloatingToIntegral
                                                   : CK_IntegralCast,
                                 VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                   .get();
      }
    }
    break;

  case ICK_Block_Pointer_Conversion: {
    LangAS AddrSpaceL =
        ToType->castAs<BlockPointerType>()->getPointeeType().getAddressSpace();
    LangAS AddrSpaceR =
        FromType->castAs<BlockPointerType>()->getPointeeType().getAddressSpace();
    assert(Qualifiers::isAddressSpaceSupersetOf(AddrSpaceL, AddrSpaceR,
                                                getASTContext()) &&
           "Invalid cast");
    CastKind Kind =
        AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast;
    From = ImpCastExprToType(E: From, Type: ToType.getUnqualifiedType(), CK: Kind,
                             VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_TransparentUnionConversion: {
    ExprResult FromRes = From;
    AssignConvertType ConvTy =
        CheckTransparentUnionArgumentConstraints(ArgType: ToType, RHS&: FromRes);
    if (FromRes.isInvalid())
      return ExprError();
    From = FromRes.get();
    assert((ConvTy == AssignConvertType::Compatible) &&
           "Improper transparent union conversion");
    (void)ConvTy;
    break;
  }

  case ICK_Zero_Event_Conversion:
  case ICK_Zero_Queue_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType,
                             CK: CK_ZeroToOCLOpaqueType,
                             VK: From->getValueKind()).get();
    break;

  case ICK_Lvalue_To_Rvalue:
  case ICK_Array_To_Pointer:
  case ICK_Function_To_Pointer:
  case ICK_Function_Conversion:
  case ICK_Qualification:
  case ICK_Num_Conversion_Kinds:
  case ICK_C_Only_Conversion:
  case ICK_Incompatible_Pointer_Conversion:
  case ICK_HLSL_Array_RValue:
  case ICK_HLSL_Vector_Truncation:
  case ICK_HLSL_Matrix_Truncation:
  case ICK_HLSL_Vector_Splat:
  case ICK_HLSL_Matrix_Splat:
    llvm_unreachable("Improper second standard conversion");
  }

  if (SCS.Dimension != ICK_Identity) {
    // If SCS.Dimension is not ICK_Identity the To and From types must be HLSL
    // vectors or matrices.
    assert(
        (ToType->isVectorType() || ToType->isConstantMatrixType() ||
         ToType->isBuiltinType()) &&
        "Dimension conversion output must be vector, matrix, or scalar type.");
    switch (SCS.Dimension) {
    case ICK_HLSL_Vector_Splat: {
      // Vector splat from any arithmetic type to a vector.
      Expr *Elem = prepareVectorSplat(VectorTy: ToType, SplattedExpr: From).get();
      From = ImpCastExprToType(E: Elem, Type: ToType, CK: CK_VectorSplat, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
      break;
    }
    case ICK_HLSL_Matrix_Splat: {
      // Matrix splat from any arithmetic type to a matrix.
      Expr *Elem = prepareMatrixSplat(MatrixTy: ToType, SplattedExpr: From).get();
      From =
          ImpCastExprToType(E: Elem, Type: ToType, CK: CK_HLSLAggregateSplatCast, VK: VK_PRValue,
                            /*BasePath=*/nullptr, CCK)
              .get();
      break;
    }
    case ICK_HLSL_Vector_Truncation: {
      // Note: HLSL built-in vectors are ExtVectors. Since this truncates a
      // vector to a smaller vector or to a scalar, this can only operate on
      // arguments where the source type is an ExtVector and the destination
      // type is either an ExtVectorType or a builtin scalar type.
      auto *FromVec = From->getType()->castAs<VectorType>();
      QualType TruncTy = FromVec->getElementType();
      if (auto *ToVec = ToType->getAs<VectorType>())
        TruncTy = Context.getExtVectorType(VectorType: TruncTy, NumElts: ToVec->getNumElements());
      From = ImpCastExprToType(E: From, Type: TruncTy, CK: CK_HLSLVectorTruncation,
                               VK: From->getValueKind())
                 .get();

      break;
    }
    case ICK_HLSL_Matrix_Truncation: {
      auto *FromMat = From->getType()->castAs<ConstantMatrixType>();
      QualType TruncTy = FromMat->getElementType();
      if (auto *ToMat = ToType->getAs<ConstantMatrixType>())
        TruncTy = Context.getConstantMatrixType(ElementType: TruncTy, NumRows: ToMat->getNumRows(),
                                                NumColumns: ToMat->getNumColumns());
      From = ImpCastExprToType(E: From, Type: TruncTy, CK: CK_HLSLMatrixTruncation,
                               VK: From->getValueKind())
                 .get();
      break;
    }
    case ICK_Identity:
    default:
      llvm_unreachable("Improper element standard conversion");
    }
  }

  // Perform the third implicit conversion (function conversion or
  // qualification adjustment).
  switch (SCS.Third) {
  case ICK_Identity:
    // Nothing to do.
    break;

  case ICK_Function_Conversion:
    // If both sides are functions (or pointers/references to them), there could
    // be incompatible exception declarations.
    if (CheckExceptionSpecCompatibility(From, ToType))
      return ExprError();

    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_NoOp, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Qualification: {
    ExprValueKind VK = From->getValueKind();
    CastKind CK = CK_NoOp;

    // A qualification conversion that changes the address space of the
    // pointee is modeled as an address space conversion, not a no-op.
    if (ToType->isReferenceType() &&
        ToType->getPointeeType().getAddressSpace() !=
            From->getType().getAddressSpace())
      CK = CK_AddressSpaceConversion;

    if (ToType->isPointerType() &&
        ToType->getPointeeType().getAddressSpace() !=
            From->getType()->getPointeeType().getAddressSpace())
      CK = CK_AddressSpaceConversion;

    if (!isCast(CCK) &&
        !ToType->getPointeeType().getQualifiers().hasUnaligned() &&
        From->getType()->getPointeeType().getQualifiers().hasUnaligned()) {
      Diag(Loc: From->getBeginLoc(), DiagID: diag::warn_imp_cast_drops_unaligned)
          << InitialFromType << ToType;
    }

    From = ImpCastExprToType(E: From, Type: ToType.getNonLValueExprType(Context), CK, VK,
                             /*BasePath=*/nullptr, CCK)
               .get();

    if (SCS.DeprecatedStringLiteralToCharPtr &&
        !getLangOpts().WritableStrings) {
      Diag(Loc: From->getBeginLoc(),
           DiagID: getLangOpts().CPlusPlus11
               ? diag::ext_deprecated_string_literal_conversion
               : diag::warn_deprecated_string_literal_conversion)
          << ToType.getNonReferenceType();
    }

    break;
  }

  default:
    llvm_unreachable("Improper third standard conversion");
  }

  // If this conversion sequence involved a scalar -> atomic conversion, perform
  // that conversion now.
  if (!ToAtomicType.isNull()) {
    assert(Context.hasSameType(
        ToAtomicType->castAs<AtomicType>()->getValueType(), From->getType()));
    From = ImpCastExprToType(E: From, Type: ToAtomicType, CK: CK_NonAtomicToAtomic,
                             VK: VK_PRValue, BasePath: nullptr, CCK)
               .get();
  }

  // Materialize a temporary if we're implicitly converting to a reference
  // type. This is not required by the C++ rules but is necessary to maintain
  // AST invariants.
  if (ToType->isReferenceType() && From->isPRValue()) {
    ExprResult Res = TemporaryMaterializationConversion(E: From);
    if (Res.isInvalid())
      return ExprError();
    From = Res.get();
  }

  // If this conversion sequence succeeded and involved implicitly converting a
  // _Nullable type to a _Nonnull one, complain.
  if (!isCast(CCK))
    diagnoseNullableToNonnullConversion(DstType: ToType, SrcType: InitialFromType,
                                        Loc: From->getBeginLoc());

  return From;
}
5415
/// Check and compute the result of a pointer-to-member operator, '.*' or
/// '->*', per C++ [expr.mptr.oper].
///
/// \param LHS the object expression (for '->*', a pointer to the object);
///        may be rewritten in place by lvalue conversions, temporary
///        materialization, and an implicit derived-to-base cast.
/// \param RHS the pointer-to-member operand; undergoes lvalue conversions.
/// \param VK set to the value category of the full expression on success.
/// \param Loc the location of the operator, used in diagnostics.
/// \param isIndirect true for '->*', false for '.*'.
/// \returns the result type; Context.BoundMemberTy when the member is a
///          function (the result can only be called); a null QualType on
///          error (a diagnostic has already been emitted).
QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
                                            ExprValueKind &VK,
                                            SourceLocation Loc,
                                            bool isIndirect) {
  assert(!LHS.get()->hasPlaceholderType() && !RHS.get()->hasPlaceholderType() &&
         "placeholders should have been weeded out by now");

  // The LHS undergoes lvalue conversions if this is ->*, and undergoes the
  // temporary materialization conversion otherwise.
  if (isIndirect)
    LHS = DefaultLvalueConversion(E: LHS.get());
  else if (LHS.get()->isPRValue())
    LHS = TemporaryMaterializationConversion(E: LHS.get());
  if (LHS.isInvalid())
    return QualType();

  // The RHS always undergoes lvalue conversions.
  RHS = DefaultLvalueConversion(E: RHS.get());
  if (RHS.isInvalid()) return QualType();

  const char *OpSpelling = isIndirect ? "->*" : ".*";
  // C++ 5.5p2
  //   The binary operator .* [p3: ->*] binds its second operand, which shall
  //   be of type "pointer to member of T" (where T is a completely-defined
  //   class type) [...]
  QualType RHSType = RHS.get()->getType();
  const MemberPointerType *MemPtr = RHSType->getAs<MemberPointerType>();
  if (!MemPtr) {
    Diag(Loc, DiagID: diag::err_bad_memptr_rhs)
        << OpSpelling << RHSType << RHS.get()->getSourceRange();
    return QualType();
  }

  // The class whose member the RHS points into.
  CXXRecordDecl *RHSClass = MemPtr->getMostRecentCXXRecordDecl();

  // Note: C++ [expr.mptr.oper]p2-3 says that the class type into which the
  // member pointer points must be completely-defined. However, there is no
  // reason for this semantic distinction, and the rule is not enforced by
  // other compilers. Therefore, we do not check this property, as it is
  // likely to be considered a defect.

  // C++ 5.5p2
  //   [...] to its first operand, which shall be of class T or of a class of
  //   which T is an unambiguous and accessible base class. [p3: a pointer to
  //   such a class]
  QualType LHSType = LHS.get()->getType();
  if (isIndirect) {
    if (const PointerType *Ptr = LHSType->getAs<PointerType>())
      LHSType = Ptr->getPointeeType();
    else {
      // '->*' applied to a non-pointer LHS: suggest '.*' instead.
      Diag(Loc, DiagID: diag::err_bad_memptr_lhs)
          << OpSpelling << 1 << LHSType
          << FixItHint::CreateReplacement(RemoveRange: SourceRange(Loc), Code: ".*");
      return QualType();
    }
  }
  CXXRecordDecl *LHSClass = LHSType->getAsCXXRecordDecl();

  // If the LHS class is not the member pointer's class, it must be derived
  // from it: check the hierarchy and insert a derived-to-base cast.
  if (!declaresSameEntity(D1: LHSClass, D2: RHSClass)) {
    // If we want to check the hierarchy, we need a complete type.
    if (RequireCompleteType(Loc, T: LHSType, DiagID: diag::err_bad_memptr_lhs,
                            Args: OpSpelling, Args: (int)isIndirect)) {
      return QualType();
    }

    if (!IsDerivedFrom(Loc, Derived: LHSClass, Base: RHSClass)) {
      Diag(Loc, DiagID: diag::err_bad_memptr_lhs) << OpSpelling
        << (int)isIndirect << LHS.get()->getType();
      return QualType();
    }

    // FIXME: use sugared type from member pointer.
    CanQualType RHSClassType = Context.getCanonicalTagType(TD: RHSClass);
    CXXCastPath BasePath;
    // Diagnoses ambiguity and inaccessibility of the base class.
    if (CheckDerivedToBaseConversion(
            Derived: LHSType, Base: RHSClassType, Loc,
            Range: SourceRange(LHS.get()->getBeginLoc(), RHS.get()->getEndLoc()),
            BasePath: &BasePath))
      return QualType();

    // Cast LHS to type of use.
    QualType UseType =
        Context.getQualifiedType(T: RHSClassType, Qs: LHSType.getQualifiers());
    if (isIndirect)
      UseType = Context.getPointerType(T: UseType);
    // Note: this local VK intentionally shadows the out-parameter; the
    // result's value category is computed at the end of the function.
    ExprValueKind VK = isIndirect ? VK_PRValue : LHS.get()->getValueKind();
    LHS = ImpCastExprToType(E: LHS.get(), Type: UseType, CK: CK_DerivedToBase, VK,
                            BasePath: &BasePath);
  }

  if (isa<CXXScalarValueInitExpr>(Val: RHS.get()->IgnoreParens())) {
    // Diagnose use of pointer-to-member type which when used as
    // the functional cast in a pointer-to-member expression.
    Diag(Loc, DiagID: diag::err_pointer_to_member_type) << isIndirect;
    return QualType();
  }

  // C++ 5.5p2
  //   The result is an object or a function of the type specified by the
  //   second operand.
  // The cv qualifiers are the union of those in the pointer and the left side,
  // in accordance with 5.5p5 and 5.2.5.
  QualType Result = MemPtr->getPointeeType();
  Result = Context.getCVRQualifiedType(T: Result, CVR: LHSType.getCVRQualifiers());

  // C++0x [expr.mptr.oper]p6:
  //   In a .* expression whose object expression is an rvalue, the program is
  //   ill-formed if the second operand is a pointer to member function with
  //   ref-qualifier &. In a ->* expression or in a .* expression whose object
  //   expression is an lvalue, the program is ill-formed if the second operand
  //   is a pointer to member function with ref-qualifier &&.
  if (const FunctionProtoType *Proto = Result->getAs<FunctionProtoType>()) {
    switch (Proto->getRefQualifier()) {
    case RQ_None:
      // Do nothing
      break;

    case RQ_LValue:
      if (!isIndirect && !LHS.get()->Classify(Ctx&: Context).isLValue()) {
        // C++2a allows functions with ref-qualifier & if their cv-qualifier-seq
        // is (exactly) 'const'.
        if (Proto->isConst() && !Proto->isVolatile())
          Diag(Loc, DiagID: getLangOpts().CPlusPlus20
                        ? diag::warn_cxx17_compat_pointer_to_const_ref_member_on_rvalue
                        : diag::ext_pointer_to_const_ref_member_on_rvalue);
        else
          Diag(Loc, DiagID: diag::err_pointer_to_member_oper_value_classify)
              << RHSType << 1 << LHS.get()->getSourceRange();
      }
      break;

    case RQ_RValue:
      if (isIndirect || !LHS.get()->Classify(Ctx&: Context).isRValue())
        Diag(Loc, DiagID: diag::err_pointer_to_member_oper_value_classify)
            << RHSType << 0 << LHS.get()->getSourceRange();
      break;
    }
  }

  // C++ [expr.mptr.oper]p6:
  //   The result of a .* expression whose second operand is a pointer
  //   to a data member is of the same value category as its
  //   first operand. The result of a .* expression whose second
  //   operand is a pointer to a member function is a prvalue. The
  //   result of an ->* expression is an lvalue if its second operand
  //   is a pointer to data member and a prvalue otherwise.
  if (Result->isFunctionType()) {
    VK = VK_PRValue;
    return Context.BoundMemberTy;
  } else if (isIndirect) {
    VK = VK_LValue;
  } else {
    VK = LHS.get()->getValueKind();
  }

  return Result;
}
5573
5574/// Try to convert a type to another according to C++11 5.16p3.
5575///
5576/// This is part of the parameter validation for the ? operator. If either
5577/// value operand is a class type, the two operands are attempted to be
5578/// converted to each other. This function does the conversion in one direction.
5579/// It returns true if the program is ill-formed and has already been diagnosed
5580/// as such.
/// Try to convert a type to another according to C++11 5.16p3.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, the two operands are attempted to be
/// converted to each other. This function does the conversion in one direction.
/// It returns true if the program is ill-formed and has already been diagnosed
/// as such.
///
/// \param From the operand being converted.
/// \param To the operand whose type is the conversion target.
/// \param HaveConversion set to true if a usable conversion was found.
/// \param ToType set to the type that From should be converted to; this may
///        be a reference type when From can bind directly to a glvalue To.
static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
                                SourceLocation QuestionLoc,
                                bool &HaveConversion,
                                QualType &ToType) {
  HaveConversion = false;
  ToType = To->getType();

  InitializationKind Kind =
      InitializationKind::CreateCopy(InitLoc: To->getBeginLoc(), EqualLoc: SourceLocation());
  // C++11 5.16p3
  //   The process for determining whether an operand expression E1 of type T1
  //   can be converted to match an operand expression E2 of type T2 is defined
  //   as follows:
  //   -- If E2 is an lvalue: E1 can be converted to match E2 if E1 can be
  //      implicitly converted to type "lvalue reference to T2", subject to the
  //      constraint that in the conversion the reference must bind directly to
  //      an lvalue.
  //   -- If E2 is an xvalue: E1 can be converted to match E2 if E1 can be
  //      implicitly converted to the type "rvalue reference to R2", subject to
  //      the constraint that the reference must bind directly.
  if (To->isGLValue()) {
    QualType T = Self.Context.getReferenceQualifiedType(e: To);
    InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: T);

    InitializationSequence InitSeq(Self, Entity, Kind, From);
    // Only a direct reference binding qualifies; other sequences fall
    // through to the class-type and rvalue cases below.
    if (InitSeq.isDirectReferenceBinding()) {
      ToType = T;
      HaveConversion = true;
      return false;
    }

    if (InitSeq.isAmbiguous())
      return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);
  }

  //   -- If E2 is an rvalue, or if the conversion above cannot be done:
  //      -- if E1 and E2 have class type, and the underlying class types are
  //         the same or one is a base class of the other:
  QualType FTy = From->getType();
  QualType TTy = To->getType();
  const RecordType *FRec = FTy->getAsCanonical<RecordType>();
  const RecordType *TRec = TTy->getAsCanonical<RecordType>();
  bool FDerivedFromT = FRec && TRec && FRec != TRec &&
                       Self.IsDerivedFrom(Loc: QuestionLoc, Derived: FTy, Base: TTy);
  if (FRec && TRec && (FRec == TRec || FDerivedFromT ||
                       Self.IsDerivedFrom(Loc: QuestionLoc, Derived: TTy, Base: FTy))) {
    //         E1 can be converted to match E2 if the class of T2 is the
    //         same type as, or a base class of, the class of T1, and
    //         [cv2 > cv1].
    if (FRec == TRec || FDerivedFromT) {
      if (TTy.isAtLeastAsQualifiedAs(other: FTy, Ctx: Self.getASTContext())) {
        InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: TTy);
        InitializationSequence InitSeq(Self, Entity, Kind, From);
        if (InitSeq) {
          HaveConversion = true;
          return false;
        }

        if (InitSeq.isAmbiguous())
          return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);
      }
    }

    // Related class types but no usable conversion: not an error here, the
    // caller decides what to do when neither direction converts.
    return false;
  }

  //     -- Otherwise: E1 can be converted to match E2 if E1 can be
  //        implicitly converted to the type that expression E2 would have
  //        if E2 were converted to an rvalue (or the type it has, if E2 is
  //        an rvalue).
  //
  // This actually refers very narrowly to the lvalue-to-rvalue conversion, not
  // to the array-to-pointer or function-to-pointer conversions.
  TTy = TTy.getNonLValueExprType(Context: Self.Context);

  InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: TTy);
  InitializationSequence InitSeq(Self, Entity, Kind, From);
  HaveConversion = !InitSeq.Failed();
  ToType = TTy;
  if (InitSeq.isAmbiguous())
    return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);

  return false;
}
5665
5666/// Try to find a common type for two according to C++0x 5.16p5.
5667///
5668/// This is part of the parameter validation for the ? operator. If either
5669/// value operand is a class type, overload resolution is used to find a
5670/// conversion to a common type.
/// Try to find a common type for two according to C++0x 5.16p5.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, overload resolution is used to find a
/// conversion to a common type.
///
/// \param LHS, RHS the second and third operands; on success they are
///        replaced by the converted expressions.
/// \returns false on success; true if resolution failed (a diagnostic has
///          been emitted).
static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation QuestionLoc) {
  Expr *Args[2] = { LHS.get(), RHS.get() };
  OverloadCandidateSet CandidateSet(QuestionLoc,
                                    OverloadCandidateSet::CSK_Operator);
  // Only built-in candidates exist for the conditional operator; it cannot
  // be overloaded by users.
  Self.AddBuiltinOperatorCandidates(Op: OO_Conditional, OpLoc: QuestionLoc, Args,
                                    CandidateSet);

  OverloadCandidateSet::iterator Best;
  switch (CandidateSet.BestViableFunction(S&: Self, Loc: QuestionLoc, Best)) {
    case OR_Success: {
      // We found a match. Perform the conversions on the arguments and move on.
      ExprResult LHSRes = Self.PerformImplicitConversion(
          From: LHS.get(), ToType: Best->BuiltinParamTypes[0], ICS: Best->Conversions[0],
          Action: AssignmentAction::Converting);
      if (LHSRes.isInvalid())
        break;
      LHS = LHSRes;

      ExprResult RHSRes = Self.PerformImplicitConversion(
          From: RHS.get(), ToType: Best->BuiltinParamTypes[1], ICS: Best->Conversions[1],
          Action: AssignmentAction::Converting);
      if (RHSRes.isInvalid())
        break;
      RHS = RHSRes;
      if (Best->Function)
        Self.MarkFunctionReferenced(Loc: QuestionLoc, Func: Best->Function);
      return false;
    }

    case OR_No_Viable_Function:

      // Emit a better diagnostic if one of the expressions is a null pointer
      // constant and the other is a pointer type. In this case, the user most
      // likely forgot to take the address of the other expression.
      if (Self.DiagnoseConditionalForNull(LHSExpr: LHS.get(), RHSExpr: RHS.get(), QuestionLoc))
        return true;

      Self.Diag(Loc: QuestionLoc, DiagID: diag::err_typecheck_cond_incompatible_operands)
        << LHS.get()->getType() << RHS.get()->getType()
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
      return true;

    case OR_Ambiguous:
      Self.Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_ambiguous_ovl)
        << LHS.get()->getType() << RHS.get()->getType()
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
      // FIXME: Print the possible common types by printing the return types of
      // the viable candidates.
      break;

    case OR_Deleted:
      llvm_unreachable("Conditional operator has only built-in overloads");
  }
  return true;
}
5727
5728/// Perform an "extended" implicit conversion as returned by
5729/// TryClassUnification.
5730static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
5731 InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: T);
5732 InitializationKind Kind =
5733 InitializationKind::CreateCopy(InitLoc: E.get()->getBeginLoc(), EqualLoc: SourceLocation());
5734 Expr *Arg = E.get();
5735 InitializationSequence InitSeq(Self, Entity, Kind, Arg);
5736 ExprResult Result = InitSeq.Perform(S&: Self, Entity, Kind, Args: Arg);
5737 if (Result.isInvalid())
5738 return true;
5739
5740 E = Result;
5741 return false;
5742}
5743
5744// Check the condition operand of ?: to see if it is valid for the GCC
5745// extension.
5746static bool isValidVectorForConditionalCondition(ASTContext &Ctx,
5747 QualType CondTy) {
5748 bool IsSVEVectorType = CondTy->isSveVLSBuiltinType();
5749 if (!CondTy->isVectorType() && !CondTy->isExtVectorType() && !IsSVEVectorType)
5750 return false;
5751 const QualType EltTy =
5752 IsSVEVectorType
5753 ? cast<BuiltinType>(Val: CondTy.getCanonicalType())->getSveEltType(Ctx)
5754 : cast<VectorType>(Val: CondTy.getCanonicalType())->getElementType();
5755 assert(!EltTy->isEnumeralType() && "Vectors cant be enum types");
5756 return EltTy->isIntegralType(Ctx);
5757}
5758
/// Check a conditional operator whose condition is a vector type (the GCC
/// vector-conditional extension) and compute the result type.
///
/// The result is always a vector type whose element count matches the
/// condition's; scalar operands are splatted to that type. LHS and RHS may
/// be rewritten with the applied conversions.
///
/// \returns the vector result type, or a null QualType after diagnosing an
///          error.
QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
                                           ExprResult &RHS,
                                           SourceLocation QuestionLoc) {
  LHS = DefaultFunctionArrayLvalueConversion(E: LHS.get());
  RHS = DefaultFunctionArrayLvalueConversion(E: RHS.get());

  QualType CondType = Cond.get()->getType();
  QualType LHSType = LHS.get()->getType();
  QualType RHSType = RHS.get()->getType();

  bool LHSSizelessVector = LHSType->isSizelessVectorType();
  bool RHSSizelessVector = RHSType->isSizelessVectorType();
  bool LHSIsVector = LHSType->isVectorType() || LHSSizelessVector;
  bool RHSIsVector = RHSType->isVectorType() || RHSSizelessVector;

  // Extract (element type, element count) uniformly from either a fixed
  // VectorType or a sizeless builtin vector type.
  auto GetVectorInfo =
      [&](QualType Type) -> std::pair<QualType, llvm::ElementCount> {
    if (const auto *VT = Type->getAs<VectorType>())
      return std::make_pair(x: VT->getElementType(),
                            y: llvm::ElementCount::getFixed(MinVal: VT->getNumElements()));
    ASTContext::BuiltinVectorTypeInfo VectorInfo =
        Context.getBuiltinVectorTypeInfo(VecTy: Type->castAs<BuiltinType>());
    return std::make_pair(x&: VectorInfo.ElementType, y&: VectorInfo.EC);
  };

  auto [CondElementTy, CondElementCount] = GetVectorInfo(CondType);

  QualType ResultType;
  if (LHSIsVector && RHSIsVector) {
    // The condition and operands must agree on ext_vector_type-ness.
    if (CondType->isExtVectorType() != LHSType->isExtVectorType()) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_cond_result_mismatch)
          << /*isExtVectorNotSizeless=*/1;
      return {};
    }

    // If both are vector types, they must be the same type.
    if (!Context.hasSameType(T1: LHSType, T2: RHSType)) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_mismatched)
          << LHSType << RHSType;
      return {};
    }
    ResultType = Context.getCommonSugaredType(X: LHSType, Y: RHSType);
  } else if (LHSIsVector || RHSIsVector) {
    // Exactly one operand is a vector: the scalar operand will be converted
    // and splatted by the vector-operand checkers below.
    bool ResultSizeless = LHSSizelessVector || RHSSizelessVector;
    if (ResultSizeless != CondType->isSizelessVectorType()) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_cond_result_mismatch)
          << /*isExtVectorNotSizeless=*/0;
      return {};
    }
    if (ResultSizeless)
      ResultType = CheckSizelessVectorOperands(LHS, RHS, Loc: QuestionLoc,
                                               /*IsCompAssign*/ false,
                                               OperationKind: ArithConvKind::Conditional);
    else
      ResultType = CheckVectorOperands(
          LHS, RHS, Loc: QuestionLoc, /*isCompAssign*/ IsCompAssign: false, /*AllowBothBool*/ true,
          /*AllowBoolConversions*/ AllowBoolConversion: false,
          /*AllowBoolOperation*/ true,
          /*ReportInvalid*/ true);
    if (ResultType.isNull())
      return {};
  } else {
    // Both are scalar.
    LHSType = LHSType.getUnqualifiedType();
    RHSType = RHSType.getUnqualifiedType();
    QualType ResultElementTy =
        Context.hasSameType(T1: LHSType, T2: RHSType)
            ? Context.getCommonSugaredType(X: LHSType, Y: RHSType)
            : UsualArithmeticConversions(LHS, RHS, Loc: QuestionLoc,
                                         ACK: ArithConvKind::Conditional);

    if (ResultElementTy->isEnumeralType()) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_operand_type)
          << ResultElementTy;
      return {};
    }
    // Build a vector result type of the same flavor as the condition, with
    // the condition's element count and the common scalar element type.
    if (CondType->isExtVectorType()) {
      ResultType = Context.getExtVectorType(VectorType: ResultElementTy,
                                            NumElts: CondElementCount.getFixedValue());
    } else if (CondType->isSizelessVectorType()) {
      ResultType = Context.getScalableVectorType(
          EltTy: ResultElementTy, NumElts: CondElementCount.getKnownMinValue());
      // There are not scalable vector type mappings for all element counts.
      if (ResultType.isNull()) {
        Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_scalar_type_unsupported)
            << ResultElementTy << CondType;
        return {};
      }
    } else {
      ResultType = Context.getVectorType(VectorType: ResultElementTy,
                                         NumElts: CondElementCount.getFixedValue(),
                                         VecKind: VectorKind::Generic);
    }
    LHS = ImpCastExprToType(E: LHS.get(), Type: ResultType, CK: CK_VectorSplat);
    RHS = ImpCastExprToType(E: RHS.get(), Type: ResultType, CK: CK_VectorSplat);
  }

  assert(!ResultType.isNull() &&
         (ResultType->isVectorType() || ResultType->isSizelessVectorType()) &&
         (!CondType->isExtVectorType() || ResultType->isExtVectorType()) &&
         "Result should have been a vector type");

  // The condition and result must agree on element count...
  auto [ResultElementTy, ResultElementCount] = GetVectorInfo(ResultType);
  if (ResultElementCount != CondElementCount) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_size) << CondType
                                                          << ResultType;
    return {};
  }

  // ... and on element size, except that boolean condition vectors are
  // permitted outside of OpenCL mode.
  if (Context.getTypeSize(T: ResultElementTy) !=
          Context.getTypeSize(T: CondElementTy) &&
      (!CondElementTy->isBooleanType() || LangOpts.OpenCL)) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_element_size)
        << CondType << ResultType;
    return {};
  }

  return ResultType;
}
5879
/// Check the operands of a C++ conditional operator (?:) per C++11
/// [expr.cond], computing the result type, value category, and object kind.
///
/// \param Cond the condition; rewritten with the contextual conversion to
///        bool (or lvalue conversions for the vector extension).
/// \param LHS, RHS the second and third operands; rewritten with any
///        implicit conversions applied.
/// \param VK set to the value category of the result.
/// \param OK set to OK_BitField when the result is a bit-field, else
///        OK_Ordinary.
/// \returns the result type, Context.DependentTy when any operand is type
///          dependent, or a null QualType after diagnosing an error.
QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
                                           ExprResult &RHS, ExprValueKind &VK,
                                           ExprObjectKind &OK,
                                           SourceLocation QuestionLoc) {
  // FIXME: Handle C99's complex types, block pointers and Obj-C++ interface
  // pointers.

  // Assume r-value.
  VK = VK_PRValue;
  OK = OK_Ordinary;
  bool IsVectorConditional =
      isValidVectorForConditionalCondition(Ctx&: Context, CondTy: Cond.get()->getType());

  // C++11 [expr.cond]p1
  //   The first expression is contextually converted to bool.
  if (!Cond.get()->isTypeDependent()) {
    ExprResult CondRes = IsVectorConditional
                             ? DefaultFunctionArrayLvalueConversion(E: Cond.get())
                             : CheckCXXBooleanCondition(CondExpr: Cond.get());
    if (CondRes.isInvalid())
      return QualType();
    Cond = CondRes;
  } else {
    // To implement C++, the first expression typically doesn't alter the result
    // type of the conditional, however the GCC compatible vector extension
    // changes the result type to be that of the conditional. Since we cannot
    // know if this is a vector extension here, delay the conversion of the
    // LHS/RHS below until later.
    return Context.DependentTy;
  }


  // Either of the arguments dependent?
  if (LHS.get()->isTypeDependent() || RHS.get()->isTypeDependent())
    return Context.DependentTy;

  // C++11 [expr.cond]p2
  //   If either the second or the third operand has type (cv) void, ...
  QualType LTy = LHS.get()->getType();
  QualType RTy = RHS.get()->getType();
  bool LVoid = LTy->isVoidType();
  bool RVoid = RTy->isVoidType();
  if (LVoid || RVoid) {
    //   ... one of the following shall hold:
    //   -- The second or the third operand (but not both) is a (possibly
    //      parenthesized) throw-expression; the result is of the type
    //      and value category of the other.
    bool LThrow = isa<CXXThrowExpr>(Val: LHS.get()->IgnoreParenImpCasts());
    bool RThrow = isa<CXXThrowExpr>(Val: RHS.get()->IgnoreParenImpCasts());

    // Void expressions aren't legal in the vector-conditional expressions.
    if (IsVectorConditional) {
      SourceRange DiagLoc =
          LVoid ? LHS.get()->getSourceRange() : RHS.get()->getSourceRange();
      bool IsThrow = LVoid ? LThrow : RThrow;
      Diag(Loc: DiagLoc.getBegin(), DiagID: diag::err_conditional_vector_has_void)
          << DiagLoc << IsThrow;
      return QualType();
    }

    if (LThrow != RThrow) {
      Expr *NonThrow = LThrow ? RHS.get() : LHS.get();
      VK = NonThrow->getValueKind();
      // DR (no number yet): the result is a bit-field if the
      // non-throw-expression operand is a bit-field.
      OK = NonThrow->getObjectKind();
      return NonThrow->getType();
    }

    //   -- Both the second and third operands have type void; the result is of
    //      type void and is a prvalue.
    if (LVoid && RVoid)
      return Context.getCommonSugaredType(X: LTy, Y: RTy);

    // Neither holds, error.
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_void_nonvoid)
      << (LVoid ? RTy : LTy) << (LVoid ? 0 : 1)
      << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    return QualType();
  }

  // Neither is void.
  if (IsVectorConditional)
    return CheckVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);

  // WebAssembly tables are not allowed as conditional LHS or RHS.
  if (LTy->isWebAssemblyTableType() || RTy->isWebAssemblyTableType()) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_wasm_table_conditional_expression)
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    return QualType();
  }

  // C++11 [expr.cond]p3
  //   Otherwise, if the second and third operand have different types, and
  //   either has (cv) class type [...] an attempt is made to convert each of
  //   those operands to the type of the other.
  if (!Context.hasSameType(T1: LTy, T2: RTy) &&
      (LTy->isRecordType() || RTy->isRecordType())) {
    // These return true if a single direction is already ambiguous.
    QualType L2RType, R2LType;
    bool HaveL2R, HaveR2L;
    if (TryClassUnification(Self&: *this, From: LHS.get(), To: RHS.get(), QuestionLoc, HaveConversion&: HaveL2R, ToType&: L2RType))
      return QualType();
    if (TryClassUnification(Self&: *this, From: RHS.get(), To: LHS.get(), QuestionLoc, HaveConversion&: HaveR2L, ToType&: R2LType))
      return QualType();

    //   If both can be converted, [...] the program is ill-formed.
    if (HaveL2R && HaveR2L) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_ambiguous)
        << LTy << RTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
      return QualType();
    }

    //   If exactly one conversion is possible, that conversion is applied to
    //   the chosen operand and the converted operands are used in place of the
    //   original operands for the remainder of this section.
    if (HaveL2R) {
      if (ConvertForConditional(Self&: *this, E&: LHS, T: L2RType) || LHS.isInvalid())
        return QualType();
      LTy = LHS.get()->getType();
    } else if (HaveR2L) {
      if (ConvertForConditional(Self&: *this, E&: RHS, T: R2LType) || RHS.isInvalid())
        return QualType();
      RTy = RHS.get()->getType();
    }
  }

  // C++11 [expr.cond]p3
  //   if both are glvalues of the same value category and the same type except
  //   for cv-qualification, an attempt is made to convert each of those
  //   operands to the type of the other.
  // FIXME:
  //   Resolving a defect in P0012R1: we extend this to cover all cases where
  //   one of the operands is reference-compatible with the other, in order
  //   to support conditionals between functions differing in noexcept. This
  //   will similarly cover difference in array bounds after P0388R4.
  // FIXME: If LTy and RTy have a composite pointer type, should we convert to
  // that instead?
  ExprValueKind LVK = LHS.get()->getValueKind();
  ExprValueKind RVK = RHS.get()->getValueKind();
  if (!Context.hasSameType(T1: LTy, T2: RTy) && LVK == RVK && LVK != VK_PRValue) {
    // DerivedToBase was already handled by the class-specific case above.
    // FIXME: Should we allow ObjC conversions here?
    const ReferenceConversions AllowedConversions =
        ReferenceConversions::Qualification |
        ReferenceConversions::NestedQualification |
        ReferenceConversions::Function;

    ReferenceConversions RefConv;
    if (CompareReferenceRelationship(Loc: QuestionLoc, T1: LTy, T2: RTy, Conv: &RefConv) ==
            Ref_Compatible &&
        !(RefConv & ~AllowedConversions) &&
        // [...] subject to the constraint that the reference must bind
        // directly [...]
        !RHS.get()->refersToBitField() && !RHS.get()->refersToVectorElement()) {
      RHS = ImpCastExprToType(E: RHS.get(), Type: LTy, CK: CK_NoOp, VK: RVK);
      RTy = RHS.get()->getType();
    } else if (CompareReferenceRelationship(Loc: QuestionLoc, T1: RTy, T2: LTy, Conv: &RefConv) ==
                   Ref_Compatible &&
               !(RefConv & ~AllowedConversions) &&
               !LHS.get()->refersToBitField() &&
               !LHS.get()->refersToVectorElement()) {
      LHS = ImpCastExprToType(E: LHS.get(), Type: RTy, CK: CK_NoOp, VK: LVK);
      LTy = LHS.get()->getType();
    }
  }

  // C++11 [expr.cond]p4
  //   If the second and third operands are glvalues of the same value
  //   category and have the same type, the result is of that type and
  //   value category and it is a bit-field if the second or the third
  //   operand is a bit-field, or if both are bit-fields.
  // We only extend this to bitfields, not to the crazy other kinds of
  // l-values.
  bool Same = Context.hasSameType(T1: LTy, T2: RTy);
  if (Same && LVK == RVK && LVK != VK_PRValue &&
      LHS.get()->isOrdinaryOrBitFieldObject() &&
      RHS.get()->isOrdinaryOrBitFieldObject()) {
    VK = LHS.get()->getValueKind();
    if (LHS.get()->getObjectKind() == OK_BitField ||
        RHS.get()->getObjectKind() == OK_BitField)
      OK = OK_BitField;
    return Context.getCommonSugaredType(X: LTy, Y: RTy);
  }

  // C++11 [expr.cond]p5
  //   Otherwise, the result is a prvalue. If the second and third operands
  //   do not have the same type, and either has (cv) class type, ...
  if (!Same && (LTy->isRecordType() || RTy->isRecordType())) {
    //   ... overload resolution is used to determine the conversions (if any)
    //   to be applied to the operands. If the overload resolution fails, the
    //   program is ill-formed.
    if (FindConditionalOverload(Self&: *this, LHS, RHS, QuestionLoc))
      return QualType();
  }

  // C++11 [expr.cond]p6
  //   Lvalue-to-rvalue, array-to-pointer, and function-to-pointer standard
  //   conversions are performed on the second and third operands.
  LHS = DefaultFunctionArrayLvalueConversion(E: LHS.get());
  RHS = DefaultFunctionArrayLvalueConversion(E: RHS.get());
  if (LHS.isInvalid() || RHS.isInvalid())
    return QualType();
  LTy = LHS.get()->getType();
  RTy = RHS.get()->getType();

  //   After those conversions, one of the following shall hold:
  //   -- The second and third operands have the same type; the result
  //      is of that type. If the operands have class type, the result
  //      is a prvalue temporary of the result type, which is
  //      copy-initialized from either the second operand or the third
  //      operand depending on the value of the first operand.
  if (Context.hasSameType(T1: LTy, T2: RTy)) {
    if (LTy->isRecordType()) {
      // The operands have class type. Make a temporary copy.
      ExprResult LHSCopy = PerformCopyInitialization(
          Entity: InitializedEntity::InitializeTemporary(Type: LTy), EqualLoc: SourceLocation(), Init: LHS);
      if (LHSCopy.isInvalid())
        return QualType();

      ExprResult RHSCopy = PerformCopyInitialization(
          Entity: InitializedEntity::InitializeTemporary(Type: RTy), EqualLoc: SourceLocation(), Init: RHS);
      if (RHSCopy.isInvalid())
        return QualType();

      LHS = LHSCopy;
      RHS = RHSCopy;
    }
    return Context.getCommonSugaredType(X: LTy, Y: RTy);
  }

  // Extension: conditional operator involving vector types.
  if (LTy->isVectorType() || RTy->isVectorType())
    return CheckVectorOperands(LHS, RHS, Loc: QuestionLoc, /*isCompAssign*/ IsCompAssign: false,
                               /*AllowBothBool*/ true,
                               /*AllowBoolConversions*/ AllowBoolConversion: false,
                               /*AllowBoolOperation*/ false,
                               /*ReportInvalid*/ true);

  //   -- The second and third operands have arithmetic or enumeration type;
  //      the usual arithmetic conversions are performed to bring them to a
  //      common type, and the result is of that type.
  if (LTy->isArithmeticType() && RTy->isArithmeticType()) {
    QualType ResTy = UsualArithmeticConversions(LHS, RHS, Loc: QuestionLoc,
                                                ACK: ArithConvKind::Conditional);
    if (LHS.isInvalid() || RHS.isInvalid())
      return QualType();
    if (ResTy.isNull()) {
      Diag(Loc: QuestionLoc,
           DiagID: diag::err_typecheck_cond_incompatible_operands) << LTy << RTy
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
      return QualType();
    }

    LHS = ImpCastExprToType(E: LHS.get(), Type: ResTy, CK: PrepareScalarCast(src&: LHS, destType: ResTy));
    RHS = ImpCastExprToType(E: RHS.get(), Type: ResTy, CK: PrepareScalarCast(src&: RHS, destType: ResTy));

    return ResTy;
  }

  //   -- The second and third operands have pointer type, or one has pointer
  //      type and the other is a null pointer constant, or both are null
  //      pointer constants, at least one of which is non-integral; pointer
  //      conversions and qualification conversions are performed to bring them
  //      to their composite pointer type. The result is of the composite
  //      pointer type.
  //   -- The second and third operands have pointer to member type, or one has
  //      pointer to member type and the other is a null pointer constant;
  //      pointer to member conversions and qualification conversions are
  //      performed to bring them to a common type, whose cv-qualification
  //      shall match the cv-qualification of either the second or the third
  //      operand. The result is of the common type.
  QualType Composite = FindCompositePointerType(Loc: QuestionLoc, E1&: LHS, E2&: RHS);
  if (!Composite.isNull())
    return Composite;

  // Similarly, attempt to find composite type of two objective-c pointers.
  Composite = ObjC().FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
  if (LHS.isInvalid() || RHS.isInvalid())
    return QualType();
  if (!Composite.isNull())
    return Composite;

  // Check if we are using a null with a non-pointer type.
  if (DiagnoseConditionalForNull(LHSExpr: LHS.get(), RHSExpr: RHS.get(), QuestionLoc))
    return QualType();

  Diag(Loc: QuestionLoc, DiagID: diag::err_typecheck_cond_incompatible_operands)
    << LHS.get()->getType() << RHS.get()->getType()
    << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
  return QualType();
}
6172
/// Compute the composite pointer type of the two operands per
/// C++1z [expr]p14 (now [expr.type]p3), optionally converting E1/E2 to that
/// type when ConvertArgs is true. Returns a null QualType when no composite
/// pointer type exists.
QualType Sema::FindCompositePointerType(SourceLocation Loc,
                                        Expr *&E1, Expr *&E2,
                                        bool ConvertArgs) {
  assert(getLangOpts().CPlusPlus && "This function assumes C++");

  // C++1z [expr]p14:
  //   The composite pointer type of two operands p1 and p2 having types T1
  //   and T2
  QualType T1 = E1->getType(), T2 = E2->getType();

  //   where at least one is a pointer or pointer to member type or
  //   std::nullptr_t is:
  bool T1IsPointerLike = T1->isAnyPointerType() || T1->isMemberPointerType() ||
                         T1->isNullPtrType();
  bool T2IsPointerLike = T2->isAnyPointerType() || T2->isMemberPointerType() ||
                         T2->isNullPtrType();
  if (!T1IsPointerLike && !T2IsPointerLike)
    return QualType();

  //  - if both p1 and p2 are null pointer constants, std::nullptr_t;
  // This can't actually happen, following the standard, but we also use this
  // to implement the end of [expr.conv], which hits this case.
  //
  //  - if either p1 or p2 is a null pointer constant, T2 or T1, respectively;
  if (T1IsPointerLike &&
      E2->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
    if (ConvertArgs)
      E2 = ImpCastExprToType(E: E2, Type: T1, CK: T1->isMemberPointerType()
                                     ? CK_NullToMemberPointer
                                     : CK_NullToPointer).get();
    return T1;
  }
  if (T2IsPointerLike &&
      E1->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
    if (ConvertArgs)
      E1 = ImpCastExprToType(E: E1, Type: T2, CK: T2->isMemberPointerType()
                                     ? CK_NullToMemberPointer
                                     : CK_NullToPointer).get();
    return T2;
  }

  // Now both have to be pointers or member pointers.
  if (!T1IsPointerLike || !T2IsPointerLike)
    return QualType();
  assert(!T1->isNullPtrType() && !T2->isNullPtrType() &&
         "nullptr_t should be a null pointer constant");

  // One level of type structure (pointer, ObjC object pointer, member
  // pointer, or array) that was unwrapped simultaneously from both operand
  // types. The recorded steps are replayed in reverse at the end to rebuild
  // the composite type with the merged qualifiers.
  struct Step {
    enum Kind { Pointer, ObjCPointer, MemberPointer, Array } K;
    // Qualifiers to apply under the step kind.
    Qualifiers Quals;
    /// The class for a pointer-to-member; a constant array type with a bound
    /// (if any) for an array.
    /// FIXME: Store Qualifier for pointer-to-member.
    const Type *ClassOrBound;

    Step(Kind K, const Type *ClassOrBound = nullptr)
        : K(K), ClassOrBound(ClassOrBound) {}
    // Re-wrap T in this step's type structure, applying the merged
    // qualifiers first.
    QualType rebuild(ASTContext &Ctx, QualType T) const {
      T = Ctx.getQualifiedType(T, Qs: Quals);
      switch (K) {
      case Pointer:
        return Ctx.getPointerType(T);
      case MemberPointer:
        return Ctx.getMemberPointerType(T, /*Qualifier=*/std::nullopt,
                                        Cls: ClassOrBound->getAsCXXRecordDecl());
      case ObjCPointer:
        return Ctx.getObjCObjectPointerType(OIT: T);
      case Array:
        // A null ClassOrBound means an array of unknown bound.
        if (auto *CAT = cast_or_null<ConstantArrayType>(Val: ClassOrBound))
          return Ctx.getConstantArrayType(EltTy: T, ArySize: CAT->getSize(), SizeExpr: nullptr,
                                          ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
        else
          return Ctx.getIncompleteArrayType(EltTy: T, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
      }
      llvm_unreachable("unknown step kind");
    }
  };

  SmallVector<Step, 8> Steps;

  //  - if T1 is "pointer to cv1 C1" and T2 is "pointer to cv2 C2", where C1
  //    is reference-related to C2 or C2 is reference-related to C1 (8.6.3),
  //    the cv-combined type of T1 and T2 or the cv-combined type of T2 and T1,
  //    respectively;
  //  - if T1 is "pointer to member of C1 of type cv1 U1" and T2 is "pointer
  //    to member of C2 of type cv2 U2" for some non-function type U, where
  //    C1 is reference-related to C2 or C2 is reference-related to C1, the
  //    cv-combined type of T2 and T1 or the cv-combined type of T1 and T2,
  //    respectively;
  //  - if T1 and T2 are similar types (4.5), the cv-combined type of T1 and
  //    T2;
  //
  // Dismantle T1 and T2 to simultaneously determine whether they are similar
  // and to prepare to form the cv-combined type if so.
  QualType Composite1 = T1;
  QualType Composite2 = T2;
  // Index (exclusive) of the outermost step before which 'const' must be
  // added because the merged qualifiers differ from one operand's; applied
  // below per [conv.qual]p3.
  unsigned NeedConstBefore = 0;
  while (true) {
    assert(!Composite1.isNull() && !Composite2.isNull());

    Qualifiers Q1, Q2;
    Composite1 = Context.getUnqualifiedArrayType(T: Composite1, Quals&: Q1);
    Composite2 = Context.getUnqualifiedArrayType(T: Composite2, Quals&: Q2);

    // Top-level qualifiers are ignored. Merge at all lower levels.
    if (!Steps.empty()) {
      // Find the qualifier union: (approximately) the unique minimal set of
      // qualifiers that is compatible with both types.
      Qualifiers Quals = Qualifiers::fromCVRUMask(CVRU: Q1.getCVRUQualifiers() |
                                                  Q2.getCVRUQualifiers());

      // Under one level of pointer or pointer-to-member, we can change to an
      // unambiguous compatible address space.
      if (Q1.getAddressSpace() == Q2.getAddressSpace()) {
        Quals.setAddressSpace(Q1.getAddressSpace());
      } else if (Steps.size() == 1) {
        bool MaybeQ1 = Q1.isAddressSpaceSupersetOf(other: Q2, Ctx: getASTContext());
        bool MaybeQ2 = Q2.isAddressSpaceSupersetOf(other: Q1, Ctx: getASTContext());
        if (MaybeQ1 == MaybeQ2) {
          // Exception for ptr size address spaces. Should be able to choose
          // either address space during comparison.
          if (isPtrSizeAddressSpace(AS: Q1.getAddressSpace()) ||
              isPtrSizeAddressSpace(AS: Q2.getAddressSpace()))
            MaybeQ1 = true;
          else
            return QualType(); // No unique best address space.
        }
        Quals.setAddressSpace(MaybeQ1 ? Q1.getAddressSpace()
                                      : Q2.getAddressSpace());
      } else {
        // Address-space mismatch deeper than one level: no composite type.
        return QualType();
      }

      // FIXME: In C, we merge __strong and none to __strong at the top level.
      if (Q1.getObjCGCAttr() == Q2.getObjCGCAttr())
        Quals.setObjCGCAttr(Q1.getObjCGCAttr());
      else if (T1->isVoidPointerType() || T2->isVoidPointerType())
        assert(Steps.size() == 1);
      else
        return QualType();

      // Mismatched lifetime qualifiers never compatibly include each other.
      if (Q1.getObjCLifetime() == Q2.getObjCLifetime())
        Quals.setObjCLifetime(Q1.getObjCLifetime());
      else if (T1->isVoidPointerType() || T2->isVoidPointerType())
        assert(Steps.size() == 1);
      else
        return QualType();

      // Pointer-auth qualifiers must agree exactly.
      if (Q1.getPointerAuth().isEquivalent(Other: Q2.getPointerAuth()))
        Quals.setPointerAuth(Q1.getPointerAuth());
      else
        return QualType();

      Steps.back().Quals = Quals;
      if (Q1 != Quals || Q2 != Quals)
        NeedConstBefore = Steps.size() - 1;
    }

    // FIXME: Can we unify the following with UnwrapSimilarTypes?

    const ArrayType *Arr1, *Arr2;
    if ((Arr1 = Context.getAsArrayType(T: Composite1)) &&
        (Arr2 = Context.getAsArrayType(T: Composite2))) {
      auto *CAT1 = dyn_cast<ConstantArrayType>(Val: Arr1);
      auto *CAT2 = dyn_cast<ConstantArrayType>(Val: Arr2);
      if (CAT1 && CAT2 && CAT1->getSize() == CAT2->getSize()) {
        Composite1 = Arr1->getElementType();
        Composite2 = Arr2->getElementType();
        Steps.emplace_back(Args: Step::Array, Args&: CAT1);
        continue;
      }
      bool IAT1 = isa<IncompleteArrayType>(Val: Arr1);
      bool IAT2 = isa<IncompleteArrayType>(Val: Arr2);
      if ((IAT1 && IAT2) ||
          (getLangOpts().CPlusPlus20 && (IAT1 != IAT2) &&
           ((bool)CAT1 != (bool)CAT2) &&
           (Steps.empty() || Steps.back().K != Step::Array))) {
        // In C++20 onwards, we can unify an array of N T with an array of
        // a different or unknown bound. But we can't form an array whose
        // element type is an array of unknown bound by doing so.
        Composite1 = Arr1->getElementType();
        Composite2 = Arr2->getElementType();
        Steps.emplace_back(Args: Step::Array);
        if (CAT1 || CAT2)
          NeedConstBefore = Steps.size();
        continue;
      }
    }

    const PointerType *Ptr1, *Ptr2;
    if ((Ptr1 = Composite1->getAs<PointerType>()) &&
        (Ptr2 = Composite2->getAs<PointerType>())) {
      Composite1 = Ptr1->getPointeeType();
      Composite2 = Ptr2->getPointeeType();
      Steps.emplace_back(Args: Step::Pointer);
      continue;
    }

    const ObjCObjectPointerType *ObjPtr1, *ObjPtr2;
    if ((ObjPtr1 = Composite1->getAs<ObjCObjectPointerType>()) &&
        (ObjPtr2 = Composite2->getAs<ObjCObjectPointerType>())) {
      Composite1 = ObjPtr1->getPointeeType();
      Composite2 = ObjPtr2->getPointeeType();
      Steps.emplace_back(Args: Step::ObjCPointer);
      continue;
    }

    const MemberPointerType *MemPtr1, *MemPtr2;
    if ((MemPtr1 = Composite1->getAs<MemberPointerType>()) &&
        (MemPtr2 = Composite2->getAs<MemberPointerType>())) {
      Composite1 = MemPtr1->getPointeeType();
      Composite2 = MemPtr2->getPointeeType();

      // At the top level, we can perform a base-to-derived pointer-to-member
      // conversion:
      //
      //  - [...] where C1 is reference-related to C2 or C2 is
      //    reference-related to C1
      //
      // (Note that the only kinds of reference-relatedness in scope here are
      // "same type or derived from".) At any other level, the class must
      // exactly match.
      CXXRecordDecl *Cls = nullptr,
                    *Cls1 = MemPtr1->getMostRecentCXXRecordDecl(),
                    *Cls2 = MemPtr2->getMostRecentCXXRecordDecl();
      if (declaresSameEntity(D1: Cls1, D2: Cls2))
        Cls = Cls1;
      else if (Steps.empty())
        Cls = IsDerivedFrom(Loc, Derived: Cls1, Base: Cls2) ? Cls1
              : IsDerivedFrom(Loc, Derived: Cls2, Base: Cls1) ? Cls2
              : nullptr;
      if (!Cls)
        return QualType();

      Steps.emplace_back(Args: Step::MemberPointer,
                         Args: Context.getCanonicalTagType(TD: Cls).getTypePtr());
      continue;
    }

    // Special case: at the top level, we can decompose an Objective-C pointer
    // and a 'cv void *'. Unify the qualifiers.
    if (Steps.empty() && ((Composite1->isVoidPointerType() &&
                           Composite2->isObjCObjectPointerType()) ||
                          (Composite1->isObjCObjectPointerType() &&
                           Composite2->isVoidPointerType()))) {
      Composite1 = Composite1->getPointeeType();
      Composite2 = Composite2->getPointeeType();
      Steps.emplace_back(Args: Step::Pointer);
      continue;
    }

    // FIXME: block pointer types?

    // Cannot unwrap any more types.
    break;
  }

  //  - if T1 or T2 is "pointer to noexcept function" and the other type is
  //    "pointer to function", where the function types are otherwise the same,
  //    "pointer to function";
  //  - if T1 or T2 is "pointer to member of C1 of type function", the other
  //    type is "pointer to member of C2 of type noexcept function", and C1
  //    is reference-related to C2 or C2 is reference-related to C1, where
  //    the function types are otherwise the same, "pointer to member of C2 of
  //    type function" or "pointer to member of C1 of type function",
  //    respectively;
  //
  // We also support 'noreturn' here, so as a Clang extension we generalize the
  // above to:
  //
  //  - [Clang] If T1 and T2 are both of type "pointer to function" or
  //    "pointer to member function" and the pointee types can be unified
  //    by a function pointer conversion, that conversion is applied
  //    before checking the following rules.
  //
  // We've already unwrapped down to the function types, and we want to merge
  // rather than just convert, so do this ourselves rather than calling
  // IsFunctionConversion.
  //
  // FIXME: In order to match the standard wording as closely as possible, we
  // currently only do this under a single level of pointers. Ideally, we would
  // allow this in general, and set NeedConstBefore to the relevant depth on
  // the side(s) where we changed anything. If we permit that, we should also
  // consider this conversion when determining type similarity and model it as
  // a qualification conversion.
  if (Steps.size() == 1) {
    if (auto *FPT1 = Composite1->getAs<FunctionProtoType>()) {
      if (auto *FPT2 = Composite2->getAs<FunctionProtoType>()) {
        FunctionProtoType::ExtProtoInfo EPI1 = FPT1->getExtProtoInfo();
        FunctionProtoType::ExtProtoInfo EPI2 = FPT2->getExtProtoInfo();

        // The result is noreturn if both operands are.
        bool Noreturn =
            EPI1.ExtInfo.getNoReturn() && EPI2.ExtInfo.getNoReturn();
        EPI1.ExtInfo = EPI1.ExtInfo.withNoReturn(noReturn: Noreturn);
        EPI2.ExtInfo = EPI2.ExtInfo.withNoReturn(noReturn: Noreturn);

        // The result is CFI-unchecked if either operand is.
        bool CFIUncheckedCallee =
            EPI1.CFIUncheckedCallee || EPI2.CFIUncheckedCallee;
        EPI1.CFIUncheckedCallee = CFIUncheckedCallee;
        EPI2.CFIUncheckedCallee = CFIUncheckedCallee;

        // The result is nothrow if both operands are.
        SmallVector<QualType, 8> ExceptionTypeStorage;
        EPI1.ExceptionSpec = EPI2.ExceptionSpec = Context.mergeExceptionSpecs(
            ESI1: EPI1.ExceptionSpec, ESI2: EPI2.ExceptionSpec, ExceptionTypeStorage,
            AcceptDependent: getLangOpts().CPlusPlus17);

        Composite1 = Context.getFunctionType(ResultTy: FPT1->getReturnType(),
                                             Args: FPT1->getParamTypes(), EPI: EPI1);
        Composite2 = Context.getFunctionType(ResultTy: FPT2->getReturnType(),
                                             Args: FPT2->getParamTypes(), EPI: EPI2);
      }
    }
  }

  // There are some more conversions we can perform under exactly one pointer.
  if (Steps.size() == 1 && Steps.front().K == Step::Pointer &&
      !Context.hasSameType(T1: Composite1, T2: Composite2)) {
    //  - if T1 or T2 is "pointer to cv1 void" and the other type is
    //    "pointer to cv2 T", where T is an object type or void,
    //    "pointer to cv12 void", where cv12 is the union of cv1 and cv2;
    if (Composite1->isVoidType() && Composite2->isObjectType())
      Composite2 = Composite1;
    else if (Composite2->isVoidType() && Composite1->isObjectType())
      Composite1 = Composite2;
    //  - if T1 is "pointer to cv1 C1" and T2 is "pointer to cv2 C2", where C1
    //    is reference-related to C2 or C2 is reference-related to C1 (8.6.3),
    //    the cv-combined type of T1 and T2 or the cv-combined type of T2 and
    //    T1, respectively;
    //
    // The "similar type" handling covers all of this except for the "T1 is a
    // base class of T2" case in the definition of reference-related.
    else if (IsDerivedFrom(Loc, Derived: Composite1, Base: Composite2))
      Composite1 = Composite2;
    else if (IsDerivedFrom(Loc, Derived: Composite2, Base: Composite1))
      Composite2 = Composite1;
  }

  // At this point, either the inner types are the same or we have failed to
  // find a composite pointer type.
  if (!Context.hasSameType(T1: Composite1, T2: Composite2))
    return QualType();

  // Per C++ [conv.qual]p3, add 'const' to every level before the last
  // differing qualifier.
  for (unsigned I = 0; I != NeedConstBefore; ++I)
    Steps[I].Quals.addConst();

  // Rebuild the composite type by replaying the recorded steps outwards.
  QualType Composite = Context.getCommonSugaredType(X: Composite1, Y: Composite2);
  for (auto &S : llvm::reverse(C&: Steps))
    Composite = S.rebuild(Ctx&: Context, T: Composite);

  if (ConvertArgs) {
    // Convert the expressions to the composite pointer type.
    InitializedEntity Entity =
        InitializedEntity::InitializeTemporary(Type: Composite);
    InitializationKind Kind =
        InitializationKind::CreateCopy(InitLoc: Loc, EqualLoc: SourceLocation());

    // Check both sequences can be formed before performing either, so a
    // failure on the second side doesn't leave a stray diagnostic-free state.
    InitializationSequence E1ToC(*this, Entity, Kind, E1);
    if (!E1ToC)
      return QualType();

    InitializationSequence E2ToC(*this, Entity, Kind, E2);
    if (!E2ToC)
      return QualType();

    // FIXME: Let the caller know if these fail to avoid duplicate diagnostics.
    ExprResult E1Result = E1ToC.Perform(S&: *this, Entity, Kind, Args: E1);
    if (E1Result.isInvalid())
      return QualType();
    E1 = E1Result.get();

    ExprResult E2Result = E2ToC.Perform(S&: *this, Entity, Kind, Args: E2);
    if (E2Result.isInvalid())
      return QualType();
    E2 = E2Result.get();
  }

  return Composite;
}
6558
/// If E is a prvalue whose type (or array element type) is a class with a
/// non-trivial destructor, wrap it in a CXXBindTemporaryExpr so the
/// destructor runs at the end of the full-expression. Under ARC, calls that
/// return a retainable type additionally get a consume/reclaim implicit cast.
/// Returns E unchanged when no binding is needed; returns ExprError() only
/// when a required destructor is unusable or E is null.
ExprResult Sema::MaybeBindToTemporary(Expr *E) {
  if (!E)
    return ExprError();

  assert(!isa<CXXBindTemporaryExpr>(E) && "Double-bound temporary?");

  // If the result is a glvalue, we shouldn't bind it.
  if (E->isGLValue())
    return E;

  // In ARC, calls that return a retainable type can return retained,
  // in which case we have to insert a consuming cast.
  if (getLangOpts().ObjCAutoRefCount &&
      E->getType()->isObjCRetainableType()) {

    bool ReturnsRetained;

    // For actual calls, we compute this by examining the type of the
    // called value.
    if (CallExpr *Call = dyn_cast<CallExpr>(Val: E)) {
      Expr *Callee = Call->getCallee()->IgnoreParens();
      QualType T = Callee->getType();

      if (T == Context.BoundMemberTy) {
        // Handle pointer-to-members.
        if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val: Callee))
          T = BinOp->getRHS()->getType();
        else if (MemberExpr *Mem = dyn_cast<MemberExpr>(Val: Callee))
          T = Mem->getMemberDecl()->getType();
      }

      // Strip one level of pointer/block-pointer/member-pointer to get to
      // the underlying function type.
      if (const PointerType *Ptr = T->getAs<PointerType>())
        T = Ptr->getPointeeType();
      else if (const BlockPointerType *Ptr = T->getAs<BlockPointerType>())
        T = Ptr->getPointeeType();
      else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>())
        T = MemPtr->getPointeeType();

      auto *FTy = T->castAs<FunctionType>();
      ReturnsRetained = FTy->getExtInfo().getProducesResult();

      // ActOnStmtExpr arranges things so that StmtExprs of retainable
      // type always produce a +1 object.
    } else if (isa<StmtExpr>(Val: E)) {
      ReturnsRetained = true;

      // We hit this case with the lambda conversion-to-block optimization;
      // we don't want any extra casts here.
    } else if (isa<CastExpr>(Val: E) &&
               isa<BlockExpr>(Val: cast<CastExpr>(Val: E)->getSubExpr())) {
      return E;

      // For message sends and property references, we try to find an
      // actual method. FIXME: we should infer retention by selector in
      // cases where we don't have an actual method.
    } else {
      ObjCMethodDecl *D = nullptr;
      if (ObjCMessageExpr *Send = dyn_cast<ObjCMessageExpr>(Val: E)) {
        D = Send->getMethodDecl();
      } else if (ObjCBoxedExpr *BoxedExpr = dyn_cast<ObjCBoxedExpr>(Val: E)) {
        D = BoxedExpr->getBoxingMethod();
      } else if (ObjCArrayLiteral *ArrayLit = dyn_cast<ObjCArrayLiteral>(Val: E)) {
        // Don't do reclaims if we're using the zero-element array
        // constant.
        if (ArrayLit->getNumElements() == 0 &&
            Context.getLangOpts().ObjCRuntime.hasEmptyCollections())
          return E;

        D = ArrayLit->getArrayWithObjectsMethod();
      } else if (ObjCDictionaryLiteral *DictLit
                   = dyn_cast<ObjCDictionaryLiteral>(Val: E)) {
        // Don't do reclaims if we're using the zero-element dictionary
        // constant.
        if (DictLit->getNumElements() == 0 &&
            Context.getLangOpts().ObjCRuntime.hasEmptyCollections())
          return E;

        D = DictLit->getDictWithObjectsMethod();
      }

      ReturnsRetained = (D && D->hasAttr<NSReturnsRetainedAttr>());

      // Don't do reclaims on performSelector calls; despite their
      // return type, the invoked method doesn't necessarily actually
      // return an object.
      if (!ReturnsRetained &&
          D && D->getMethodFamily() == OMF_performSelector)
        return E;
    }

    // Don't reclaim an object of Class type.
    if (!ReturnsRetained && E->getType()->isObjCARCImplicitlyUnretainedType())
      return E;

    Cleanup.setExprNeedsCleanups(true);

    CastKind ck = (ReturnsRetained ? CK_ARCConsumeObject
                                   : CK_ARCReclaimReturnedObject);
    return ImplicitCastExpr::Create(Context, T: E->getType(), Kind: ck, Operand: E, BasePath: nullptr,
                                    Cat: VK_PRValue, FPO: FPOptionsOverride());
  }

  // Non-trivial C structs (e.g. with __strong members) still need a cleanup
  // even though they never get a CXXBindTemporaryExpr below.
  if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    Cleanup.setExprNeedsCleanups(true);

  if (!getLangOpts().CPlusPlus)
    return E;

  // Search for the base element type (cf. ASTContext::getBaseElementType) with
  // a fast path for the common case that the type is directly a RecordType.
  const Type *T = Context.getCanonicalType(T: E->getType().getTypePtr());
  const RecordType *RT = nullptr;
  while (!RT) {
    switch (T->getTypeClass()) {
    case Type::Record:
      RT = cast<RecordType>(Val: T);
      break;
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::DependentSizedArray:
      T = cast<ArrayType>(Val: T)->getElementType().getTypePtr();
      break;
    default:
      // Not a class (or array of class) type: nothing to bind.
      return E;
    }
  }

  // That should be enough to guarantee that this type is complete, if we're
  // not processing a decltype expression.
  auto *RD = cast<CXXRecordDecl>(Val: RT->getDecl())->getDefinitionOrSelf();
  if (RD->isInvalidDecl() || RD->isDependentContext())
    return E;

  // In a decltype operand, don't look up the destructor yet; the bind is
  // recorded below in DelayedDecltypeBinds and handled later (see
  // ActOnDecltypeExpression).
  bool IsDecltype = ExprEvalContexts.back().ExprContext ==
                    ExpressionEvaluationContextRecord::EK_Decltype;
  CXXDestructorDecl *Destructor = IsDecltype ? nullptr : LookupDestructor(Class: RD);

  if (Destructor) {
    MarkFunctionReferenced(Loc: E->getExprLoc(), Func: Destructor);
    CheckDestructorAccess(Loc: E->getExprLoc(), Dtor: Destructor,
                          PDiag: PDiag(DiagID: diag::err_access_dtor_temp)
                            << E->getType());
    if (DiagnoseUseOfDecl(D: Destructor, Locs: E->getExprLoc()))
      return ExprError();

    // If destructor is trivial, we can avoid the extra copy.
    if (Destructor->isTrivial())
      return E;

    // We need a cleanup, but we don't need to remember the temporary.
    Cleanup.setExprNeedsCleanups(true);
  }

  CXXTemporary *Temp = CXXTemporary::Create(C: Context, Destructor);
  CXXBindTemporaryExpr *Bind = CXXBindTemporaryExpr::Create(C: Context, Temp, SubExpr: E);

  if (IsDecltype)
    ExprEvalContexts.back().DelayedDecltypeBinds.push_back(Elt: Bind);

  return Bind;
}
6721
6722ExprResult
6723Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) {
6724 if (SubExpr.isInvalid())
6725 return ExprError();
6726
6727 return MaybeCreateExprWithCleanups(SubExpr: SubExpr.get());
6728}
6729
/// Wrap SubExpr in an ExprWithCleanups if the current evaluation context has
/// pending cleanups; otherwise return SubExpr unchanged. The cleanup objects
/// recorded since the current context began are transferred into the new node
/// and then discarded from the context.
Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
  assert(SubExpr && "subexpression can't be null!");

  CleanupVarDeclMarking();

  // Cleanup objects recorded before the current evaluation context began
  // belong to an enclosing expression; only the ones added since
  // FirstCleanup are attached here.
  unsigned FirstCleanup = ExprEvalContexts.back().NumCleanupObjects;
  assert(ExprCleanupObjects.size() >= FirstCleanup);
  assert(Cleanup.exprNeedsCleanups() ||
         ExprCleanupObjects.size() == FirstCleanup);
  if (!Cleanup.exprNeedsCleanups())
    return SubExpr;

  auto Cleanups = llvm::ArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
                                 ExprCleanupObjects.size() - FirstCleanup);

  auto *E = ExprWithCleanups::Create(
      C: Context, subexpr: SubExpr, CleanupsHaveSideEffects: Cleanup.cleanupsHaveSideEffects(), objects: Cleanups);
  // The cleanups now belong to the ExprWithCleanups node; clear the
  // per-context state so they are not attached a second time.
  DiscardCleanupsInEvaluationContext();

  return E;
}
6751
6752Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
6753 assert(SubStmt && "sub-statement can't be null!");
6754
6755 CleanupVarDeclMarking();
6756
6757 if (!Cleanup.exprNeedsCleanups())
6758 return SubStmt;
6759
6760 // FIXME: In order to attach the temporaries, wrap the statement into
6761 // a StmtExpr; currently this is only used for asm statements.
6762 // This is hacky, either create a new CXXStmtWithTemporaries statement or
6763 // a new AsmStmtWithTemporaries.
6764 CompoundStmt *CompStmt =
6765 CompoundStmt::Create(C: Context, Stmts: SubStmt, FPFeatures: FPOptionsOverride(),
6766 LB: SourceLocation(), RB: SourceLocation());
6767 Expr *E = new (Context)
6768 StmtExpr(CompStmt, Context.VoidTy, SourceLocation(), SourceLocation(),
6769 /*FIXME TemplateDepth=*/0);
6770 return MaybeCreateExprWithCleanups(SubExpr: E);
6771}
6772
/// Finish analysis of the operand of a decltype-specifier. Per C++11
/// [expr.call]p11, no temporary is introduced for a prvalue function call
/// that is the (possibly parenthesized or comma-RHS) operand of decltype, so
/// the outermost CXXBindTemporaryExpr is stripped and the checks that were
/// delayed while parsing the operand (call return types, temporary
/// destructors) are performed now.
ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
  assert(ExprEvalContexts.back().ExprContext ==
             ExpressionEvaluationContextRecord::EK_Decltype &&
         "not in a decltype expression");

  ExprResult Result = CheckPlaceholderExpr(E);
  if (Result.isInvalid())
    return ExprError();
  E = Result.get();

  // C++11 [expr.call]p11:
  //   If a function call is a prvalue of object type,
  //   -- if the function call is either
  //   -- the operand of a decltype-specifier, or
  //   -- the right operand of a comma operator that is the operand of a
  //      decltype-specifier,
  //   a temporary object is not introduced for the prvalue.

  // Recursively rebuild ParenExprs and comma expressions to strip out the
  // outermost CXXBindTemporaryExpr, if any.
  if (ParenExpr *PE = dyn_cast<ParenExpr>(Val: E)) {
    ExprResult SubExpr = ActOnDecltypeExpression(E: PE->getSubExpr());
    if (SubExpr.isInvalid())
      return ExprError();
    // Only rebuild the ParenExpr when the recursion actually changed its
    // operand.
    if (SubExpr.get() == PE->getSubExpr())
      return E;
    return ActOnParenExpr(L: PE->getLParen(), R: PE->getRParen(), E: SubExpr.get());
  }
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
    if (BO->getOpcode() == BO_Comma) {
      // Only the right operand of a comma is in "decltype position".
      ExprResult RHS = ActOnDecltypeExpression(E: BO->getRHS());
      if (RHS.isInvalid())
        return ExprError();
      if (RHS.get() == BO->getRHS())
        return E;
      return BinaryOperator::Create(C: Context, lhs: BO->getLHS(), rhs: RHS.get(), opc: BO_Comma,
                                    ResTy: BO->getType(), VK: BO->getValueKind(),
                                    OK: BO->getObjectKind(), opLoc: BO->getOperatorLoc(),
                                    FPFeatures: BO->getFPFeatures());
    }
  }

  // A CXXBindTemporaryExpr around a call is the one we may strip; TopCall /
  // TopBind are excluded from the delayed checks below.
  CXXBindTemporaryExpr *TopBind = dyn_cast<CXXBindTemporaryExpr>(Val: E);
  CallExpr *TopCall = TopBind ? dyn_cast<CallExpr>(Val: TopBind->getSubExpr())
                              : nullptr;
  if (TopCall)
    E = TopCall;
  else
    TopBind = nullptr;

  // Disable the special decltype handling now.
  ExprEvalContexts.back().ExprContext =
      ExpressionEvaluationContextRecord::EK_Other;

  Result = CheckUnevaluatedOperand(E);
  if (Result.isInvalid())
    return ExprError();
  E = Result.get();

  // In MS mode, don't perform any extra checking of call return types within a
  // decltype expression.
  if (getLangOpts().MSVCCompat)
    return E;

  // Perform the semantic checks we delayed until this point.
  for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeCalls.size();
       I != N; ++I) {
    CallExpr *Call = ExprEvalContexts.back().DelayedDecltypeCalls[I];
    // The top-level call's return type need not be complete.
    if (Call == TopCall)
      continue;

    if (CheckCallReturnType(ReturnType: Call->getCallReturnType(Ctx: Context),
                            Loc: Call->getBeginLoc(), CE: Call, FD: Call->getDirectCallee()))
      return ExprError();
  }

  // Now all relevant types are complete, check the destructors are accessible
  // and non-deleted, and annotate them on the temporaries.
  for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeBinds.size();
       I != N; ++I) {
    CXXBindTemporaryExpr *Bind =
      ExprEvalContexts.back().DelayedDecltypeBinds[I];
    // The stripped top-level temporary never needs destruction.
    if (Bind == TopBind)
      continue;

    CXXTemporary *Temp = Bind->getTemporary();

    CXXRecordDecl *RD =
        Bind->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
    CXXDestructorDecl *Destructor = LookupDestructor(Class: RD);
    Temp->setDestructor(Destructor);

    MarkFunctionReferenced(Loc: Bind->getExprLoc(), Func: Destructor);
    CheckDestructorAccess(Loc: Bind->getExprLoc(), Dtor: Destructor,
                          PDiag: PDiag(DiagID: diag::err_access_dtor_temp)
                            << Bind->getType());
    if (DiagnoseUseOfDecl(D: Destructor, Locs: Bind->getExprLoc()))
      return ExprError();

    // We need a cleanup, but we don't need to remember the temporary.
    Cleanup.setExprNeedsCleanups(true);
  }

  // Possibly strip off the top CXXBindTemporaryExpr.
  return E;
}
6879
6880/// Note a set of 'operator->' functions that were used for a member access.
6881static void noteOperatorArrows(Sema &S,
6882 ArrayRef<FunctionDecl *> OperatorArrows) {
6883 unsigned SkipStart = OperatorArrows.size(), SkipCount = 0;
6884 // FIXME: Make this configurable?
6885 unsigned Limit = 9;
6886 if (OperatorArrows.size() > Limit) {
6887 // Produce Limit-1 normal notes and one 'skipping' note.
6888 SkipStart = (Limit - 1) / 2 + (Limit - 1) % 2;
6889 SkipCount = OperatorArrows.size() - (Limit - 1);
6890 }
6891
6892 for (unsigned I = 0; I < OperatorArrows.size(); /**/) {
6893 if (I == SkipStart) {
6894 S.Diag(Loc: OperatorArrows[I]->getLocation(),
6895 DiagID: diag::note_operator_arrows_suppressed)
6896 << SkipCount;
6897 I += SkipCount;
6898 } else {
6899 S.Diag(Loc: OperatorArrows[I]->getLocation(), DiagID: diag::note_operator_arrow_here)
6900 << OperatorArrows[I]->getCallResultType();
6901 ++I;
6902 }
6903 }
6904}
6905
6906ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
6907 SourceLocation OpLoc,
6908 tok::TokenKind OpKind,
6909 ParsedType &ObjectType,
6910 bool &MayBePseudoDestructor) {
6911 // Since this might be a postfix expression, get rid of ParenListExprs.
6912 ExprResult Result = MaybeConvertParenListExprToParenExpr(S, ME: Base);
6913 if (Result.isInvalid()) return ExprError();
6914 Base = Result.get();
6915
6916 Result = CheckPlaceholderExpr(E: Base);
6917 if (Result.isInvalid()) return ExprError();
6918 Base = Result.get();
6919
6920 QualType BaseType = Base->getType();
6921 MayBePseudoDestructor = false;
6922 if (BaseType->isDependentType()) {
6923 // If we have a pointer to a dependent type and are using the -> operator,
6924 // the object type is the type that the pointer points to. We might still
6925 // have enough information about that type to do something useful.
6926 if (OpKind == tok::arrow)
6927 if (const PointerType *Ptr = BaseType->getAs<PointerType>())
6928 BaseType = Ptr->getPointeeType();
6929
6930 ObjectType = ParsedType::make(P: BaseType);
6931 MayBePseudoDestructor = true;
6932 return Base;
6933 }
6934
6935 // C++ [over.match.oper]p8:
6936 // [...] When operator->returns, the operator-> is applied to the value
6937 // returned, with the original second operand.
6938 if (OpKind == tok::arrow) {
6939 QualType StartingType = BaseType;
6940 bool NoArrowOperatorFound = false;
6941 bool FirstIteration = true;
6942 FunctionDecl *CurFD = dyn_cast<FunctionDecl>(Val: CurContext);
6943 // The set of types we've considered so far.
6944 llvm::SmallPtrSet<CanQualType,8> CTypes;
6945 SmallVector<FunctionDecl*, 8> OperatorArrows;
6946 CTypes.insert(Ptr: Context.getCanonicalType(T: BaseType));
6947
6948 while (BaseType->isRecordType()) {
6949 if (OperatorArrows.size() >= getLangOpts().ArrowDepth) {
6950 Diag(Loc: OpLoc, DiagID: diag::err_operator_arrow_depth_exceeded)
6951 << StartingType << getLangOpts().ArrowDepth << Base->getSourceRange();
6952 noteOperatorArrows(S&: *this, OperatorArrows);
6953 Diag(Loc: OpLoc, DiagID: diag::note_operator_arrow_depth)
6954 << getLangOpts().ArrowDepth;
6955 return ExprError();
6956 }
6957
6958 Result = BuildOverloadedArrowExpr(
6959 S, Base, OpLoc,
6960 // When in a template specialization and on the first loop iteration,
6961 // potentially give the default diagnostic (with the fixit in a
6962 // separate note) instead of having the error reported back to here
6963 // and giving a diagnostic with a fixit attached to the error itself.
6964 NoArrowOperatorFound: (FirstIteration && CurFD && CurFD->isFunctionTemplateSpecialization())
6965 ? nullptr
6966 : &NoArrowOperatorFound);
6967 if (Result.isInvalid()) {
6968 if (NoArrowOperatorFound) {
6969 if (FirstIteration) {
6970 Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
6971 << BaseType << 1 << Base->getSourceRange()
6972 << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: ".");
6973 OpKind = tok::period;
6974 break;
6975 }
6976 Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_arrow)
6977 << BaseType << Base->getSourceRange();
6978 CallExpr *CE = dyn_cast<CallExpr>(Val: Base);
6979 if (Decl *CD = (CE ? CE->getCalleeDecl() : nullptr)) {
6980 Diag(Loc: CD->getBeginLoc(),
6981 DiagID: diag::note_member_reference_arrow_from_operator_arrow);
6982 }
6983 }
6984 return ExprError();
6985 }
6986 Base = Result.get();
6987 if (CXXOperatorCallExpr *OpCall = dyn_cast<CXXOperatorCallExpr>(Val: Base))
6988 OperatorArrows.push_back(Elt: OpCall->getDirectCallee());
6989 BaseType = Base->getType();
6990 CanQualType CBaseType = Context.getCanonicalType(T: BaseType);
6991 if (!CTypes.insert(Ptr: CBaseType).second) {
6992 Diag(Loc: OpLoc, DiagID: diag::err_operator_arrow_circular) << StartingType;
6993 noteOperatorArrows(S&: *this, OperatorArrows);
6994 return ExprError();
6995 }
6996 FirstIteration = false;
6997 }
6998
6999 if (OpKind == tok::arrow) {
7000 if (BaseType->isPointerType())
7001 BaseType = BaseType->getPointeeType();
7002 else if (auto *AT = Context.getAsArrayType(T: BaseType))
7003 BaseType = AT->getElementType();
7004 }
7005 }
7006
7007 // Objective-C properties allow "." access on Objective-C pointer types,
7008 // so adjust the base type to the object type itself.
7009 if (BaseType->isObjCObjectPointerType())
7010 BaseType = BaseType->getPointeeType();
7011
7012 // C++ [basic.lookup.classref]p2:
7013 // [...] If the type of the object expression is of pointer to scalar
7014 // type, the unqualified-id is looked up in the context of the complete
7015 // postfix-expression.
7016 //
7017 // This also indicates that we could be parsing a pseudo-destructor-name.
7018 // Note that Objective-C class and object types can be pseudo-destructor
7019 // expressions or normal member (ivar or property) access expressions, and
7020 // it's legal for the type to be incomplete if this is a pseudo-destructor
7021 // call. We'll do more incomplete-type checks later in the lookup process,
7022 // so just skip this check for ObjC types.
7023 if (!BaseType->isRecordType()) {
7024 ObjectType = ParsedType::make(P: BaseType);
7025 MayBePseudoDestructor = true;
7026 return Base;
7027 }
7028
7029 // The object type must be complete (or dependent), or
7030 // C++11 [expr.prim.general]p3:
7031 // Unlike the object expression in other contexts, *this is not required to
7032 // be of complete type for purposes of class member access (5.2.5) outside
7033 // the member function body.
7034 if (!BaseType->isDependentType() &&
7035 !isThisOutsideMemberFunctionBody(BaseType) &&
7036 RequireCompleteType(Loc: OpLoc, T: BaseType,
7037 DiagID: diag::err_incomplete_member_access)) {
7038 return CreateRecoveryExpr(Begin: Base->getBeginLoc(), End: Base->getEndLoc(), SubExprs: {Base});
7039 }
7040
7041 // C++ [basic.lookup.classref]p2:
7042 // If the id-expression in a class member access (5.2.5) is an
7043 // unqualified-id, and the type of the object expression is of a class
7044 // type C (or of pointer to a class type C), the unqualified-id is looked
7045 // up in the scope of class C. [...]
7046 ObjectType = ParsedType::make(P: BaseType);
7047 return Base;
7048}
7049
/// Check and prepare the base expression of a pseudo-destructor access.
///
/// Resolves any placeholder type in \p Base and computes \p ObjectType, the
/// scalar type the pseudo-destructor is notionally invoked on. For '->',
/// performs the lvalue conversions and strips one pointer level; if the base
/// of '->' is not a pointer (and not type-dependent), diagnoses with a fixit
/// and recovers by rewriting \p OpKind to '.' — except in a SFINAE context,
/// where recovery is not attempted.
///
/// \returns true on error (the caller should return ExprError()); on success
/// \p Base, \p ObjectType, and \p OpKind may have been updated in place.
static bool CheckArrow(Sema &S, QualType &ObjectType, Expr *&Base,
                       tok::TokenKind &OpKind, SourceLocation OpLoc) {
  // Placeholder types (e.g. an unresolved overload set) must be resolved
  // before we can reason about the base expression's type.
  if (Base->hasPlaceholderType()) {
    ExprResult result = S.CheckPlaceholderExpr(E: Base);
    if (result.isInvalid()) return true;
    Base = result.get();
  }
  ObjectType = Base->getType();

  // C++ [expr.pseudo]p2:
  //   The left-hand side of the dot operator shall be of scalar type. The
  //   left-hand side of the arrow operator shall be of pointer to scalar type.
  //   This scalar type is the object type.
  // Note that this is rather different from the normal handling for the
  // arrow operator.
  if (OpKind == tok::arrow) {
    // The operator requires a prvalue, so perform lvalue conversions.
    // Only do this if we might plausibly end with a pointer, as otherwise
    // this was likely to be intended to be a '.'.
    if (ObjectType->isPointerType() || ObjectType->isArrayType() ||
        ObjectType->isFunctionType()) {
      ExprResult BaseResult = S.DefaultFunctionArrayLvalueConversion(E: Base);
      if (BaseResult.isInvalid())
        return true;
      Base = BaseResult.get();
      ObjectType = Base->getType();
    }

    if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
      // '->' dereferences once: the object type is the pointee type.
      ObjectType = Ptr->getPointeeType();
    } else if (!Base->isTypeDependent()) {
      // The user wrote "p->" when they probably meant "p."; fix it.
      S.Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
        << ObjectType << true
        << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: ".");
      if (S.isSFINAEContext())
        return true;

      OpKind = tok::period;
    }
  }

  return false;
}
7094
7095/// Check if it's ok to try and recover dot pseudo destructor calls on
7096/// pointer objects.
7097static bool
7098canRecoverDotPseudoDestructorCallsOnPointerObjects(Sema &SemaRef,
7099 QualType DestructedType) {
7100 // If this is a record type, check if its destructor is callable.
7101 if (auto *RD = DestructedType->getAsCXXRecordDecl()) {
7102 if (RD->hasDefinition())
7103 if (CXXDestructorDecl *D = SemaRef.LookupDestructor(Class: RD))
7104 return SemaRef.CanUseDecl(D, /*TreatUnavailableAsInvalid=*/false);
7105 return false;
7106 }
7107
7108 // Otherwise, check if it's a type for which it's valid to use a pseudo-dtor.
7109 return DestructedType->isDependentType() || DestructedType->isScalarType() ||
7110 DestructedType->isVectorType();
7111}
7112
/// Build a pseudo-destructor expression such as 'p->T::~T()' once the scope
/// type and destructed type have been resolved.
///
/// Enforces the C++ [expr.pseudo]p2 requirements: the object expression must
/// be of (pointer to) scalar type, and both the destructed type and the
/// optional scope type must match the object type. Each violation is
/// diagnosed and then recovered from where possible, so that a
/// CXXPseudoDestructorExpr can still be produced.
ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
                                           SourceLocation OpLoc,
                                           tok::TokenKind OpKind,
                                           const CXXScopeSpec &SS,
                                           TypeSourceInfo *ScopeTypeInfo,
                                           SourceLocation CCLoc,
                                           SourceLocation TildeLoc,
                                           PseudoDestructorTypeStorage Destructed) {
  TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo();

  // Validate the base expression; this may rewrite '->' to '.' as a
  // recovery and updates ObjectType to the scalar object type.
  QualType ObjectType;
  if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc))
    return ExprError();

  if (!ObjectType->isDependentType() && !ObjectType->isScalarType() &&
      !ObjectType->isVectorType() && !ObjectType->isMatrixType()) {
    // MSVC permits pseudo-destructor calls on 'void'; accept with a warning
    // in -fms-compatibility mode, error otherwise.
    if (getLangOpts().MSVCCompat && ObjectType->isVoidType())
      Diag(Loc: OpLoc, DiagID: diag::ext_pseudo_dtor_on_void) << Base->getSourceRange();
    else {
      Diag(Loc: OpLoc, DiagID: diag::err_pseudo_dtor_base_not_scalar)
        << ObjectType << Base->getSourceRange();
      return ExprError();
    }
  }

  // C++ [expr.pseudo]p2:
  //   [...] The cv-unqualified versions of the object type and of the type
  //   designated by the pseudo-destructor-name shall be the same type.
  if (DestructedTypeInfo) {
    QualType DestructedType = DestructedTypeInfo->getType();
    SourceLocation DestructedTypeStart =
        DestructedTypeInfo->getTypeLoc().getBeginLoc();
    if (!DestructedType->isDependentType() && !ObjectType->isDependentType()) {
      if (!Context.hasSameUnqualifiedType(T1: DestructedType, T2: ObjectType)) {
        // Detect dot pseudo destructor calls on pointer objects, e.g.:
        //   Foo *foo;
        //   foo.~Foo();
        if (OpKind == tok::period && ObjectType->isPointerType() &&
            Context.hasSameUnqualifiedType(T1: DestructedType,
                                           T2: ObjectType->getPointeeType())) {
          auto Diagnostic =
              Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
              << ObjectType << /*IsArrow=*/0 << Base->getSourceRange();

          // Issue a fixit only when the destructor is valid.
          if (canRecoverDotPseudoDestructorCallsOnPointerObjects(
                  SemaRef&: *this, DestructedType))
            Diagnostic << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: "->");

          // Recover by setting the object type to the destructed type and the
          // operator to '->'.
          ObjectType = DestructedType;
          OpKind = tok::arrow;
        } else {
          Diag(Loc: DestructedTypeStart, DiagID: diag::err_pseudo_dtor_type_mismatch)
              << ObjectType << DestructedType << Base->getSourceRange()
              << DestructedTypeInfo->getTypeLoc().getSourceRange();

          // Recover by setting the destructed type to the object type.
          DestructedType = ObjectType;
          DestructedTypeInfo =
              Context.getTrivialTypeSourceInfo(T: ObjectType, Loc: DestructedTypeStart);
          Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
        }
      } else if (DestructedType.getObjCLifetime() !=
                   ObjectType.getObjCLifetime()) {
        // Same unqualified type, but the ObjC ARC ownership qualifiers
        // differ.
        if (DestructedType.getObjCLifetime() == Qualifiers::OCL_None) {
          // Okay: just pretend that the user provided the correctly-qualified
          // type.
        } else {
          Diag(Loc: DestructedTypeStart, DiagID: diag::err_arc_pseudo_dtor_inconstant_quals)
              << ObjectType << DestructedType << Base->getSourceRange()
              << DestructedTypeInfo->getTypeLoc().getSourceRange();
        }

        // Recover by setting the destructed type to the object type.
        DestructedType = ObjectType;
        DestructedTypeInfo = Context.getTrivialTypeSourceInfo(T: ObjectType,
                                                              Loc: DestructedTypeStart);
        Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
      }
    }
  }

  // C++ [expr.pseudo]p2:
  //   [...] Furthermore, the two type-names in a pseudo-destructor-name of the
  //   form
  //
  //     ::[opt] nested-name-specifier[opt] type-name :: ~ type-name
  //
  //   shall designate the same scalar type.
  if (ScopeTypeInfo) {
    QualType ScopeType = ScopeTypeInfo->getType();
    if (!ScopeType->isDependentType() && !ObjectType->isDependentType() &&
        !Context.hasSameUnqualifiedType(T1: ScopeType, T2: ObjectType)) {

      Diag(Loc: ScopeTypeInfo->getTypeLoc().getSourceRange().getBegin(),
           DiagID: diag::err_pseudo_dtor_type_mismatch)
          << ObjectType << ScopeType << Base->getSourceRange()
          << ScopeTypeInfo->getTypeLoc().getSourceRange();

      // Recover by dropping the scope type entirely; it is not needed to
      // build the expression.
      ScopeType = QualType();
      ScopeTypeInfo = nullptr;
    }
  }

  Expr *Result
    = new (Context) CXXPseudoDestructorExpr(Context, Base,
                                            OpKind == tok::arrow, OpLoc,
                                            SS.getWithLocInContext(Context),
                                            ScopeTypeInfo,
                                            CCLoc,
                                            TildeLoc,
                                            Destructed);

  return Result;
}
7231
/// Parser entry point for a pseudo-destructor written with (possibly
/// qualified) type names, e.g. 'p->A::B::~B()'.
///
/// Resolves the type after '~' (the destructed type) and the optional type
/// before '::' (the scope type) from the parsed UnqualifiedIds — each of
/// which may be a plain identifier or a template-id — recovering where
/// lookup fails, then forwards to BuildPseudoDestructorExpr.
ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                           SourceLocation OpLoc,
                                           tok::TokenKind OpKind,
                                           CXXScopeSpec &SS,
                                           UnqualifiedId &FirstTypeName,
                                           SourceLocation CCLoc,
                                           SourceLocation TildeLoc,
                                           UnqualifiedId &SecondTypeName) {
  assert((FirstTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
          FirstTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) &&
         "Invalid first type name in pseudo-destructor");
  assert((SecondTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
          SecondTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) &&
         "Invalid second type name in pseudo-destructor");

  // Validate the base expression; this may rewrite '->' to '.' as a
  // recovery and computes the object type.
  QualType ObjectType;
  if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc))
    return ExprError();

  // Compute the object type that we should use for name lookup purposes. Only
  // record types and dependent types matter.
  ParsedType ObjectTypePtrForLookup;
  if (!SS.isSet()) {
    if (ObjectType->isRecordType())
      ObjectTypePtrForLookup = ParsedType::make(P: ObjectType);
    else if (ObjectType->isDependentType())
      ObjectTypePtrForLookup = ParsedType::make(P: Context.DependentTy);
  }

  // Convert the name of the type being destructed (following the ~) into a
  // type (with source-location information).
  QualType DestructedType;
  TypeSourceInfo *DestructedTypeInfo = nullptr;
  PseudoDestructorTypeStorage Destructed;
  if (SecondTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) {
    ParsedType T = getTypeName(II: *SecondTypeName.Identifier,
                               NameLoc: SecondTypeName.StartLocation,
                               S, SS: &SS, isClassName: true, HasTrailingDot: false, ObjectType: ObjectTypePtrForLookup,
                               /*IsCtorOrDtorName*/true);
    if (!T &&
        ((SS.isSet() && !computeDeclContext(SS, EnteringContext: false)) ||
         (!SS.isSet() && ObjectType->isDependentType()))) {
      // The name of the type being destroyed is a dependent name, and we
      // couldn't find anything useful in scope. Just store the identifier and
      // its location, and we'll perform (qualified) name lookup again at
      // template instantiation time.
      Destructed = PseudoDestructorTypeStorage(SecondTypeName.Identifier,
                                               SecondTypeName.StartLocation);
    } else if (!T) {
      Diag(Loc: SecondTypeName.StartLocation,
           DiagID: diag::err_pseudo_dtor_destructor_non_type)
        << SecondTypeName.Identifier << ObjectType;
      if (isSFINAEContext())
        return ExprError();

      // Recover by assuming we had the right type all along.
      DestructedType = ObjectType;
    } else
      DestructedType = GetTypeFromParser(Ty: T, TInfo: &DestructedTypeInfo);
  } else {
    // Resolve the template-id to a type.
    TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId;
    ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
                                       TemplateId->NumArgs);
    TypeResult T = ActOnTemplateIdType(
        S, ElaboratedKeyword: ElaboratedTypeKeyword::None,
        /*ElaboratedKeywordLoc=*/SourceLocation(), SS,
        TemplateKWLoc: TemplateId->TemplateKWLoc, Template: TemplateId->Template, TemplateII: TemplateId->Name,
        TemplateIILoc: TemplateId->TemplateNameLoc, LAngleLoc: TemplateId->LAngleLoc, TemplateArgs: TemplateArgsPtr,
        RAngleLoc: TemplateId->RAngleLoc,
        /*IsCtorOrDtorName*/ true);
    if (T.isInvalid() || !T.get()) {
      // Recover by assuming we had the right type all along.
      DestructedType = ObjectType;
    } else
      DestructedType = GetTypeFromParser(Ty: T.get(), TInfo: &DestructedTypeInfo);
  }

  // If we've performed some kind of recovery, (re-)build the type source
  // information.
  if (!DestructedType.isNull()) {
    if (!DestructedTypeInfo)
      DestructedTypeInfo = Context.getTrivialTypeSourceInfo(T: DestructedType,
                                                            Loc: SecondTypeName.StartLocation);
    Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
  }

  // Convert the name of the scope type (the type prior to '::') into a type.
  TypeSourceInfo *ScopeTypeInfo = nullptr;
  QualType ScopeType;
  if (FirstTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
      FirstTypeName.Identifier) {
    if (FirstTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) {
      ParsedType T = getTypeName(II: *FirstTypeName.Identifier,
                                 NameLoc: FirstTypeName.StartLocation,
                                 S, SS: &SS, isClassName: true, HasTrailingDot: false, ObjectType: ObjectTypePtrForLookup,
                                 /*IsCtorOrDtorName*/true);
      if (!T) {
        Diag(Loc: FirstTypeName.StartLocation,
             DiagID: diag::err_pseudo_dtor_destructor_non_type)
          << FirstTypeName.Identifier << ObjectType;

        if (isSFINAEContext())
          return ExprError();

        // Just drop this type. It's unnecessary anyway.
        ScopeType = QualType();
      } else
        ScopeType = GetTypeFromParser(Ty: T, TInfo: &ScopeTypeInfo);
    } else {
      // Resolve the template-id to a type.
      TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId;
      ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
                                         TemplateId->NumArgs);
      TypeResult T = ActOnTemplateIdType(
          S, ElaboratedKeyword: ElaboratedTypeKeyword::None,
          /*ElaboratedKeywordLoc=*/SourceLocation(), SS,
          TemplateKWLoc: TemplateId->TemplateKWLoc, Template: TemplateId->Template, TemplateII: TemplateId->Name,
          TemplateIILoc: TemplateId->TemplateNameLoc, LAngleLoc: TemplateId->LAngleLoc, TemplateArgs: TemplateArgsPtr,
          RAngleLoc: TemplateId->RAngleLoc,
          /*IsCtorOrDtorName*/ true);
      if (T.isInvalid() || !T.get()) {
        // Recover by dropping this type.
        ScopeType = QualType();
      } else
        ScopeType = GetTypeFromParser(Ty: T.get(), TInfo: &ScopeTypeInfo);
    }
  }

  if (!ScopeType.isNull() && !ScopeTypeInfo)
    ScopeTypeInfo = Context.getTrivialTypeSourceInfo(T: ScopeType,
                                                     Loc: FirstTypeName.StartLocation);


  return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS,
                                   ScopeTypeInfo, CCLoc, TildeLoc,
                                   Destructed);
}
7370
7371ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
7372 SourceLocation OpLoc,
7373 tok::TokenKind OpKind,
7374 SourceLocation TildeLoc,
7375 const DeclSpec& DS) {
7376 QualType ObjectType;
7377 QualType T;
7378 TypeLocBuilder TLB;
7379 if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc) ||
7380 DS.getTypeSpecType() == DeclSpec::TST_error)
7381 return ExprError();
7382
7383 switch (DS.getTypeSpecType()) {
7384 case DeclSpec::TST_decltype_auto: {
7385 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_decltype_auto_invalid);
7386 return true;
7387 }
7388 case DeclSpec::TST_decltype: {
7389 T = BuildDecltypeType(E: DS.getRepAsExpr(), /*AsUnevaluated=*/false);
7390 DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
7391 DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
7392 DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
7393 break;
7394 }
7395 case DeclSpec::TST_typename_pack_indexing: {
7396 T = ActOnPackIndexingType(Pattern: DS.getRepAsType().get(), IndexExpr: DS.getPackIndexingExpr(),
7397 Loc: DS.getBeginLoc(), EllipsisLoc: DS.getEllipsisLoc());
7398 TLB.pushTrivial(Context&: getASTContext(),
7399 T: cast<PackIndexingType>(Val: T.getTypePtr())->getPattern(),
7400 Loc: DS.getBeginLoc());
7401 PackIndexingTypeLoc PITL = TLB.push<PackIndexingTypeLoc>(T);
7402 PITL.setEllipsisLoc(DS.getEllipsisLoc());
7403 break;
7404 }
7405 default:
7406 llvm_unreachable("Unsupported type in pseudo destructor");
7407 }
7408 TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T);
7409 PseudoDestructorTypeStorage Destructed(DestructedTypeInfo);
7410
7411 return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS: CXXScopeSpec(),
7412 ScopeTypeInfo: nullptr, CCLoc: SourceLocation(), TildeLoc,
7413 Destructed);
7414}
7415
7416ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
7417 SourceLocation RParen) {
7418 // If the operand is an unresolved lookup expression, the expression is ill-
7419 // formed per [over.over]p1, because overloaded function names cannot be used
7420 // without arguments except in explicit contexts.
7421 ExprResult R = CheckPlaceholderExpr(E: Operand);
7422 if (R.isInvalid())
7423 return R;
7424
7425 R = CheckUnevaluatedOperand(E: R.get());
7426 if (R.isInvalid())
7427 return ExprError();
7428
7429 Operand = R.get();
7430
7431 if (!inTemplateInstantiation() && !Operand->isInstantiationDependent() &&
7432 Operand->HasSideEffects(Ctx: Context, IncludePossibleEffects: false)) {
7433 // The expression operand for noexcept is in an unevaluated expression
7434 // context, so side effects could result in unintended consequences.
7435 Diag(Loc: Operand->getExprLoc(), DiagID: diag::warn_side_effects_unevaluated_context);
7436 }
7437
7438 CanThrowResult CanThrow = canThrow(E: Operand);
7439 return new (Context)
7440 CXXNoexceptExpr(Context.BoolTy, Operand, CanThrow, KeyLoc, RParen);
7441}
7442
/// Parser callback for the 'noexcept(expression)' operator. The unnamed
/// SourceLocation parameter is accepted but unused; all semantic checking
/// is delegated to BuildCXXNoexceptExpr.
ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
                                   Expr *Operand, SourceLocation RParen) {
  return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
}
7447
7448static void MaybeDecrementCount(
7449 Expr *E, llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
7450 DeclRefExpr *LHS = nullptr;
7451 bool IsCompoundAssign = false;
7452 bool isIncrementDecrementUnaryOp = false;
7453 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
7454 if (BO->getLHS()->getType()->isDependentType() ||
7455 BO->getRHS()->getType()->isDependentType()) {
7456 if (BO->getOpcode() != BO_Assign)
7457 return;
7458 } else if (!BO->isAssignmentOp())
7459 return;
7460 else
7461 IsCompoundAssign = BO->isCompoundAssignmentOp();
7462 LHS = dyn_cast<DeclRefExpr>(Val: BO->getLHS());
7463 } else if (CXXOperatorCallExpr *COCE = dyn_cast<CXXOperatorCallExpr>(Val: E)) {
7464 if (COCE->getOperator() != OO_Equal)
7465 return;
7466 LHS = dyn_cast<DeclRefExpr>(Val: COCE->getArg(Arg: 0));
7467 } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: E)) {
7468 if (!UO->isIncrementDecrementOp())
7469 return;
7470 isIncrementDecrementUnaryOp = true;
7471 LHS = dyn_cast<DeclRefExpr>(Val: UO->getSubExpr());
7472 }
7473 if (!LHS)
7474 return;
7475 VarDecl *VD = dyn_cast<VarDecl>(Val: LHS->getDecl());
7476 if (!VD)
7477 return;
7478 // Don't decrement RefsMinusAssignments if volatile variable with compound
7479 // assignment (+=, ...) or increment/decrement unary operator to avoid
7480 // potential unused-but-set-variable warning.
7481 if ((IsCompoundAssign || isIncrementDecrementUnaryOp) &&
7482 VD->getType().isVolatileQualified())
7483 return;
7484 auto iter = RefsMinusAssignments.find(Val: VD);
7485 if (iter == RefsMinusAssignments.end())
7486 return;
7487 iter->getSecond()--;
7488}
7489
/// Perform the conversions required for an expression used in a
/// context that ignores the result (a discarded-value expression).
///
/// Also un-counts writes for -Wunused-but-set diagnostics via
/// MaybeDecrementCount. In C++, the lvalue-to-rvalue conversion is applied
/// only for the C++11 volatile "special forms" that are read when
/// discarded, and a temporary materialization conversion is applied in
/// lifetime-extending contexts. In C, the usual
/// function/array/lvalue conversions are applied and the resulting type is
/// required to be complete.
ExprResult Sema::IgnoredValueConversions(Expr *E) {
  MaybeDecrementCount(E, RefsMinusAssignments);

  // Resolve placeholder types first; on failure, return the original
  // expression rather than an error so callers can keep going.
  if (E->hasPlaceholderType()) {
    ExprResult result = CheckPlaceholderExpr(E);
    if (result.isInvalid()) return E;
    E = result.get();
  }

  if (getLangOpts().CPlusPlus) {
    // The C++11 standard defines the notion of a discarded-value expression;
    // normally, we don't need to do anything to handle it, but if it is a
    // volatile lvalue with a special form, we perform an lvalue-to-rvalue
    // conversion.
    if (getLangOpts().CPlusPlus11 && E->isReadIfDiscardedInCPlusPlus11()) {
      ExprResult Res = DefaultLvalueConversion(E);
      if (Res.isInvalid())
        return E;
      E = Res.get();
    } else {
      // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
      // it occurs as a discarded-value expression.
      CheckUnusedVolatileAssignment(E);
    }

    // C++1z:
    //   If the expression is a prvalue after this optional conversion, the
    //   temporary materialization conversion is applied.
    //
    // We do not materialize temporaries by default in order to avoid creating
    // unnecessary temporary objects. If we skip this step, IR generation is
    // able to synthesize the storage for itself in the aggregate case, and
    // adding the extra node to the AST is just clutter.
    if (isInLifetimeExtendingContext() && getLangOpts().CPlusPlus17 &&
        E->isPRValue() && !E->getType()->isVoidType()) {
      ExprResult Res = TemporaryMaterializationConversion(E);
      if (Res.isInvalid())
        return E;
      E = Res.get();
    }
    return E;
  }

  // C99 6.3.2.1:
  //   [Except in specific positions,] an lvalue that does not have
  //   array type is converted to the value stored in the
  //   designated object (and is no longer an lvalue).
  if (E->isPRValue()) {
    // In C, function designators (i.e. expressions of function type)
    // are r-values, but we still want to do function-to-pointer decay
    // on them. This is both technically correct and convenient for
    // some clients.
    if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType())
      return DefaultFunctionArrayConversion(E);

    return E;
  }

  // GCC seems to also exclude expressions of incomplete enum type.
  if (const auto *ED = E->getType()->getAsEnumDecl(); ED && !ED->isComplete()) {
    // FIXME: stupid workaround for a codegen bug!
    E = ImpCastExprToType(E, Type: Context.VoidTy, CK: CK_ToVoid).get();
    return E;
  }

  ExprResult Res = DefaultFunctionArrayLvalueConversion(E);
  if (Res.isInvalid())
    return E;
  E = Res.get();

  // In C, a discarded value of non-void type must still be complete.
  if (!E->getType()->isVoidType())
    RequireCompleteType(Loc: E->getExprLoc(), T: E->getType(),
                        DiagID: diag::err_incomplete_type);
  return E;
}
7567
/// Perform checks on an expression appearing as an unevaluated operand
/// (e.g. the operand of noexcept). Currently this only clears the
/// deprecated-volatile-assignment bookkeeping; the expression is returned
/// unchanged.
ExprResult Sema::CheckUnevaluatedOperand(Expr *E) {
  // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
  // it occurs as an unevaluated operand.
  CheckUnusedVolatileAssignment(E);

  return E;
}
7575
7576// If we can unambiguously determine whether Var can never be used
7577// in a constant expression, return true.
7578// - if the variable and its initializer are non-dependent, then
7579// we can unambiguously check if the variable is a constant expression.
7580// - if the initializer is not value dependent - we can determine whether
7581// it can be used to initialize a constant expression. If Init can not
7582// be used to initialize a constant expression we conclude that Var can
7583// never be a constant expression.
// - FIXME: if the initializer is dependent, we can still do some analysis and
7585// identify certain cases unambiguously as non-const by using a Visitor:
7586// - such as those that involve odr-use of a ParmVarDecl, involve a new
7587// delete, lambda-expr, dynamic-cast, reinterpret-cast etc...
7588static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
7589 ASTContext &Context) {
7590 if (isa<ParmVarDecl>(Val: Var)) return true;
7591 const VarDecl *DefVD = nullptr;
7592
7593 // If there is no initializer - this can not be a constant expression.
7594 const Expr *Init = Var->getAnyInitializer(D&: DefVD);
7595 if (!Init)
7596 return true;
7597 assert(DefVD);
7598 if (DefVD->isWeak())
7599 return false;
7600
7601 if (Var->getType()->isDependentType() || Init->isValueDependent()) {
7602 // FIXME: Teach the constant evaluator to deal with the non-dependent parts
7603 // of value-dependent expressions, and use it here to determine whether the
7604 // initializer is a potential constant expression.
7605 return false;
7606 }
7607
7608 return !Var->isUsableInConstantExpressions(C: Context);
7609}
7610
7611/// Check if the current lambda has any potential captures
7612/// that must be captured by any of its enclosing lambdas that are ready to
7613/// capture. If there is a lambda that can capture a nested
7614/// potential-capture, go ahead and do so. Also, check to see if any
7615/// variables are uncaptureable or do not involve an odr-use so do not
7616/// need to be captured.
7617
static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
    Expr *const FE, LambdaScopeInfo *const CurrentLSI, Sema &S) {
  // This is only meaningful in an evaluated, dependent context: capture
  // decisions for generic lambdas are deferred until then.
  assert(!S.isUnevaluatedContext());
  assert(S.CurContext->isDependentContext());
#ifndef NDEBUG
  // Sanity-check that CurContext (skipping captured regions) really is the
  // call operator of the lambda whose scope info we were handed.
  DeclContext *DC = S.CurContext;
  while (isa_and_nonnull<CapturedDecl>(DC))
    DC = DC->getParent();
  assert(
      (CurrentLSI->CallOperator == DC || !CurrentLSI->AfterParameterList) &&
      "The current call operator must be synchronized with Sema's CurContext");
#endif // NDEBUG

  const bool IsFullExprInstantiationDependent = FE->isInstantiationDependent();

  // All the potentially captureable variables in the current nested
  // lambda (within a generic outer lambda), must be captured by an
  // outer lambda that is enclosed within a non-dependent context.
  CurrentLSI->visitPotentialCaptures(Callback: [&](ValueDecl *Var, Expr *VarExpr) {
    // If the variable is clearly identified as non-odr-used and the full
    // expression is not instantiation dependent, only then do we not
    // need to check enclosing lambda's for speculative captures.
    // For e.g.:
    // Even though 'x' is not odr-used, it should be captured.
    // int test() {
    //   const int x = 10;
    //   auto L = [=](auto a) {
    //     (void) +x + a;
    //   };
    // }
    if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(CapturingVarExpr: VarExpr) &&
        !IsFullExprInstantiationDependent)
      return;

    // For structured bindings, capture works through the underlying
    // decomposed variable; bail out if there is none.
    VarDecl *UnderlyingVar = Var->getPotentiallyDecomposedVarDecl();
    if (!UnderlyingVar)
      return;

    // If we have a capture-capable lambda for the variable, go ahead and
    // capture the variable in that lambda (and all its enclosing lambdas).
    if (const UnsignedOrNone Index =
            getStackIndexOfNearestEnclosingCaptureCapableLambda(
                FunctionScopes: S.FunctionScopes, VarToCapture: Var, S))
      S.MarkCaptureUsedInEnclosingContext(Capture: Var, Loc: VarExpr->getExprLoc(), CapturingScopeIndex: *Index);
    const bool IsVarNeverAConstantExpression =
        VariableCanNeverBeAConstantExpression(Var: UnderlyingVar, Context&: S.Context);
    if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) {
      // This full expression is not instantiation dependent or the variable
      // can not be used in a constant expression - which means
      // this variable must be odr-used here, so diagnose a
      // capture violation early, if the variable is un-captureable.
      // This is purely for diagnosing errors early.  Otherwise, this
      // error would get diagnosed when the lambda becomes capture ready.
      QualType CaptureType, DeclRefType;
      SourceLocation ExprLoc = VarExpr->getExprLoc();
      // First probe silently (BuildAndDiagnose=false); only if capture is
      // impossible do we run again to emit the diagnostics.
      if (S.tryCaptureVariable(Var, Loc: ExprLoc, Kind: TryCaptureKind::Implicit,
                               /*EllipsisLoc*/ SourceLocation(),
                               /*BuildAndDiagnose*/ false, CaptureType,
                               DeclRefType, FunctionScopeIndexToStopAt: nullptr)) {
        // We will never be able to capture this variable, and we need
        // to be able to in any and all instantiations, so diagnose it.
        S.tryCaptureVariable(Var, Loc: ExprLoc, Kind: TryCaptureKind::Implicit,
                             /*EllipsisLoc*/ SourceLocation(),
                             /*BuildAndDiagnose*/ true, CaptureType,
                             DeclRefType, FunctionScopeIndexToStopAt: nullptr);
      }
    }
  });

  // Check if 'this' needs to be captured.
  if (CurrentLSI->hasPotentialThisCapture()) {
    // If we have a capture-capable lambda for 'this', go ahead and capture
    // 'this' in that lambda (and all its enclosing lambdas).
    if (const UnsignedOrNone Index =
            getStackIndexOfNearestEnclosingCaptureCapableLambda(
                FunctionScopes: S.FunctionScopes, /*0 is 'this'*/ VarToCapture: nullptr, S)) {
      const unsigned FunctionScopeIndexOfCapturableLambda = *Index;
      S.CheckCXXThisCapture(Loc: CurrentLSI->PotentialThisCaptureLocation,
                            /*Explicit*/ false, /*BuildAndDiagnose*/ true,
                            FunctionScopeIndexToStopAt: &FunctionScopeIndexOfCapturableLambda);
    }
  }

  // Reset all the potential captures at the end of each full-expression.
  CurrentLSI->clearPotentialCaptures();
}
7705
/// Finalize a full-expression.
///
/// Diagnoses unexpanded parameter packs (unless \p IsTemplateArgument),
/// applies discarded-value conversions and unused-result warnings when
/// \p DiscardedValue, runs end-of-expression checks, resolves pending
/// lambda captures for enclosing generic lambdas, and wraps the result in
/// an ExprWithCleanups if needed.
ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
                                     bool DiscardedValue, bool IsConstexpr,
                                     bool IsTemplateArgument) {
  ExprResult FullExpr = FE;

  if (!FullExpr.get())
    return ExprError();

  if (!IsTemplateArgument && DiagnoseUnexpandedParameterPack(E: FullExpr.get()))
    return ExprError();

  if (DiscardedValue) {
    // Top-level expressions default to 'id' when we're in a debugger.
    if (getLangOpts().DebuggerCastResultToId &&
        FullExpr.get()->getType() == Context.UnknownAnyTy) {
      FullExpr = forceUnknownAnyToType(E: FullExpr.get(), ToType: Context.getObjCIdType());
      if (FullExpr.isInvalid())
        return ExprError();
    }

    FullExpr = CheckPlaceholderExpr(E: FullExpr.get());
    if (FullExpr.isInvalid())
      return ExprError();

    FullExpr = IgnoredValueConversions(E: FullExpr.get());
    if (FullExpr.isInvalid())
      return ExprError();

    DiagnoseUnusedExprResult(S: FullExpr.get(), DiagID: diag::warn_unused_expr);
  }

  if (FullExpr.isInvalid())
    return ExprError();

  CheckCompletedExpr(E: FullExpr.get(), CheckLoc: CC, IsConstexpr);

  // At the end of this full expression (which could be a deeply nested
  // lambda), if there is a potential capture within the nested lambda,
  // have the outer capture-able lambda try and capture it.
  // Consider the following code:
  // void f(int, int);
  // void f(const int&, double);
  // void foo() {
  //   const int x = 10, y = 20;
  //   auto L = [=](auto a) {
  //       auto M = [=](auto b) {
  //          f(x, b); <-- requires x to be captured by L and M
  //          f(y, a); <-- requires y to be captured by L, but not all Ms
  //       };
  //   };
  // }

  // FIXME: Also consider what happens for something like this that involves
  // the gnu-extension statement-expressions or even lambda-init-captures:
  //   void f() {
  //     const int n = 0;
  //     auto L =  [&](auto a) {
  //       +n + ({ 0; a; });
  //     };
  //   }
  //
  // Here, we see +n, and then the full-expression 0; ends, so we don't
  // capture n (and instead remove it from our list of potential captures),
  // and then the full-expression +n + ({ 0; }); ends, but it's too late
  // for us to see that we need to capture n after all.

  LambdaScopeInfo *const CurrentLSI =
      getCurLambda(/*IgnoreCapturedRegions=*/IgnoreNonLambdaCapturingScope: true);
  // FIXME: PR 17877 showed that getCurLambda() can return a valid pointer
  // even if CurContext is not a lambda call operator. Refer to that Bug Report
  // for an example of the code that might cause this asynchrony.
  // By ensuring we are in the context of a lambda's call operator
  // we can fix the bug (we only need to check whether we need to capture
  // if we are within a lambda's body); but per the comments in that
  // PR, a proper fix would entail :
  //   "Alternative suggestion:
  //   - Add to Sema an integer holding the smallest (outermost) scope
  //     index that we are *lexically* within, and save/restore/set to
  //     FunctionScopes.size() in InstantiatingTemplate's
  //     constructor/destructor.
  //   - Teach the handful of places that iterate over FunctionScopes to
  //     stop at the outermost enclosing lexical scope."
  DeclContext *DC = CurContext;
  while (isa_and_nonnull<CapturedDecl>(Val: DC))
    DC = DC->getParent();
  const bool IsInLambdaDeclContext = isLambdaCallOperator(DC);
  if (IsInLambdaDeclContext && CurrentLSI &&
      CurrentLSI->hasPotentialCaptures() && !FullExpr.isInvalid())
    CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(FE, CurrentLSI,
                                                              S&: *this);
  return MaybeCreateExprWithCleanups(SubExpr: FullExpr);
}
7798
7799StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) {
7800 if (!FullStmt) return StmtError();
7801
7802 return MaybeCreateStmtWithCleanups(SubStmt: FullStmt);
7803}
7804
7805IfExistsResult
7806Sema::CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
7807 const DeclarationNameInfo &TargetNameInfo) {
7808 DeclarationName TargetName = TargetNameInfo.getName();
7809 if (!TargetName)
7810 return IfExistsResult::DoesNotExist;
7811
7812 // If the name itself is dependent, then the result is dependent.
7813 if (TargetName.isDependentName())
7814 return IfExistsResult::Dependent;
7815
7816 // Do the redeclaration lookup in the current scope.
7817 LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName,
7818 RedeclarationKind::NotForRedeclaration);
7819 LookupParsedName(R, S, SS: &SS, /*ObjectType=*/QualType());
7820 R.suppressDiagnostics();
7821
7822 switch (R.getResultKind()) {
7823 case LookupResultKind::Found:
7824 case LookupResultKind::FoundOverloaded:
7825 case LookupResultKind::FoundUnresolvedValue:
7826 case LookupResultKind::Ambiguous:
7827 return IfExistsResult::Exists;
7828
7829 case LookupResultKind::NotFound:
7830 return IfExistsResult::DoesNotExist;
7831
7832 case LookupResultKind::NotFoundInCurrentInstantiation:
7833 return IfExistsResult::Dependent;
7834 }
7835
7836 llvm_unreachable("Invalid LookupResult Kind!");
7837}
7838
7839IfExistsResult Sema::CheckMicrosoftIfExistsSymbol(Scope *S,
7840 SourceLocation KeywordLoc,
7841 bool IsIfExists,
7842 CXXScopeSpec &SS,
7843 UnqualifiedId &Name) {
7844 DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
7845
7846 // Check for an unexpanded parameter pack.
7847 auto UPPC = IsIfExists ? UPPC_IfExists : UPPC_IfNotExists;
7848 if (DiagnoseUnexpandedParameterPack(SS, UPPC) ||
7849 DiagnoseUnexpandedParameterPack(NameInfo: TargetNameInfo, UPPC))
7850 return IfExistsResult::Error;
7851
7852 return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo);
7853}
7854
7855concepts::Requirement *Sema::ActOnSimpleRequirement(Expr *E) {
7856 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: true,
7857 /*NoexceptLoc=*/SourceLocation(),
7858 /*ReturnTypeRequirement=*/{});
7859}
7860
/// Build a type-requirement ('typename T::type;') inside a requires-expression
/// from either a plain identifier or a template-id that follows the
/// nested-name-specifier. Returns null on error (after diagnosing).
concepts::Requirement *Sema::ActOnTypeRequirement(
    SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
    const IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId) {
  assert(((!TypeName && TemplateId) || (TypeName && !TemplateId)) &&
         "Exactly one of TypeName and TemplateId must be specified.");
  TypeSourceInfo *TSI = nullptr;
  if (TypeName) {
    // Plain identifier: resolve it as a typename-specifier, capturing full
    // type source info for the requirement.
    QualType T =
        CheckTypenameType(Keyword: ElaboratedTypeKeyword::Typename, KeywordLoc: TypenameKWLoc,
                          QualifierLoc: SS.getWithLocInContext(Context), II: *TypeName, IILoc: NameLoc,
                          TSI: &TSI, /*DeducedTSTContext=*/false);
    if (T.isNull())
      return nullptr;
  } else {
    // Template-id: reassemble its arguments and resolve the whole annotation
    // as a typename-specifier type.
    ASTTemplateArgsPtr ArgsPtr(TemplateId->getTemplateArgs(),
                               TemplateId->NumArgs);
    TypeResult T = ActOnTypenameType(S: CurScope, TypenameLoc: TypenameKWLoc, SS,
                                     TemplateLoc: TemplateId->TemplateKWLoc,
                                     TemplateName: TemplateId->Template, TemplateII: TemplateId->Name,
                                     TemplateIILoc: TemplateId->TemplateNameLoc,
                                     LAngleLoc: TemplateId->LAngleLoc, TemplateArgs: ArgsPtr,
                                     RAngleLoc: TemplateId->RAngleLoc);
    if (T.isInvalid())
      return nullptr;
    // Recover the TypeSourceInfo from the parser's opaque type handle.
    if (GetTypeFromParser(Ty: T.get(), TInfo: &TSI).isNull())
      return nullptr;
  }
  return BuildTypeRequirement(Type: TSI);
}
7890
7891concepts::Requirement *
7892Sema::ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc) {
7893 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc,
7894 /*ReturnTypeRequirement=*/{});
7895}
7896
7897concepts::Requirement *
7898Sema::ActOnCompoundRequirement(
7899 Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
7900 TemplateIdAnnotation *TypeConstraint, unsigned Depth) {
7901 // C++2a [expr.prim.req.compound] p1.3.3
7902 // [..] the expression is deduced against an invented function template
7903 // F [...] F is a void function template with a single type template
7904 // parameter T declared with the constrained-parameter. Form a new
7905 // cv-qualifier-seq cv by taking the union of const and volatile specifiers
7906 // around the constrained-parameter. F has a single parameter whose
7907 // type-specifier is cv T followed by the abstract-declarator. [...]
7908 //
7909 // The cv part is done in the calling function - we get the concept with
7910 // arguments and the abstract declarator with the correct CV qualification and
7911 // have to synthesize T and the single parameter of F.
7912 auto &II = Context.Idents.get(Name: "expr-type");
7913 auto *TParam = TemplateTypeParmDecl::Create(C: Context, DC: CurContext,
7914 KeyLoc: SourceLocation(),
7915 NameLoc: SourceLocation(), D: Depth,
7916 /*Index=*/P: 0, Id: &II,
7917 /*Typename=*/true,
7918 /*ParameterPack=*/false,
7919 /*HasTypeConstraint=*/true);
7920
7921 if (BuildTypeConstraint(SS, TypeConstraint, ConstrainedParameter: TParam,
7922 /*EllipsisLoc=*/SourceLocation(),
7923 /*AllowUnexpandedPack=*/true))
7924 // Just produce a requirement with no type requirements.
7925 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc, ReturnTypeRequirement: {});
7926
7927 auto *TPL = TemplateParameterList::Create(C: Context, TemplateLoc: SourceLocation(),
7928 LAngleLoc: SourceLocation(),
7929 Params: ArrayRef<NamedDecl *>(TParam),
7930 RAngleLoc: SourceLocation(),
7931 /*RequiresClause=*/nullptr);
7932 return BuildExprRequirement(
7933 E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc,
7934 ReturnTypeRequirement: concepts::ExprRequirement::ReturnTypeRequirement(TPL));
7935}
7936
/// Build an expression-requirement and determine its satisfaction status,
/// checking (in order): dependence, the noexcept constraint, and the
/// return-type-requirement. SS_Satisfied is the default when nothing demotes
/// it.
concepts::ExprRequirement *
Sema::BuildExprRequirement(
    Expr *E, bool IsSimple, SourceLocation NoexceptLoc,
    concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
  auto Status = concepts::ExprRequirement::SS_Satisfied;
  ConceptSpecializationExpr *SubstitutedConstraintExpr = nullptr;
  // A dependent expression or return-type-requirement cannot be checked
  // until instantiation.
  if (E->isInstantiationDependent() || E->getType()->isPlaceholderType() ||
      ReturnTypeRequirement.isDependent())
    Status = concepts::ExprRequirement::SS_Dependent;
  // '{ E } noexcept' was written, but E is potentially throwing.
  else if (NoexceptLoc.isValid() && canThrow(E) == CanThrowResult::CT_Can)
    Status = concepts::ExprRequirement::SS_NoexceptNotMet;
  else if (ReturnTypeRequirement.isSubstitutionFailure())
    Status = concepts::ExprRequirement::SS_TypeRequirementSubstitutionFailure;
  else if (ReturnTypeRequirement.isTypeConstraint()) {
    // C++2a [expr.prim.req]p1.3.3
    //     The immediately-declared constraint ([temp]) of decltype((E)) shall
    //     be satisfied.
    TemplateParameterList *TPL =
        ReturnTypeRequirement.getTypeConstraintTemplateParameterList();
    // decltype((E)) — the reference-qualified type of the expression — is the
    // single argument substituted into the invented parameter's constraint.
    QualType MatchedType = Context.getReferenceQualifiedType(e: E);
    llvm::SmallVector<TemplateArgument, 1> Args;
    Args.push_back(Elt: TemplateArgument(MatchedType));

    auto *Param = cast<TemplateTypeParmDecl>(Val: TPL->getParam(Idx: 0));

    MultiLevelTemplateArgumentList MLTAL(Param, Args, /*Final=*/true);
    MLTAL.addOuterRetainedLevels(Num: TPL->getDepth());
    const TypeConstraint *TC = Param->getTypeConstraint();
    assert(TC && "Type Constraint cannot be null here");
    auto *IDC = TC->getImmediatelyDeclaredConstraint();
    assert(IDC && "ImmediatelyDeclaredConstraint can't be null here.");
    // Substitute decltype((E)) into the immediately-declared constraint.
    ExprResult Constraint = SubstExpr(E: IDC, TemplateArgs: MLTAL);
    bool HasError = Constraint.isInvalid();
    if (!HasError) {
      SubstitutedConstraintExpr =
          cast<ConceptSpecializationExpr>(Val: Constraint.get());
      if (SubstitutedConstraintExpr->getSatisfaction().ContainsErrors)
        HasError = true;
    }
    if (HasError) {
      // Substitution failed: record a pretty-printed rendering of the
      // unsubstituted constraint as the requirement's diagnostic.
      return new (Context) concepts::ExprRequirement(
          createSubstDiagAt(Location: IDC->getExprLoc(),
                            Printer: [&](llvm::raw_ostream &OS) {
                              IDC->printPretty(OS, /*Helper=*/nullptr,
                                               Policy: getPrintingPolicy());
                            }),
          IsSimple, NoexceptLoc, ReturnTypeRequirement);
    }
    if (!SubstitutedConstraintExpr->isSatisfied())
      Status = concepts::ExprRequirement::SS_ConstraintsNotSatisfied;
  }
  return new (Context) concepts::ExprRequirement(E, IsSimple, NoexceptLoc,
                                                 ReturnTypeRequirement, Status,
                                                 SubstitutedConstraintExpr);
}
7992
7993concepts::ExprRequirement *
7994Sema::BuildExprRequirement(
7995 concepts::Requirement::SubstitutionDiagnostic *ExprSubstitutionDiagnostic,
7996 bool IsSimple, SourceLocation NoexceptLoc,
7997 concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
7998 return new (Context) concepts::ExprRequirement(ExprSubstitutionDiagnostic,
7999 IsSimple, NoexceptLoc,
8000 ReturnTypeRequirement);
8001}
8002
8003concepts::TypeRequirement *
8004Sema::BuildTypeRequirement(TypeSourceInfo *Type) {
8005 return new (Context) concepts::TypeRequirement(Type);
8006}
8007
8008concepts::TypeRequirement *
8009Sema::BuildTypeRequirement(
8010 concepts::Requirement::SubstitutionDiagnostic *SubstDiag) {
8011 return new (Context) concepts::TypeRequirement(SubstDiag);
8012}
8013
8014concepts::Requirement *Sema::ActOnNestedRequirement(Expr *Constraint) {
8015 return BuildNestedRequirement(E: Constraint);
8016}
8017
8018concepts::NestedRequirement *
8019Sema::BuildNestedRequirement(Expr *Constraint) {
8020 ConstraintSatisfaction Satisfaction;
8021 LocalInstantiationScope Scope(*this);
8022 if (!Constraint->isInstantiationDependent() &&
8023 !Constraint->isValueDependent() &&
8024 CheckConstraintSatisfaction(Entity: nullptr, AssociatedConstraints: AssociatedConstraint(Constraint),
8025 /*TemplateArgs=*/TemplateArgLists: {},
8026 TemplateIDRange: Constraint->getSourceRange(), Satisfaction))
8027 return nullptr;
8028 return new (Context) concepts::NestedRequirement(Context, Constraint,
8029 Satisfaction);
8030}
8031
8032concepts::NestedRequirement *
8033Sema::BuildNestedRequirement(StringRef InvalidConstraintEntity,
8034 const ASTConstraintSatisfaction &Satisfaction) {
8035 return new (Context) concepts::NestedRequirement(
8036 InvalidConstraintEntity,
8037 ASTConstraintSatisfaction::Rebuild(C: Context, Satisfaction));
8038}
8039
/// Enter a requires-expression: create the body declaration, make it the
/// current DeclContext, and check/register the local parameters. Returns the
/// body decl to be finished by ActOnFinishRequiresExpr.
RequiresExprBodyDecl *
Sema::ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
                             ArrayRef<ParmVarDecl *> LocalParameters,
                             Scope *BodyScope) {
  assert(BodyScope);

  RequiresExprBodyDecl *Body = RequiresExprBodyDecl::Create(C&: Context, DC: CurContext,
                                                            StartLoc: RequiresKWLoc);

  PushDeclContext(S: BodyScope, DC: Body);

  for (ParmVarDecl *Param : LocalParameters) {
    if (Param->getType()->isVoidType()) {
      // 'void' is only valid as a lone, unnamed, unqualified parameter
      // (an empty parameter list). Diagnose other uses and recover by
      // rewriting the type to int.
      if (LocalParameters.size() > 1) {
        Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_void_only_param);
        Param->setType(Context.IntTy);
      } else if (Param->getIdentifier()) {
        Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_param_with_void_type);
        Param->setType(Context.IntTy);
      } else if (Param->getType().hasQualifiers()) {
        Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_void_param_qualified);
      }
    } else if (Param->hasDefaultArg()) {
      // C++2a [expr.prim.req] p4
      //     [...] A local parameter of a requires-expression shall not have a
      //     default argument. [...]
      Diag(Loc: Param->getDefaultArgRange().getBegin(),
           DiagID: diag::err_requires_expr_local_parameter_default_argument);
      // Ignore default argument and move on
    } else if (Param->isExplicitObjectParameter()) {
      // C++23 [dcl.fct]p6:
      //   An explicit-object-parameter-declaration is a parameter-declaration
      //   with a this specifier. An explicit-object-parameter-declaration
      //   shall appear only as the first parameter-declaration of a
      //   parameter-declaration-list of either:
      //   - a member-declarator that declares a member function, or
      //   - a lambda-declarator.
      //
      //   The parameter-declaration-list of a requires-expression is not such
      //   a context.
      Diag(Loc: Param->getExplicitObjectParamThisLoc(),
           DiagID: diag::err_requires_expr_explicit_object_parameter);
      // Recover by dropping the 'this' specifier.
      Param->setExplicitObjectParameterLoc(SourceLocation());
    }

    // Reparent the parameter into the requires-expression body.
    Param->setDeclContext(Body);
    // If this has an identifier, add it to the scope stack.
    if (Param->getIdentifier()) {
      CheckShadow(S: BodyScope, D: Param);
      PushOnScopeChains(D: Param, S: BodyScope);
    }
  }
  return Body;
}
8094
8095void Sema::ActOnFinishRequiresExpr() {
8096 assert(CurContext && "DeclContext imbalance!");
8097 CurContext = CurContext->getLexicalParent();
8098 assert(CurContext && "Popped translation unit!");
8099}
8100
8101ExprResult Sema::ActOnRequiresExpr(
8102 SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body,
8103 SourceLocation LParenLoc, ArrayRef<ParmVarDecl *> LocalParameters,
8104 SourceLocation RParenLoc, ArrayRef<concepts::Requirement *> Requirements,
8105 SourceLocation ClosingBraceLoc) {
8106 auto *RE = RequiresExpr::Create(C&: Context, RequiresKWLoc, Body, LParenLoc,
8107 LocalParameters, RParenLoc, Requirements,
8108 RBraceLoc: ClosingBraceLoc);
8109 if (DiagnoseUnexpandedParameterPackInRequiresExpr(RE))
8110 return ExprError();
8111 return RE;
8112}
8113