1//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the actions class which performs semantic analysis and
10// builds an AST out of a parse stream.
11//
12//===----------------------------------------------------------------------===//
13
14#include "UsedDeclVisitor.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/ASTDiagnostic.h"
17#include "clang/AST/Decl.h"
18#include "clang/AST/DeclCXX.h"
19#include "clang/AST/DeclFriend.h"
20#include "clang/AST/DeclObjC.h"
21#include "clang/AST/Expr.h"
22#include "clang/AST/ExprCXX.h"
23#include "clang/AST/PrettyDeclStackTrace.h"
24#include "clang/AST/StmtCXX.h"
25#include "clang/AST/TypeOrdering.h"
26#include "clang/Basic/DarwinSDKInfo.h"
27#include "clang/Basic/DiagnosticOptions.h"
28#include "clang/Basic/PartialDiagnostic.h"
29#include "clang/Basic/SourceManager.h"
30#include "clang/Basic/TargetInfo.h"
31#include "clang/Lex/HeaderSearch.h"
32#include "clang/Lex/HeaderSearchOptions.h"
33#include "clang/Lex/Preprocessor.h"
34#include "clang/Sema/CXXFieldCollector.h"
35#include "clang/Sema/EnterExpressionEvaluationContext.h"
36#include "clang/Sema/ExternalSemaSource.h"
37#include "clang/Sema/Initialization.h"
38#include "clang/Sema/MultiplexExternalSemaSource.h"
39#include "clang/Sema/ObjCMethodList.h"
40#include "clang/Sema/RISCVIntrinsicManager.h"
41#include "clang/Sema/Scope.h"
42#include "clang/Sema/ScopeInfo.h"
43#include "clang/Sema/SemaAMDGPU.h"
44#include "clang/Sema/SemaARM.h"
45#include "clang/Sema/SemaAVR.h"
46#include "clang/Sema/SemaBPF.h"
47#include "clang/Sema/SemaCUDA.h"
48#include "clang/Sema/SemaCodeCompletion.h"
49#include "clang/Sema/SemaConsumer.h"
50#include "clang/Sema/SemaDirectX.h"
51#include "clang/Sema/SemaHLSL.h"
52#include "clang/Sema/SemaHexagon.h"
53#include "clang/Sema/SemaLoongArch.h"
54#include "clang/Sema/SemaM68k.h"
55#include "clang/Sema/SemaMIPS.h"
56#include "clang/Sema/SemaMSP430.h"
57#include "clang/Sema/SemaNVPTX.h"
58#include "clang/Sema/SemaObjC.h"
59#include "clang/Sema/SemaOpenACC.h"
60#include "clang/Sema/SemaOpenCL.h"
61#include "clang/Sema/SemaOpenMP.h"
62#include "clang/Sema/SemaPPC.h"
63#include "clang/Sema/SemaPseudoObject.h"
64#include "clang/Sema/SemaRISCV.h"
65#include "clang/Sema/SemaSPIRV.h"
66#include "clang/Sema/SemaSYCL.h"
67#include "clang/Sema/SemaSwift.h"
68#include "clang/Sema/SemaSystemZ.h"
69#include "clang/Sema/SemaWasm.h"
70#include "clang/Sema/SemaX86.h"
71#include "clang/Sema/TemplateDeduction.h"
72#include "clang/Sema/TemplateInstCallback.h"
73#include "clang/Sema/TypoCorrection.h"
74#include "llvm/ADT/DenseMap.h"
75#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/SetVector.h"
77#include "llvm/ADT/SmallPtrSet.h"
78#include "llvm/Support/TimeProfiler.h"
79#include <optional>
80
81using namespace clang;
82using namespace sema;
83
84SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
85 return Lexer::getLocForEndOfToken(Loc, Offset, SM: SourceMgr, LangOpts);
86}
87
88SourceRange
89Sema::getRangeForNextToken(SourceLocation Loc, bool IncludeMacros,
90 bool IncludeComments,
91 std::optional<tok::TokenKind> ExpectedToken) {
92 if (!Loc.isValid())
93 return SourceRange();
94 std::optional<Token> NextToken =
95 Lexer::findNextToken(Loc, SM: SourceMgr, LangOpts, IncludeComments);
96 if (!NextToken)
97 return SourceRange();
98 if (ExpectedToken && NextToken->getKind() != *ExpectedToken)
99 return SourceRange();
100 SourceLocation TokenStart = NextToken->getLocation();
101 SourceLocation TokenEnd = NextToken->getLastLoc();
102 if (!TokenStart.isValid() || !TokenEnd.isValid())
103 return SourceRange();
104 if (!IncludeMacros && (TokenStart.isMacroID() || TokenEnd.isMacroID()))
105 return SourceRange();
106
107 return SourceRange(TokenStart, TokenEnd);
108}
109
110ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
111
112DarwinSDKInfo *
113Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
114 StringRef Platform) {
115 auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
116 if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
117 Diag(Loc, DiagID: diag::warn_missing_sdksettings_for_availability_checking)
118 << Platform;
119 WarnedDarwinSDKInfoMissing = true;
120 }
121 return SDKInfo;
122}
123
124DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
125 if (CachedDarwinSDKInfo)
126 return CachedDarwinSDKInfo->get();
127 auto SDKInfo = parseDarwinSDKInfo(
128 VFS&: PP.getFileManager().getVirtualFileSystem(),
129 SDKRootPath: PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
130 if (SDKInfo && *SDKInfo) {
131 CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(args: std::move(**SDKInfo));
132 return CachedDarwinSDKInfo->get();
133 }
134 if (!SDKInfo)
135 llvm::consumeError(Err: SDKInfo.takeError());
136 CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
137 return nullptr;
138}
139
140IdentifierInfo *Sema::InventAbbreviatedTemplateParameterTypeName(
141 const IdentifierInfo *ParamName, unsigned int Index) {
142 std::string InventedName;
143 llvm::raw_string_ostream OS(InventedName);
144
145 if (!ParamName)
146 OS << "auto:" << Index + 1;
147 else
148 OS << ParamName->getName() << ":auto";
149
150 return &Context.Idents.get(Name: OS.str());
151}
152
153PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
154 const Preprocessor &PP) {
155 PrintingPolicy Policy = Context.getPrintingPolicy();
156 // In diagnostics, we print _Bool as bool if the latter is defined as the
157 // former.
158 Policy.Bool = Context.getLangOpts().Bool;
159 if (!Policy.Bool) {
160 if (const MacroInfo *BoolMacro = PP.getMacroInfo(II: Context.getBoolName())) {
161 Policy.Bool = BoolMacro->isObjectLike() &&
162 BoolMacro->getNumTokens() == 1 &&
163 BoolMacro->getReplacementToken(Tok: 0).is(K: tok::kw__Bool);
164 }
165 }
166
167 // Shorten the data output if needed
168 Policy.EntireContentsOfLargeArray = false;
169
170 return Policy;
171}
172
173void Sema::ActOnTranslationUnitScope(Scope *S) {
174 TUScope = S;
175 PushDeclContext(S, DC: Context.getTranslationUnitDecl());
176}
177
178namespace clang {
179namespace sema {
180
181class SemaPPCallbacks : public PPCallbacks {
182 Sema *S = nullptr;
183 llvm::SmallVector<SourceLocation, 8> IncludeStack;
184 llvm::SmallVector<llvm::TimeTraceProfilerEntry *, 8> ProfilerStack;
185
186public:
187 void set(Sema &S) { this->S = &S; }
188
189 void reset() { S = nullptr; }
190
191 void FileChanged(SourceLocation Loc, FileChangeReason Reason,
192 SrcMgr::CharacteristicKind FileType,
193 FileID PrevFID) override {
194 if (!S)
195 return;
196 switch (Reason) {
197 case EnterFile: {
198 SourceManager &SM = S->getSourceManager();
199 SourceLocation IncludeLoc = SM.getIncludeLoc(FID: SM.getFileID(SpellingLoc: Loc));
200 if (IncludeLoc.isValid()) {
201 if (llvm::timeTraceProfilerEnabled()) {
202 OptionalFileEntryRef FE = SM.getFileEntryRefForID(FID: SM.getFileID(SpellingLoc: Loc));
203 ProfilerStack.push_back(Elt: llvm::timeTraceAsyncProfilerBegin(
204 Name: "Source", Detail: FE ? FE->getName() : StringRef("<unknown>")));
205 }
206
207 IncludeStack.push_back(Elt: IncludeLoc);
208 S->DiagnoseNonDefaultPragmaAlignPack(
209 Kind: Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
210 IncludeLoc);
211 }
212 break;
213 }
214 case ExitFile:
215 if (!IncludeStack.empty()) {
216 if (llvm::timeTraceProfilerEnabled())
217 llvm::timeTraceProfilerEnd(E: ProfilerStack.pop_back_val());
218
219 S->DiagnoseNonDefaultPragmaAlignPack(
220 Kind: Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
221 IncludeLoc: IncludeStack.pop_back_val());
222 }
223 break;
224 default:
225 break;
226 }
227 }
228 void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
229 diag::Severity Mapping, StringRef Str) override {
230 // If one of the analysis-based diagnostics was enabled while processing
231 // a function, we want to note it in the analysis-based warnings so they
232 // can be run at the end of the function body even if the analysis warnings
233 // are disabled at that point.
234 SmallVector<diag::kind, 256> GroupDiags;
235 diag::Flavor Flavor =
236 Str[1] == 'W' ? diag::Flavor::WarningOrError : diag::Flavor::Remark;
237 StringRef Group = Str.substr(Start: 2);
238
239 if (S->PP.getDiagnostics().getDiagnosticIDs()->getDiagnosticsInGroup(
240 Flavor, Group, Diags&: GroupDiags))
241 return;
242
243 for (diag::kind K : GroupDiags) {
244 // Note: the cases in this switch should be kept in sync with the
245 // diagnostics in AnalysisBasedWarnings::getPolicyInEffectAt().
246 AnalysisBasedWarnings::Policy &Override =
247 S->AnalysisWarnings.getPolicyOverrides();
248 switch (K) {
249 default: break;
250 case diag::warn_unreachable:
251 case diag::warn_unreachable_break:
252 case diag::warn_unreachable_return:
253 case diag::warn_unreachable_loop_increment:
254 Override.enableCheckUnreachable = true;
255 break;
256 case diag::warn_double_lock:
257 Override.enableThreadSafetyAnalysis = true;
258 break;
259 case diag::warn_use_in_invalid_state:
260 Override.enableConsumedAnalysis = true;
261 break;
262 }
263 }
264 }
265};
266
267} // end namespace sema
268} // end namespace clang
269
// Out-of-line definitions for Sema's static constant members (presumably
// declared with in-class initializers in Sema.h — confirm there); required
// so that ODR-uses of the constants link under pre-C++17 semantics.
const unsigned Sema::MaxAlignmentExponent;
const uint64_t Sema::MaximumAlignment;
272
/// Construct a Sema. Wires Sema to the preprocessor, AST context, and AST
/// consumer, instantiates the per-target/per-language sub-Sema objects, and
/// registers the preprocessor callback handler. The member-initializer order
/// must match the declaration order in Sema.h.
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
           TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
    : SemaBase(*this), CollectStats(false), TUKind(TUKind),
      CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
      Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
      SourceMgr(PP.getSourceManager()), APINotes(SourceMgr, LangOpts),
      AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
      LateTemplateParser(nullptr), OpaqueParser(nullptr), CurContext(nullptr),
      ExternalSource(nullptr), StackHandler(Diags), CurScope(nullptr),
      Ident_super(nullptr), AMDGPUPtr(std::make_unique<SemaAMDGPU>(args&: *this)),
      ARMPtr(std::make_unique<SemaARM>(args&: *this)),
      AVRPtr(std::make_unique<SemaAVR>(args&: *this)),
      BPFPtr(std::make_unique<SemaBPF>(args&: *this)),
      CodeCompletionPtr(
          std::make_unique<SemaCodeCompletion>(args&: *this, args&: CodeCompleter)),
      CUDAPtr(std::make_unique<SemaCUDA>(args&: *this)),
      DirectXPtr(std::make_unique<SemaDirectX>(args&: *this)),
      HLSLPtr(std::make_unique<SemaHLSL>(args&: *this)),
      HexagonPtr(std::make_unique<SemaHexagon>(args&: *this)),
      LoongArchPtr(std::make_unique<SemaLoongArch>(args&: *this)),
      M68kPtr(std::make_unique<SemaM68k>(args&: *this)),
      MIPSPtr(std::make_unique<SemaMIPS>(args&: *this)),
      MSP430Ptr(std::make_unique<SemaMSP430>(args&: *this)),
      NVPTXPtr(std::make_unique<SemaNVPTX>(args&: *this)),
      ObjCPtr(std::make_unique<SemaObjC>(args&: *this)),
      OpenACCPtr(std::make_unique<SemaOpenACC>(args&: *this)),
      OpenCLPtr(std::make_unique<SemaOpenCL>(args&: *this)),
      OpenMPPtr(std::make_unique<SemaOpenMP>(args&: *this)),
      PPCPtr(std::make_unique<SemaPPC>(args&: *this)),
      PseudoObjectPtr(std::make_unique<SemaPseudoObject>(args&: *this)),
      RISCVPtr(std::make_unique<SemaRISCV>(args&: *this)),
      SPIRVPtr(std::make_unique<SemaSPIRV>(args&: *this)),
      SYCLPtr(std::make_unique<SemaSYCL>(args&: *this)),
      SwiftPtr(std::make_unique<SemaSwift>(args&: *this)),
      SystemZPtr(std::make_unique<SemaSystemZ>(args&: *this)),
      WasmPtr(std::make_unique<SemaWasm>(args&: *this)),
      X86Ptr(std::make_unique<SemaX86>(args&: *this)),
      MSPointerToMemberRepresentationMethod(
          LangOpts.getMSPointerToMemberRepresentationMethod()),
      MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()),
      AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
      DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
      CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
      FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
      VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
      StdCoroutineTraitsCache(nullptr), IdResolver(pp),
      OriginalLexicalContext(nullptr), StdInitializerList(nullptr),
      StdTypeIdentity(nullptr),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      StdSourceLocationImplDecl(nullptr), CXXTypeInfoDecl(nullptr),
      GlobalNewDeleteDeclared(false), DisableTypoCorrection(false),
      TyposCorrected(0), IsBuildingRecoveryCallExpr(false),
      CurrentInstantiationScope(nullptr), NonInstantiationEntries(0),
      ArgPackSubstIndex(std::nullopt), SatisfactionCache(Context) {
  assert(pp.TUKind == TUKind);
  // The TU scope is installed later via ActOnTranslationUnitScope.
  TUScope = nullptr;

  LoadedExternalKnownNamespaces = false;
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    ObjC().NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    ObjC().NSAPIObj.reset(p: new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(p: new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(Fn: &FormatASTNodeDiagnosticArgument, Cookie: &Context);

  // This evaluation context exists to ensure that there's always at least one
  // valid evaluation context available. It is never removed from the
  // evaluation stack.
  ExprEvalContexts.emplace_back(
      Args: ExpressionEvaluationContext::PotentiallyEvaluated, Args: 0, Args: CleanupInfo{},
      Args: nullptr, Args: ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  OpenMP().InitDataSharingAttributesStack();

  // Register the PP callback handler; the preprocessor owns it, so Sema only
  // keeps a raw pointer and detaches in ~Sema via reset().
  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(C: std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);

  CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
}
362
// Anchor Sema's type info to this TU. (Standard LLVM key-function idiom:
// defining one out-of-line member here keeps the vtable/type info from being
// emitted into every translation unit that uses Sema.)
void Sema::anchor() {}
365
366void Sema::addImplicitTypedef(StringRef Name, QualType T) {
367 DeclarationName DN = &Context.Idents.get(Name);
368 if (IdResolver.begin(Name: DN) == IdResolver.end())
369 PushOnScopeChains(D: Context.buildImplicitTypedef(T, Name), S: TUScope);
370}
371
/// Per-translation-unit initialization run after the preprocessor, context,
/// and consumers are wired up: notifies the consumer and external source,
/// then seeds the TU scope with target- and language-specific predefined
/// declarations (128-bit ints, ObjC types, OpenCL types, target vector
/// types, va_list, ...).
void Sema::Initialize() {
  // Create BuiltinVaListDecl *before* ExternalSemaSource::InitializeSema(this)
  // because during initialization ASTReader can emit globals that require
  // name mangling. And the name mangling uses BuiltinVaListDecl.
  if (Context.getTargetInfo().hasBuiltinMSVaList())
    (void)Context.getBuiltinMSVaListDecl();
  (void)Context.getBuiltinVaListDecl();

  // Tell the AST consumer about this Sema object if it cares.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Val: &Consumer))
    SC->InitializeSema(S&: *this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
      = dyn_cast_or_null<ExternalSemaSource>(Val: Context.getExternalSource()))
    ExternalSema->InitializeSema(S&: *this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo(Name: "__va_list_tag");

  // Without a TU scope there is nowhere to push the predefined declarations
  // below.
  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types are unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get(Name: "__int128_t");
    if (IdResolver.begin(Name: Int128) == IdResolver.end())
      PushOnScopeChains(D: Context.getInt128Decl(), S: TUScope);

    DeclarationName UInt128 = &Context.Idents.get(Name: "__uint128_t");
    if (IdResolver.begin(Name: UInt128) == IdResolver.end())
      PushOnScopeChains(D: Context.getUInt128Decl(), S: TUScope);
  }


  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get(Name: "SEL");
    if (IdResolver.begin(Name: SEL) == IdResolver.end())
      PushOnScopeChains(D: Context.getObjCSelDecl(), S: TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get(Name: "id");
    if (IdResolver.begin(Name: Id) == IdResolver.end())
      PushOnScopeChains(D: Context.getObjCIdDecl(), S: TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get(Name: "Class");
    if (IdResolver.begin(Name: Class) == IdResolver.end())
      PushOnScopeChains(D: Context.getObjCClassDecl(), S: TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get(Name: "Protocol");
    if (IdResolver.begin(Name: Protocol) == IdResolver.end())
      PushOnScopeChains(D: Context.getObjCProtocolDecl(), S: TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get(Name: "__NSConstantString");
  if (IdResolver.begin(Name: ConstantString) == IdResolver.end())
    PushOnScopeChains(D: Context.getCFConstantStringDecl(), S: TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(Name: &Context.Idents.get(Name: "type_info")) == IdResolver.end())
      PushOnScopeChains(D: Context.getMSTypeInfoTagDecl(), S: TUScope);

    addImplicitTypedef(Name: "size_t", T: Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        FeaturesMap: Context.getTargetInfo().getSupportedOpenCLOpts(), Opts: getLangOpts());
    addImplicitTypedef(Name: "sampler_t", T: Context.OCLSamplerTy);
    addImplicitTypedef(Name: "event_t", T: Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef(Name: "clk_event_t", T: Context.OCLClkEventTy);
        addImplicitTypedef(Name: "queue_t", T: Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef(Name: "reserve_id_t", T: Context.OCLReserveIDTy);
      addImplicitTypedef(Name: "atomic_int", T: Context.getAtomicType(T: Context.IntTy));
      addImplicitTypedef(Name: "atomic_uint",
                         T: Context.getAtomicType(T: Context.UnsignedIntTy));
      addImplicitTypedef(Name: "atomic_float",
                         T: Context.getAtomicType(T: Context.FloatTy));
      // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as
      // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide.
      addImplicitTypedef(Name: "atomic_flag", T: Context.getAtomicType(T: Context.IntTy));


      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.

      auto AddPointerSizeDependentTypes = [&]() {
        auto AtomicSizeT = Context.getAtomicType(T: Context.getSizeType());
        auto AtomicIntPtrT = Context.getAtomicType(T: Context.getIntPtrType());
        auto AtomicUIntPtrT = Context.getAtomicType(T: Context.getUIntPtrType());
        auto AtomicPtrDiffT =
            Context.getAtomicType(T: Context.getPointerDiffType());
        addImplicitTypedef(Name: "atomic_size_t", T: AtomicSizeT);
        addImplicitTypedef(Name: "atomic_intptr_t", T: AtomicIntPtrT);
        addImplicitTypedef(Name: "atomic_uintptr_t", T: AtomicUIntPtrT);
        addImplicitTypedef(Name: "atomic_ptrdiff_t", T: AtomicPtrDiffT);
      };

      if (Context.getTypeSize(T: Context.getSizeType()) == 32) {
        AddPointerSizeDependentTypes();
      }

      if (getOpenCLOptions().isSupported(Ext: "cl_khr_fp16", LO: getLangOpts())) {
        auto AtomicHalfT = Context.getAtomicType(T: Context.HalfTy);
        addImplicitTypedef(Name: "atomic_half", T: AtomicHalfT);
      }

      std::vector<QualType> Atomic64BitTypes;
      if (getOpenCLOptions().isSupported(Ext: "cl_khr_int64_base_atomics",
                                         LO: getLangOpts()) &&
          getOpenCLOptions().isSupported(Ext: "cl_khr_int64_extended_atomics",
                                         LO: getLangOpts())) {
        if (getOpenCLOptions().isSupported(Ext: "cl_khr_fp64", LO: getLangOpts())) {
          auto AtomicDoubleT = Context.getAtomicType(T: Context.DoubleTy);
          addImplicitTypedef(Name: "atomic_double", T: AtomicDoubleT);
          Atomic64BitTypes.push_back(x: AtomicDoubleT);
        }
        auto AtomicLongT = Context.getAtomicType(T: Context.LongTy);
        auto AtomicULongT = Context.getAtomicType(T: Context.UnsignedLongTy);
        addImplicitTypedef(Name: "atomic_long", T: AtomicLongT);
        addImplicitTypedef(Name: "atomic_ulong", T: AtomicULongT);


        // 64-bit pointer types depend on the 64-bit atomics extensions too.
        if (Context.getTypeSize(T: Context.getSizeType()) == 64) {
          AddPointerSizeDependentTypes();
        }
      }
    }

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) {                   \
    addImplicitTypedef(#ExtType, Context.Id##Ty);                              \
  }
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  // AArch64 ACLE types (SVE and NEON vector types).
  if (Context.getTargetInfo().hasAArch64ACLETypes() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(#Name, Context.SingletonId);
#define NEON_VECTOR_TYPE(Name, BaseType, ElBits, NumEls, VectorKind)           \
  addImplicitTypedef(                                                          \
      #Name, Context.getVectorType(Context.BaseType, NumEls, VectorKind));
#include "clang/Basic/AArch64ACLETypes.def"
  }

  // PowerPC MMA/VSX types.
  if (Context.getTargetInfo().getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size)                                    \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size)                                    \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  // RISC-V vector types.
  if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
  }

  // WebAssembly reference types (requires the reference-types feature).
  if (Context.getTargetInfo().getTriple().isWasm() &&
      Context.getTargetInfo().hasFeature(Feature: "reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId)                                       \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // AMDGPU types, also available when targeting AMD-flavored SPIR-V or when
  // either is the auxiliary (host/device split) target.
  if (Context.getTargetInfo().getTriple().isAMDGPU() ||
      (Context.getTargetInfo().getTriple().isSPIRV() &&
       Context.getTargetInfo().getTriple().getVendor() == llvm::Triple::AMD) ||
      (Context.getAuxTargetInfo() &&
       (Context.getAuxTargetInfo()->getTriple().isAMDGPU() ||
        (Context.getAuxTargetInfo()->getTriple().isSPIRV() &&
         Context.getAuxTargetInfo()->getTriple().getVendor() ==
             llvm::Triple::AMD)))) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align)                       \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AMDGPUTypes.def"
  }

  // Expose the Microsoft va_list builtin if the target has one.
  if (Context.getTargetInfo().hasBuiltinMSVaList()) {
    DeclarationName MSVaList = &Context.Idents.get(Name: "__builtin_ms_va_list");
    if (IdResolver.begin(Name: MSVaList) == IdResolver.end())
      PushOnScopeChains(D: Context.getBuiltinMSVaListDecl(), S: TUScope);
  }

  DeclarationName BuiltinVaList = &Context.Idents.get(Name: "__builtin_va_list");
  if (IdResolver.begin(Name: BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(D: Context.getBuiltinVaListDecl(), S: TUScope);
}
593
/// Tear down Sema: detach every collaborator that still holds a pointer to
/// this object, then free the caches Sema owns. Ordering matters — consumers
/// and external sources are told to forget us before owned state is freed.
Sema::~Sema() {
  assert(InstantiatingSpecializations.empty() &&
         "failed to clean up an InstantiatingTemplate?");

  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Val: &Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
      = dyn_cast_or_null<ExternalSemaSource>(Val: Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // Delete cached satisfactions. The node pointers are collected into a
  // separate vector first (presumably so deletion doesn't walk the cache
  // while it is being modified — confirm against FoldingSet semantics).
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(n: SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(x: &Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(Cache: ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  OpenMP().DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}
630
631void Sema::runWithSufficientStackSpace(SourceLocation Loc,
632 llvm::function_ref<void()> Fn) {
633 StackHandler.runWithSufficientStackSpace(Loc, Fn);
634}
635
636bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
637 UnavailableAttr::ImplicitReason reason) {
638 // If we're not in a function, it's an error.
639 FunctionDecl *fn = dyn_cast<FunctionDecl>(Val: CurContext);
640 if (!fn) return false;
641
642 // If we're in template instantiation, it's an error.
643 if (inTemplateInstantiation())
644 return false;
645
646 // If that function's not in a system header, it's an error.
647 if (!Context.getSourceManager().isInSystemHeader(Loc: loc))
648 return false;
649
650 // If the function is already unavailable, it's not an error.
651 if (fn->hasAttr<UnavailableAttr>()) return true;
652
653 fn->addAttr(A: UnavailableAttr::CreateImplicit(Ctx&: Context, Message: "", ImplicitReason: reason, Range: loc));
654 return true;
655}
656
657ASTMutationListener *Sema::getASTMutationListener() const {
658 return getASTConsumer().GetASTMutationListener();
659}
660
661void Sema::addExternalSource(IntrusiveRefCntPtr<ExternalSemaSource> E) {
662 assert(E && "Cannot use with NULL ptr");
663
664 if (!ExternalSource) {
665 ExternalSource = std::move(E);
666 return;
667 }
668
669 if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(Val: ExternalSource.get()))
670 Ex->AddSource(Source: std::move(E));
671 else
672 ExternalSource = llvm::makeIntrusiveRefCnt<MultiplexExternalSemaSource>(
673 A&: ExternalSource, A: std::move(E));
674}
675
/// Print semantic-analysis statistics to stderr: whether an active SFINAE
/// trap has seen an error, plus the bump allocator's and the analysis-based
/// warnings' own counters. (Presumably gated by CollectStats at the call
/// site — confirm with callers.)
void Sema::PrintStats() const {
  llvm::errs() << "\n*** Semantic Analysis Stats:\n";
  if (SFINAETrap *Trap = getSFINAEContext())
    llvm::errs() << int(Trap->hasErrorOccurred())
                 << " SFINAE diagnostics trapped.\n";

  BumpAlloc.PrintStats();
  AnalysisWarnings.PrintStats();
}
685
686void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
687 QualType SrcType,
688 SourceLocation Loc) {
689 std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
690 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
691 *ExprNullability != NullabilityKind::NullableResult))
692 return;
693
694 std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
695 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
696 return;
697
698 Diag(Loc, DiagID: diag::warn_nullability_lost) << SrcType << DstType;
699}
700
701// Generate diagnostics when adding or removing effects in a type conversion.
702void Sema::diagnoseFunctionEffectConversion(QualType DstType, QualType SrcType,
703 SourceLocation Loc) {
704 const auto SrcFX = FunctionEffectsRef::get(QT: SrcType);
705 const auto DstFX = FunctionEffectsRef::get(QT: DstType);
706 if (SrcFX != DstFX) {
707 for (const auto &Diff : FunctionEffectDiffVector(SrcFX, DstFX)) {
708 if (Diff.shouldDiagnoseConversion(SrcType, SrcFX, DstType, DstFX))
709 Diag(Loc, DiagID: diag::warn_invalid_add_func_effects) << Diff.effectName();
710 }
711 }
712}
713
/// Warn when a zero constant is implicitly converted to a null pointer in
/// C++11 or later and suggest 'nullptr' via a fix-it. A series of ordered
/// early returns suppresses the warning for expressions that are already
/// nullptr/__null, synthesized operator<=> rewrites, defaulted comparison
/// operators, and system-header macros other than NULL.
void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
  // nullptr only exists from C++11 on, so don't warn on its absence earlier.
  if (!getLangOpts().CPlusPlus11)
    return;

  // Only null-to-pointer conversions are of interest.
  if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    return;

  // Already nullptr (or GNU __null): nothing to suggest.
  const Expr *EStripped = E->IgnoreParenImpCasts();
  if (EStripped->getType()->isNullPtrType())
    return;
  if (isa<GNUNullExpr>(Val: EStripped))
    return;

  if (Diags.isIgnored(DiagID: diag::warn_zero_as_null_pointer_constant,
                      Loc: E->getBeginLoc()))
    return;

  // Don't diagnose the conversion from a 0 literal to a null pointer argument
  // in a synthesized call to operator<=>.
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.back().Kind ==
          CodeSynthesisContext::RewritingOperatorAsSpaceship)
    return;

  // Ignore null pointers in defaulted comparison operators.
  FunctionDecl *FD = getCurFunctionDecl();
  if (FD && FD->isDefaulted()) {
    return;
  }

  // If it is a macro from system header, and if the macro name is not "NULL",
  // do not warn.
  // Note that uses of "NULL" will be ignored above on systems that define it
  // as __null.
  SourceLocation MaybeMacroLoc = E->getBeginLoc();
  if (Diags.getSuppressSystemWarnings() &&
      SourceMgr.isInSystemMacro(loc: MaybeMacroLoc) &&
      !findMacroSpelling(loc&: MaybeMacroLoc, name: "NULL"))
    return;

  Diag(Loc: E->getBeginLoc(), DiagID: diag::warn_zero_as_null_pointer_constant)
      << FixItHint::CreateReplacement(RemoveRange: E->getSourceRange(), Code: "nullptr");
}
758
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
///
/// \param E        The expression to cast; may be returned unchanged, mutated
///                 in place (when merging into an existing ImplicitCastExpr),
///                 or wrapped in a new ImplicitCastExpr.
/// \param Ty       The destination type.
/// \param Kind     The cast kind to use for the implicit cast.
/// \param VK       The value category the result should have.
/// \param BasePath Base-class path for derived-to-base casts, if any.
/// \param CCK      Whether this conversion comes from an explicit cast; used
///                 to suppress some diagnostics for explicit casts.
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
                                   CastKind Kind, ExprValueKind VK,
                                   const CXXCastPath *BasePath,
                                   CheckedConversionKind CCK) {
#ifndef NDEBUG
  // Debug-only sanity checks on the value-category transition: only a known
  // allowlist of cast kinds may produce a prvalue from a glvalue operand, and
  // (except for CK_Dependent) nothing may turn a prvalue into a glvalue.
  if (VK == VK_PRValue && !E->isPRValue()) {
    switch (Kind) {
    default:
      llvm_unreachable(
          ("can't implicitly cast glvalue to prvalue with this cast "
           "kind: " +
           std::string(CastExpr::getCastKindName(Kind)))
              .c_str());
    case CK_Dependent:
    case CK_LValueToRValue:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_ToVoid:
    case CK_NonAtomicToAtomic:
    case CK_HLSLArrayRValue:
    case CK_HLSLAggregateSplatCast:
      break;
    }
  }
  assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
         "can't cast prvalue to glvalue");
#endif

  // Emit conversion diagnostics before any rewriting of E below.
  diagnoseNullableToNonnullConversion(DstType: Ty, SrcType: E->getType(), Loc: E->getBeginLoc());
  diagnoseZeroToNullptrConversion(Kind, E);
  // Function-effect diagnostics are suppressed for explicit casts and for
  // null-to-pointer conversions, which cannot change effects.
  if (Context.hasAnyFunctionEffects() && !isCast(CCK) &&
      Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    diagnoseFunctionEffectConversion(DstType: Ty, SrcType: E->getType(), Loc: E->getBeginLoc());

  // Compare canonical types so that sugar differences don't force a cast.
  QualType ExprTy = Context.getCanonicalType(T: E->getType());
  QualType TypeTy = Context.getCanonicalType(T: Ty);

  // This cast is used in place of a regular LValue to RValue cast for
  // HLSL Array Parameter Types. It needs to be emitted even if
  // ExprTy == TypeTy, except if E is an HLSLOutArgExpr
  // Emitting a cast in that case will prevent HLSLOutArgExpr from
  // being handled properly in EmitCallArg
  if (Kind == CK_HLSLArrayRValue && !isa<HLSLOutArgExpr>(Val: E))
    return ImplicitCastExpr::Create(Context, T: Ty, Kind, Operand: E, BasePath, Cat: VK,
                                    FPO: CurFPFeatureOverrides());

  // Identical canonical types: no cast node needed at all.
  if (ExprTy == TypeTy)
    return E;

  if (Kind == CK_ArrayToPointerDecay) {
    // C++1z [conv.array]: The temporary materialization conversion is applied.
    // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
    if (getLangOpts().CPlusPlus && E->isPRValue()) {
      // The temporary is an lvalue in C++98 and an xvalue otherwise.
      ExprResult Materialized = CreateMaterializeTemporaryExpr(
          T: E->getType(), Temporary: E, BoundToLvalueReference: !getLangOpts().CPlusPlus11);
      if (Materialized.isInvalid())
        return ExprError();
      E = Materialized.get();
    }
    // C17 6.7.1p6 footnote 124: The implementation can treat any register
    // declaration simply as an auto declaration. However, whether or not
    // addressable storage is actually used, the address of any part of an
    // object declared with storage-class specifier register cannot be
    // computed, either explicitly(by use of the unary & operator as discussed
    // in 6.5.3.2) or implicitly(by converting an array name to a pointer as
    // discussed in 6.3.2.1).Thus, the only operator that can be applied to an
    // array declared with storage-class specifier register is sizeof.
    if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E)) {
        if (const auto *VD = dyn_cast<VarDecl>(Val: DRE->getDecl())) {
          if (VD->getStorageClass() == SC_Register) {
            Diag(Loc: E->getExprLoc(), DiagID: diag::err_typecheck_address_of)
                << /*register variable*/ 3 << E->getSourceRange();
            return ExprError();
          }
        }
      }
    }
  }

  // Merge into an existing implicit cast of the same kind (with no base path)
  // instead of stacking a second ImplicitCastExpr on top of it.
  if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(Val: E)) {
    if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
      ImpCast->setType(Ty);
      ImpCast->setValueKind(VK);
      return E;
    }
  }

  bool IsExplicitCast = isa<CStyleCastExpr>(Val: E) || isa<CXXStaticCastExpr>(Val: E) ||
                        isa<CXXFunctionalCastExpr>(Val: E);

  // For explicit integral casts from an overflow-behavior type, propagate the
  // source's overflow behavior onto the (plain integer) destination type.
  if ((Kind == CK_IntegralCast || Kind == CK_IntegralToBoolean ||
       (Kind == CK_NoOp && E->getType()->isIntegerType() &&
        Ty->isIntegerType())) &&
      IsExplicitCast) {
    if (const auto *SourceOBT = E->getType()->getAs<OverflowBehaviorType>()) {
      if (Ty->isIntegerType() && !Ty->isOverflowBehaviorType()) {
        Ty = Context.getOverflowBehaviorType(Kind: SourceOBT->getBehaviorKind(), Wrapped: Ty);
      }
    }
  }

  return ImplicitCastExpr::Create(Context, T: Ty, Kind, Operand: E, BasePath, Cat: VK,
                                  FPO: CurFPFeatureOverrides());
}
868
869CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
870 switch (ScalarTy->getScalarTypeKind()) {
871 case Type::STK_Bool: return CK_NoOp;
872 case Type::STK_CPointer: return CK_PointerToBoolean;
873 case Type::STK_BlockPointer: return CK_PointerToBoolean;
874 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
875 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
876 case Type::STK_Integral: return CK_IntegralToBoolean;
877 case Type::STK_Floating: return CK_FloatingToBoolean;
878 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
879 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
880 case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
881 }
882 llvm_unreachable("unknown scalar type kind");
883}
884
/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
///
/// Returns true when \p D should be *removed* from the unused-decl list,
/// i.e. when we should NOT warn about it: it is used, externally visible,
/// or a later/definition redeclaration says warning is unwarranted.
static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
  // Any redeclaration being used makes the whole chain used.
  if (D->getMostRecentDecl()->isUsed())
    return true;

  // Externally visible decls may be used by other translation units.
  if (D->isExternallyVisible())
    return true;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // If this is a function template, a specialization that should be
    // removed removes the template as well.
    if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, D: Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become definition so check again.
    const FunctionDecl *DeclToCheck;
    if (FD->hasBody(Definition&: DeclToCheck))
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(D: DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = FD->getMostRecentDecl();
    if (DeclToCheck != FD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(D: DeclToCheck);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D)) {
    // If a variable usable in constant expressions is referenced,
    // don't warn if it isn't used: if the value of a variable is required
    // for the computation of a constant expression, it doesn't make sense to
    // warn even if the variable isn't odr-used. (isReferenced doesn't
    // precisely reflect that, but it's a decent approximation.)
    if (VD->isReferenced() &&
        VD->mightBeUsableInConstantExpressions(C: SemaRef->Context))
      return true;

    if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
      // If this is a variable template, a specialization that should be
      // removed removes the template as well.
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, D: Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become definition so check again.
    const VarDecl *DeclToCheck = VD->getDefinition();
    if (DeclToCheck)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(D: DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = VD->getMostRecentDecl();
    if (DeclToCheck != VD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(D: DeclToCheck);
  }

  // Keep it in the list: it is a warning candidate.
  return false;
}
946
947static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
948 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND))
949 return FD->isExternC();
950 return cast<VarDecl>(Val: ND)->isExternC();
951}
952
953/// Determine whether ND is an external-linkage function or variable whose
954/// type has no linkage.
955bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
956 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
957 // because we also want to catch the case where its type has VisibleNoLinkage,
958 // which does not affect the linkage of VD.
959 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
960 !isExternalFormalLinkage(L: VD->getType()->getLinkage()) &&
961 !isFunctionOrVarDeclExternC(ND: VD);
962}
963
964bool Sema::isMainFileLoc(SourceLocation Loc) const {
965 if (TUKind != TU_Complete || getLangOpts().IsHeaderFile)
966 return false;
967 return SourceMgr.isInMainFile(Loc);
968}
969
/// Obtains a sorted list of functions and variables that are undefined but
/// ODR-used.
///
/// Filters Sema's UndefinedButUsed map down to the entities that genuinely
/// require a definition in this TU, appending (decl, first-use-location)
/// pairs to \p Undefined.
void Sema::getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
  for (const auto &UndefinedUse : UndefinedButUsed) {
    NamedDecl *ND = UndefinedUse.first;

    // Ignore declarations that have become invalid.
    if (ND->isInvalidDecl()) continue;

    // __attribute__((weakref)) is basically a definition.
    if (ND->hasAttr<WeakRefAttr>()) continue;

    // Deduction guides are never "defined" in the ordinary sense.
    if (isa<CXXDeductionGuideDecl>(Val: ND))
      continue;

    if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
      // An exported function will always be emitted when defined, so even if
      // the function is inline, it doesn't have to be emitted in this TU. An
      // imported function implies that it has been exported somewhere else.
      continue;
    }

    if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) {
      if (FD->isDefined())
        continue;
      // An externally visible, non-inline function can be defined in another
      // TU — unless its type has no linkage (then no other TU could declare
      // it) or explicit instantiation is excluded.
      if (FD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(VD: FD) &&
          !FD->getMostRecentDecl()->isInlined() &&
          !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;
      // Builtins are synthesized by the compiler; no definition expected.
      if (FD->getBuiltinID())
        continue;
    } else {
      const auto *VD = cast<VarDecl>(Val: ND);
      // Tentative or real definitions satisfy the requirement.
      if (VD->hasDefinition() != VarDecl::DeclarationOnly)
        continue;
      // Same reasoning as for functions above, with 'inline' variables.
      if (VD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(VD) &&
          !VD->getMostRecentDecl()->isInline() &&
          !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;

      // Skip VarDecls that lack formal definitions but which we know are in
      // fact defined somewhere.
      if (VD->isKnownToBeDefined())
        continue;
    }

    Undefined.push_back(Elt: std::make_pair(x&: ND, y: UndefinedUse.second));
  }
}
1022
/// checkUndefinedButUsed - Check for undefined objects with internal linkage
/// or that are inline.
///
/// Emits one diagnostic per offending entity (plus a note at the first use
/// location) and clears Sema's UndefinedButUsed list.
static void checkUndefinedButUsed(Sema &S) {
  if (S.UndefinedButUsed.empty()) return;

  // Collect all the still-undefined entities with internal linkage.
  SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
  S.getUndefinedButUsed(Undefined);
  S.UndefinedButUsed.clear();
  if (Undefined.empty()) return;

  for (const auto &Undef : Undefined) {
    ValueDecl *VD = cast<ValueDecl>(Val: Undef.first);
    SourceLocation UseLoc = Undef.second;

    if (S.isExternalWithNoLinkageType(VD)) {
      // C++ [basic.link]p8:
      //   A type without linkage shall not be used as the type of a variable
      //   or function with external linkage unless
      //    -- the entity has C language linkage
      //    -- the entity is not odr-used or is defined in the same TU
      //
      // As an extension, accept this in cases where the type is externally
      // visible, since the function or variable actually can be defined in
      // another translation unit in that case.
      S.Diag(Loc: VD->getLocation(), DiagID: isExternallyVisible(L: VD->getType()->getLinkage())
                                   ? diag::ext_undefined_internal_type
                                   : diag::err_undefined_internal_type)
          << isa<VarDecl>(Val: VD) << VD;
    } else if (!VD->isExternallyVisible()) {
      // FIXME: We can promote this to an error. The function or variable can't
      // be defined anywhere else, so the program must necessarily violate the
      // one definition rule.
      //
      // Under OpenMP, suppress the warning for the compiler-synthesized base
      // function of a 'declare variant' (unless that extension is disabled).
      bool IsImplicitBase = false;
      if (const auto *BaseD = dyn_cast<FunctionDecl>(Val: VD)) {
        auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
        if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
                          TP: llvm::omp::TraitProperty::
                              implementation_extension_disable_implicit_base)) {
          const auto *Func = cast<FunctionDecl>(
              Val: cast<DeclRefExpr>(Val: DVAttr->getVariantFuncRef())->getDecl());
          IsImplicitBase = BaseD->isImplicit() &&
                           Func->getIdentifier()->isMangledOpenMPVariantName();
        }
      }
      if (!S.getLangOpts().OpenMP || !IsImplicitBase)
        S.Diag(Loc: VD->getLocation(), DiagID: diag::warn_undefined_internal)
            << isa<VarDecl>(Val: VD) << VD;
    } else if (auto *FD = dyn_cast<FunctionDecl>(Val: VD)) {
      (void)FD;
      assert(FD->getMostRecentDecl()->isInlined() &&
             "used object requires definition but isn't inline or internal?");
      // FIXME: This is ill-formed; we should reject.
      S.Diag(Loc: VD->getLocation(), DiagID: diag::warn_undefined_inline) << VD;
    } else {
      // Remaining case: an inline variable with no definition.
      assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
             "used var requires definition but isn't inline or internal?");
      S.Diag(Loc: VD->getLocation(), DiagID: diag::err_undefined_inline_var) << VD;
    }
    if (UseLoc.isValid())
      S.Diag(Loc: UseLoc, DiagID: diag::note_used_here);
  }
}
1086
1087void Sema::LoadExternalWeakUndeclaredIdentifiers() {
1088 if (!ExternalSource)
1089 return;
1090
1091 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
1092 ExternalSource->ReadWeakUndeclaredIdentifiers(WI&: WeakIDs);
1093 for (auto &WeakID : WeakIDs)
1094 (void)WeakUndeclaredIdentifiers[WeakID.first].insert(X: WeakID.second);
1095}
1096
1097void Sema::LoadExternalExtnameUndeclaredIdentifiers() {
1098 if (!ExternalSource)
1099 return;
1100
1101 SmallVector<std::pair<IdentifierInfo *, AsmLabelAttr *>, 4> ExtnameIDs;
1102 ExternalSource->ReadExtnameUndeclaredIdentifiers(EI&: ExtnameIDs);
1103 for (auto &ExtnameID : ExtnameIDs)
1104 ExtnameUndeclaredIdentifiers[ExtnameID.first] = ExtnameID.second;
1105}
1106
1107typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
1108
/// Returns true, if all methods and nested classes of the given
/// CXXRecordDecl are defined in this translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
///
/// Results are memoized in \p MNCComplete; recursion handles nested classes.
/// Note the early "not a complete definition" exit is not cached.
static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
                                            RecordCompleteMap &MNCComplete) {
  // Return the cached answer if we've already analyzed this record.
  RecordCompleteMap::iterator Cache = MNCComplete.find(Val: RD);
  if (Cache != MNCComplete.end())
    return Cache->second;
  if (!RD->isCompleteDefinition())
    return false;
  bool Complete = true;
  // Walk the record's members; stop as soon as something is incomplete.
  for (DeclContext::decl_iterator I = RD->decls_begin(),
                                  E = RD->decls_end();
       I != E && Complete; ++I) {
    // Defaulted and (non-destructor) pure-virtual methods don't need a body.
    if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Val: *I))
      Complete = M->isDefined() || M->isDefaulted() ||
                 (M->isPureVirtual() && !isa<CXXDestructorDecl>(Val: M));
    else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(Val: *I))
      // If the template function is marked as late template parsed at this
      // point, it has not been instantiated and therefore we have not
      // performed semantic analysis on it yet, so we cannot know if the type
      // can be considered complete.
      Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
                  F->getTemplatedDecl()->isDefined();
    else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(Val: *I)) {
      // The injected class name is not a real nested class.
      if (R->isInjectedClassName())
        continue;
      if (R->hasDefinition())
        Complete = MethodsAndNestedClassesComplete(RD: R->getDefinition(),
                                                   MNCComplete);
      else
        Complete = false;
    }
  }
  MNCComplete[RD] = Complete;
  return Complete;
}
1148
/// Returns true, if the given CXXRecordDecl is fully defined in this
/// translation unit, i.e. all methods are defined or pure virtual and all
/// friends, friend functions and nested classes are fully defined in this
/// translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
///
/// Memoized in \p RecordsComplete; the member/nested-class portion of the
/// answer is delegated to MethodsAndNestedClassesComplete (\p MNCComplete).
static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
                                 RecordCompleteMap &RecordsComplete,
                                 RecordCompleteMap &MNCComplete) {
  // Return the cached answer if we've already analyzed this record.
  RecordCompleteMap::iterator Cache = RecordsComplete.find(Val: RD);
  if (Cache != RecordsComplete.end())
    return Cache->second;
  bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
  // Additionally require every friend to be complete; stop at first failure.
  for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
                                      E = RD->friend_end();
       I != E && Complete; ++I) {
    // Check if friend classes and methods are complete.
    if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
      // Friend classes are available as the TypeSourceInfo of the FriendDecl.
      if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
        Complete = MethodsAndNestedClassesComplete(RD: FriendD, MNCComplete);
      else
        Complete = false;
    } else {
      // Friend functions are available through the NamedDecl of FriendDecl.
      if (const FunctionDecl *FD =
              dyn_cast<FunctionDecl>(Val: (*I)->getFriendDecl()))
        Complete = FD->isDefined();
      else
        // This is a template friend, give up.
        Complete = false;
    }
  }
  RecordsComplete[RD] = Complete;
  return Complete;
}
1186
1187void Sema::emitAndClearUnusedLocalTypedefWarnings() {
1188 if (ExternalSource)
1189 ExternalSource->ReadUnusedLocalTypedefNameCandidates(
1190 Decls&: UnusedLocalTypedefNameCandidates);
1191 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
1192 if (TD->isReferenced())
1193 continue;
1194 Diag(Loc: TD->getLocation(), DiagID: diag::warn_unused_local_typedef)
1195 << isa<TypeAliasDecl>(Val: TD) << TD->getDeclName();
1196 }
1197 UnusedLocalTypedefNameCandidates.clear();
1198}
1199
1200void Sema::ActOnStartOfTranslationUnit() {
1201 if (getLangOpts().CPlusPlusModules &&
1202 getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
1203 HandleStartOfHeaderUnit();
1204}
1205
/// Finish a fragment of the translation unit (global module fragment, module
/// purview, or an ordinary TU), performing the template instantiations and
/// vtable definitions that must happen at that boundary.
void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
  if (Kind == TUFragmentKind::Global) {
    // Perform Pending Instantiations at the end of global module fragment so
    // that the module ownership of TU-level decls won't get messed.
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
    return;
  }

  // Transfer late parsed template instantiations over to the pending template
  // instantiation list. During normal compilation, the late template parser
  // will be installed and instantiating these templates will succeed.
  //
  // If we are building a TU prefix for serialization, it is also safe to
  // transfer these over, even though they are not parsed. The end of the TU
  // should be outside of any eager template instantiation scope, so when this
  // AST is deserialized, these templates will not be parsed until the end of
  // the combined TU.
  PendingInstantiations.insert(position: PendingInstantiations.end(),
                               first: LateParsedInstantiations.begin(),
                               last: LateParsedInstantiations.end());
  LateParsedInstantiations.clear();

  // If DefinedUsedVTables ends up marking any virtual member functions it
  // might lead to more pending template instantiations, which we then need
  // to instantiate.
  DefineUsedVTables();

  // C++: Perform implicit template instantiations.
  //
  // FIXME: When we perform these implicit instantiations, we do not
  // carefully keep track of the point of instantiation (C++ [temp.point]).
  // This means that name lookup that occurs within the template
  // instantiation will always happen at the end of the translation unit,
  // so it will find some names that are not required to be found. This is
  // valid, but we could do better by diagnosing if an instantiation uses a
  // name that was not visible at its first point of instantiation.
  if (ExternalSource) {
    // Load pending instantiations from the external source. These are
    // prepended so they are handled before locally-recorded ones.
    SmallVector<PendingImplicitInstantiation, 4> Pending;
    ExternalSource->ReadPendingInstantiations(Pending);
    for (auto PII : Pending)
      if (auto Func = dyn_cast<FunctionDecl>(Val: PII.first))
        Func->setInstantiationIsPending(true);
    PendingInstantiations.insert(position: PendingInstantiations.begin(),
                                 first: Pending.begin(), last: Pending.end());
  }

  {
    // Scoped so the time-trace entry covers only the instantiation work.
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
  }

  // Flush diagnostics that were deferred until instantiation finished.
  emitDeferredDiags();

  assert(LateParsedInstantiations.empty() &&
         "end of TU template instantiation should not create more "
         "late-parsed templates");
}
1265
1266void Sema::ActOnEndOfTranslationUnit() {
1267 assert(DelayedDiagnostics.getCurrentPool() == nullptr
1268 && "reached end of translation unit with a pool attached?");
1269
1270 // If code completion is enabled, don't perform any end-of-translation-unit
1271 // work.
1272 if (PP.isCodeCompletionEnabled())
1273 return;
1274
1275 // Complete translation units and modules define vtables and perform implicit
1276 // instantiations. PCH files do not.
1277 if (TUKind != TU_Prefix) {
1278 ObjC().DiagnoseUseOfUnimplementedSelectors();
1279
1280 ActOnEndOfTranslationUnitFragment(
1281 Kind: !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1282 Module::PrivateModuleFragment
1283 ? TUFragmentKind::Private
1284 : TUFragmentKind::Normal);
1285
1286 CheckDelayedMemberExceptionSpecs();
1287 } else {
1288 // If we are building a TU prefix for serialization, it is safe to transfer
1289 // these over, even though they are not parsed. The end of the TU should be
1290 // outside of any eager template instantiation scope, so when this AST is
1291 // deserialized, these templates will not be parsed until the end of the
1292 // combined TU.
1293 PendingInstantiations.insert(position: PendingInstantiations.end(),
1294 first: LateParsedInstantiations.begin(),
1295 last: LateParsedInstantiations.end());
1296 LateParsedInstantiations.clear();
1297
1298 if (LangOpts.PCHInstantiateTemplates) {
1299 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1300 PerformPendingInstantiations();
1301 }
1302 }
1303
1304 DiagnoseUnterminatedPragmaAlignPack();
1305 DiagnoseUnterminatedPragmaAttribute();
1306 OpenMP().DiagnoseUnterminatedOpenMPDeclareTarget();
1307 DiagnosePrecisionLossInComplexDivision();
1308
1309 // All delayed member exception specs should be checked or we end up accepting
1310 // incompatible declarations.
1311 assert(DelayedOverridingExceptionSpecChecks.empty());
1312 assert(DelayedEquivalentExceptionSpecChecks.empty());
1313
1314 // All dllexport classes should have been processed already.
1315 assert(DelayedDllExportClasses.empty());
1316 assert(DelayedDllExportMemberFunctions.empty());
1317
1318 // Remove file scoped decls that turned out to be used.
1319 UnusedFileScopedDecls.erase(
1320 From: std::remove_if(first: UnusedFileScopedDecls.begin(source: nullptr, LocalOnly: true),
1321 last: UnusedFileScopedDecls.end(),
1322 pred: [this](const DeclaratorDecl *DD) {
1323 return ShouldRemoveFromUnused(SemaRef: this, D: DD);
1324 }),
1325 To: UnusedFileScopedDecls.end());
1326
1327 if (TUKind == TU_Prefix) {
1328 // Translation unit prefixes don't need any of the checking below.
1329 if (!PP.isIncrementalProcessingEnabled())
1330 TUScope = nullptr;
1331 return;
1332 }
1333
1334 // Check for #pragma weak identifiers that were never declared
1335 LoadExternalWeakUndeclaredIdentifiers();
1336 for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
1337 if (WeakIDs.second.empty())
1338 continue;
1339
1340 Decl *PrevDecl = LookupSingleName(S: TUScope, Name: WeakIDs.first, Loc: SourceLocation(),
1341 NameKind: LookupOrdinaryName);
1342 if (PrevDecl != nullptr &&
1343 !(isa<FunctionDecl>(Val: PrevDecl) || isa<VarDecl>(Val: PrevDecl)))
1344 for (const auto &WI : WeakIDs.second)
1345 Diag(Loc: WI.getLocation(), DiagID: diag::warn_attribute_wrong_decl_type)
1346 << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
1347 else
1348 for (const auto &WI : WeakIDs.second)
1349 Diag(Loc: WI.getLocation(), DiagID: diag::warn_weak_identifier_undeclared)
1350 << WeakIDs.first;
1351 }
1352
1353 if (LangOpts.CPlusPlus11 &&
1354 !Diags.isIgnored(DiagID: diag::warn_delegating_ctor_cycle, Loc: SourceLocation()))
1355 CheckDelegatingCtorCycles();
1356
1357 if (!Diags.hasErrorOccurred()) {
1358 if (ExternalSource)
1359 ExternalSource->ReadUndefinedButUsed(Undefined&: UndefinedButUsed);
1360 checkUndefinedButUsed(S&: *this);
1361 }
1362
1363 // A global-module-fragment is only permitted within a module unit.
1364 if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1365 Module::ExplicitGlobalModuleFragment) {
1366 Diag(Loc: ModuleScopes.back().BeginLoc,
1367 DiagID: diag::err_module_declaration_missing_after_global_module_introducer);
1368 } else if (getLangOpts().getCompilingModule() ==
1369 LangOptions::CMK_ModuleInterface &&
1370 // We can't use ModuleScopes here since ModuleScopes is always
1371 // empty if we're compiling the BMI.
1372 !getASTContext().getCurrentNamedModule()) {
1373 // If we are building a module interface unit, we should have seen the
1374 // module declaration.
1375 //
1376 // FIXME: Make a better guess as to where to put the module declaration.
1377 Diag(Loc: getSourceManager().getLocForStartOfFile(
1378 FID: getSourceManager().getMainFileID()),
1379 DiagID: diag::err_module_declaration_missing);
1380 }
1381
1382 // Now we can decide whether the modules we're building need an initializer.
1383 if (Module *CurrentModule = getCurrentModule();
1384 CurrentModule && CurrentModule->isInterfaceOrPartition()) {
1385 auto DoesModNeedInit = [this](Module *M) {
1386 if (!getASTContext().getModuleInitializers(M).empty())
1387 return true;
1388 for (auto [Exported, _] : M->Exports)
1389 if (Exported->isNamedModuleInterfaceHasInit())
1390 return true;
1391 for (Module *I : M->Imports)
1392 if (I->isNamedModuleInterfaceHasInit())
1393 return true;
1394
1395 return false;
1396 };
1397
1398 CurrentModule->NamedModuleHasInit =
1399 DoesModNeedInit(CurrentModule) ||
1400 llvm::any_of(Range: CurrentModule->submodules(), P: DoesModNeedInit);
1401 }
1402
1403 if (TUKind == TU_ClangModule) {
1404 // If we are building a module, resolve all of the exported declarations
1405 // now.
1406 if (Module *CurrentModule = PP.getCurrentModule()) {
1407 ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
1408
1409 SmallVector<Module *, 2> Stack;
1410 Stack.push_back(Elt: CurrentModule);
1411 while (!Stack.empty()) {
1412 Module *Mod = Stack.pop_back_val();
1413
1414 // Resolve the exported declarations and conflicts.
1415 // FIXME: Actually complain, once we figure out how to teach the
1416 // diagnostic client to deal with complaints in the module map at this
1417 // point.
1418 ModMap.resolveExports(Mod, /*Complain=*/false);
1419 ModMap.resolveUses(Mod, /*Complain=*/false);
1420 ModMap.resolveConflicts(Mod, /*Complain=*/false);
1421
1422 // Queue the submodules, so their exports will also be resolved.
1423 auto SubmodulesRange = Mod->submodules();
1424 Stack.append(in_start: SubmodulesRange.begin(), in_end: SubmodulesRange.end());
1425 }
1426 }
1427
1428 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
1429 // modules when they are built, not every time they are used.
1430 emitAndClearUnusedLocalTypedefWarnings();
1431 }
1432
1433 // C++ standard modules. Diagnose cases where a function is declared inline
1434 // in the module purview but has no definition before the end of the TU or
1435 // the start of a Private Module Fragment (if one is present).
1436 if (!PendingInlineFuncDecls.empty()) {
1437 for (auto *FD : PendingInlineFuncDecls) {
1438 bool DefInPMF = false;
1439 if (auto *FDD = FD->getDefinition()) {
1440 DefInPMF = FDD->getOwningModule()->isPrivateModule();
1441 if (!DefInPMF)
1442 continue;
1443 }
1444 Diag(Loc: FD->getLocation(), DiagID: diag::err_export_inline_not_defined) << DefInPMF;
1445 // If we have a PMF it should be at the end of the ModuleScopes.
1446 if (DefInPMF &&
1447 ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
1448 Diag(Loc: ModuleScopes.back().BeginLoc, DiagID: diag::note_private_module_fragment);
1449 }
1450 }
1451 PendingInlineFuncDecls.clear();
1452 }
1453
1454 // C99 6.9.2p2:
1455 // A declaration of an identifier for an object that has file
1456 // scope without an initializer, and without a storage-class
1457 // specifier or with the storage-class specifier static,
1458 // constitutes a tentative definition. If a translation unit
1459 // contains one or more tentative definitions for an identifier,
1460 // and the translation unit contains no external definition for
1461 // that identifier, then the behavior is exactly as if the
1462 // translation unit contains a file scope declaration of that
1463 // identifier, with the composite type as of the end of the
1464 // translation unit, with an initializer equal to 0.
1465 llvm::SmallPtrSet<VarDecl *, 32> Seen;
1466 for (TentativeDefinitionsType::iterator
1467 T = TentativeDefinitions.begin(source: ExternalSource.get()),
1468 TEnd = TentativeDefinitions.end();
1469 T != TEnd; ++T) {
1470 VarDecl *VD = (*T)->getActingDefinition();
1471
1472 // If the tentative definition was completed, getActingDefinition() returns
1473 // null. If we've already seen this variable before, insert()'s second
1474 // return value is false.
1475 if (!VD || VD->isInvalidDecl() || !Seen.insert(Ptr: VD).second)
1476 continue;
1477
1478 if (const IncompleteArrayType *ArrayT
1479 = Context.getAsIncompleteArrayType(T: VD->getType())) {
1480 // Set the length of the array to 1 (C99 6.9.2p5).
1481 Diag(Loc: VD->getLocation(), DiagID: diag::warn_tentative_incomplete_array);
1482 llvm::APInt One(Context.getTypeSize(T: Context.getSizeType()), true);
1483 QualType T = Context.getConstantArrayType(
1484 EltTy: ArrayT->getElementType(), ArySize: One, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
1485 VD->setType(T);
1486 } else if (RequireCompleteType(Loc: VD->getLocation(), T: VD->getType(),
1487 DiagID: diag::err_tentative_def_incomplete_type))
1488 VD->setInvalidDecl();
1489
1490 // No initialization is performed for a tentative definition.
1491 CheckCompleteVariableDeclaration(VD);
1492
1493 // In C, if the definition is const-qualified and has no initializer, it
1494 // is left uninitialized unless it has static or thread storage duration.
1495 QualType Type = VD->getType();
1496 if (!VD->isInvalidDecl() && !getLangOpts().CPlusPlus &&
1497 Type.isConstQualified() && !VD->getAnyInitializer()) {
1498 unsigned DiagID = diag::warn_default_init_const_unsafe;
1499 if (VD->getStorageDuration() == SD_Static ||
1500 VD->getStorageDuration() == SD_Thread)
1501 DiagID = diag::warn_default_init_const;
1502
1503 bool EmitCppCompat = !Diags.isIgnored(
1504 DiagID: diag::warn_cxx_compat_hack_fake_diagnostic_do_not_emit,
1505 Loc: VD->getLocation());
1506
1507 Diag(Loc: VD->getLocation(), DiagID) << Type << EmitCppCompat;
1508 }
1509
1510 // Notify the consumer that we've completed a tentative definition.
1511 if (!VD->isInvalidDecl())
1512 Consumer.CompleteTentativeDefinition(D: VD);
1513 }
1514
1515 // In incremental mode, tentative definitions belong to the current
1516 // partial translation unit (PTU). Once they have been completed and
1517 // emitted to codegen, drop them to prevent re-emission in future PTUs.
1518 if (PP.isIncrementalProcessingEnabled())
1519 TentativeDefinitions.erase(From: TentativeDefinitions.begin(source: ExternalSource.get()),
1520 To: TentativeDefinitions.end());
1521
1522 for (auto *D : ExternalDeclarations) {
1523 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
1524 continue;
1525
1526 Consumer.CompleteExternalDeclaration(D);
1527 }
1528
1529 // Visit all pending #pragma export.
1530 for (const PendingPragmaInfo &Exported : PendingExportedNames.values()) {
1531 if (!Exported.Used)
1532 Diag(Loc: Exported.NameLoc, DiagID: diag::warn_failed_to_resolve_pragma) << "export";
1533 }
1534
1535 if (LangOpts.HLSL)
1536 HLSL().ActOnEndOfTranslationUnit(TU: getASTContext().getTranslationUnitDecl());
1537 if (LangOpts.OpenACC)
1538 OpenACC().ActOnEndOfTranslationUnit(
1539 TU: getASTContext().getTranslationUnitDecl());
1540
1541 // If there were errors, disable 'unused' warnings since they will mostly be
1542 // noise. Don't warn for a use from a module: either we should warn on all
1543 // file-scope declarations in modules or not at all, but whether the
1544 // declaration is used is immaterial.
1545 if (!Diags.hasErrorOccurred() && TUKind != TU_ClangModule) {
1546 // Output warning for unused file scoped decls.
1547 for (UnusedFileScopedDeclsType::iterator
1548 I = UnusedFileScopedDecls.begin(source: ExternalSource.get()),
1549 E = UnusedFileScopedDecls.end();
1550 I != E; ++I) {
1551 if (ShouldRemoveFromUnused(SemaRef: this, D: *I))
1552 continue;
1553
1554 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Val: *I)) {
1555 const FunctionDecl *DiagD;
1556 if (!FD->hasBody(Definition&: DiagD))
1557 DiagD = FD;
1558 if (DiagD->isDeleted())
1559 continue; // Deleted functions are supposed to be unused.
1560 SourceRange DiagRange = DiagD->getLocation();
1561 if (const ASTTemplateArgumentListInfo *ASTTAL =
1562 DiagD->getTemplateSpecializationArgsAsWritten())
1563 DiagRange.setEnd(ASTTAL->RAngleLoc);
1564 if (DiagD->isReferenced()) {
1565 if (isa<CXXMethodDecl>(Val: DiagD))
1566 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unneeded_member_function)
1567 << DiagD << DiagRange;
1568 else {
1569 if (FD->getStorageClass() == SC_Static &&
1570 !FD->isInlineSpecified() &&
1571 !SourceMgr.isInMainFile(
1572 Loc: SourceMgr.getExpansionLoc(Loc: FD->getLocation())))
1573 Diag(Loc: DiagD->getLocation(),
1574 DiagID: diag::warn_unneeded_static_internal_decl)
1575 << DiagD << DiagRange;
1576 else
1577 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unneeded_internal_decl)
1578 << /*function=*/0 << DiagD << DiagRange;
1579 }
1580 } else if (!FD->isTargetMultiVersion() ||
1581 FD->isTargetMultiVersionDefault()) {
1582 if (FD->getDescribedFunctionTemplate())
1583 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unused_template)
1584 << /*function=*/0 << DiagD << DiagRange;
1585 else
1586 Diag(Loc: DiagD->getLocation(), DiagID: isa<CXXMethodDecl>(Val: DiagD)
1587 ? diag::warn_unused_member_function
1588 : diag::warn_unused_function)
1589 << DiagD << DiagRange;
1590 }
1591 } else {
1592 const VarDecl *DiagD = cast<VarDecl>(Val: *I)->getDefinition();
1593 if (!DiagD)
1594 DiagD = cast<VarDecl>(Val: *I);
1595 SourceRange DiagRange = DiagD->getLocation();
1596 if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(Val: DiagD)) {
1597 if (const ASTTemplateArgumentListInfo *ASTTAL =
1598 VTSD->getTemplateArgsAsWritten())
1599 DiagRange.setEnd(ASTTAL->RAngleLoc);
1600 }
1601 if (DiagD->isReferenced()) {
1602 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unneeded_internal_decl)
1603 << /*variable=*/1 << DiagD << DiagRange;
1604 } else if (DiagD->getDescribedVarTemplate()) {
1605 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unused_template)
1606 << /*variable=*/1 << DiagD << DiagRange;
1607 } else if (DiagD->getType().isConstQualified()) {
1608 const SourceManager &SM = SourceMgr;
1609 if (SM.getMainFileID() != SM.getFileID(SpellingLoc: DiagD->getLocation()) ||
1610 !PP.getLangOpts().IsHeaderFile)
1611 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unused_const_variable)
1612 << DiagD << DiagRange;
1613 } else {
1614 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unused_variable)
1615 << DiagD << DiagRange;
1616 }
1617 }
1618 }
1619
1620 emitAndClearUnusedLocalTypedefWarnings();
1621 }
1622
1623 if (!Diags.isIgnored(DiagID: diag::warn_unused_but_set_global, Loc: SourceLocation())) {
1624 // Diagnose unused-but-set static globals in a deterministic order.
1625 // Not tracking shadowing info for static globals; there's nothing to
1626 // shadow.
1627 struct LocAndDiag {
1628 SourceLocation Loc;
1629 PartialDiagnostic PD;
1630 };
1631 SmallVector<LocAndDiag, 16> DeclDiags;
1632 auto addDiag = [&DeclDiags](SourceLocation Loc, PartialDiagnostic PD) {
1633 DeclDiags.push_back(Elt: LocAndDiag{.Loc: Loc, .PD: std::move(PD)});
1634 };
1635
1636 // For -Wunused-but-set-variable we only care about variables that were
1637 // referenced by the TU end.
1638 for (const auto &Ref : RefsMinusAssignments) {
1639 const VarDecl *VD = Ref.first;
1640 // Only diagnose internal linkage file vars defined in the main file to
1641 // match -Wunused-variable behavior and avoid false positives from
1642 // headers.
1643 if (VD->isInternalLinkageFileVar() && isMainFileLoc(Loc: VD->getLocation()))
1644 DiagnoseUnusedButSetDecl(VD, DiagReceiver: addDiag);
1645 }
1646
1647 llvm::sort(C&: DeclDiags,
1648 Comp: [](const LocAndDiag &LHS, const LocAndDiag &RHS) -> bool {
1649 // Sorting purely for determinism; matches behavior in
1650 // Sema::ActOnPopScope.
1651 return LHS.Loc < RHS.Loc;
1652 });
1653 for (const LocAndDiag &D : DeclDiags)
1654 Diag(Loc: D.Loc, PD: D.PD);
1655 }
1656
1657 if (!Diags.isIgnored(DiagID: diag::warn_unused_private_field, Loc: SourceLocation())) {
1658 // FIXME: Load additional unused private field candidates from the external
1659 // source.
1660 RecordCompleteMap RecordsComplete;
1661 RecordCompleteMap MNCComplete;
1662 for (const NamedDecl *D : UnusedPrivateFields) {
1663 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Val: D->getDeclContext());
1664 if (RD && !RD->isUnion() && !D->hasAttr<UnusedAttr>() &&
1665 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
1666 Diag(Loc: D->getLocation(), DiagID: diag::warn_unused_private_field)
1667 << D->getDeclName();
1668 }
1669 }
1670 }
1671
1672 if (!Diags.isIgnored(DiagID: diag::warn_mismatched_delete_new, Loc: SourceLocation())) {
1673 if (ExternalSource)
1674 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
1675 for (const auto &DeletedFieldInfo : DeleteExprs) {
1676 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
1677 AnalyzeDeleteExprMismatch(Field: DeletedFieldInfo.first, DeleteLoc: DeleteExprLoc.first,
1678 DeleteWasArrayForm: DeleteExprLoc.second);
1679 }
1680 }
1681 }
1682
1683 AnalysisWarnings.IssueWarnings(D: Context.getTranslationUnitDecl());
1684
1685 if (Context.hasAnyFunctionEffects())
1686 performFunctionEffectAnalysis(TU: Context.getTranslationUnitDecl());
1687
1688 // Check we've noticed that we're no longer parsing the initializer for every
1689 // variable. If we miss cases, then at best we have a performance issue and
1690 // at worst a rejects-valid bug.
1691 assert(ParsingInitForAutoVars.empty() &&
1692 "Didn't unmark var as having its initializer parsed");
1693
1694 if (!PP.isIncrementalProcessingEnabled())
1695 TUScope = nullptr;
1696
1697 checkExposure(TU: Context.getTranslationUnitDecl());
1698}
1699
1700
1701//===----------------------------------------------------------------------===//
1702// Helper functions.
1703//===----------------------------------------------------------------------===//
1704
1705DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
1706 DeclContext *DC = CurContext;
1707
1708 while (true) {
1709 if (isa<BlockDecl>(Val: DC) || isa<EnumDecl>(Val: DC) || isa<CapturedDecl>(Val: DC) ||
1710 isa<RequiresExprBodyDecl>(Val: DC)) {
1711 DC = DC->getParent();
1712 } else if (!AllowLambda && isa<CXXMethodDecl>(Val: DC) &&
1713 cast<CXXMethodDecl>(Val: DC)->getOverloadedOperator() == OO_Call &&
1714 cast<CXXRecordDecl>(Val: DC->getParent())->isLambda()) {
1715 DC = DC->getParent()->getParent();
1716 } else break;
1717 }
1718
1719 return DC;
1720}
1721
1722/// getCurFunctionDecl - If inside of a function body, this returns a pointer
1723/// to the function decl for the function being parsed. If we're currently
1724/// in a 'block', this returns the containing context.
1725FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
1726 DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1727 return dyn_cast<FunctionDecl>(Val: DC);
1728}
1729
1730ObjCMethodDecl *Sema::getCurMethodDecl() {
1731 DeclContext *DC = getFunctionLevelDeclContext();
1732 while (isa<RecordDecl>(Val: DC))
1733 DC = DC->getParent();
1734 return dyn_cast<ObjCMethodDecl>(Val: DC);
1735}
1736
1737NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
1738 DeclContext *DC = getFunctionLevelDeclContext();
1739 if (isa<ObjCMethodDecl>(Val: DC) || isa<FunctionDecl>(Val: DC))
1740 return cast<NamedDecl>(Val: DC);
1741 return nullptr;
1742}
1743
1744LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1745 if (getLangOpts().OpenCL)
1746 return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1747 return LangAS::Default;
1748}
1749
void Sema::EmitDiagnostic(unsigned DiagID, const DiagnosticBuilder &DB) {
  // Central funnel for Sema-issued diagnostics: when a SFINAE trap is active,
  // classify the diagnostic (report / substitution failure / access control /
  // suppress) and record or suppress it accordingly; otherwise emit it through
  // the DiagnosticsEngine, followed by a template instantiation backtrace for
  // non-note diagnostics.
  //
  // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
  // and yet we also use the current diag ID on the DiagnosticsEngine. This has
  // been made more painfully obvious by the refactor that introduced this
  // function, but it is possible that the incoming argument can be
  // eliminated. If it truly cannot be (for example, there is some reentrancy
  // issue I am not seeing yet), then there should at least be a clarifying
  // comment somewhere.
  Diagnostic DiagInfo(&Diags, DB);
  if (SFINAETrap *Trap = getSFINAEContext()) {
    // May be null when no template-deduction info is being collected.
    sema::TemplateDeductionInfo *Info = Trap->getDeductionInfo();
    switch (DiagnosticIDs::getDiagnosticSFINAEResponse(DiagID: DiagInfo.getID())) {
    case DiagnosticIDs::SFINAE_Report:
      // We'll report the diagnostic below.
      break;

    case DiagnosticIDs::SFINAE_SubstitutionFailure:
      // Count this failure so that we know that template argument deduction
      // has failed.
      Trap->setErrorOccurred();

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (Info && !Info->hasSFINAEDiagnostic())
        Info->addSFINAEDiagnostic(
            Loc: DiagInfo.getLocation(),
            PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));

      Diags.setLastDiagnosticIgnored(true);
      return;

    case DiagnosticIDs::SFINAE_AccessControl: {
      // Per C++ Core Issue 1170, access control is part of SFINAE.
      // Additionally, the WithAccessChecking flag can be used to temporarily
      // make access control a part of SFINAE for the purposes of checking
      // type traits.
      if (!Trap->withAccessChecking() && !getLangOpts().CPlusPlus11)
        break;

      SourceLocation Loc = DiagInfo.getLocation();

      // Suppress this diagnostic.
      Trap->setErrorOccurred();

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (Info && !Info->hasSFINAEDiagnostic())
        Info->addSFINAEDiagnostic(
            Loc: DiagInfo.getLocation(),
            PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));

      Diags.setLastDiagnosticIgnored(true);

      // Now produce a C++98 compatibility warning.
      Diag(Loc, DiagID: diag::warn_cxx98_compat_sfinae_access_control);

      // The last diagnostic which Sema produced was ignored. Suppress any
      // notes attached to it.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }

    case DiagnosticIDs::SFINAE_Suppress:
      // If the diagnostic is mapped to Ignored at this location, there is
      // nothing to record for the deduction info either.
      if (DiagnosticsEngine::Level Level = getDiagnostics().getDiagnosticLevel(
              DiagID: DiagInfo.getID(), Loc: DiagInfo.getLocation());
          Level == DiagnosticsEngine::Ignored)
        return;
      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information;
      if (Info) {
        Info->addSuppressedDiagnostic(
            Loc: DiagInfo.getLocation(),
            PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
        // Also record the context notes that would have accompanied a
        // non-note diagnostic, so they can be replayed later.
        if (!Diags.getDiagnosticIDs()->isNote(DiagID))
          PrintContextStack(DiagFunc: [Info](SourceLocation Loc, PartialDiagnostic PD) {
            Info->addSuppressedDiagnostic(Loc, PD: std::move(PD));
          });
      }

      // Suppress this diagnostic.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }
  }

  // Copy the diagnostic printing policy over the ASTContext printing policy.
  // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
  Context.setPrintingPolicy(getPrintingPolicy());

  // Emit the diagnostic.
  if (!Diags.EmitDiagnostic(DB))
    return;

  // If this is not a note, and we're in a template instantiation
  // that is different from the last template instantiation where
  // we emitted an error, print a template instantiation
  // backtrace.
  if (!Diags.getDiagnosticIDs()->isNote(DiagID))
    PrintContextStack();
}
1850
1851bool Sema::hasUncompilableErrorOccurred() const {
1852 if (getDiagnostics().hasUncompilableErrorOccurred())
1853 return true;
1854 auto *FD = dyn_cast<FunctionDecl>(Val: CurContext);
1855 if (!FD)
1856 return false;
1857 auto Loc = DeviceDeferredDiags.find(Val: FD);
1858 if (Loc == DeviceDeferredDiags.end())
1859 return false;
1860 for (auto PDAt : Loc->second) {
1861 if (Diags.getDiagnosticIDs()->isDefaultMappingAsError(
1862 DiagID: PDAt.second.getDiagID()))
1863 return true;
1864 }
1865 return false;
1866}
1867
1868// Print notes showing how we can reach FD starting from an a priori
1869// known-callable function. When a function has multiple callers, emit
1870// each call chain separately. The first note in each chain uses
1871// "called by" and subsequent notes use "which is called by".
1872static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
1873 auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(Val: FD);
1874 if (FnIt == S.CUDA().DeviceKnownEmittedFns.end())
1875 return;
1876
1877 for (const auto &CallerInfo : FnIt->second) {
1878 if (S.Diags.hasFatalErrorOccurred())
1879 return;
1880 S.Diags.Report(Loc: CallerInfo.Loc, DiagID: diag::note_called_by) << CallerInfo.FD;
1881 // Walk up the rest of the chain using "which is called by".
1882 auto NextIt = S.CUDA().DeviceKnownEmittedFns.find(Val: CallerInfo.FD);
1883 while (NextIt != S.CUDA().DeviceKnownEmittedFns.end()) {
1884 if (S.Diags.hasFatalErrorOccurred())
1885 return;
1886 const auto &Next = NextIt->second.front();
1887 S.Diags.Report(Loc: Next.Loc, DiagID: diag::note_which_is_called_by) << Next.FD;
1888 NextIt = S.CUDA().DeviceKnownEmittedFns.find(Val: Next.FD);
1889 }
1890 }
1891}
1892
namespace {

/// Helper class that emits deferred diagnostic messages if an entity directly
/// or indirectly using the function that causes the deferred diagnostic
/// messages is known to be emitted.
///
/// During parsing of AST, certain diagnostic messages are recorded as deferred
/// diagnostics since it is unknown whether the functions containing such
/// diagnostics will be emitted. A list of potentially emitted functions and
/// variables that may potentially trigger emission of functions are also
/// recorded. DeferredDiagnosticsEmitter recursively visits used functions
/// by each function to emit deferred diagnostics.
///
/// During the visit, certain OpenMP directives or initializer of variables
/// with certain OpenMP attributes will cause subsequent visiting of any
/// functions enter a state which is called OpenMP device context in this
/// implementation. The state is exited when the directive or initializer is
/// exited. This state can change the emission states of subsequent uses
/// of functions.
///
/// Conceptually the functions or variables to be visited form a use graph
/// where the parent node uses the child node. At any point of the visit,
/// the tree nodes traversed from the tree root to the current node form a use
/// stack. The emission state of the current node depends on two factors:
/// 1. the emission state of the root node
/// 2. whether the current node is in OpenMP device context
/// If the function is decided to be emitted, its contained deferred diagnostics
/// are emitted, together with the information about the use stack.
///
class DeferredDiagnosticsEmitter
    : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
public:
  typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;

  // Whether the function is already in the current use-path.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;

  // The current use-path.
  llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;

  // Whether the visiting of the function has been done. Done[0] is for the
  // case not in OpenMP device context. Done[1] is for the case in OpenMP
  // device context. We need two sets because diagnostics emission may be
  // different depending on whether it is in OpenMP device context.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];

  // Functions that need their deferred diagnostics emitted. Collected
  // during the graph walk and emitted afterwards so that all callers
  // are known when producing call chain notes.
  llvm::SetVector<CanonicalDeclPtr<const FunctionDecl>> FnsToEmit;

  // Emission state of the root node of the current use graph.
  bool ShouldEmitRootNode;

  // Current OpenMP device context level. It is initialized to 0 and each
  // entering of device context increases it by 1 and each exit decreases
  // it by 1. Non-zero value indicates it is currently in device context.
  unsigned InOMPDeviceContext;

  DeferredDiagnosticsEmitter(Sema &S)
      : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}

  // Discarded statements (e.g. the untaken branch of `if constexpr`) are
  // never codegen'ed, so their uses cannot trigger deferred diagnostics.
  bool shouldVisitDiscardedStmt() const { return false; }

  // A target directive places everything visited inside it into OpenMP
  // device context for the duration of the traversal.
  void VisitOMPTargetDirective(OMPTargetDirective *Node) {
    ++InOMPDeviceContext;
    Inherited::VisitOMPTargetDirective(S: Node);
    --InOMPDeviceContext;
  }

  // Route a used declaration: functions go through checkFunc; variables are
  // skipped here (file-scope variable initializers are handled separately
  // via checkVar from checkRecordedDecl).
  void visitUsedDecl(SourceLocation Loc, Decl *D) {
    if (isa<VarDecl>(Val: D))
      return;
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D))
      checkFunc(Loc, FD);
    else
      Inherited::visitUsedDecl(Loc, D);
  }

  // Visitor member and parent dtors called by this dtor.
  void VisitCalledDestructors(CXXDestructorDecl *DD) {
    const CXXRecordDecl *RD = DD->getParent();

    // Visit the dtors of all members
    for (const FieldDecl *FD : RD->fields()) {
      QualType FT = FD->getType();
      if (const auto *ClassDecl = FT->getAsCXXRecordDecl();
          ClassDecl &&
          (ClassDecl->isBeingDefined() || ClassDecl->isCompleteDefinition()))
        if (CXXDestructorDecl *MemberDtor = ClassDecl->getDestructor())
          asImpl().visitUsedDecl(Loc: MemberDtor->getLocation(), D: MemberDtor);
    }

    // Also visit base class dtors
    for (const auto &Base : RD->bases()) {
      QualType BaseType = Base.getType();
      if (const auto *BaseDecl = BaseType->getAsCXXRecordDecl();
          BaseDecl &&
          (BaseDecl->isBeingDefined() || BaseDecl->isCompleteDefinition()))
        if (CXXDestructorDecl *BaseDtor = BaseDecl->getDestructor())
          asImpl().visitUsedDecl(Loc: BaseDtor->getLocation(), D: BaseDtor);
    }
  }

  void VisitDeclStmt(DeclStmt *DS) {
    // Visit dtors called by variables that need destruction
    for (auto *D : DS->decls())
      if (auto *VD = dyn_cast<VarDecl>(Val: D))
        if (VD->isThisDeclarationADefinition() &&
            VD->needsDestruction(Ctx: S.Context)) {
          QualType VT = VD->getType();
          if (const auto *ClassDecl = VT->getAsCXXRecordDecl();
              ClassDecl && (ClassDecl->isBeingDefined() ||
                            ClassDecl->isCompleteDefinition()))
            if (CXXDestructorDecl *Dtor = ClassDecl->getDestructor())
              asImpl().visitUsedDecl(Loc: Dtor->getLocation(), D: Dtor);
        }

    Inherited::VisitDeclStmt(S: DS);
  }

  // Visit a file-scope variable's initializer. If the variable is an OpenMP
  // declare-target variable for the device, the initializer is visited in
  // OpenMP device context.
  void checkVar(VarDecl *VD) {
    assert(VD->isFileVarDecl() &&
           "Should only check file-scope variables");
    if (auto *Init = VD->getInit()) {
      auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
      bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
                             *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
      if (IsDev)
        ++InOMPDeviceContext;
      this->Visit(S: Init);
      if (IsDev)
        --InOMPDeviceContext;
    }
  }

  // Visit a used function: record its caller, mark it for deferred-diagnostic
  // emission when appropriate, and recurse into its body exactly once per
  // device-context state.
  void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
    auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
    FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
    // Skip roots that won't be emitted (outside OpenMP), functions exempt
    // from host/device checking, and recursion back into the current path.
    if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
        S.shouldIgnoreInHostDeviceCheck(Callee: FD) || InUsePath.count(Ptr: FD))
      return;
    // Finalize analysis of OpenMP-specific constructs.
    if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
        (ShouldEmitRootNode || InOMPDeviceContext))
      S.OpenMP().finalizeOpenMPDelayedAnalysis(Caller, Callee: FD, Loc);
    if (Caller) {
      // Record Caller in FD's caller list (deduplicated by canonical decl)
      // so call chain notes can be produced later.
      auto &Callers = S.CUDA().DeviceKnownEmittedFns[FD];
      CanonicalDeclPtr<const FunctionDecl> CanonCaller(Caller);
      if (llvm::none_of(Range&: Callers, P: [CanonCaller](const auto &C) {
            return C.FD == CanonCaller;
          }))
        Callers.push_back(Elt: {.FD: Caller, .Loc: Loc});
    }
    if (ShouldEmitRootNode || InOMPDeviceContext)
      FnsToEmit.insert(X: FD);
    // Do not revisit a function if the function body has been completely
    // visited before.
    if (!Done.insert(Ptr: FD).second)
      return;
    InUsePath.insert(Ptr: FD);
    UsePath.push_back(Elt: FD);
    if (auto *S = FD->getBody()) {
      this->Visit(S);
    }
    if (CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(Val: FD))
      asImpl().VisitCalledDestructors(DD: Dtor);
    UsePath.pop_back();
    InUsePath.erase(Ptr: FD);
  }

  // Entry point for one recorded declaration: a function starts a new use
  // graph rooted at it; a variable has its initializer checked.
  void checkRecordedDecl(Decl *D) {
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
      ShouldEmitRootNode = S.getEmissionStatus(Decl: FD, /*Final=*/true) ==
                           Sema::FunctionEmissionStatus::Emitted;
      checkFunc(Loc: SourceLocation(), FD);
    } else
      checkVar(VD: cast<VarDecl>(Val: D));
  }

  // Emit the deferred diagnostics recorded for FD, followed by call chain
  // notes when any of them is a warning or error.
  void emitDeferredDiags(const FunctionDecl *FD) {
    auto It = S.DeviceDeferredDiags.find(Val: FD);
    if (It == S.DeviceDeferredDiags.end())
      return;
    bool HasWarningOrError = false;
    for (PartialDiagnosticAt &PDAt : It->second) {
      if (S.Diags.hasFatalErrorOccurred())
        return;
      const SourceLocation &Loc = PDAt.first;
      const PartialDiagnostic &PD = PDAt.second;
      HasWarningOrError |=
          S.getDiagnostics().getDiagnosticLevel(DiagID: PD.getDiagID(), Loc) >=
          DiagnosticsEngine::Warning;
      {
        // Scoped so the builder is destroyed (and the diagnostic issued)
        // before the next iteration.
        DiagnosticBuilder Builder(S.Diags.Report(Loc, DiagID: PD.getDiagID()));
        PD.Emit(DB: Builder);
      }
    }
    if (HasWarningOrError)
      emitCallStackNotes(S, FD);
  }

  // Emit diagnostics for every function collected during the graph walk.
  void emitCollectedDiags() {
    for (const auto &FD : FnsToEmit)
      emitDeferredDiags(FD);
  }
};
} // namespace
2100
2101void Sema::emitDeferredDiags() {
2102 if (ExternalSource)
2103 ExternalSource->ReadDeclsToCheckForDeferredDiags(
2104 Decls&: DeclsToCheckForDeferredDiags);
2105
2106 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
2107 DeclsToCheckForDeferredDiags.empty())
2108 return;
2109
2110 DeferredDiagnosticsEmitter DDE(*this);
2111 for (auto *D : DeclsToCheckForDeferredDiags)
2112 DDE.checkRecordedDecl(D);
2113 DDE.emitCollectedDiags();
2114}
2115
2116// In CUDA, there are some constructs which may appear in semantically-valid
2117// code, but trigger errors if we ever generate code for the function in which
2118// they appear. Essentially every construct you're not allowed to use on the
2119// device falls into this category, because you are allowed to use these
2120// constructs in a __host__ __device__ function, but only if that function is
2121// never codegen'ed on the device.
2122//
2123// To handle semantic checking for these constructs, we keep track of the set of
2124// functions we know will be emitted, either because we could tell a priori that
2125// they would be emitted, or because they were transitively called by a
2126// known-emitted function.
2127//
2128// We also keep a partial call graph of which not-known-emitted functions call
2129// which other not-known-emitted functions.
2130//
2131// When we see something which is illegal if the current function is emitted
2132// (usually by way of DiagIfDeviceCode, DiagIfHostCode, or
2133// CheckCall), we first check if the current function is known-emitted. If
2134// so, we immediately output the diagnostic.
2135//
2136// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
2137// until we discover that the function is known-emitted, at which point we take
2138// it out of this map and emit the diagnostic.
2139
2140Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
2141 unsigned DiagID,
2142 const FunctionDecl *Fn,
2143 Sema &S)
2144 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
2145 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
2146 switch (K) {
2147 case K_Nop:
2148 break;
2149 case K_Immediate:
2150 case K_ImmediateWithCallStack:
2151 ImmediateDiag.emplace(
2152 args: ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
2153 break;
2154 case K_Deferred:
2155 assert(Fn && "Must have a function to attach the deferred diag to.");
2156 auto &Diags = S.DeviceDeferredDiags[Fn];
2157 PartialDiagId.emplace(args: Diags.size());
2158 Diags.emplace_back(args&: Loc, args: S.PDiag(DiagID));
2159 break;
2160 }
2161}
2162
2163Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
2164 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
2165 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
2166 PartialDiagId(D.PartialDiagId) {
2167 // Clean the previous diagnostics.
2168 D.ShowCallStack = false;
2169 D.ImmediateDiag.reset();
2170 D.PartialDiagId.reset();
2171}
2172
2173Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
2174 if (ImmediateDiag) {
2175 // Emit our diagnostic and, if it was a warning or error, output a callstack
2176 // if Fn isn't a priori known-emitted.
2177 ImmediateDiag.reset(); // Emit the immediate diag.
2178
2179 if (ShowCallStack) {
2180 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
2181 DiagID, Loc) >= DiagnosticsEngine::Warning;
2182 if (IsWarningOrError)
2183 emitCallStackNotes(S, FD: Fn);
2184 }
2185 } else {
2186 assert((!PartialDiagId || ShowCallStack) &&
2187 "Must always show call stack for deferred diags.");
2188 }
2189}
2190
2191Sema::SemaDiagnosticBuilder
2192Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
2193 FD = FD ? FD : getCurFunctionDecl();
2194 if (LangOpts.OpenMP)
2195 return LangOpts.OpenMPIsTargetDevice
2196 ? OpenMP().diagIfOpenMPDeviceCode(Loc, DiagID, FD)
2197 : OpenMP().diagIfOpenMPHostCode(Loc, DiagID, FD);
2198 if (getLangOpts().CUDA)
2199 return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID)
2200 : CUDA().DiagIfHostCode(Loc, DiagID);
2201
2202 if (getLangOpts().SYCLIsDevice)
2203 return SYCL().DiagIfDeviceCode(Loc, DiagID);
2204
2205 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
2206 FD, *this);
2207}
2208
void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  // Diagnose uses of types the current target cannot support (e.g. __int128,
  // __float128, long double, sizeless SVE/RVV types). For function types,
  // parameter and return types are checked as well. D, if given, is the
  // declaration the type belongs to and is marked invalid on error.
  if (isUnevaluatedContext() || Ty.isNull())
    return;

  // The original idea behind checkTypeSupport function is that unused
  // declarations can be replaced with an array of bytes of the same size during
  // codegen, such replacement doesn't seem to be possible for types without
  // constant byte size like zero length arrays. So, do a deep check for SYCL.
  if (D && LangOpts.SYCLIsDevice) {
    llvm::DenseSet<QualType> Visited;
    SYCL().deepTypeCheckForDevice(UsedAt: Loc, Visited, DeclToCheck: D);
  }

  Decl *C = cast<Decl>(Val: getCurLexicalContext());

  // Memcpy operations for structs containing a member with unsupported type
  // are ok, though.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: C)) {
    if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
        MD->isTrivial())
      return;

    if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(Val: MD))
      if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
        return;
  }

  // Try to associate errors with the lexical context, if that is a function, or
  // the value declaration otherwise.
  const FunctionDecl *FD = isa<FunctionDecl>(Val: C)
                               ? cast<FunctionDecl>(Val: C)
                               : dyn_cast_or_null<FunctionDecl>(Val: D);

  // Checks that only apply when compiling for an offload device (SYCL, CUDA,
  // or OpenMP target); uses targetDiag so diagnostics can be deferred.
  auto CheckDeviceType = [&](QualType Ty) {
    if (Ty->isDependentType())
      return;

    if (Ty->isBitIntType()) {
      if (!Context.getTargetInfo().hasBitIntType()) {
        PartialDiagnostic PD = PDiag(DiagID: diag::err_target_unsupported_type);
        if (D)
          PD << D;
        else
          PD << "expression";
        targetDiag(Loc, PD, FD)
            << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
            << Ty << Context.getTargetInfo().getTriple().str();
      }
      return;
    }

    // Check if we are dealing with two 'long double' but with different
    // semantics.
    bool LongDoubleMismatched = false;
    if (Ty->isRealFloatingType() && Context.getTypeSize(T: Ty) == 128) {
      const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(T: Ty);
      if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasFloat128Type()) ||
          (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasIbm128Type()))
        LongDoubleMismatched = true;
    }

    if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
        (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
        (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
        (Ty->isIntegerType() && Context.getTypeSize(T: Ty) == 128 &&
         !Context.getTargetInfo().hasInt128Type()) ||
        (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
         !LangOpts.CUDAIsDevice) ||
        LongDoubleMismatched) {
      PartialDiagnostic PD = PDiag(DiagID: diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (targetDiag(Loc, PD, FD)
          << true /*show bit size*/
          << static_cast<unsigned>(Context.getTypeSize(T: Ty)) << Ty
          << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(Loc: D->getLocation(), DiagID: diag::note_defined_here, FD) << D;
    }
  };

  // Checks that apply regardless of offloading; runs CheckDeviceType first
  // when compiling for a device.
  auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
    if (LangOpts.SYCLIsDevice ||
        (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
        LangOpts.CUDAIsDevice)
      CheckDeviceType(Ty);

    QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
    const TargetInfo &TI = Context.getTargetInfo();
    if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
      PartialDiagnostic PD = PDiag(DiagID: diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD) << false /*show bit size*/ << 0 << Ty
                        << false /*return*/
                        << TI.getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(Loc: D->getLocation(), DiagID: diag::note_defined_here, FD) << D;
    }

    // Some targets cannot return floating-point values.
    bool IsDouble = UnqualTy == Context.DoubleTy;
    bool IsFloat = UnqualTy == Context.FloatTy;
    if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
      PartialDiagnostic PD = PDiag(DiagID: diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD) << false /*show bit size*/ << 0 << Ty << true /*return*/
                        << TI.getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(Loc: D->getLocation(), DiagID: diag::note_defined_here, FD) << D;
    }

    // RVV sizeless builtin types require the matching target features on the
    // enclosing function.
    if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
      llvm::StringMap<bool> CallerFeatureMap;
      Context.getFunctionFeatureMap(FeatureMap&: CallerFeatureMap, FD);
      RISCV().checkRVVTypeSupport(Ty, Loc, D, FeatureMap: CallerFeatureMap);
    }

    // Don't allow SVE types in functions without a SVE target.
    if (Ty->isSVESizelessBuiltinType() && FD) {
      llvm::StringMap<bool> CallerFeatureMap;
      Context.getFunctionFeatureMap(FeatureMap&: CallerFeatureMap, FD);
      ARM().checkSVETypeSupport(Ty, Loc, FD, FeatureMap: CallerFeatureMap);
    }

    // Fixed-length SVE vectors are tied to a single vscale value; reject them
    // in (or in streaming-compatible) streaming functions when the streaming
    // and non-streaming vscale ranges differ.
    if (auto *VT = Ty->getAs<VectorType>();
        VT && FD &&
        (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
         VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) &&
        (LangOpts.VScaleMin != LangOpts.VScaleStreamingMin ||
         LangOpts.VScaleMax != LangOpts.VScaleStreamingMax)) {
      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true)) {
        Diag(Loc, DiagID: diag::err_sve_fixed_vector_in_streaming_function)
            << Ty << /*Streaming*/ 0;
      } else if (const auto *FTy = FD->getType()->getAs<FunctionProtoType>()) {
        if (FTy->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask) {
          Diag(Loc, DiagID: diag::err_sve_fixed_vector_in_streaming_function)
              << Ty << /*StreamingCompatible*/ 1;
        }
      }
    }
  };

  CheckType(Ty);
  // For function types, also check each parameter type and the return type.
  if (const auto *FPTy = dyn_cast<FunctionProtoType>(Val&: Ty)) {
    for (const auto &ParamTy : FPTy->param_types())
      CheckType(ParamTy);
    CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
  }
  if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Val&: Ty))
    CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
}
2382
2383bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2384 SourceLocation loc = locref;
2385 if (!loc.isMacroID()) return false;
2386
2387 // There's no good way right now to look at the intermediate
2388 // expansions, so just jump to the expansion location.
2389 loc = getSourceManager().getExpansionLoc(Loc: loc);
2390
2391 // If that's written with the name, stop here.
2392 SmallString<16> buffer;
2393 if (getPreprocessor().getSpelling(loc, buffer) == name) {
2394 locref = loc;
2395 return true;
2396 }
2397 return false;
2398}
2399
2400Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2401
2402 if (!Ctx)
2403 return nullptr;
2404
2405 Ctx = Ctx->getPrimaryContext();
2406 for (Scope *S = getCurScope(); S; S = S->getParent()) {
2407 // Ignore scopes that cannot have declarations. This is important for
2408 // out-of-line definitions of static class members.
2409 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2410 if (DeclContext *Entity = S->getEntity())
2411 if (Ctx == Entity->getPrimaryContext())
2412 return S;
2413 }
2414
2415 return nullptr;
2416}
2417
2418/// Enter a new function scope
2419void Sema::PushFunctionScope() {
2420 if (FunctionScopes.empty() && CachedFunctionScope) {
2421 // Use CachedFunctionScope to avoid allocating memory when possible.
2422 CachedFunctionScope->Clear();
2423 FunctionScopes.push_back(Elt: CachedFunctionScope.release());
2424 } else {
2425 FunctionScopes.push_back(Elt: new FunctionScopeInfo(getDiagnostics()));
2426 }
2427 if (LangOpts.OpenMP)
2428 OpenMP().pushOpenMPFunctionRegion();
2429}
2430
2431void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2432 FunctionScopes.push_back(Elt: new BlockScopeInfo(getDiagnostics(),
2433 BlockScope, Block));
2434 CapturingFunctionScopes++;
2435}
2436
2437LambdaScopeInfo *Sema::PushLambdaScope() {
2438 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2439 FunctionScopes.push_back(Elt: LSI);
2440 CapturingFunctionScopes++;
2441 return LSI;
2442}
2443
2444void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2445 if (LambdaScopeInfo *const LSI = getCurLambda()) {
2446 LSI->AutoTemplateParameterDepth = Depth;
2447 return;
2448 }
2449 llvm_unreachable(
2450 "Remove assertion if intentionally called in a non-lambda context.");
2451}
2452
// Check that the type of the VarDecl has an accessible copy constructor and
// resolve its destructor's exception specification.
// This also performs initialization of block variables when they are moved
// to the heap. It uses the same rules as applicable for implicit moves
// according to the C++ standard in effect ([class.copy.elision]p3).
static void checkEscapingByref(VarDecl *VD, Sema &S) {
  QualType T = VD->getType();
  // The initialization below may build expressions; make sure they are
  // treated as potentially evaluated.
  EnterExpressionEvaluationContext scope(
      S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
  SourceLocation Loc = VD->getLocation();
  // Synthesize a reference to the variable to serve as the initializer
  // source for the heap copy.
  Expr *VarRef =
      new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
  ExprResult Result;
  auto IE = InitializedEntity::InitializeBlock(BlockVarLoc: Loc, Type: T);
  if (S.getLangOpts().CPlusPlus23) {
    // In C++23, treat the source as an xvalue (simpler implicit-move rules).
    auto *E = ImplicitCastExpr::Create(Context: S.Context, T, Kind: CK_NoOp, Operand: VarRef, BasePath: nullptr,
                                       Cat: VK_XValue, FPO: FPOptionsOverride());
    Result = S.PerformCopyInitialization(Entity: IE, EqualLoc: SourceLocation(), Init: E);
  } else {
    // Pre-C++23: use the named-return-value move-or-copy machinery.
    Result = S.PerformMoveOrCopyInitialization(
        Entity: IE, NRInfo: Sema::NamedReturnInfo{.Candidate: VD, .S: Sema::NamedReturnInfo::MoveEligible},
        Value: VarRef);
  }

  if (!Result.isInvalid()) {
    // Record the copy-initializer (with any needed cleanups) on the ASTContext
    // so IRGen can emit the block copy helper.
    Result = S.MaybeCreateExprWithCleanups(SubExpr: Result);
    Expr *Init = Result.getAs<Expr>();
    S.Context.setBlockVarCopyInit(VD, CopyExpr: Init, CanThrow: S.canThrow(E: Init));
  }

  // The destructor's exception specification is needed when IRGen generates
  // block copy/destroy functions. Resolve it here.
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
    if (CXXDestructorDecl *DD = RD->getDestructor()) {
      auto *FPT = DD->getType()->castAs<FunctionProtoType>();
      S.ResolveExceptionSpec(Loc, FPT);
    }
}
2491
2492static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2493 // Set the EscapingByref flag of __block variables captured by
2494 // escaping blocks.
2495 for (const BlockDecl *BD : FSI.Blocks) {
2496 for (const BlockDecl::Capture &BC : BD->captures()) {
2497 VarDecl *VD = BC.getVariable();
2498 if (VD->hasAttr<BlocksAttr>()) {
2499 // Nothing to do if this is a __block variable captured by a
2500 // non-escaping block.
2501 if (BD->doesNotEscape())
2502 continue;
2503 VD->setEscapingByref();
2504 }
2505 // Check whether the captured variable is or contains an object of
2506 // non-trivial C union type.
2507 QualType CapType = BC.getVariable()->getType();
2508 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2509 CapType.hasNonTrivialToPrimitiveCopyCUnion())
2510 S.checkNonTrivialCUnion(QT: BC.getVariable()->getType(),
2511 Loc: BD->getCaretLocation(),
2512 UseContext: NonTrivialCUnionContext::BlockCapture,
2513 NonTrivialKind: Sema::NTCUK_Destruct | Sema::NTCUK_Copy);
2514 }
2515 }
2516
2517 for (VarDecl *VD : FSI.ByrefBlockVars) {
2518 // __block variables might require us to capture a copy-initializer.
2519 if (!VD->isEscapingByref())
2520 continue;
2521 // It's currently invalid to ever have a __block variable with an
2522 // array type; should we diagnose that here?
2523 // Regardless, we don't want to ignore array nesting when
2524 // constructing this copy.
2525 if (VD->getType()->isStructureOrClassType())
2526 checkEscapingByref(VD, S);
2527 }
2528}
2529
Sema::PoppedFunctionScopePtr
Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, Decl *D,
                           QualType BlockType) {
  assert(!FunctionScopes.empty() && "mismatched push/pop!");

  // Must run before the scope is popped: it walks the blocks and __block
  // variables recorded in the innermost scope.
  markEscapingByrefs(FSI: *FunctionScopes.back(), S&: *this);

  // Ownership transfers to the caller; the deleter may cache the scope for
  // reuse by a later PushFunctionScope().
  PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
                               PoppedFunctionScopeDeleter(this));

  if (LangOpts.OpenMP)
    OpenMP().popOpenMPFunctionRegion(OldFSI: Scope.get());

  // Issue any analysis-based warnings.
  if (WP && D) {
    inferNoReturnAttr(S&: *this, D);
    AnalysisWarnings.IssueWarnings(P: *WP, fscope: Scope.get(), D, BlockType);
  } else
    // No policy/decl: emit the deferred possibly-unreachable diagnostics
    // unconditionally.
    for (const auto &PUD : Scope->PossiblyUnreachableDiags)
      Diag(Loc: PUD.Loc, PD: PUD.PD);

  return Scope;
}
2553
2554void Sema::PoppedFunctionScopeDeleter::
2555operator()(sema::FunctionScopeInfo *Scope) const {
2556 if (!Scope->isPlainFunction())
2557 Self->CapturingFunctionScopes--;
2558 // Stash the function scope for later reuse if it's for a normal function.
2559 if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2560 Self->CachedFunctionScope.reset(p: Scope);
2561 else
2562 delete Scope;
2563}
2564
2565void Sema::PushCompoundScope(bool IsStmtExpr) {
2566 getCurFunction()->CompoundScopes.push_back(
2567 Elt: CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2568}
2569
2570void Sema::PopCompoundScope() {
2571 FunctionScopeInfo *CurFunction = getCurFunction();
2572 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2573
2574 CurFunction->CompoundScopes.pop_back();
2575}
2576
2577bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2578 return getCurFunction()->hasUnrecoverableErrorOccurred();
2579}
2580
2581void Sema::setFunctionHasBranchIntoScope() {
2582 if (!FunctionScopes.empty())
2583 FunctionScopes.back()->setHasBranchIntoScope();
2584}
2585
2586void Sema::setFunctionHasBranchProtectedScope() {
2587 if (!FunctionScopes.empty())
2588 FunctionScopes.back()->setHasBranchProtectedScope();
2589}
2590
2591void Sema::setFunctionHasIndirectGoto() {
2592 if (!FunctionScopes.empty())
2593 FunctionScopes.back()->setHasIndirectGoto();
2594}
2595
2596void Sema::setFunctionHasMustTail() {
2597 if (!FunctionScopes.empty())
2598 FunctionScopes.back()->setHasMustTail();
2599}
2600
2601BlockScopeInfo *Sema::getCurBlock() {
2602 if (FunctionScopes.empty())
2603 return nullptr;
2604
2605 auto CurBSI = dyn_cast<BlockScopeInfo>(Val: FunctionScopes.back());
2606 if (CurBSI && CurBSI->TheDecl &&
2607 !CurBSI->TheDecl->Encloses(DC: CurContext)) {
2608 // We have switched contexts due to template instantiation.
2609 assert(!CodeSynthesisContexts.empty());
2610 return nullptr;
2611 }
2612
2613 return CurBSI;
2614}
2615
2616FunctionScopeInfo *Sema::getEnclosingFunction() const {
2617 if (FunctionScopes.empty())
2618 return nullptr;
2619
2620 for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2621 if (isa<sema::BlockScopeInfo>(Val: FunctionScopes[e]))
2622 continue;
2623 return FunctionScopes[e];
2624 }
2625 return nullptr;
2626}
2627
2628CapturingScopeInfo *Sema::getEnclosingLambdaOrBlock() const {
2629 for (auto *Scope : llvm::reverse(C: FunctionScopes)) {
2630 if (auto *CSI = dyn_cast<CapturingScopeInfo>(Val: Scope)) {
2631 auto *LSI = dyn_cast<LambdaScopeInfo>(Val: CSI);
2632 if (LSI && LSI->Lambda && !LSI->Lambda->Encloses(DC: CurContext) &&
2633 LSI->AfterParameterList) {
2634 // We have switched contexts due to template instantiation.
2635 // FIXME: We should swap out the FunctionScopes during code synthesis
2636 // so that we don't need to check for this.
2637 assert(!CodeSynthesisContexts.empty());
2638 return nullptr;
2639 }
2640 return CSI;
2641 }
2642 }
2643 return nullptr;
2644}
2645
LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
  if (FunctionScopes.empty())
    return nullptr;

  auto I = FunctionScopes.rbegin();
  // Optionally look through intervening capturing scopes that are not
  // lambdas (e.g. blocks) to find the nearest enclosing lambda scope.
  if (IgnoreNonLambdaCapturingScope) {
    auto E = FunctionScopes.rend();
    while (I != E && isa<CapturingScopeInfo>(Val: *I) && !isa<LambdaScopeInfo>(Val: *I))
      ++I;
    if (I == E)
      return nullptr;
  }
  auto *CurLSI = dyn_cast<LambdaScopeInfo>(Val: *I);
  if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
      !CurLSI->Lambda->Encloses(DC: CurContext) && CurLSI->AfterParameterList) {
    // We have switched contexts due to template instantiation.
    assert(!CodeSynthesisContexts.empty());
    return nullptr;
  }

  return CurLSI;
}
2668
2669// We have a generic lambda if we parsed auto parameters, or we have
2670// an associated template parameter list.
2671LambdaScopeInfo *Sema::getCurGenericLambda() {
2672 if (LambdaScopeInfo *LSI = getCurLambda()) {
2673 return (LSI->TemplateParams.size() ||
2674 LSI->GLTemplateParameterList) ? LSI : nullptr;
2675 }
2676 return nullptr;
2677}
2678
2679
void Sema::ActOnComment(SourceRange Comment) {
  // Comments in system headers are dropped unless explicitly retained.
  if (!LangOpts.RetainCommentsFromSystemHeaders &&
      SourceMgr.isInSystemHeader(Loc: Comment.getBegin()))
    return;
  RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
  if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
    // Offer a fixit replacing the comment's first four characters with the
    // proper trailing-Doxygen marker for its comment kind.
    SourceRange MagicMarkerRange(Comment.getBegin(),
                                 Comment.getBegin().getLocWithOffset(Offset: 3));
    StringRef MagicMarkerText;
    switch (RC.getKind()) {
    case RawComment::RCK_OrdinaryBCPL:
      MagicMarkerText = "///<";
      break;
    case RawComment::RCK_OrdinaryC:
      MagicMarkerText = "/**<";
      break;
    case RawComment::RCK_Invalid:
      // FIXME: are there other scenarios that could produce an invalid
      // raw comment here?
      Diag(Loc: Comment.getBegin(), DiagID: diag::warn_splice_in_doxygen_comment);
      return;
    default:
      llvm_unreachable("if this is an almost Doxygen comment, "
                       "it should be ordinary");
    }
    Diag(Loc: Comment.getBegin(), DiagID: diag::warn_not_a_doxygen_trailing_member_comment) <<
      FixItHint::CreateReplacement(RemoveRange: MagicMarkerRange, Code: MagicMarkerText);
  }
  // Record the comment so it can be attached to declarations later.
  Context.addComment(RC);
}
2710
// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
// Out-of-line definition of the ID declared in the header (used as a unique
// address to identify this class -- NOTE(review): presumably for LLVM-style
// RTTI; confirm against the header).
char ExternalSemaSource::ID;

// Default no-op implementations; sources with real external sema storage
// override these.
void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }

void ExternalSemaSource::ReadKnownNamespaces(
                           SmallVectorImpl<NamespaceDecl *> &Namespaces) {
}

void ExternalSemaSource::ReadUndefinedButUsed(
    llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}

void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
    FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2727
bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                         UnresolvedSetImpl &OverloadSet) {
  // Out-parameters start empty; ZeroArgCallReturnTy remains null unless a
  // unique zero-argument call result type can be determined.
  ZeroArgCallReturnTy = QualType();
  OverloadSet.clear();

  const OverloadExpr *Overloads = nullptr;
  bool IsMemExpr = false;
  if (E.getType() == Context.OverloadTy) {
    OverloadExpr::FindResult FR = OverloadExpr::find(E: &E);

    // Ignore overloads that are pointer-to-member constants.
    if (FR.HasFormOfMemberPointer)
      return false;

    Overloads = FR.Expression;
  } else if (E.getType() == Context.BoundMemberTy) {
    Overloads = dyn_cast<UnresolvedMemberExpr>(Val: E.IgnoreParens());
    IsMemExpr = true;
  }

  bool Ambiguous = false;
  // Whether the zero-arg candidate found so far is a cpu_dispatch /
  // cpu_specific multiversion function (such variants don't make the
  // result ambiguous among themselves).
  bool IsMV = false;

  if (Overloads) {
    for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
         DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
      OverloadSet.addDecl(D: *it);

      // Check whether the function is a non-template, non-member which takes no
      // arguments.
      if (IsMemExpr)
        continue;
      if (const FunctionDecl *OverloadDecl
            = dyn_cast<FunctionDecl>(Val: (*it)->getUnderlyingDecl())) {
        if (OverloadDecl->getMinRequiredArguments() == 0) {
          // A second zero-arg candidate makes the call ambiguous, unless
          // both are multiversion variants.
          if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
              (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
                          OverloadDecl->isCPUSpecificMultiVersion()))) {
            ZeroArgCallReturnTy = QualType();
            Ambiguous = true;
          } else {
            ZeroArgCallReturnTy = OverloadDecl->getReturnType();
            IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
                   OverloadDecl->isCPUSpecificMultiVersion();
          }
        }
      }
    }

    // If it's not a member, use better machinery to try to resolve the call
    if (!IsMemExpr)
      return !ZeroArgCallReturnTy.isNull();
  }

  // Attempt to call the member with no arguments - this will correctly handle
  // member templates with defaults/deduction of template arguments, overloads
  // with default arguments, etc.
  if (IsMemExpr && !E.isTypeDependent()) {
    // Tentative analysis: suppress diagnostics from the trial call build.
    Sema::TentativeAnalysisScope Trap(*this);
    ExprResult R = BuildCallToMemberFunction(S: nullptr, MemExpr: &E, LParenLoc: SourceLocation(), Args: {},
                                             RParenLoc: SourceLocation());
    if (R.isUsable()) {
      ZeroArgCallReturnTy = R.get()->getType();
      return true;
    }
    return false;
  }

  // A direct reference to a function declaration: callable; report its
  // return type only when it can be called with zero arguments.
  if (const auto *DeclRef = dyn_cast<DeclRefExpr>(Val: E.IgnoreParens())) {
    if (const auto *Fun = dyn_cast<FunctionDecl>(Val: DeclRef->getDecl())) {
      if (Fun->getMinRequiredArguments() == 0)
        ZeroArgCallReturnTy = Fun->getReturnType();
      return true;
    }
  }

  // We don't have an expression that's convenient to get a FunctionDecl from,
  // but we can at least check if the type is "function of 0 arguments".
  QualType ExprTy = E.getType();
  const FunctionType *FunTy = nullptr;
  QualType PointeeTy = ExprTy->getPointeeType();
  if (!PointeeTy.isNull())
    FunTy = PointeeTy->getAs<FunctionType>();
  if (!FunTy)
    FunTy = ExprTy->getAs<FunctionType>();

  if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(Val: FunTy)) {
    if (FPT->getNumParams() == 0)
      ZeroArgCallReturnTy = FunTy->getReturnType();
    return true;
  }
  return false;
}
2821
2822/// Give notes for a set of overloads.
2823///
2824/// A companion to tryExprAsCall. In cases when the name that the programmer
2825/// wrote was an overloaded function, we may be able to make some guesses about
2826/// plausible overloads based on their return types; such guesses can be handed
2827/// off to this method to be emitted as notes.
2828///
2829/// \param Overloads - The overloads to note.
2830/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2831/// -fshow-overloads=best, this is the location to attach to the note about too
2832/// many candidates. Typically this will be the location of the original
2833/// ill-formed expression.
2834static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2835 const SourceLocation FinalNoteLoc) {
2836 unsigned ShownOverloads = 0;
2837 unsigned SuppressedOverloads = 0;
2838 for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2839 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2840 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2841 ++SuppressedOverloads;
2842 continue;
2843 }
2844
2845 const NamedDecl *Fn = (*It)->getUnderlyingDecl();
2846 // Don't print overloads for non-default multiversioned functions.
2847 if (const auto *FD = Fn->getAsFunction()) {
2848 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2849 !FD->getAttr<TargetAttr>()->isDefaultVersion())
2850 continue;
2851 if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2852 !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2853 continue;
2854 }
2855 S.Diag(Loc: Fn->getLocation(), DiagID: diag::note_possible_target_of_call);
2856 ++ShownOverloads;
2857 }
2858
2859 S.Diags.overloadCandidatesShown(N: ShownOverloads);
2860
2861 if (SuppressedOverloads)
2862 S.Diag(Loc: FinalNoteLoc, DiagID: diag::note_ovl_too_many_candidates)
2863 << SuppressedOverloads;
2864}
2865
2866static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2867 const UnresolvedSetImpl &Overloads,
2868 bool (*IsPlausibleResult)(QualType)) {
2869 if (!IsPlausibleResult)
2870 return noteOverloads(S, Overloads, FinalNoteLoc: Loc);
2871
2872 UnresolvedSet<2> PlausibleOverloads;
2873 for (OverloadExpr::decls_iterator It = Overloads.begin(),
2874 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2875 const auto *OverloadDecl = cast<FunctionDecl>(Val: *It);
2876 QualType OverloadResultTy = OverloadDecl->getReturnType();
2877 if (IsPlausibleResult(OverloadResultTy))
2878 PlausibleOverloads.addDecl(D: It.getDecl());
2879 }
2880 noteOverloads(S, Overloads: PlausibleOverloads, FinalNoteLoc: Loc);
2881}
2882
2883/// Determine whether the given expression can be called by just
2884/// putting parentheses after it. Notably, expressions with unary
2885/// operators can't be because the unary operator will start parsing
2886/// outside the call.
2887static bool IsCallableWithAppend(const Expr *E) {
2888 E = E->IgnoreImplicit();
2889 return (!isa<CStyleCastExpr>(Val: E) &&
2890 !isa<UnaryOperator>(Val: E) &&
2891 !isa<BinaryOperator>(Val: E) &&
2892 !isa<CXXOperatorCallExpr>(Val: E));
2893}
2894
2895static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2896 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2897 E = UO->getSubExpr();
2898
2899 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(Val: E)) {
2900 if (ULE->getNumDecls() == 0)
2901 return false;
2902
2903 const NamedDecl *ND = *ULE->decls_begin();
2904 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND))
2905 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2906 }
2907 return false;
2908}
2909
bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                                bool ForceComplain,
                                bool (*IsPlausibleResult)(QualType)) {
  SourceLocation Loc = E.get()->getExprLoc();
  SourceRange Range = E.get()->getSourceRange();
  UnresolvedSet<4> Overloads;

  // If this is a SFINAE context, don't try anything that might trigger ADL
  // prematurely.
  if (!isSFINAEContext()) {
    QualType ZeroArgCallTy;
    if (tryExprAsCall(E&: *E.get(), ZeroArgCallReturnTy&: ZeroArgCallTy, OverloadSet&: Overloads) &&
        !ZeroArgCallTy.isNull() &&
        (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
      // At this point, we know E is potentially callable with 0
      // arguments and that it returns something of a reasonable type,
      // so we can emit a fixit and carry on pretending that E was
      // actually a CallExpr.
      SourceLocation ParenInsertionLoc = getLocForEndOfToken(Loc: Range.getEnd());
      bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E: E.get());
      // Only suggest appending "()" when doing so would actually parse as
      // a call.
      Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
                    << (IsCallableWithAppend(E: E.get())
                            ? FixItHint::CreateInsertion(InsertionLoc: ParenInsertionLoc,
                                                         Code: "()")
                            : FixItHint());
      if (!IsMV)
        notePlausibleOverloads(S&: *this, Loc, Overloads, IsPlausibleResult);

      // FIXME: Try this before emitting the fixit, and suppress diagnostics
      // while doing so.
      E = BuildCallExpr(S: nullptr, Fn: E.get(), LParenLoc: Range.getEnd(), ArgExprs: {},
                        RParenLoc: Range.getEnd().getLocWithOffset(Offset: 1));
      return true;
    }
  }
  if (!ForceComplain) return false;

  // Recovery as a call wasn't possible, but the caller demands a diagnostic:
  // emit it and mark the expression erroneous.
  bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E: E.get());
  Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
  if (!IsMV)
    notePlausibleOverloads(S&: *this, Loc, Overloads, IsPlausibleResult);
  E = ExprError();
  return true;
}
2954
2955IdentifierInfo *Sema::getSuperIdentifier() const {
2956 if (!Ident_super)
2957 Ident_super = &Context.Idents.get(Name: "super");
2958 return Ident_super;
2959}
2960
2961void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
2962 CapturedRegionKind K,
2963 unsigned OpenMPCaptureLevel) {
2964 auto *CSI = new CapturedRegionScopeInfo(
2965 getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
2966 (getLangOpts().OpenMP && K == CR_OpenMP)
2967 ? OpenMP().getOpenMPNestingLevel()
2968 : 0,
2969 OpenMPCaptureLevel);
2970 CSI->ReturnType = Context.VoidTy;
2971 FunctionScopes.push_back(Elt: CSI);
2972 CapturingFunctionScopes++;
2973}
2974
2975CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2976 if (FunctionScopes.empty())
2977 return nullptr;
2978
2979 return dyn_cast<CapturedRegionScopeInfo>(Val: FunctionScopes.back());
2980}
2981
// Read-only accessor for the recorded delete-expression mismatches, keyed by
// field. NOTE(review): presumably populated during delete-expression
// analysis (cf. ExternalSemaSource::ReadMismatchingDeleteExpressions) --
// confirm against the declaration of DeleteExprs.
const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
  return DeleteExprs;
}
2986
// Snapshot the current floating-point state (FP features, pragma-stack
// value, FP eval method, and last FP pragma location) so the destructor can
// restore all of it.
Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
    : S(S), OldFPFeaturesState(S.CurFPFeatures),
      OldOverrides(S.FpPragmaStack.CurrentValue),
      OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
      OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2992
// Restore the floating-point state captured at construction time.
Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
  S.CurFPFeatures = OldFPFeaturesState;
  S.FpPragmaStack.CurrentValue = OldOverrides;
  S.PP.setCurrentFPEvalMethod(PragmaLoc: OldFPPragmaLocation, Val: OldEvalMethod);
}
2998
2999bool Sema::isDeclaratorFunctionLike(Declarator &D) {
3000 assert(D.getCXXScopeSpec().isSet() &&
3001 "can only be called for qualified names");
3002
3003 auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
3004 LookupOrdinaryName, forRedeclarationInCurContext());
3005 DeclContext *DC = computeDeclContext(SS: D.getCXXScopeSpec(),
3006 EnteringContext: !D.getDeclSpec().isFriendSpecified());
3007 if (!DC)
3008 return false;
3009
3010 LookupQualifiedName(R&: LR, LookupCtx: DC);
3011 bool Result = llvm::all_of(Range&: LR, P: [](Decl *Dcl) {
3012 if (NamedDecl *ND = dyn_cast<NamedDecl>(Val: Dcl)) {
3013 ND = ND->getUnderlyingDecl();
3014 return isa<FunctionDecl>(Val: ND) || isa<FunctionTemplateDecl>(Val: ND) ||
3015 isa<UsingDecl>(Val: ND);
3016 }
3017 return false;
3018 });
3019 return Result;
3020}
3021
3022Attr *Sema::CreateAnnotationAttr(const AttributeCommonInfo &CI, StringRef Annot,
3023 MutableArrayRef<Expr *> Args) {
3024
3025 auto *A = AnnotateAttr::Create(Ctx&: Context, Annotation: Annot, Args: Args.data(), ArgsSize: Args.size(), CommonInfo: CI);
3026 if (!ConstantFoldAttrArgs(
3027 CI, Args: MutableArrayRef<Expr *>(A->args_begin(), A->args_end()))) {
3028 return nullptr;
3029 }
3030 return A;
3031}
3032
3033Attr *Sema::CreateAnnotationAttr(const ParsedAttr &AL) {
3034 // Make sure that there is a string literal as the annotation's first
3035 // argument.
3036 StringRef Str;
3037 if (!checkStringLiteralArgumentAttr(Attr: AL, ArgNum: 0, Str))
3038 return nullptr;
3039
3040 llvm::SmallVector<Expr *, 4> Args;
3041 Args.reserve(N: AL.getNumArgs() - 1);
3042 for (unsigned Idx = 1; Idx < AL.getNumArgs(); Idx++) {
3043 assert(!AL.isArgIdent(Idx));
3044 Args.push_back(Elt: AL.getArgAsExpr(Arg: Idx));
3045 }
3046
3047 return CreateAnnotationAttr(CI: AL, Annot: Str, Args);
3048}
3049