1//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the actions class which performs semantic analysis and
10// builds an AST out of a parse stream.
11//
12//===----------------------------------------------------------------------===//
13
14#include "UsedDeclVisitor.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/ASTDiagnostic.h"
17#include "clang/AST/Decl.h"
18#include "clang/AST/DeclCXX.h"
19#include "clang/AST/DeclFriend.h"
20#include "clang/AST/DeclObjC.h"
21#include "clang/AST/Expr.h"
22#include "clang/AST/ExprCXX.h"
23#include "clang/AST/PrettyDeclStackTrace.h"
24#include "clang/AST/StmtCXX.h"
25#include "clang/AST/TypeOrdering.h"
26#include "clang/Basic/DarwinSDKInfo.h"
27#include "clang/Basic/DiagnosticOptions.h"
28#include "clang/Basic/PartialDiagnostic.h"
29#include "clang/Basic/SourceManager.h"
30#include "clang/Basic/TargetInfo.h"
31#include "clang/Lex/HeaderSearch.h"
32#include "clang/Lex/HeaderSearchOptions.h"
33#include "clang/Lex/Preprocessor.h"
34#include "clang/Sema/CXXFieldCollector.h"
35#include "clang/Sema/EnterExpressionEvaluationContext.h"
36#include "clang/Sema/ExternalSemaSource.h"
37#include "clang/Sema/Initialization.h"
38#include "clang/Sema/MultiplexExternalSemaSource.h"
39#include "clang/Sema/ObjCMethodList.h"
40#include "clang/Sema/RISCVIntrinsicManager.h"
41#include "clang/Sema/Scope.h"
42#include "clang/Sema/ScopeInfo.h"
43#include "clang/Sema/SemaAMDGPU.h"
44#include "clang/Sema/SemaARM.h"
45#include "clang/Sema/SemaAVR.h"
46#include "clang/Sema/SemaBPF.h"
47#include "clang/Sema/SemaCUDA.h"
48#include "clang/Sema/SemaCodeCompletion.h"
49#include "clang/Sema/SemaConsumer.h"
50#include "clang/Sema/SemaDirectX.h"
51#include "clang/Sema/SemaHLSL.h"
52#include "clang/Sema/SemaHexagon.h"
53#include "clang/Sema/SemaLoongArch.h"
54#include "clang/Sema/SemaM68k.h"
55#include "clang/Sema/SemaMIPS.h"
56#include "clang/Sema/SemaMSP430.h"
57#include "clang/Sema/SemaNVPTX.h"
58#include "clang/Sema/SemaObjC.h"
59#include "clang/Sema/SemaOpenACC.h"
60#include "clang/Sema/SemaOpenCL.h"
61#include "clang/Sema/SemaOpenMP.h"
62#include "clang/Sema/SemaPPC.h"
63#include "clang/Sema/SemaPseudoObject.h"
64#include "clang/Sema/SemaRISCV.h"
65#include "clang/Sema/SemaSPIRV.h"
66#include "clang/Sema/SemaSYCL.h"
67#include "clang/Sema/SemaSwift.h"
68#include "clang/Sema/SemaSystemZ.h"
69#include "clang/Sema/SemaWasm.h"
70#include "clang/Sema/SemaX86.h"
71#include "clang/Sema/TemplateDeduction.h"
72#include "clang/Sema/TemplateInstCallback.h"
73#include "clang/Sema/TypoCorrection.h"
74#include "llvm/ADT/DenseMap.h"
75#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/SmallPtrSet.h"
77#include "llvm/Support/TimeProfiler.h"
78#include <optional>
79
80using namespace clang;
81using namespace sema;
82
83SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
84 return Lexer::getLocForEndOfToken(Loc, Offset, SM: SourceMgr, LangOpts);
85}
86
87SourceRange
88Sema::getRangeForNextToken(SourceLocation Loc, bool IncludeMacros,
89 bool IncludeComments,
90 std::optional<tok::TokenKind> ExpectedToken) {
91 if (!Loc.isValid())
92 return SourceRange();
93 std::optional<Token> NextToken =
94 Lexer::findNextToken(Loc, SM: SourceMgr, LangOpts, IncludeComments);
95 if (!NextToken)
96 return SourceRange();
97 if (ExpectedToken && NextToken->getKind() != *ExpectedToken)
98 return SourceRange();
99 SourceLocation TokenStart = NextToken->getLocation();
100 SourceLocation TokenEnd = NextToken->getLastLoc();
101 if (!TokenStart.isValid() || !TokenEnd.isValid())
102 return SourceRange();
103 if (!IncludeMacros && (TokenStart.isMacroID() || TokenEnd.isMacroID()))
104 return SourceRange();
105
106 return SourceRange(TokenStart, TokenEnd);
107}
108
109ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
110
111DarwinSDKInfo *
112Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
113 StringRef Platform) {
114 auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
115 if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
116 Diag(Loc, DiagID: diag::warn_missing_sdksettings_for_availability_checking)
117 << Platform;
118 WarnedDarwinSDKInfoMissing = true;
119 }
120 return SDKInfo;
121}
122
123DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
124 if (CachedDarwinSDKInfo)
125 return CachedDarwinSDKInfo->get();
126 auto SDKInfo = parseDarwinSDKInfo(
127 VFS&: PP.getFileManager().getVirtualFileSystem(),
128 SDKRootPath: PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
129 if (SDKInfo && *SDKInfo) {
130 CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(args: std::move(**SDKInfo));
131 return CachedDarwinSDKInfo->get();
132 }
133 if (!SDKInfo)
134 llvm::consumeError(Err: SDKInfo.takeError());
135 CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
136 return nullptr;
137}
138
139IdentifierInfo *Sema::InventAbbreviatedTemplateParameterTypeName(
140 const IdentifierInfo *ParamName, unsigned int Index) {
141 std::string InventedName;
142 llvm::raw_string_ostream OS(InventedName);
143
144 if (!ParamName)
145 OS << "auto:" << Index + 1;
146 else
147 OS << ParamName->getName() << ":auto";
148
149 return &Context.Idents.get(Name: OS.str());
150}
151
152PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
153 const Preprocessor &PP) {
154 PrintingPolicy Policy = Context.getPrintingPolicy();
155 // In diagnostics, we print _Bool as bool if the latter is defined as the
156 // former.
157 Policy.Bool = Context.getLangOpts().Bool;
158 if (!Policy.Bool) {
159 if (const MacroInfo *BoolMacro = PP.getMacroInfo(II: Context.getBoolName())) {
160 Policy.Bool = BoolMacro->isObjectLike() &&
161 BoolMacro->getNumTokens() == 1 &&
162 BoolMacro->getReplacementToken(Tok: 0).is(K: tok::kw__Bool);
163 }
164 }
165
166 // Shorten the data output if needed
167 Policy.EntireContentsOfLargeArray = false;
168
169 return Policy;
170}
171
172void Sema::ActOnTranslationUnitScope(Scope *S) {
173 TUScope = S;
174 PushDeclContext(S, DC: Context.getTranslationUnitDecl());
175}
176
177namespace clang {
178namespace sema {
179
180class SemaPPCallbacks : public PPCallbacks {
181 Sema *S = nullptr;
182 llvm::SmallVector<SourceLocation, 8> IncludeStack;
183 llvm::SmallVector<llvm::TimeTraceProfilerEntry *, 8> ProfilerStack;
184
185public:
186 void set(Sema &S) { this->S = &S; }
187
188 void reset() { S = nullptr; }
189
190 void FileChanged(SourceLocation Loc, FileChangeReason Reason,
191 SrcMgr::CharacteristicKind FileType,
192 FileID PrevFID) override {
193 if (!S)
194 return;
195 switch (Reason) {
196 case EnterFile: {
197 SourceManager &SM = S->getSourceManager();
198 SourceLocation IncludeLoc = SM.getIncludeLoc(FID: SM.getFileID(SpellingLoc: Loc));
199 if (IncludeLoc.isValid()) {
200 if (llvm::timeTraceProfilerEnabled()) {
201 OptionalFileEntryRef FE = SM.getFileEntryRefForID(FID: SM.getFileID(SpellingLoc: Loc));
202 ProfilerStack.push_back(Elt: llvm::timeTraceAsyncProfilerBegin(
203 Name: "Source", Detail: FE ? FE->getName() : StringRef("<unknown>")));
204 }
205
206 IncludeStack.push_back(Elt: IncludeLoc);
207 S->DiagnoseNonDefaultPragmaAlignPack(
208 Kind: Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
209 IncludeLoc);
210 }
211 break;
212 }
213 case ExitFile:
214 if (!IncludeStack.empty()) {
215 if (llvm::timeTraceProfilerEnabled())
216 llvm::timeTraceProfilerEnd(E: ProfilerStack.pop_back_val());
217
218 S->DiagnoseNonDefaultPragmaAlignPack(
219 Kind: Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
220 IncludeLoc: IncludeStack.pop_back_val());
221 }
222 break;
223 default:
224 break;
225 }
226 }
227 void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
228 diag::Severity Mapping, StringRef Str) override {
229 // If one of the analysis-based diagnostics was enabled while processing
230 // a function, we want to note it in the analysis-based warnings so they
231 // can be run at the end of the function body even if the analysis warnings
232 // are disabled at that point.
233 SmallVector<diag::kind, 256> GroupDiags;
234 diag::Flavor Flavor =
235 Str[1] == 'W' ? diag::Flavor::WarningOrError : diag::Flavor::Remark;
236 StringRef Group = Str.substr(Start: 2);
237
238 if (S->PP.getDiagnostics().getDiagnosticIDs()->getDiagnosticsInGroup(
239 Flavor, Group, Diags&: GroupDiags))
240 return;
241
242 for (diag::kind K : GroupDiags) {
243 // Note: the cases in this switch should be kept in sync with the
244 // diagnostics in AnalysisBasedWarnings::getPolicyInEffectAt().
245 AnalysisBasedWarnings::Policy &Override =
246 S->AnalysisWarnings.getPolicyOverrides();
247 switch (K) {
248 default: break;
249 case diag::warn_unreachable:
250 case diag::warn_unreachable_break:
251 case diag::warn_unreachable_return:
252 case diag::warn_unreachable_loop_increment:
253 Override.enableCheckUnreachable = true;
254 break;
255 case diag::warn_double_lock:
256 Override.enableThreadSafetyAnalysis = true;
257 break;
258 case diag::warn_use_in_invalid_state:
259 Override.enableConsumedAnalysis = true;
260 break;
261 }
262 }
263 }
264};
265
266} // end namespace sema
267} // end namespace clang
268
// Out-of-line definitions for ODR-used static constexpr data members
// declared in Sema.h (required by pre-C++17 rules the codebase still
// honors; without them some builds fail to link).
const unsigned Sema::MaxAlignmentExponent;
const uint64_t Sema::MaximumAlignment;
271
/// Construct a Sema. The initializer list value-initializes the bookkeeping
/// members and eagerly allocates the per-target / per-language Sema helper
/// objects (SemaARM, SemaObjC, SemaOpenMP, ...); the body wires Sema into
/// the preprocessor, diagnostics engine, and OpenMP machinery.
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
           TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
    : SemaBase(*this), CollectStats(false), TUKind(TUKind),
      CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
      Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
      SourceMgr(PP.getSourceManager()), APINotes(SourceMgr, LangOpts),
      AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
      LateTemplateParser(nullptr), OpaqueParser(nullptr), CurContext(nullptr),
      ExternalSource(nullptr), StackHandler(Diags), CurScope(nullptr),
      Ident_super(nullptr), AMDGPUPtr(std::make_unique<SemaAMDGPU>(args&: *this)),
      ARMPtr(std::make_unique<SemaARM>(args&: *this)),
      AVRPtr(std::make_unique<SemaAVR>(args&: *this)),
      BPFPtr(std::make_unique<SemaBPF>(args&: *this)),
      CodeCompletionPtr(
          std::make_unique<SemaCodeCompletion>(args&: *this, args&: CodeCompleter)),
      CUDAPtr(std::make_unique<SemaCUDA>(args&: *this)),
      DirectXPtr(std::make_unique<SemaDirectX>(args&: *this)),
      HLSLPtr(std::make_unique<SemaHLSL>(args&: *this)),
      HexagonPtr(std::make_unique<SemaHexagon>(args&: *this)),
      LoongArchPtr(std::make_unique<SemaLoongArch>(args&: *this)),
      M68kPtr(std::make_unique<SemaM68k>(args&: *this)),
      MIPSPtr(std::make_unique<SemaMIPS>(args&: *this)),
      MSP430Ptr(std::make_unique<SemaMSP430>(args&: *this)),
      NVPTXPtr(std::make_unique<SemaNVPTX>(args&: *this)),
      ObjCPtr(std::make_unique<SemaObjC>(args&: *this)),
      OpenACCPtr(std::make_unique<SemaOpenACC>(args&: *this)),
      OpenCLPtr(std::make_unique<SemaOpenCL>(args&: *this)),
      OpenMPPtr(std::make_unique<SemaOpenMP>(args&: *this)),
      PPCPtr(std::make_unique<SemaPPC>(args&: *this)),
      PseudoObjectPtr(std::make_unique<SemaPseudoObject>(args&: *this)),
      RISCVPtr(std::make_unique<SemaRISCV>(args&: *this)),
      SPIRVPtr(std::make_unique<SemaSPIRV>(args&: *this)),
      SYCLPtr(std::make_unique<SemaSYCL>(args&: *this)),
      SwiftPtr(std::make_unique<SemaSwift>(args&: *this)),
      SystemZPtr(std::make_unique<SemaSystemZ>(args&: *this)),
      WasmPtr(std::make_unique<SemaWasm>(args&: *this)),
      X86Ptr(std::make_unique<SemaX86>(args&: *this)),
      MSPointerToMemberRepresentationMethod(
          LangOpts.getMSPointerToMemberRepresentationMethod()),
      MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()),
      AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
      DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
      CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
      FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
      VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
      StdCoroutineTraitsCache(nullptr), IdResolver(pp),
      OriginalLexicalContext(nullptr), StdInitializerList(nullptr),
      StdTypeIdentity(nullptr),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      StdSourceLocationImplDecl(nullptr), CXXTypeInfoDecl(nullptr),
      GlobalNewDeleteDeclared(false), DisableTypoCorrection(false),
      TyposCorrected(0), IsBuildingRecoveryCallExpr(false),
      CurrentInstantiationScope(nullptr), NonInstantiationEntries(0),
      ArgPackSubstIndex(std::nullopt), SatisfactionCache(Context) {
  // The preprocessor must agree with us about the kind of translation unit.
  assert(pp.TUKind == TUKind);
  TUScope = nullptr;

  LoadedExternalKnownNamespaces = false;
  // Clear the NSNumber literal method cache; populated lazily for ObjC.
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    ObjC().NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    ObjC().NSAPIObj.reset(p: new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(p: new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(Fn: &FormatASTNodeDiagnosticArgument, Cookie: &Context);

  // This evaluation context exists to ensure that there's always at least one
  // valid evaluation context available. It is never removed from the
  // evaluation stack.
  ExprEvalContexts.emplace_back(
      Args: ExpressionEvaluationContext::PotentiallyEvaluated, Args: 0, Args: CleanupInfo{},
      Args: nullptr, Args: ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  OpenMP().InitDataSharingAttributesStack();

  // Register our PPCallbacks with the preprocessor (which takes ownership)
  // and keep a raw handle so ~Sema can detach via reset().
  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(C: std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);

  CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
}
361
// Anchor Sema's type info to this TU (key-function idiom: gives the vtable
// and type info a single home instead of emitting them in every user).
void Sema::anchor() {}
364
365void Sema::addImplicitTypedef(StringRef Name, QualType T) {
366 DeclarationName DN = &Context.Idents.get(Name);
367 if (IdResolver.begin(Name: DN) == IdResolver.end())
368 PushOnScopeChains(D: Context.buildImplicitTypedef(T, Name), S: TUScope);
369}
370
/// Perform delayed initialization: notify the consumer and any external
/// source that Sema exists, then pre-declare the builtin/predefined names
/// (va_list, 128-bit integers, ObjC types, OpenCL types, target-specific
/// types) at translation-unit scope.
void Sema::Initialize() {
  // Create BuiltinVaListDecl *before* ExternalSemaSource::InitializeSema(this)
  // because during initialization ASTReader can emit globals that require
  // name mangling. And the name mangling uses BuiltinVaListDecl.
  if (Context.getTargetInfo().hasBuiltinMSVaList())
    (void)Context.getBuiltinMSVaListDecl();
  (void)Context.getBuiltinVaListDecl();

  // Let a SemaConsumer (if that is what our consumer is) know about us.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Val: &Consumer))
    SC->InitializeSema(S&: *this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
      = dyn_cast_or_null<ExternalSemaSource>(Val: Context.getExternalSource()))
    ExternalSema->InitializeSema(S&: *this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo(Name: "__va_list_tag");

  // Everything below populates the translation-unit scope; nothing to do
  // without one.
  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types are unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get(Name: "__int128_t");
    if (IdResolver.begin(Name: Int128) == IdResolver.end())
      PushOnScopeChains(D: Context.getInt128Decl(), S: TUScope);

    DeclarationName UInt128 = &Context.Idents.get(Name: "__uint128_t");
    if (IdResolver.begin(Name: UInt128) == IdResolver.end())
      PushOnScopeChains(D: Context.getUInt128Decl(), S: TUScope);
  }


  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get(Name: "SEL");
    if (IdResolver.begin(Name: SEL) == IdResolver.end())
      PushOnScopeChains(D: Context.getObjCSelDecl(), S: TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get(Name: "id");
    if (IdResolver.begin(Name: Id) == IdResolver.end())
      PushOnScopeChains(D: Context.getObjCIdDecl(), S: TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get(Name: "Class");
    if (IdResolver.begin(Name: Class) == IdResolver.end())
      PushOnScopeChains(D: Context.getObjCClassDecl(), S: TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get(Name: "Protocol");
    if (IdResolver.begin(Name: Protocol) == IdResolver.end())
      PushOnScopeChains(D: Context.getObjCProtocolDecl(), S: TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get(Name: "__NSConstantString");
  if (IdResolver.begin(Name: ConstantString) == IdResolver.end())
    PushOnScopeChains(D: Context.getCFConstantStringDecl(), S: TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(Name: &Context.Idents.get(Name: "type_info")) == IdResolver.end())
      PushOnScopeChains(D: Context.getMSTypeInfoTagDecl(), S: TUScope);

    addImplicitTypedef(Name: "size_t", T: Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        FeaturesMap: Context.getTargetInfo().getSupportedOpenCLOpts(), Opts: getLangOpts());
    addImplicitTypedef(Name: "sampler_t", T: Context.OCLSamplerTy);
    addImplicitTypedef(Name: "event_t", T: Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef(Name: "clk_event_t", T: Context.OCLClkEventTy);
        addImplicitTypedef(Name: "queue_t", T: Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef(Name: "reserve_id_t", T: Context.OCLReserveIDTy);
      addImplicitTypedef(Name: "atomic_int", T: Context.getAtomicType(T: Context.IntTy));
      addImplicitTypedef(Name: "atomic_uint",
                         T: Context.getAtomicType(T: Context.UnsignedIntTy));
      addImplicitTypedef(Name: "atomic_float",
                         T: Context.getAtomicType(T: Context.FloatTy));
      // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as
      // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide.
      addImplicitTypedef(Name: "atomic_flag", T: Context.getAtomicType(T: Context.IntTy));


      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.

      auto AddPointerSizeDependentTypes = [&]() {
        auto AtomicSizeT = Context.getAtomicType(T: Context.getSizeType());
        auto AtomicIntPtrT = Context.getAtomicType(T: Context.getIntPtrType());
        auto AtomicUIntPtrT = Context.getAtomicType(T: Context.getUIntPtrType());
        auto AtomicPtrDiffT =
            Context.getAtomicType(T: Context.getPointerDiffType());
        addImplicitTypedef(Name: "atomic_size_t", T: AtomicSizeT);
        addImplicitTypedef(Name: "atomic_intptr_t", T: AtomicIntPtrT);
        addImplicitTypedef(Name: "atomic_uintptr_t", T: AtomicUIntPtrT);
        addImplicitTypedef(Name: "atomic_ptrdiff_t", T: AtomicPtrDiffT);
      };

      if (Context.getTypeSize(T: Context.getSizeType()) == 32) {
        AddPointerSizeDependentTypes();
      }

      if (getOpenCLOptions().isSupported(Ext: "cl_khr_fp16", LO: getLangOpts())) {
        auto AtomicHalfT = Context.getAtomicType(T: Context.HalfTy);
        addImplicitTypedef(Name: "atomic_half", T: AtomicHalfT);
      }

      std::vector<QualType> Atomic64BitTypes;
      if (getOpenCLOptions().isSupported(Ext: "cl_khr_int64_base_atomics",
                                         LO: getLangOpts()) &&
          getOpenCLOptions().isSupported(Ext: "cl_khr_int64_extended_atomics",
                                         LO: getLangOpts())) {
        if (getOpenCLOptions().isSupported(Ext: "cl_khr_fp64", LO: getLangOpts())) {
          auto AtomicDoubleT = Context.getAtomicType(T: Context.DoubleTy);
          addImplicitTypedef(Name: "atomic_double", T: AtomicDoubleT);
          Atomic64BitTypes.push_back(x: AtomicDoubleT);
        }
        auto AtomicLongT = Context.getAtomicType(T: Context.LongTy);
        auto AtomicULongT = Context.getAtomicType(T: Context.UnsignedLongTy);
        addImplicitTypedef(Name: "atomic_long", T: AtomicLongT);
        addImplicitTypedef(Name: "atomic_ulong", T: AtomicULongT);


        if (Context.getTypeSize(T: Context.getSizeType()) == 64) {
          AddPointerSizeDependentTypes();
        }
      }
    }

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \
    addImplicitTypedef(#ExtType, Context.Id##Ty); \
  }
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Context.getTargetInfo().hasAArch64ACLETypes() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(#Name, Context.SingletonId);
#define NEON_VECTOR_TYPE(Name, BaseType, ElBits, NumEls, VectorKind) \
  addImplicitTypedef( \
      #Name, Context.getVectorType(Context.BaseType, NumEls, VectorKind));
#include "clang/Basic/AArch64ACLETypes.def"
  }

  if (Context.getTargetInfo().getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Context.getTargetInfo().getTriple().isWasm() &&
      Context.getTargetInfo().hasFeature(Feature: "reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  if (Context.getTargetInfo().getTriple().isAMDGPU() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->getTriple().isAMDGPU())) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AMDGPUTypes.def"
  }

  if (Context.getTargetInfo().hasBuiltinMSVaList()) {
    DeclarationName MSVaList = &Context.Idents.get(Name: "__builtin_ms_va_list");
    if (IdResolver.begin(Name: MSVaList) == IdResolver.end())
      PushOnScopeChains(D: Context.getBuiltinMSVaListDecl(), S: TUScope);
  }

  DeclarationName BuiltinVaList = &Context.Idents.get(Name: "__builtin_va_list");
  if (IdResolver.begin(Name: BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(D: Context.getBuiltinVaListDecl(), S: TUScope);
}
587
/// Tear down Sema: free owned per-function/per-pragma state, detach from the
/// consumer, the external source, and the preprocessor-owned callbacks.
Sema::~Sema() {
  assert(InstantiatingSpecializations.empty() &&
         "failed to clean up an InstantiatingTemplate?");

  // Free the #pragma visibility context, if one is still active.
  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Val: &Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
      = dyn_cast_or_null<ExternalSemaSource>(Val: Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // Delete cached satisfactions. Pointers are collected first so the
  // deletion does not interfere with iterating the cache itself.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(n: SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(x: &Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(Cache: ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  OpenMP().DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}
624
625void Sema::runWithSufficientStackSpace(SourceLocation Loc,
626 llvm::function_ref<void()> Fn) {
627 StackHandler.runWithSufficientStackSpace(Loc, Fn);
628}
629
630bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
631 UnavailableAttr::ImplicitReason reason) {
632 // If we're not in a function, it's an error.
633 FunctionDecl *fn = dyn_cast<FunctionDecl>(Val: CurContext);
634 if (!fn) return false;
635
636 // If we're in template instantiation, it's an error.
637 if (inTemplateInstantiation())
638 return false;
639
640 // If that function's not in a system header, it's an error.
641 if (!Context.getSourceManager().isInSystemHeader(Loc: loc))
642 return false;
643
644 // If the function is already unavailable, it's not an error.
645 if (fn->hasAttr<UnavailableAttr>()) return true;
646
647 fn->addAttr(A: UnavailableAttr::CreateImplicit(Ctx&: Context, Message: "", ImplicitReason: reason, Range: loc));
648 return true;
649}
650
651ASTMutationListener *Sema::getASTMutationListener() const {
652 return getASTConsumer().GetASTMutationListener();
653}
654
655void Sema::addExternalSource(IntrusiveRefCntPtr<ExternalSemaSource> E) {
656 assert(E && "Cannot use with NULL ptr");
657
658 if (!ExternalSource) {
659 ExternalSource = std::move(E);
660 return;
661 }
662
663 if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(Val: ExternalSource.get()))
664 Ex->AddSource(Source: std::move(E));
665 else
666 ExternalSource = llvm::makeIntrusiveRefCnt<MultiplexExternalSemaSource>(
667 A&: ExternalSource, A: std::move(E));
668}
669
670void Sema::PrintStats() const {
671 llvm::errs() << "\n*** Semantic Analysis Stats:\n";
672 if (SFINAETrap *Trap = getSFINAEContext())
673 llvm::errs() << int(Trap->hasErrorOccurred())
674 << " SFINAE diagnostics trapped.\n";
675
676 BumpAlloc.PrintStats();
677 AnalysisWarnings.PrintStats();
678}
679
680void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
681 QualType SrcType,
682 SourceLocation Loc) {
683 std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
684 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
685 *ExprNullability != NullabilityKind::NullableResult))
686 return;
687
688 std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
689 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
690 return;
691
692 Diag(Loc, DiagID: diag::warn_nullability_lost) << SrcType << DstType;
693}
694
695// Generate diagnostics when adding or removing effects in a type conversion.
696void Sema::diagnoseFunctionEffectConversion(QualType DstType, QualType SrcType,
697 SourceLocation Loc) {
698 const auto SrcFX = FunctionEffectsRef::get(QT: SrcType);
699 const auto DstFX = FunctionEffectsRef::get(QT: DstType);
700 if (SrcFX != DstFX) {
701 for (const auto &Diff : FunctionEffectDiffVector(SrcFX, DstFX)) {
702 if (Diff.shouldDiagnoseConversion(SrcType, SrcFX, DstType, DstFX))
703 Diag(Loc, DiagID: diag::warn_invalid_add_func_effects) << Diff.effectName();
704 }
705 }
706}
707
708void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
709 // nullptr only exists from C++11 on, so don't warn on its absence earlier.
710 if (!getLangOpts().CPlusPlus11)
711 return;
712
713 if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
714 return;
715
716 const Expr *EStripped = E->IgnoreParenImpCasts();
717 if (EStripped->getType()->isNullPtrType())
718 return;
719 if (isa<GNUNullExpr>(Val: EStripped))
720 return;
721
722 if (Diags.isIgnored(DiagID: diag::warn_zero_as_null_pointer_constant,
723 Loc: E->getBeginLoc()))
724 return;
725
726 // Don't diagnose the conversion from a 0 literal to a null pointer argument
727 // in a synthesized call to operator<=>.
728 if (!CodeSynthesisContexts.empty() &&
729 CodeSynthesisContexts.back().Kind ==
730 CodeSynthesisContext::RewritingOperatorAsSpaceship)
731 return;
732
733 // Ignore null pointers in defaulted comparison operators.
734 FunctionDecl *FD = getCurFunctionDecl();
735 if (FD && FD->isDefaulted()) {
736 return;
737 }
738
739 // If it is a macro from system header, and if the macro name is not "NULL",
740 // do not warn.
741 // Note that uses of "NULL" will be ignored above on systems that define it
742 // as __null.
743 SourceLocation MaybeMacroLoc = E->getBeginLoc();
744 if (Diags.getSuppressSystemWarnings() &&
745 SourceMgr.isInSystemMacro(loc: MaybeMacroLoc) &&
746 !findMacroSpelling(loc&: MaybeMacroLoc, name: "NULL"))
747 return;
748
749 Diag(Loc: E->getBeginLoc(), DiagID: diag::warn_zero_as_null_pointer_constant)
750 << FixItHint::CreateReplacement(RemoveRange: E->getSourceRange(), Code: "nullptr");
751}
752
753/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
754/// If there is already an implicit cast, merge into the existing one.
755/// The result is of the given category.
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
                                   CastKind Kind, ExprValueKind VK,
                                   const CXXCastPath *BasePath,
                                   CheckedConversionKind CCK) {
#ifndef NDEBUG
  // Assert-build sanity checking: only a known, small set of cast kinds may
  // legitimately produce a prvalue result from a glvalue operand; any other
  // kind here indicates a caller bug.
  if (VK == VK_PRValue && !E->isPRValue()) {
    switch (Kind) {
    default:
      llvm_unreachable(
          ("can't implicitly cast glvalue to prvalue with this cast "
           "kind: " +
           std::string(CastExpr::getCastKindName(Kind)))
              .c_str());
    case CK_Dependent:
    case CK_LValueToRValue:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_ToVoid:
    case CK_NonAtomicToAtomic:
    case CK_HLSLArrayRValue:
    case CK_HLSLAggregateSplatCast:
      break;
    }
  }
  // Conversely, no cast other than CK_Dependent may turn a prvalue operand
  // into a glvalue result.
  assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
         "can't cast prvalue to glvalue");
#endif

  diagnoseNullableToNonnullConversion(DstType: Ty, SrcType: E->getType(), Loc: E->getBeginLoc());
  diagnoseZeroToNullptrConversion(Kind, E);
  // Function-effect conversion diagnostics apply only to implicit
  // conversions; null-pointer casts carry no function type to compare.
  if (Context.hasAnyFunctionEffects() && !isCast(CCK) &&
      Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    diagnoseFunctionEffectConversion(DstType: Ty, SrcType: E->getType(), Loc: E->getBeginLoc());

  // Compare canonical types so sugar differences don't force a cast node.
  QualType ExprTy = Context.getCanonicalType(T: E->getType());
  QualType TypeTy = Context.getCanonicalType(T: Ty);

  // This cast is used in place of a regular LValue to RValue cast for
  // HLSL Array Parameter Types. It needs to be emitted even if
  // ExprTy == TypeTy, except if E is an HLSLOutArgExpr
  // Emitting a cast in that case will prevent HLSLOutArgExpr from
  // being handled properly in EmitCallArg
  if (Kind == CK_HLSLArrayRValue && !isa<HLSLOutArgExpr>(Val: E))
    return ImplicitCastExpr::Create(Context, T: Ty, Kind, Operand: E, BasePath, Cat: VK,
                                    FPO: CurFPFeatureOverrides());

  // Identical canonical types: no cast node is needed at all.
  if (ExprTy == TypeTy)
    return E;

  if (Kind == CK_ArrayToPointerDecay) {
    // C++1z [conv.array]: The temporary materialization conversion is applied.
    // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
    if (getLangOpts().CPlusPlus && E->isPRValue()) {
      // The temporary is an lvalue in C++98 and an xvalue otherwise.
      ExprResult Materialized = CreateMaterializeTemporaryExpr(
          T: E->getType(), Temporary: E, BoundToLvalueReference: !getLangOpts().CPlusPlus11);
      if (Materialized.isInvalid())
        return ExprError();
      E = Materialized.get();
    }
    // C17 6.7.1p6 footnote 124: The implementation can treat any register
    // declaration simply as an auto declaration. However, whether or not
    // addressable storage is actually used, the address of any part of an
    // object declared with storage-class specifier register cannot be
    // computed, either explicitly(by use of the unary & operator as discussed
    // in 6.5.3.2) or implicitly(by converting an array name to a pointer as
    // discussed in 6.3.2.1).Thus, the only operator that can be applied to an
    // array declared with storage-class specifier register is sizeof.
    if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E)) {
        if (const auto *VD = dyn_cast<VarDecl>(Val: DRE->getDecl())) {
          if (VD->getStorageClass() == SC_Register) {
            Diag(Loc: E->getExprLoc(), DiagID: diag::err_typecheck_address_of)
                << /*register variable*/ 3 << E->getSourceRange();
            return ExprError();
          }
        }
      }
    }
  }

  // If the operand is already an implicit cast of the same kind (and no base
  // path is involved), merge into it instead of stacking a second cast node.
  if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(Val: E)) {
    if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
      ImpCast->setType(Ty);
      ImpCast->setValueKind(VK);
      return E;
    }
  }

  bool IsExplicitCast = isa<CStyleCastExpr>(Val: E) || isa<CXXStaticCastExpr>(Val: E) ||
                        isa<CXXFunctionalCastExpr>(Val: E);

  // For explicit integral casts out of an overflow-behavior type, re-wrap the
  // destination integer type so the source's overflow behavior is preserved
  // across the cast.
  if ((Kind == CK_IntegralCast || Kind == CK_IntegralToBoolean ||
       (Kind == CK_NoOp && E->getType()->isIntegerType() &&
        Ty->isIntegerType())) &&
      IsExplicitCast) {
    if (const auto *SourceOBT = E->getType()->getAs<OverflowBehaviorType>()) {
      if (Ty->isIntegerType() && !Ty->isOverflowBehaviorType()) {
        Ty = Context.getOverflowBehaviorType(Kind: SourceOBT->getBehaviorKind(), Wrapped: Ty);
      }
    }
  }

  return ImplicitCastExpr::Create(Context, T: Ty, Kind, Operand: E, BasePath, Cat: VK,
                                  FPO: CurFPFeatureOverrides());
}
862
863CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
864 switch (ScalarTy->getScalarTypeKind()) {
865 case Type::STK_Bool: return CK_NoOp;
866 case Type::STK_CPointer: return CK_PointerToBoolean;
867 case Type::STK_BlockPointer: return CK_PointerToBoolean;
868 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
869 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
870 case Type::STK_Integral: return CK_IntegralToBoolean;
871 case Type::STK_Floating: return CK_FloatingToBoolean;
872 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
873 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
874 case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
875 }
876 llvm_unreachable("unknown scalar type kind");
877}
878
879/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
880static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
881 if (D->getMostRecentDecl()->isUsed())
882 return true;
883
884 if (D->isExternallyVisible())
885 return true;
886
887 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Val: D)) {
888 // If this is a function template and none of its specializations is used,
889 // we should warn.
890 if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
891 for (const auto *Spec : Template->specializations())
892 if (ShouldRemoveFromUnused(SemaRef, D: Spec))
893 return true;
894
895 // UnusedFileScopedDecls stores the first declaration.
896 // The declaration may have become definition so check again.
897 const FunctionDecl *DeclToCheck;
898 if (FD->hasBody(Definition&: DeclToCheck))
899 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(D: DeclToCheck);
900
901 // Later redecls may add new information resulting in not having to warn,
902 // so check again.
903 DeclToCheck = FD->getMostRecentDecl();
904 if (DeclToCheck != FD)
905 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(D: DeclToCheck);
906 }
907
908 if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D)) {
909 // If a variable usable in constant expressions is referenced,
910 // don't warn if it isn't used: if the value of a variable is required
911 // for the computation of a constant expression, it doesn't make sense to
912 // warn even if the variable isn't odr-used. (isReferenced doesn't
913 // precisely reflect that, but it's a decent approximation.)
914 if (VD->isReferenced() &&
915 VD->mightBeUsableInConstantExpressions(C: SemaRef->Context))
916 return true;
917
918 if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
919 // If this is a variable template and none of its specializations is used,
920 // we should warn.
921 for (const auto *Spec : Template->specializations())
922 if (ShouldRemoveFromUnused(SemaRef, D: Spec))
923 return true;
924
925 // UnusedFileScopedDecls stores the first declaration.
926 // The declaration may have become definition so check again.
927 const VarDecl *DeclToCheck = VD->getDefinition();
928 if (DeclToCheck)
929 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(D: DeclToCheck);
930
931 // Later redecls may add new information resulting in not having to warn,
932 // so check again.
933 DeclToCheck = VD->getMostRecentDecl();
934 if (DeclToCheck != VD)
935 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(D: DeclToCheck);
936 }
937
938 return false;
939}
940
941static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
942 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND))
943 return FD->isExternC();
944 return cast<VarDecl>(Val: ND)->isExternC();
945}
946
947/// Determine whether ND is an external-linkage function or variable whose
948/// type has no linkage.
949bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
950 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
951 // because we also want to catch the case where its type has VisibleNoLinkage,
952 // which does not affect the linkage of VD.
953 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
954 !isExternalFormalLinkage(L: VD->getType()->getLinkage()) &&
955 !isFunctionOrVarDeclExternC(ND: VD);
956}
957
958/// Obtains a sorted list of functions and variables that are undefined but
959/// ODR-used.
960void Sema::getUndefinedButUsed(
961 SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
962 for (const auto &UndefinedUse : UndefinedButUsed) {
963 NamedDecl *ND = UndefinedUse.first;
964
965 // Ignore attributes that have become invalid.
966 if (ND->isInvalidDecl()) continue;
967
968 // __attribute__((weakref)) is basically a definition.
969 if (ND->hasAttr<WeakRefAttr>()) continue;
970
971 if (isa<CXXDeductionGuideDecl>(Val: ND))
972 continue;
973
974 if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
975 // An exported function will always be emitted when defined, so even if
976 // the function is inline, it doesn't have to be emitted in this TU. An
977 // imported function implies that it has been exported somewhere else.
978 continue;
979 }
980
981 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) {
982 if (FD->isDefined())
983 continue;
984 if (FD->isExternallyVisible() &&
985 !isExternalWithNoLinkageType(VD: FD) &&
986 !FD->getMostRecentDecl()->isInlined() &&
987 !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
988 continue;
989 if (FD->getBuiltinID())
990 continue;
991 } else {
992 const auto *VD = cast<VarDecl>(Val: ND);
993 if (VD->hasDefinition() != VarDecl::DeclarationOnly)
994 continue;
995 if (VD->isExternallyVisible() &&
996 !isExternalWithNoLinkageType(VD) &&
997 !VD->getMostRecentDecl()->isInline() &&
998 !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
999 continue;
1000
1001 // Skip VarDecls that lack formal definitions but which we know are in
1002 // fact defined somewhere.
1003 if (VD->isKnownToBeDefined())
1004 continue;
1005 }
1006
1007 Undefined.push_back(Elt: std::make_pair(x&: ND, y: UndefinedUse.second));
1008 }
1009}
1010
1011/// checkUndefinedButUsed - Check for undefined objects with internal linkage
1012/// or that are inline.
static void checkUndefinedButUsed(Sema &S) {
  if (S.UndefinedButUsed.empty()) return;

  // Collect all the still-undefined entities with internal linkage.
  SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
  S.getUndefinedButUsed(Undefined);
  S.UndefinedButUsed.clear();
  if (Undefined.empty()) return;

  for (const auto &Undef : Undefined) {
    ValueDecl *VD = cast<ValueDecl>(Val: Undef.first);
    SourceLocation UseLoc = Undef.second;

    if (S.isExternalWithNoLinkageType(VD)) {
      // C++ [basic.link]p8:
      //   A type without linkage shall not be used as the type of a variable
      //   or function with external linkage unless
      //    -- the entity has C language linkage
      //    -- the entity is not odr-used or is defined in the same TU
      //
      // As an extension, accept this in cases where the type is externally
      // visible, since the function or variable actually can be defined in
      // another translation unit in that case.
      S.Diag(Loc: VD->getLocation(), DiagID: isExternallyVisible(L: VD->getType()->getLinkage())
                                  ? diag::ext_undefined_internal_type
                                  : diag::err_undefined_internal_type)
        << isa<VarDecl>(Val: VD) << VD;
    } else if (!VD->isExternallyVisible()) {
      // FIXME: We can promote this to an error. The function or variable can't
      // be defined anywhere else, so the program must necessarily violate the
      // one definition rule.
      // Under OpenMP, an implicit base function synthesized for
      // `#pragma omp declare variant` need not be defined here; suppress the
      // warning for that case unless the "disable implicit base" extension
      // trait is active.
      bool IsImplicitBase = false;
      if (const auto *BaseD = dyn_cast<FunctionDecl>(Val: VD)) {
        auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
        if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
                          TP: llvm::omp::TraitProperty::
                              implementation_extension_disable_implicit_base)) {
          const auto *Func = cast<FunctionDecl>(
              Val: cast<DeclRefExpr>(Val: DVAttr->getVariantFuncRef())->getDecl());
          // Only treat it as an implicit base if the declaration itself is
          // compiler-generated and the variant carries a mangled name.
          IsImplicitBase = BaseD->isImplicit() &&
                           Func->getIdentifier()->isMangledOpenMPVariantName();
        }
      }
      if (!S.getLangOpts().OpenMP || !IsImplicitBase)
        S.Diag(Loc: VD->getLocation(), DiagID: diag::warn_undefined_internal)
            << isa<VarDecl>(Val: VD) << VD;
    } else if (auto *FD = dyn_cast<FunctionDecl>(Val: VD)) {
      // Silence "unused variable" in release builds where the assert is
      // compiled out.
      (void)FD;
      assert(FD->getMostRecentDecl()->isInlined() &&
             "used object requires definition but isn't inline or internal?");
      // FIXME: This is ill-formed; we should reject.
      S.Diag(Loc: VD->getLocation(), DiagID: diag::warn_undefined_inline) << VD;
    } else {
      assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
             "used var requires definition but isn't inline or internal?");
      S.Diag(Loc: VD->getLocation(), DiagID: diag::err_undefined_inline_var) << VD;
    }
    // Point at one representative use so the diagnostic is actionable.
    if (UseLoc.isValid())
      S.Diag(Loc: UseLoc, DiagID: diag::note_used_here);
  }
}
1074
1075void Sema::LoadExternalWeakUndeclaredIdentifiers() {
1076 if (!ExternalSource)
1077 return;
1078
1079 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
1080 ExternalSource->ReadWeakUndeclaredIdentifiers(WI&: WeakIDs);
1081 for (auto &WeakID : WeakIDs)
1082 (void)WeakUndeclaredIdentifiers[WeakID.first].insert(X: WeakID.second);
1083}
1084
1085
1086typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
1087
1088/// Returns true, if all methods and nested classes of the given
1089/// CXXRecordDecl are defined in this translation unit.
1090///
1091/// Should only be called from ActOnEndOfTranslationUnit so that all
1092/// definitions are actually read.
1093static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
1094 RecordCompleteMap &MNCComplete) {
1095 RecordCompleteMap::iterator Cache = MNCComplete.find(Val: RD);
1096 if (Cache != MNCComplete.end())
1097 return Cache->second;
1098 if (!RD->isCompleteDefinition())
1099 return false;
1100 bool Complete = true;
1101 for (DeclContext::decl_iterator I = RD->decls_begin(),
1102 E = RD->decls_end();
1103 I != E && Complete; ++I) {
1104 if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Val: *I))
1105 Complete = M->isDefined() || M->isDefaulted() ||
1106 (M->isPureVirtual() && !isa<CXXDestructorDecl>(Val: M));
1107 else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(Val: *I))
1108 // If the template function is marked as late template parsed at this
1109 // point, it has not been instantiated and therefore we have not
1110 // performed semantic analysis on it yet, so we cannot know if the type
1111 // can be considered complete.
1112 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
1113 F->getTemplatedDecl()->isDefined();
1114 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(Val: *I)) {
1115 if (R->isInjectedClassName())
1116 continue;
1117 if (R->hasDefinition())
1118 Complete = MethodsAndNestedClassesComplete(RD: R->getDefinition(),
1119 MNCComplete);
1120 else
1121 Complete = false;
1122 }
1123 }
1124 MNCComplete[RD] = Complete;
1125 return Complete;
1126}
1127
1128/// Returns true, if the given CXXRecordDecl is fully defined in this
1129/// translation unit, i.e. all methods are defined or pure virtual and all
1130/// friends, friend functions and nested classes are fully defined in this
1131/// translation unit.
1132///
1133/// Should only be called from ActOnEndOfTranslationUnit so that all
1134/// definitions are actually read.
1135static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
1136 RecordCompleteMap &RecordsComplete,
1137 RecordCompleteMap &MNCComplete) {
1138 RecordCompleteMap::iterator Cache = RecordsComplete.find(Val: RD);
1139 if (Cache != RecordsComplete.end())
1140 return Cache->second;
1141 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
1142 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
1143 E = RD->friend_end();
1144 I != E && Complete; ++I) {
1145 // Check if friend classes and methods are complete.
1146 if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
1147 // Friend classes are available as the TypeSourceInfo of the FriendDecl.
1148 if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
1149 Complete = MethodsAndNestedClassesComplete(RD: FriendD, MNCComplete);
1150 else
1151 Complete = false;
1152 } else {
1153 // Friend functions are available through the NamedDecl of FriendDecl.
1154 if (const FunctionDecl *FD =
1155 dyn_cast<FunctionDecl>(Val: (*I)->getFriendDecl()))
1156 Complete = FD->isDefined();
1157 else
1158 // This is a template friend, give up.
1159 Complete = false;
1160 }
1161 }
1162 RecordsComplete[RD] = Complete;
1163 return Complete;
1164}
1165
1166void Sema::emitAndClearUnusedLocalTypedefWarnings() {
1167 if (ExternalSource)
1168 ExternalSource->ReadUnusedLocalTypedefNameCandidates(
1169 Decls&: UnusedLocalTypedefNameCandidates);
1170 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
1171 if (TD->isReferenced())
1172 continue;
1173 Diag(Loc: TD->getLocation(), DiagID: diag::warn_unused_local_typedef)
1174 << isa<TypeAliasDecl>(Val: TD) << TD->getDeclName();
1175 }
1176 UnusedLocalTypedefNameCandidates.clear();
1177}
1178
1179void Sema::ActOnStartOfTranslationUnit() {
1180 if (getLangOpts().CPlusPlusModules &&
1181 getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
1182 HandleStartOfHeaderUnit();
1183}
1184
void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
  // Performs the end-of-fragment work: flushing pending template
  // instantiations, defining used vtables, and emitting deferred diagnostics.
  if (Kind == TUFragmentKind::Global) {
    // Perform Pending Instantiations at the end of global module fragment so
    // that the module ownership of TU-level decls won't get messed.
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
    return;
  }

  // Transfer late parsed template instantiations over to the pending template
  // instantiation list. During normal compilation, the late template parser
  // will be installed and instantiating these templates will succeed.
  //
  // If we are building a TU prefix for serialization, it is also safe to
  // transfer these over, even though they are not parsed. The end of the TU
  // should be outside of any eager template instantiation scope, so when this
  // AST is deserialized, these templates will not be parsed until the end of
  // the combined TU.
  PendingInstantiations.insert(position: PendingInstantiations.end(),
                               first: LateParsedInstantiations.begin(),
                               last: LateParsedInstantiations.end());
  LateParsedInstantiations.clear();

  // If DefinedUsedVTables ends up marking any virtual member functions it
  // might lead to more pending template instantiations, which we then need
  // to instantiate.
  DefineUsedVTables();

  // C++: Perform implicit template instantiations.
  //
  // FIXME: When we perform these implicit instantiations, we do not
  // carefully keep track of the point of instantiation (C++ [temp.point]).
  // This means that name lookup that occurs within the template
  // instantiation will always happen at the end of the translation unit,
  // so it will find some names that are not required to be found. This is
  // valid, but we could do better by diagnosing if an instantiation uses a
  // name that was not visible at its first point of instantiation.
  if (ExternalSource) {
    // Load pending instantiations from the external source.
    SmallVector<PendingImplicitInstantiation, 4> Pending;
    ExternalSource->ReadPendingInstantiations(Pending);
    // Mark imported function instantiations as pending so they aren't
    // considered up to date before we actually instantiate them below.
    for (auto PII : Pending)
      if (auto Func = dyn_cast<FunctionDecl>(Val: PII.first))
        Func->setInstantiationIsPending(true);
    PendingInstantiations.insert(position: PendingInstantiations.begin(),
                                 first: Pending.begin(), last: Pending.end());
  }

  {
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
  }

  // Flush diagnostics that were deferred until instantiation decisions were
  // made (e.g. CUDA/OpenMP target-dependent diagnostics).
  emitDeferredDiags();

  assert(LateParsedInstantiations.empty() &&
         "end of TU template instantiation should not create more "
         "late-parsed templates");
}
1244
1245void Sema::ActOnEndOfTranslationUnit() {
1246 assert(DelayedDiagnostics.getCurrentPool() == nullptr
1247 && "reached end of translation unit with a pool attached?");
1248
1249 // If code completion is enabled, don't perform any end-of-translation-unit
1250 // work.
1251 if (PP.isCodeCompletionEnabled())
1252 return;
1253
1254 // Complete translation units and modules define vtables and perform implicit
1255 // instantiations. PCH files do not.
1256 if (TUKind != TU_Prefix) {
1257 ObjC().DiagnoseUseOfUnimplementedSelectors();
1258
1259 ActOnEndOfTranslationUnitFragment(
1260 Kind: !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1261 Module::PrivateModuleFragment
1262 ? TUFragmentKind::Private
1263 : TUFragmentKind::Normal);
1264
1265 CheckDelayedMemberExceptionSpecs();
1266 } else {
1267 // If we are building a TU prefix for serialization, it is safe to transfer
1268 // these over, even though they are not parsed. The end of the TU should be
1269 // outside of any eager template instantiation scope, so when this AST is
1270 // deserialized, these templates will not be parsed until the end of the
1271 // combined TU.
1272 PendingInstantiations.insert(position: PendingInstantiations.end(),
1273 first: LateParsedInstantiations.begin(),
1274 last: LateParsedInstantiations.end());
1275 LateParsedInstantiations.clear();
1276
1277 if (LangOpts.PCHInstantiateTemplates) {
1278 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1279 PerformPendingInstantiations();
1280 }
1281 }
1282
1283 DiagnoseUnterminatedPragmaAlignPack();
1284 DiagnoseUnterminatedPragmaAttribute();
1285 OpenMP().DiagnoseUnterminatedOpenMPDeclareTarget();
1286 DiagnosePrecisionLossInComplexDivision();
1287
1288 // All delayed member exception specs should be checked or we end up accepting
1289 // incompatible declarations.
1290 assert(DelayedOverridingExceptionSpecChecks.empty());
1291 assert(DelayedEquivalentExceptionSpecChecks.empty());
1292
1293 // All dllexport classes should have been processed already.
1294 assert(DelayedDllExportClasses.empty());
1295 assert(DelayedDllExportMemberFunctions.empty());
1296
1297 // Remove file scoped decls that turned out to be used.
1298 UnusedFileScopedDecls.erase(
1299 From: std::remove_if(first: UnusedFileScopedDecls.begin(source: nullptr, LocalOnly: true),
1300 last: UnusedFileScopedDecls.end(),
1301 pred: [this](const DeclaratorDecl *DD) {
1302 return ShouldRemoveFromUnused(SemaRef: this, D: DD);
1303 }),
1304 To: UnusedFileScopedDecls.end());
1305
1306 if (TUKind == TU_Prefix) {
1307 // Translation unit prefixes don't need any of the checking below.
1308 if (!PP.isIncrementalProcessingEnabled())
1309 TUScope = nullptr;
1310 return;
1311 }
1312
1313 // Check for #pragma weak identifiers that were never declared
1314 LoadExternalWeakUndeclaredIdentifiers();
1315 for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
1316 if (WeakIDs.second.empty())
1317 continue;
1318
1319 Decl *PrevDecl = LookupSingleName(S: TUScope, Name: WeakIDs.first, Loc: SourceLocation(),
1320 NameKind: LookupOrdinaryName);
1321 if (PrevDecl != nullptr &&
1322 !(isa<FunctionDecl>(Val: PrevDecl) || isa<VarDecl>(Val: PrevDecl)))
1323 for (const auto &WI : WeakIDs.second)
1324 Diag(Loc: WI.getLocation(), DiagID: diag::warn_attribute_wrong_decl_type)
1325 << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
1326 else
1327 for (const auto &WI : WeakIDs.second)
1328 Diag(Loc: WI.getLocation(), DiagID: diag::warn_weak_identifier_undeclared)
1329 << WeakIDs.first;
1330 }
1331
1332 if (LangOpts.CPlusPlus11 &&
1333 !Diags.isIgnored(DiagID: diag::warn_delegating_ctor_cycle, Loc: SourceLocation()))
1334 CheckDelegatingCtorCycles();
1335
1336 if (!Diags.hasErrorOccurred()) {
1337 if (ExternalSource)
1338 ExternalSource->ReadUndefinedButUsed(Undefined&: UndefinedButUsed);
1339 checkUndefinedButUsed(S&: *this);
1340 }
1341
1342 // A global-module-fragment is only permitted within a module unit.
1343 if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1344 Module::ExplicitGlobalModuleFragment) {
1345 Diag(Loc: ModuleScopes.back().BeginLoc,
1346 DiagID: diag::err_module_declaration_missing_after_global_module_introducer);
1347 } else if (getLangOpts().getCompilingModule() ==
1348 LangOptions::CMK_ModuleInterface &&
1349 // We can't use ModuleScopes here since ModuleScopes is always
1350 // empty if we're compiling the BMI.
1351 !getASTContext().getCurrentNamedModule()) {
1352 // If we are building a module interface unit, we should have seen the
1353 // module declaration.
1354 //
1355 // FIXME: Make a better guess as to where to put the module declaration.
1356 Diag(Loc: getSourceManager().getLocForStartOfFile(
1357 FID: getSourceManager().getMainFileID()),
1358 DiagID: diag::err_module_declaration_missing);
1359 }
1360
1361 // Now we can decide whether the modules we're building need an initializer.
1362 if (Module *CurrentModule = getCurrentModule();
1363 CurrentModule && CurrentModule->isInterfaceOrPartition()) {
1364 auto DoesModNeedInit = [this](Module *M) {
1365 if (!getASTContext().getModuleInitializers(M).empty())
1366 return true;
1367 for (auto [Exported, _] : M->Exports)
1368 if (Exported->isNamedModuleInterfaceHasInit())
1369 return true;
1370 for (Module *I : M->Imports)
1371 if (I->isNamedModuleInterfaceHasInit())
1372 return true;
1373
1374 return false;
1375 };
1376
1377 CurrentModule->NamedModuleHasInit =
1378 DoesModNeedInit(CurrentModule) ||
1379 llvm::any_of(Range: CurrentModule->submodules(), P: DoesModNeedInit);
1380 }
1381
1382 if (TUKind == TU_ClangModule) {
1383 // If we are building a module, resolve all of the exported declarations
1384 // now.
1385 if (Module *CurrentModule = PP.getCurrentModule()) {
1386 ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
1387
1388 SmallVector<Module *, 2> Stack;
1389 Stack.push_back(Elt: CurrentModule);
1390 while (!Stack.empty()) {
1391 Module *Mod = Stack.pop_back_val();
1392
1393 // Resolve the exported declarations and conflicts.
1394 // FIXME: Actually complain, once we figure out how to teach the
1395 // diagnostic client to deal with complaints in the module map at this
1396 // point.
1397 ModMap.resolveExports(Mod, /*Complain=*/false);
1398 ModMap.resolveUses(Mod, /*Complain=*/false);
1399 ModMap.resolveConflicts(Mod, /*Complain=*/false);
1400
1401 // Queue the submodules, so their exports will also be resolved.
1402 auto SubmodulesRange = Mod->submodules();
1403 Stack.append(in_start: SubmodulesRange.begin(), in_end: SubmodulesRange.end());
1404 }
1405 }
1406
1407 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
1408 // modules when they are built, not every time they are used.
1409 emitAndClearUnusedLocalTypedefWarnings();
1410 }
1411
1412 // C++ standard modules. Diagnose cases where a function is declared inline
1413 // in the module purview but has no definition before the end of the TU or
1414 // the start of a Private Module Fragment (if one is present).
1415 if (!PendingInlineFuncDecls.empty()) {
1416 for (auto *D : PendingInlineFuncDecls) {
1417 if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
1418 bool DefInPMF = false;
1419 if (auto *FDD = FD->getDefinition()) {
1420 DefInPMF = FDD->getOwningModule()->isPrivateModule();
1421 if (!DefInPMF)
1422 continue;
1423 }
1424 Diag(Loc: FD->getLocation(), DiagID: diag::err_export_inline_not_defined)
1425 << DefInPMF;
1426 // If we have a PMF it should be at the end of the ModuleScopes.
1427 if (DefInPMF &&
1428 ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
1429 Diag(Loc: ModuleScopes.back().BeginLoc,
1430 DiagID: diag::note_private_module_fragment);
1431 }
1432 }
1433 }
1434 PendingInlineFuncDecls.clear();
1435 }
1436
1437 // C99 6.9.2p2:
1438 // A declaration of an identifier for an object that has file
1439 // scope without an initializer, and without a storage-class
1440 // specifier or with the storage-class specifier static,
1441 // constitutes a tentative definition. If a translation unit
1442 // contains one or more tentative definitions for an identifier,
1443 // and the translation unit contains no external definition for
1444 // that identifier, then the behavior is exactly as if the
1445 // translation unit contains a file scope declaration of that
1446 // identifier, with the composite type as of the end of the
1447 // translation unit, with an initializer equal to 0.
1448 llvm::SmallPtrSet<VarDecl *, 32> Seen;
1449 for (TentativeDefinitionsType::iterator
1450 T = TentativeDefinitions.begin(source: ExternalSource.get()),
1451 TEnd = TentativeDefinitions.end();
1452 T != TEnd; ++T) {
1453 VarDecl *VD = (*T)->getActingDefinition();
1454
1455 // If the tentative definition was completed, getActingDefinition() returns
1456 // null. If we've already seen this variable before, insert()'s second
1457 // return value is false.
1458 if (!VD || VD->isInvalidDecl() || !Seen.insert(Ptr: VD).second)
1459 continue;
1460
1461 if (const IncompleteArrayType *ArrayT
1462 = Context.getAsIncompleteArrayType(T: VD->getType())) {
1463 // Set the length of the array to 1 (C99 6.9.2p5).
1464 Diag(Loc: VD->getLocation(), DiagID: diag::warn_tentative_incomplete_array);
1465 llvm::APInt One(Context.getTypeSize(T: Context.getSizeType()), true);
1466 QualType T = Context.getConstantArrayType(
1467 EltTy: ArrayT->getElementType(), ArySize: One, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
1468 VD->setType(T);
1469 } else if (RequireCompleteType(Loc: VD->getLocation(), T: VD->getType(),
1470 DiagID: diag::err_tentative_def_incomplete_type))
1471 VD->setInvalidDecl();
1472
1473 // No initialization is performed for a tentative definition.
1474 CheckCompleteVariableDeclaration(VD);
1475
1476 // In C, if the definition is const-qualified and has no initializer, it
1477 // is left uninitialized unless it has static or thread storage duration.
1478 QualType Type = VD->getType();
1479 if (!VD->isInvalidDecl() && !getLangOpts().CPlusPlus &&
1480 Type.isConstQualified() && !VD->getAnyInitializer()) {
1481 unsigned DiagID = diag::warn_default_init_const_unsafe;
1482 if (VD->getStorageDuration() == SD_Static ||
1483 VD->getStorageDuration() == SD_Thread)
1484 DiagID = diag::warn_default_init_const;
1485
1486 bool EmitCppCompat = !Diags.isIgnored(
1487 DiagID: diag::warn_cxx_compat_hack_fake_diagnostic_do_not_emit,
1488 Loc: VD->getLocation());
1489
1490 Diag(Loc: VD->getLocation(), DiagID) << Type << EmitCppCompat;
1491 }
1492
1493 // Notify the consumer that we've completed a tentative definition.
1494 if (!VD->isInvalidDecl())
1495 Consumer.CompleteTentativeDefinition(D: VD);
1496 }
1497
1498 // In incremental mode, tentative definitions belong to the current
1499 // partial translation unit (PTU). Once they have been completed and
1500 // emitted to codegen, drop them to prevent re-emission in future PTUs.
1501 if (PP.isIncrementalProcessingEnabled())
1502 TentativeDefinitions.erase(From: TentativeDefinitions.begin(source: ExternalSource.get()),
1503 To: TentativeDefinitions.end());
1504
1505 for (auto *D : ExternalDeclarations) {
1506 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
1507 continue;
1508
1509 Consumer.CompleteExternalDeclaration(D);
1510 }
1511
1512 // Visit all pending #pragma export.
1513 for (const PendingPragmaInfo &Exported : PendingExportedNames.values()) {
1514 if (!Exported.Used)
1515 Diag(Loc: Exported.NameLoc, DiagID: diag::warn_failed_to_resolve_pragma) << "export";
1516 }
1517
1518 if (LangOpts.HLSL)
1519 HLSL().ActOnEndOfTranslationUnit(TU: getASTContext().getTranslationUnitDecl());
1520 if (LangOpts.OpenACC)
1521 OpenACC().ActOnEndOfTranslationUnit(
1522 TU: getASTContext().getTranslationUnitDecl());
1523
1524 // If there were errors, disable 'unused' warnings since they will mostly be
1525 // noise. Don't warn for a use from a module: either we should warn on all
1526 // file-scope declarations in modules or not at all, but whether the
1527 // declaration is used is immaterial.
1528 if (!Diags.hasErrorOccurred() && TUKind != TU_ClangModule) {
1529 // Output warning for unused file scoped decls.
1530 for (UnusedFileScopedDeclsType::iterator
1531 I = UnusedFileScopedDecls.begin(source: ExternalSource.get()),
1532 E = UnusedFileScopedDecls.end();
1533 I != E; ++I) {
1534 if (ShouldRemoveFromUnused(SemaRef: this, D: *I))
1535 continue;
1536
1537 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Val: *I)) {
1538 const FunctionDecl *DiagD;
1539 if (!FD->hasBody(Definition&: DiagD))
1540 DiagD = FD;
1541 if (DiagD->isDeleted())
1542 continue; // Deleted functions are supposed to be unused.
1543 SourceRange DiagRange = DiagD->getLocation();
1544 if (const ASTTemplateArgumentListInfo *ASTTAL =
1545 DiagD->getTemplateSpecializationArgsAsWritten())
1546 DiagRange.setEnd(ASTTAL->RAngleLoc);
1547 if (DiagD->isReferenced()) {
1548 if (isa<CXXMethodDecl>(Val: DiagD))
1549 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unneeded_member_function)
1550 << DiagD << DiagRange;
1551 else {
1552 if (FD->getStorageClass() == SC_Static &&
1553 !FD->isInlineSpecified() &&
1554 !SourceMgr.isInMainFile(
1555 Loc: SourceMgr.getExpansionLoc(Loc: FD->getLocation())))
1556 Diag(Loc: DiagD->getLocation(),
1557 DiagID: diag::warn_unneeded_static_internal_decl)
1558 << DiagD << DiagRange;
1559 else
1560 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unneeded_internal_decl)
1561 << /*function=*/0 << DiagD << DiagRange;
1562 }
1563 } else if (!FD->isTargetMultiVersion() ||
1564 FD->isTargetMultiVersionDefault()) {
1565 if (FD->getDescribedFunctionTemplate())
1566 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unused_template)
1567 << /*function=*/0 << DiagD << DiagRange;
1568 else
1569 Diag(Loc: DiagD->getLocation(), DiagID: isa<CXXMethodDecl>(Val: DiagD)
1570 ? diag::warn_unused_member_function
1571 : diag::warn_unused_function)
1572 << DiagD << DiagRange;
1573 }
1574 } else {
1575 const VarDecl *DiagD = cast<VarDecl>(Val: *I)->getDefinition();
1576 if (!DiagD)
1577 DiagD = cast<VarDecl>(Val: *I);
1578 SourceRange DiagRange = DiagD->getLocation();
1579 if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(Val: DiagD)) {
1580 if (const ASTTemplateArgumentListInfo *ASTTAL =
1581 VTSD->getTemplateArgsAsWritten())
1582 DiagRange.setEnd(ASTTAL->RAngleLoc);
1583 }
1584 if (DiagD->isReferenced()) {
1585 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unneeded_internal_decl)
1586 << /*variable=*/1 << DiagD << DiagRange;
1587 } else if (DiagD->getDescribedVarTemplate()) {
1588 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unused_template)
1589 << /*variable=*/1 << DiagD << DiagRange;
1590 } else if (DiagD->getType().isConstQualified()) {
1591 const SourceManager &SM = SourceMgr;
1592 if (SM.getMainFileID() != SM.getFileID(SpellingLoc: DiagD->getLocation()) ||
1593 !PP.getLangOpts().IsHeaderFile)
1594 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unused_const_variable)
1595 << DiagD << DiagRange;
1596 } else {
1597 Diag(Loc: DiagD->getLocation(), DiagID: diag::warn_unused_variable)
1598 << DiagD << DiagRange;
1599 }
1600 }
1601 }
1602
1603 emitAndClearUnusedLocalTypedefWarnings();
1604 }
1605
1606 if (!Diags.isIgnored(DiagID: diag::warn_unused_private_field, Loc: SourceLocation())) {
1607 // FIXME: Load additional unused private field candidates from the external
1608 // source.
1609 RecordCompleteMap RecordsComplete;
1610 RecordCompleteMap MNCComplete;
1611 for (const NamedDecl *D : UnusedPrivateFields) {
1612 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Val: D->getDeclContext());
1613 if (RD && !RD->isUnion() &&
1614 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
1615 Diag(Loc: D->getLocation(), DiagID: diag::warn_unused_private_field)
1616 << D->getDeclName();
1617 }
1618 }
1619 }
1620
1621 if (!Diags.isIgnored(DiagID: diag::warn_mismatched_delete_new, Loc: SourceLocation())) {
1622 if (ExternalSource)
1623 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
1624 for (const auto &DeletedFieldInfo : DeleteExprs) {
1625 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
1626 AnalyzeDeleteExprMismatch(Field: DeletedFieldInfo.first, DeleteLoc: DeleteExprLoc.first,
1627 DeleteWasArrayForm: DeleteExprLoc.second);
1628 }
1629 }
1630 }
1631
1632 AnalysisWarnings.IssueWarnings(D: Context.getTranslationUnitDecl());
1633
1634 if (Context.hasAnyFunctionEffects())
1635 performFunctionEffectAnalysis(TU: Context.getTranslationUnitDecl());
1636
1637 // Check we've noticed that we're no longer parsing the initializer for every
1638 // variable. If we miss cases, then at best we have a performance issue and
1639 // at worst a rejects-valid bug.
1640 assert(ParsingInitForAutoVars.empty() &&
1641 "Didn't unmark var as having its initializer parsed");
1642
1643 if (!PP.isIncrementalProcessingEnabled())
1644 TUScope = nullptr;
1645
1646 checkExposure(TU: Context.getTranslationUnitDecl());
1647}
1648
1649
1650//===----------------------------------------------------------------------===//
1651// Helper functions.
1652//===----------------------------------------------------------------------===//
1653
1654DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
1655 DeclContext *DC = CurContext;
1656
1657 while (true) {
1658 if (isa<BlockDecl>(Val: DC) || isa<EnumDecl>(Val: DC) || isa<CapturedDecl>(Val: DC) ||
1659 isa<RequiresExprBodyDecl>(Val: DC)) {
1660 DC = DC->getParent();
1661 } else if (!AllowLambda && isa<CXXMethodDecl>(Val: DC) &&
1662 cast<CXXMethodDecl>(Val: DC)->getOverloadedOperator() == OO_Call &&
1663 cast<CXXRecordDecl>(Val: DC->getParent())->isLambda()) {
1664 DC = DC->getParent()->getParent();
1665 } else break;
1666 }
1667
1668 return DC;
1669}
1670
1671/// getCurFunctionDecl - If inside of a function body, this returns a pointer
1672/// to the function decl for the function being parsed. If we're currently
1673/// in a 'block', this returns the containing context.
1674FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
1675 DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1676 return dyn_cast<FunctionDecl>(Val: DC);
1677}
1678
1679ObjCMethodDecl *Sema::getCurMethodDecl() {
1680 DeclContext *DC = getFunctionLevelDeclContext();
1681 while (isa<RecordDecl>(Val: DC))
1682 DC = DC->getParent();
1683 return dyn_cast<ObjCMethodDecl>(Val: DC);
1684}
1685
1686NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
1687 DeclContext *DC = getFunctionLevelDeclContext();
1688 if (isa<ObjCMethodDecl>(Val: DC) || isa<FunctionDecl>(Val: DC))
1689 return cast<NamedDecl>(Val: DC);
1690 return nullptr;
1691}
1692
1693LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1694 if (getLangOpts().OpenCL)
1695 return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1696 return LangAS::Default;
1697}
1698
// Emit a diagnostic, applying SFINAE suppression rules when we are inside a
// SFINAE trap (e.g. during template argument deduction). Depending on the
// diagnostic's SFINAE response, the diagnostic is reported normally, recorded
// as a substitution failure, treated as an access-control failure, or
// silently suppressed with the template-deduction info.
void Sema::EmitDiagnostic(unsigned DiagID, const DiagnosticBuilder &DB) {
  // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
  // and yet we also use the current diag ID on the DiagnosticsEngine. This has
  // been made more painfully obvious by the refactor that introduced this
  // function, but it is possible that the incoming argument can be
  // eliminated. If it truly cannot be (for example, there is some reentrancy
  // issue I am not seeing yet), then there should at least be a clarifying
  // comment somewhere.
  Diagnostic DiagInfo(&Diags, DB);
  if (SFINAETrap *Trap = getSFINAEContext()) {
    sema::TemplateDeductionInfo *Info = Trap->getDeductionInfo();
    switch (DiagnosticIDs::getDiagnosticSFINAEResponse(DiagID: DiagInfo.getID())) {
    case DiagnosticIDs::SFINAE_Report:
      // We'll report the diagnostic below.
      break;

    case DiagnosticIDs::SFINAE_SubstitutionFailure:
      // Count this failure so that we know that template argument deduction
      // has failed.
      Trap->setErrorOccurred();

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (Info && !Info->hasSFINAEDiagnostic())
        Info->addSFINAEDiagnostic(
            Loc: DiagInfo.getLocation(),
            PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));

      Diags.setLastDiagnosticIgnored(true);
      return;

    case DiagnosticIDs::SFINAE_AccessControl: {
      // Per C++ Core Issue 1170, access control is part of SFINAE.
      // Additionally, the WithAccessChecking flag can be used to temporarily
      // make access control a part of SFINAE for the purposes of checking
      // type traits.
      if (!Trap->withAccessChecking() && !getLangOpts().CPlusPlus11)
        break;

      SourceLocation Loc = DiagInfo.getLocation();

      // Suppress this diagnostic.
      Trap->setErrorOccurred();

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (Info && !Info->hasSFINAEDiagnostic())
        Info->addSFINAEDiagnostic(
            Loc: DiagInfo.getLocation(),
            PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));

      Diags.setLastDiagnosticIgnored(true);

      // Now produce a C++98 compatibility warning.
      Diag(Loc, DiagID: diag::warn_cxx98_compat_sfinae_access_control);

      // The last diagnostic which Sema produced was ignored. Suppress any
      // notes attached to it.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }

    case DiagnosticIDs::SFINAE_Suppress:
      // If the diagnostic is already disabled at this location there is
      // nothing to record; bail out early.
      if (DiagnosticsEngine::Level Level = getDiagnostics().getDiagnosticLevel(
              DiagID: DiagInfo.getID(), Loc: DiagInfo.getLocation());
          Level == DiagnosticsEngine::Ignored)
        return;
      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information;
      if (Info) {
        Info->addSuppressedDiagnostic(
            Loc: DiagInfo.getLocation(),
            PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
        // Record the instantiation context stack alongside the suppressed
        // diagnostic so it can be replayed later if needed.
        if (!Diags.getDiagnosticIDs()->isNote(DiagID))
          PrintContextStack(DiagFunc: [Info](SourceLocation Loc, PartialDiagnostic PD) {
            Info->addSuppressedDiagnostic(Loc, PD: std::move(PD));
          });
      }

      // Suppress this diagnostic.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }
  }

  // Copy the diagnostic printing policy over the ASTContext printing policy.
  // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
  Context.setPrintingPolicy(getPrintingPolicy());

  // Emit the diagnostic.
  if (!Diags.EmitDiagnostic(DB))
    return;

  // If this is not a note, and we're in a template instantiation
  // that is different from the last template instantiation where
  // we emitted an error, print a template instantiation
  // backtrace.
  if (!Diags.getDiagnosticIDs()->isNote(DiagID))
    PrintContextStack();
}
1799
1800bool Sema::hasUncompilableErrorOccurred() const {
1801 if (getDiagnostics().hasUncompilableErrorOccurred())
1802 return true;
1803 auto *FD = dyn_cast<FunctionDecl>(Val: CurContext);
1804 if (!FD)
1805 return false;
1806 auto Loc = DeviceDeferredDiags.find(Val: FD);
1807 if (Loc == DeviceDeferredDiags.end())
1808 return false;
1809 for (auto PDAt : Loc->second) {
1810 if (Diags.getDiagnosticIDs()->isDefaultMappingAsError(
1811 DiagID: PDAt.second.getDiagID()))
1812 return true;
1813 }
1814 return false;
1815}
1816
1817// Print notes showing how we can reach FD starting from an a priori
1818// known-callable function.
1819static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
1820 auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(Val: FD);
1821 while (FnIt != S.CUDA().DeviceKnownEmittedFns.end()) {
1822 // Respect error limit.
1823 if (S.Diags.hasFatalErrorOccurred())
1824 return;
1825 DiagnosticBuilder Builder(
1826 S.Diags.Report(Loc: FnIt->second.Loc, DiagID: diag::note_called_by));
1827 Builder << FnIt->second.FD;
1828 FnIt = S.CUDA().DeviceKnownEmittedFns.find(Val: FnIt->second.FD);
1829 }
1830}
1831
1832namespace {
1833
1834/// Helper class that emits deferred diagnostic messages if an entity directly
1835/// or indirectly using the function that causes the deferred diagnostic
1836/// messages is known to be emitted.
1837///
1838/// During parsing of AST, certain diagnostic messages are recorded as deferred
1839/// diagnostics since it is unknown whether the functions containing such
1840/// diagnostics will be emitted. A list of potentially emitted functions and
1841/// variables that may potentially trigger emission of functions are also
1842/// recorded. DeferredDiagnosticsEmitter recursively visits used functions
1843/// by each function to emit deferred diagnostics.
1844///
1845/// During the visit, certain OpenMP directives or initializer of variables
1846/// with certain OpenMP attributes will cause subsequent visiting of any
1847/// functions enter a state which is called OpenMP device context in this
1848/// implementation. The state is exited when the directive or initializer is
1849/// exited. This state can change the emission states of subsequent uses
1850/// of functions.
1851///
1852/// Conceptually the functions or variables to be visited form a use graph
1853/// where the parent node uses the child node. At any point of the visit,
1854/// the tree nodes traversed from the tree root to the current node form a use
1855/// stack. The emission state of the current node depends on two factors:
1856/// 1. the emission state of the root node
1857/// 2. whether the current node is in OpenMP device context
1858/// If the function is decided to be emitted, its contained deferred diagnostics
1859/// are emitted, together with the information about the use stack.
1860///
class DeferredDiagnosticsEmitter
    : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
public:
  typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;

  // Whether the function is already in the current use-path.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;

  // The current use-path.
  llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;

  // Whether the visiting of the function has been done. Done[0] is for the
  // case not in OpenMP device context. Done[1] is for the case in OpenMP
  // device context. We need two sets because diagnostics emission may be
  // different depending on whether it is in OpenMP device context.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];

  // Emission state of the root node of the current use graph.
  bool ShouldEmitRootNode;

  // Current OpenMP device context level. It is initialized to 0 and each
  // entering of device context increases it by 1 and each exit decreases
  // it by 1. Non-zero value indicates it is currently in device context.
  unsigned InOMPDeviceContext;

  DeferredDiagnosticsEmitter(Sema &S)
      : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}

  // Never walk into discarded statements.
  bool shouldVisitDiscardedStmt() const { return false; }

  // Everything nested inside a '#pragma omp target' directive is visited
  // with the device-context level bumped.
  void VisitOMPTargetDirective(OMPTargetDirective *Node) {
    ++InOMPDeviceContext;
    Inherited::VisitOMPTargetDirective(S: Node);
    --InOMPDeviceContext;
  }

  // Dispatch a used declaration: functions go through checkFunc; variable
  // uses are ignored here (file-scope variables are driven separately via
  // checkRecordedDecl -> checkVar).
  void visitUsedDecl(SourceLocation Loc, Decl *D) {
    if (isa<VarDecl>(Val: D))
      return;
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D))
      checkFunc(Loc, FD);
    else
      Inherited::visitUsedDecl(Loc, D);
  }

  // Visitor member and parent dtors called by this dtor.
  void VisitCalledDestructors(CXXDestructorDecl *DD) {
    const CXXRecordDecl *RD = DD->getParent();

    // Visit the dtors of all members
    for (const FieldDecl *FD : RD->fields()) {
      QualType FT = FD->getType();
      // Only look at class types that are (being) defined; others cannot
      // have a destructor to visit.
      if (const auto *ClassDecl = FT->getAsCXXRecordDecl();
          ClassDecl &&
          (ClassDecl->isBeingDefined() || ClassDecl->isCompleteDefinition()))
        if (CXXDestructorDecl *MemberDtor = ClassDecl->getDestructor())
          asImpl().visitUsedDecl(Loc: MemberDtor->getLocation(), D: MemberDtor);
    }

    // Also visit base class dtors
    for (const auto &Base : RD->bases()) {
      QualType BaseType = Base.getType();
      if (const auto *BaseDecl = BaseType->getAsCXXRecordDecl();
          BaseDecl &&
          (BaseDecl->isBeingDefined() || BaseDecl->isCompleteDefinition()))
        if (CXXDestructorDecl *BaseDtor = BaseDecl->getDestructor())
          asImpl().visitUsedDecl(Loc: BaseDtor->getLocation(), D: BaseDtor);
    }
  }

  void VisitDeclStmt(DeclStmt *DS) {
    // Visit dtors called by variables that need destruction
    for (auto *D : DS->decls())
      if (auto *VD = dyn_cast<VarDecl>(Val: D))
        if (VD->isThisDeclarationADefinition() &&
            VD->needsDestruction(Ctx: S.Context)) {
          QualType VT = VD->getType();
          if (const auto *ClassDecl = VT->getAsCXXRecordDecl();
              ClassDecl && (ClassDecl->isBeingDefined() ||
                            ClassDecl->isCompleteDefinition()))
            if (CXXDestructorDecl *Dtor = ClassDecl->getDestructor())
              asImpl().visitUsedDecl(Loc: Dtor->getLocation(), D: Dtor);
        }

    Inherited::VisitDeclStmt(S: DS);
  }

  // Visit the initializer of a file-scope variable, entering OpenMP device
  // context if the variable is declare-target for the device.
  void checkVar(VarDecl *VD) {
    assert(VD->isFileVarDecl() &&
           "Should only check file-scope variables");
    if (auto *Init = VD->getInit()) {
      auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
      bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
                             *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
      if (IsDev)
        ++InOMPDeviceContext;
      this->Visit(S: Init);
      if (IsDev)
        --InOMPDeviceContext;
    }
  }

  // Core of the traversal: emit FD's deferred diagnostics if appropriate,
  // record the caller edge, and recurse into FD's body and implicitly
  // called destructors.
  void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
    auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
    FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
    // Skip roots that won't be emitted, functions exempt from host/device
    // checking, and functions already on the current use-path (cycles).
    if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
        S.shouldIgnoreInHostDeviceCheck(Callee: FD) || InUsePath.count(Ptr: FD))
      return;
    // Finalize analysis of OpenMP-specific constructs.
    if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
        (ShouldEmitRootNode || InOMPDeviceContext))
      S.OpenMP().finalizeOpenMPDelayedAnalysis(Caller, Callee: FD, Loc);
    if (Caller)
      S.CUDA().DeviceKnownEmittedFns[FD] = {.FD: Caller, .Loc: Loc};
    // Always emit deferred diagnostics for the direct users. This does not
    // lead to explosion of diagnostics since each user is visited at most
    // twice.
    if (ShouldEmitRootNode || InOMPDeviceContext)
      emitDeferredDiags(FD, ShowCallStack: Caller);
    // Do not revisit a function if the function body has been completely
    // visited before.
    if (!Done.insert(Ptr: FD).second)
      return;
    InUsePath.insert(Ptr: FD);
    UsePath.push_back(Elt: FD);
    if (auto *S = FD->getBody()) {
      this->Visit(S);
    }
    if (CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(Val: FD))
      asImpl().VisitCalledDestructors(DD: Dtor);
    UsePath.pop_back();
    InUsePath.erase(Ptr: FD);
  }

  // Entry point for each recorded decl: functions become the root of a use
  // graph (with their final emission status), variables get their
  // initializers visited.
  void checkRecordedDecl(Decl *D) {
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
      ShouldEmitRootNode = S.getEmissionStatus(Decl: FD, /*Final=*/true) ==
                           Sema::FunctionEmissionStatus::Emitted;
      checkFunc(Loc: SourceLocation(), FD);
    } else
      checkVar(VD: cast<VarDecl>(Val: D));
  }

  // Emit any deferred diagnostics for FD
  void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
    auto It = S.DeviceDeferredDiags.find(Val: FD);
    if (It == S.DeviceDeferredDiags.end())
      return;
    bool HasWarningOrError = false;
    bool FirstDiag = true;
    for (PartialDiagnosticAt &PDAt : It->second) {
      // Respect error limit.
      if (S.Diags.hasFatalErrorOccurred())
        return;
      const SourceLocation &Loc = PDAt.first;
      const PartialDiagnostic &PD = PDAt.second;
      HasWarningOrError |=
          S.getDiagnostics().getDiagnosticLevel(DiagID: PD.getDiagID(), Loc) >=
          DiagnosticsEngine::Warning;
      {
        DiagnosticBuilder Builder(S.Diags.Report(Loc, DiagID: PD.getDiagID()));
        PD.Emit(DB: Builder);
      }
      // Emit the note on the first diagnostic in case too many diagnostics
      // cause the note not emitted.
      if (FirstDiag && HasWarningOrError && ShowCallStack) {
        emitCallStackNotes(S, FD);
        FirstDiag = false;
      }
    }
  }
};
2032} // namespace
2033
2034void Sema::emitDeferredDiags() {
2035 if (ExternalSource)
2036 ExternalSource->ReadDeclsToCheckForDeferredDiags(
2037 Decls&: DeclsToCheckForDeferredDiags);
2038
2039 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
2040 DeclsToCheckForDeferredDiags.empty())
2041 return;
2042
2043 DeferredDiagnosticsEmitter DDE(*this);
2044 for (auto *D : DeclsToCheckForDeferredDiags)
2045 DDE.checkRecordedDecl(D);
2046}
2047
2048// In CUDA, there are some constructs which may appear in semantically-valid
2049// code, but trigger errors if we ever generate code for the function in which
2050// they appear. Essentially every construct you're not allowed to use on the
2051// device falls into this category, because you are allowed to use these
2052// constructs in a __host__ __device__ function, but only if that function is
2053// never codegen'ed on the device.
2054//
2055// To handle semantic checking for these constructs, we keep track of the set of
2056// functions we know will be emitted, either because we could tell a priori that
2057// they would be emitted, or because they were transitively called by a
2058// known-emitted function.
2059//
2060// We also keep a partial call graph of which not-known-emitted functions call
2061// which other not-known-emitted functions.
2062//
2063// When we see something which is illegal if the current function is emitted
2064// (usually by way of DiagIfDeviceCode, DiagIfHostCode, or
2065// CheckCall), we first check if the current function is known-emitted. If
2066// so, we immediately output the diagnostic.
2067//
2068// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
2069// until we discover that the function is known-emitted, at which point we take
2070// it out of this map and emit the diagnostic.
2071
// Construct a builder of the given kind. K_Immediate and
// K_ImmediateWithCallStack emit right away; K_Deferred appends a partial
// diagnostic to DeviceDeferredDiags[Fn], to be emitted later if Fn turns out
// to be emitted; K_Nop emits nothing.
Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
                                                   unsigned DiagID,
                                                   const FunctionDecl *Fn,
                                                   Sema &S)
    : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
      ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
  switch (K) {
  case K_Nop:
    break;
  case K_Immediate:
  case K_ImmediateWithCallStack:
    ImmediateDiag.emplace(
        args: ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
    break;
  case K_Deferred:
    assert(Fn && "Must have a function to attach the deferred diag to.");
    auto &Diags = S.DeviceDeferredDiags[Fn];
    // Remember the index of the new partial diagnostic so the streaming
    // operators can append arguments to it later.
    PartialDiagId.emplace(args: Diags.size());
    Diags.emplace_back(args&: Loc, args: S.PDiag(DiagID));
    break;
  }
}
2094
2095Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
2096 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
2097 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
2098 PartialDiagId(D.PartialDiagId) {
2099 // Clean the previous diagnostics.
2100 D.ShowCallStack = false;
2101 D.ImmediateDiag.reset();
2102 D.PartialDiagId.reset();
2103}
2104
2105Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
2106 if (ImmediateDiag) {
2107 // Emit our diagnostic and, if it was a warning or error, output a callstack
2108 // if Fn isn't a priori known-emitted.
2109 ImmediateDiag.reset(); // Emit the immediate diag.
2110
2111 if (ShowCallStack) {
2112 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
2113 DiagID, Loc) >= DiagnosticsEngine::Warning;
2114 if (IsWarningOrError)
2115 emitCallStackNotes(S, FD: Fn);
2116 }
2117 } else {
2118 assert((!PartialDiagId || ShowCallStack) &&
2119 "Must always show call stack for deferred diags.");
2120 }
2121}
2122
2123Sema::SemaDiagnosticBuilder
2124Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
2125 FD = FD ? FD : getCurFunctionDecl();
2126 if (LangOpts.OpenMP)
2127 return LangOpts.OpenMPIsTargetDevice
2128 ? OpenMP().diagIfOpenMPDeviceCode(Loc, DiagID, FD)
2129 : OpenMP().diagIfOpenMPHostCode(Loc, DiagID, FD);
2130 if (getLangOpts().CUDA)
2131 return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID)
2132 : CUDA().DiagIfHostCode(Loc, DiagID);
2133
2134 if (getLangOpts().SYCLIsDevice)
2135 return SYCL().DiagIfDeviceCode(Loc, DiagID);
2136
2137 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
2138 FD, *this);
2139}
2140
// Verify that the target supports the type Ty, diagnosing at Loc and
// attributing the problem to D when provided. Device-only checks (_BitInt,
// 128-bit floats/ints, etc.) run only under SYCL/OpenMP/CUDA device
// compilation; further checks cover long double, floating-point returns,
// and RISC-V/SVE sizeless builtin types. Function types are checked
// recursively over their parameter and return types.
void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  if (isUnevaluatedContext() || Ty.isNull())
    return;

  // The original idea behind checkTypeSupport function is that unused
  // declarations can be replaced with an array of bytes of the same size during
  // codegen, such replacement doesn't seem to be possible for types without
  // constant byte size like zero length arrays. So, do a deep check for SYCL.
  if (D && LangOpts.SYCLIsDevice) {
    llvm::DenseSet<QualType> Visited;
    SYCL().deepTypeCheckForDevice(UsedAt: Loc, Visited, DeclToCheck: D);
  }

  Decl *C = cast<Decl>(Val: getCurLexicalContext());

  // Memcpy operations for structs containing a member with unsupported type
  // are ok, though.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: C)) {
    if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
        MD->isTrivial())
      return;

    if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(Val: MD))
      if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
        return;
  }

  // Try to associate errors with the lexical context, if that is a function, or
  // the value declaration otherwise.
  const FunctionDecl *FD = isa<FunctionDecl>(Val: C)
                               ? cast<FunctionDecl>(Val: C)
                               : dyn_cast_or_null<FunctionDecl>(Val: D);

  // Checks that apply only when compiling for an offload device.
  auto CheckDeviceType = [&](QualType Ty) {
    if (Ty->isDependentType())
      return;

    if (Ty->isBitIntType()) {
      if (!Context.getTargetInfo().hasBitIntType()) {
        PartialDiagnostic PD = PDiag(DiagID: diag::err_target_unsupported_type);
        if (D)
          PD << D;
        else
          PD << "expression";
        targetDiag(Loc, PD, FD)
            << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
            << Ty << Context.getTargetInfo().getTriple().str();
      }
      return;
    }

    // Check if we are dealing with two 'long double' but with different
    // semantics.
    bool LongDoubleMismatched = false;
    if (Ty->isRealFloatingType() && Context.getTypeSize(T: Ty) == 128) {
      const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(T: Ty);
      if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasFloat128Type()) ||
          (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasIbm128Type()))
        LongDoubleMismatched = true;
    }

    if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
        (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
        (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
        (Ty->isIntegerType() && Context.getTypeSize(T: Ty) == 128 &&
         !Context.getTargetInfo().hasInt128Type()) ||
        (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
         !LangOpts.CUDAIsDevice) ||
        LongDoubleMismatched) {
      PartialDiagnostic PD = PDiag(DiagID: diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      // targetDiag returns a builder that converts to bool: true when the
      // diagnostic was emitted immediately (not deferred).
      if (targetDiag(Loc, PD, FD)
          << true /*show bit size*/
          << static_cast<unsigned>(Context.getTypeSize(T: Ty)) << Ty
          << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(Loc: D->getLocation(), DiagID: diag::note_defined_here, FD) << D;
    }
  };

  // Checks that apply to both host and device compilation. IsRetTy marks a
  // function return type, which has an extra FP-return restriction.
  auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
    if (LangOpts.SYCLIsDevice ||
        (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
        LangOpts.CUDAIsDevice)
      CheckDeviceType(Ty);

    QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
    const TargetInfo &TI = Context.getTargetInfo();
    if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
      PartialDiagnostic PD = PDiag(DiagID: diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD) << false /*show bit size*/ << 0 << Ty
                        << false /*return*/
                        << TI.getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(Loc: D->getLocation(), DiagID: diag::note_defined_here, FD) << D;
    }

    bool IsDouble = UnqualTy == Context.DoubleTy;
    bool IsFloat = UnqualTy == Context.FloatTy;
    if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
      PartialDiagnostic PD = PDiag(DiagID: diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD) << false /*show bit size*/ << 0 << Ty << true /*return*/
                        << TI.getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(Loc: D->getLocation(), DiagID: diag::note_defined_here, FD) << D;
    }

    // RVV sizeless types require the corresponding target features on the
    // enclosing function.
    if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
      llvm::StringMap<bool> CallerFeatureMap;
      Context.getFunctionFeatureMap(FeatureMap&: CallerFeatureMap, FD);
      RISCV().checkRVVTypeSupport(Ty, Loc, D, FeatureMap: CallerFeatureMap);
    }

    // Don't allow SVE types in functions without a SVE target.
    if (Ty->isSVESizelessBuiltinType() && FD) {
      llvm::StringMap<bool> CallerFeatureMap;
      Context.getFunctionFeatureMap(FeatureMap&: CallerFeatureMap, FD);
      ARM().checkSVETypeSupport(Ty, Loc, FD, FeatureMap: CallerFeatureMap);
    }

    // Fixed-length SVE vectors are only meaningful when the streaming and
    // non-streaming vscale ranges agree; otherwise reject them in streaming
    // or streaming-compatible functions.
    if (auto *VT = Ty->getAs<VectorType>();
        VT && FD &&
        (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
         VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) &&
        (LangOpts.VScaleMin != LangOpts.VScaleStreamingMin ||
         LangOpts.VScaleMax != LangOpts.VScaleStreamingMax)) {
      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true)) {
        Diag(Loc, DiagID: diag::err_sve_fixed_vector_in_streaming_function)
            << Ty << /*Streaming*/ 0;
      } else if (const auto *FTy = FD->getType()->getAs<FunctionProtoType>()) {
        if (FTy->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask) {
          Diag(Loc, DiagID: diag::err_sve_fixed_vector_in_streaming_function)
              << Ty << /*StreamingCompatible*/ 1;
        }
      }
    }
  };

  CheckType(Ty);
  // For function types, check every parameter and the return type as well.
  if (const auto *FPTy = dyn_cast<FunctionProtoType>(Val&: Ty)) {
    for (const auto &ParamTy : FPTy->param_types())
      CheckType(ParamTy);
    CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
  }
  if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Val&: Ty))
    CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
}
2314
2315bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2316 SourceLocation loc = locref;
2317 if (!loc.isMacroID()) return false;
2318
2319 // There's no good way right now to look at the intermediate
2320 // expansions, so just jump to the expansion location.
2321 loc = getSourceManager().getExpansionLoc(Loc: loc);
2322
2323 // If that's written with the name, stop here.
2324 SmallString<16> buffer;
2325 if (getPreprocessor().getSpelling(loc, buffer) == name) {
2326 locref = loc;
2327 return true;
2328 }
2329 return false;
2330}
2331
2332Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2333
2334 if (!Ctx)
2335 return nullptr;
2336
2337 Ctx = Ctx->getPrimaryContext();
2338 for (Scope *S = getCurScope(); S; S = S->getParent()) {
2339 // Ignore scopes that cannot have declarations. This is important for
2340 // out-of-line definitions of static class members.
2341 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2342 if (DeclContext *Entity = S->getEntity())
2343 if (Ctx == Entity->getPrimaryContext())
2344 return S;
2345 }
2346
2347 return nullptr;
2348}
2349
2350/// Enter a new function scope
2351void Sema::PushFunctionScope() {
2352 if (FunctionScopes.empty() && CachedFunctionScope) {
2353 // Use CachedFunctionScope to avoid allocating memory when possible.
2354 CachedFunctionScope->Clear();
2355 FunctionScopes.push_back(Elt: CachedFunctionScope.release());
2356 } else {
2357 FunctionScopes.push_back(Elt: new FunctionScopeInfo(getDiagnostics()));
2358 }
2359 if (LangOpts.OpenMP)
2360 OpenMP().pushOpenMPFunctionRegion();
2361}
2362
2363void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2364 FunctionScopes.push_back(Elt: new BlockScopeInfo(getDiagnostics(),
2365 BlockScope, Block));
2366 CapturingFunctionScopes++;
2367}
2368
2369LambdaScopeInfo *Sema::PushLambdaScope() {
2370 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2371 FunctionScopes.push_back(Elt: LSI);
2372 CapturingFunctionScopes++;
2373 return LSI;
2374}
2375
2376void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2377 if (LambdaScopeInfo *const LSI = getCurLambda()) {
2378 LSI->AutoTemplateParameterDepth = Depth;
2379 return;
2380 }
2381 llvm_unreachable(
2382 "Remove assertion if intentionally called in a non-lambda context.");
2383}
2384
// Check that the type of the VarDecl has an accessible copy constructor and
// resolve its destructor's exception specification.
// This also performs initialization of block variables when they are moved
// to the heap. It uses the same rules as applicable for implicit moves
// according to the C++ standard in effect ([class.copy.elision]p3).
static void checkEscapingByref(VarDecl *VD, Sema &S) {
  QualType T = VD->getType();
  // Build the initialization in a potentially-evaluated context so that
  // odr-use and cleanups are tracked normally.
  EnterExpressionEvaluationContext scope(
      S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
  SourceLocation Loc = VD->getLocation();
  // Synthesize a reference to the variable to act as the initializer source.
  Expr *VarRef =
      new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
  ExprResult Result;
  auto IE = InitializedEntity::InitializeBlock(BlockVarLoc: Loc, Type: T);
  if (S.getLangOpts().CPlusPlus23) {
    // In C++23 mode, model the source as an xvalue (via a no-op cast) and
    // perform plain copy-initialization from it.
    auto *E = ImplicitCastExpr::Create(Context: S.Context, T, Kind: CK_NoOp, Operand: VarRef, BasePath: nullptr,
                                       Cat: VK_XValue, FPO: FPOptionsOverride());
    Result = S.PerformCopyInitialization(Entity: IE, EqualLoc: SourceLocation(), Init: E);
  } else {
    // Pre-C++23: use the named-return-value machinery, marking the variable
    // as move-eligible so initialization may select a move constructor.
    Result = S.PerformMoveOrCopyInitialization(
        Entity: IE, NRInfo: Sema::NamedReturnInfo{.Candidate: VD, .S: Sema::NamedReturnInfo::MoveEligible},
        Value: VarRef);
  }

  if (!Result.isInvalid()) {
    // Record the cleanup-wrapped initializer and whether it can throw, for
    // use when the variable is moved to the heap.
    Result = S.MaybeCreateExprWithCleanups(SubExpr: Result);
    Expr *Init = Result.getAs<Expr>();
    S.Context.setBlockVarCopyInit(VD, CopyExpr: Init, CanThrow: S.canThrow(E: Init));
  }

  // The destructor's exception specification is needed when IRGen generates
  // block copy/destroy functions. Resolve it here.
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
    if (CXXDestructorDecl *DD = RD->getDestructor()) {
      auto *FPT = DD->getType()->castAs<FunctionProtoType>();
      S.ResolveExceptionSpec(Loc, FPT);
    }
}
2423
2424static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2425 // Set the EscapingByref flag of __block variables captured by
2426 // escaping blocks.
2427 for (const BlockDecl *BD : FSI.Blocks) {
2428 for (const BlockDecl::Capture &BC : BD->captures()) {
2429 VarDecl *VD = BC.getVariable();
2430 if (VD->hasAttr<BlocksAttr>()) {
2431 // Nothing to do if this is a __block variable captured by a
2432 // non-escaping block.
2433 if (BD->doesNotEscape())
2434 continue;
2435 VD->setEscapingByref();
2436 }
2437 // Check whether the captured variable is or contains an object of
2438 // non-trivial C union type.
2439 QualType CapType = BC.getVariable()->getType();
2440 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2441 CapType.hasNonTrivialToPrimitiveCopyCUnion())
2442 S.checkNonTrivialCUnion(QT: BC.getVariable()->getType(),
2443 Loc: BD->getCaretLocation(),
2444 UseContext: NonTrivialCUnionContext::BlockCapture,
2445 NonTrivialKind: Sema::NTCUK_Destruct | Sema::NTCUK_Copy);
2446 }
2447 }
2448
2449 for (VarDecl *VD : FSI.ByrefBlockVars) {
2450 // __block variables might require us to capture a copy-initializer.
2451 if (!VD->isEscapingByref())
2452 continue;
2453 // It's currently invalid to ever have a __block variable with an
2454 // array type; should we diagnose that here?
2455 // Regardless, we don't want to ignore array nesting when
2456 // constructing this copy.
2457 if (VD->getType()->isStructureOrClassType())
2458 checkEscapingByref(VD, S);
2459 }
2460}
2461
Sema::PoppedFunctionScopePtr
Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, Decl *D,
                           QualType BlockType) {
  assert(!FunctionScopes.empty() && "mismatched push/pop!");

  // Process escaping __block variables before popping, while the scope's
  // recorded blocks and byref variables are still the "current" scope.
  markEscapingByrefs(FSI: *FunctionScopes.back(), S&: *this);

  // The custom deleter either caches or deletes the scope when the returned
  // pointer is destroyed (see PoppedFunctionScopeDeleter::operator()).
  PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
                               PoppedFunctionScopeDeleter(this));

  if (LangOpts.OpenMP)
    OpenMP().popOpenMPFunctionRegion(OldFSI: Scope.get());

  // Issue any analysis-based warnings.
  if (WP && D) {
    inferNoReturnAttr(S&: *this, D);
    AnalysisWarnings.IssueWarnings(P: *WP, fscope: Scope.get(), D, BlockType);
  } else
    // No analysis will run, so emit the diagnostics that were deferred as
    // possibly-unreachable directly.
    for (const auto &PUD : Scope->PossiblyUnreachableDiags)
      Diag(Loc: PUD.Loc, PD: PUD.PD);

  return Scope;
}
2485
2486void Sema::PoppedFunctionScopeDeleter::
2487operator()(sema::FunctionScopeInfo *Scope) const {
2488 if (!Scope->isPlainFunction())
2489 Self->CapturingFunctionScopes--;
2490 // Stash the function scope for later reuse if it's for a normal function.
2491 if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2492 Self->CachedFunctionScope.reset(p: Scope);
2493 else
2494 delete Scope;
2495}
2496
2497void Sema::PushCompoundScope(bool IsStmtExpr) {
2498 getCurFunction()->CompoundScopes.push_back(
2499 Elt: CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2500}
2501
2502void Sema::PopCompoundScope() {
2503 FunctionScopeInfo *CurFunction = getCurFunction();
2504 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2505
2506 CurFunction->CompoundScopes.pop_back();
2507}
2508
2509bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2510 return getCurFunction()->hasUnrecoverableErrorOccurred();
2511}
2512
2513void Sema::setFunctionHasBranchIntoScope() {
2514 if (!FunctionScopes.empty())
2515 FunctionScopes.back()->setHasBranchIntoScope();
2516}
2517
2518void Sema::setFunctionHasBranchProtectedScope() {
2519 if (!FunctionScopes.empty())
2520 FunctionScopes.back()->setHasBranchProtectedScope();
2521}
2522
2523void Sema::setFunctionHasIndirectGoto() {
2524 if (!FunctionScopes.empty())
2525 FunctionScopes.back()->setHasIndirectGoto();
2526}
2527
2528void Sema::setFunctionHasMustTail() {
2529 if (!FunctionScopes.empty())
2530 FunctionScopes.back()->setHasMustTail();
2531}
2532
2533BlockScopeInfo *Sema::getCurBlock() {
2534 if (FunctionScopes.empty())
2535 return nullptr;
2536
2537 auto CurBSI = dyn_cast<BlockScopeInfo>(Val: FunctionScopes.back());
2538 if (CurBSI && CurBSI->TheDecl &&
2539 !CurBSI->TheDecl->Encloses(DC: CurContext)) {
2540 // We have switched contexts due to template instantiation.
2541 assert(!CodeSynthesisContexts.empty());
2542 return nullptr;
2543 }
2544
2545 return CurBSI;
2546}
2547
2548FunctionScopeInfo *Sema::getEnclosingFunction() const {
2549 if (FunctionScopes.empty())
2550 return nullptr;
2551
2552 for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2553 if (isa<sema::BlockScopeInfo>(Val: FunctionScopes[e]))
2554 continue;
2555 return FunctionScopes[e];
2556 }
2557 return nullptr;
2558}
2559
CapturingScopeInfo *Sema::getEnclosingLambdaOrBlock() const {
  // Walk function scopes from innermost to outermost and return the nearest
  // capturing (lambda/block/captured-region) scope, if any.
  for (auto *Scope : llvm::reverse(C: FunctionScopes)) {
    if (auto *CSI = dyn_cast<CapturingScopeInfo>(Val: Scope)) {
      auto *LSI = dyn_cast<LambdaScopeInfo>(Val: CSI);
      // A lambda scope whose closure no longer encloses the current context
      // is stale: we are inside code synthesized from it, not inside it.
      if (LSI && LSI->Lambda && !LSI->Lambda->Encloses(DC: CurContext) &&
          LSI->AfterParameterList) {
        // We have switched contexts due to template instantiation.
        // FIXME: We should swap out the FunctionScopes during code synthesis
        // so that we don't need to check for this.
        assert(!CodeSynthesisContexts.empty());
        return nullptr;
      }
      return CSI;
    }
  }
  return nullptr;
}
2577
LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
  if (FunctionScopes.empty())
    return nullptr;

  auto I = FunctionScopes.rbegin();
  if (IgnoreNonLambdaCapturingScope) {
    // Skip over intervening non-lambda capturing scopes (e.g. blocks or
    // captured regions) to reach the nearest enclosing lambda, if any.
    auto E = FunctionScopes.rend();
    while (I != E && isa<CapturingScopeInfo>(Val: *I) && !isa<LambdaScopeInfo>(Val: *I))
      ++I;
    if (I == E)
      return nullptr;
  }
  auto *CurLSI = dyn_cast<LambdaScopeInfo>(Val: *I);
  // A lambda scope whose closure no longer encloses the current context is
  // stale: we are inside code synthesized from it, not inside it.
  if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
      !CurLSI->Lambda->Encloses(DC: CurContext) && CurLSI->AfterParameterList) {
    // We have switched contexts due to template instantiation.
    assert(!CodeSynthesisContexts.empty());
    return nullptr;
  }

  return CurLSI;
}
2600
2601// We have a generic lambda if we parsed auto parameters, or we have
2602// an associated template parameter list.
2603LambdaScopeInfo *Sema::getCurGenericLambda() {
2604 if (LambdaScopeInfo *LSI = getCurLambda()) {
2605 return (LSI->TemplateParams.size() ||
2606 LSI->GLTemplateParameterList) ? LSI : nullptr;
2607 }
2608 return nullptr;
2609}
2610
2611
void Sema::ActOnComment(SourceRange Comment) {
  // Skip comments found in system headers unless the user asked to retain
  // them.
  if (!LangOpts.RetainCommentsFromSystemHeaders &&
      SourceMgr.isInSystemHeader(Loc: Comment.getBegin()))
    return;
  RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
  if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
    // Range covering the comment's first four characters, where a proper
    // Doxygen trailing-member marker would go.
    SourceRange MagicMarkerRange(Comment.getBegin(),
                                 Comment.getBegin().getLocWithOffset(Offset: 3));
    StringRef MagicMarkerText;
    switch (RC.getKind()) {
    case RawComment::RCK_OrdinaryBCPL:
      MagicMarkerText = "///<";
      break;
    case RawComment::RCK_OrdinaryC:
      MagicMarkerText = "/**<";
      break;
    case RawComment::RCK_Invalid:
      // FIXME: are there other scenarios that could produce an invalid
      // raw comment here?
      Diag(Loc: Comment.getBegin(), DiagID: diag::warn_splice_in_doxygen_comment);
      return;
    default:
      llvm_unreachable("if this is an almost Doxygen comment, "
                       "it should be ordinary");
    }
    // Suggest rewriting the marker into the proper trailing-comment form.
    Diag(Loc: Comment.getBegin(), DiagID: diag::warn_not_a_doxygen_trailing_member_comment) <<
      FixItHint::CreateReplacement(RemoveRange: MagicMarkerRange, Code: MagicMarkerText);
  }
  Context.addComment(RC);
}
2642
// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
// Out-of-line definition of the ID tag declared in the header.
char ExternalSemaSource::ID;

// Default implementations: an external source with nothing to provide simply
// does nothing for each callback.
void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }

void ExternalSemaSource::ReadKnownNamespaces(
                           SmallVectorImpl<NamespaceDecl *> &Namespaces) {
}

void ExternalSemaSource::ReadUndefinedButUsed(
    llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}

void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
    FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2659
// Try to interpret E as a zero-argument call: collect any candidate overloads
// into OverloadSet and compute the return type such a call would have in
// ZeroArgCallReturnTy. Returns true if a zero-argument call appears viable;
// ZeroArgCallReturnTy is left null when the result type is unknown or the
// candidates disagree.
bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                         UnresolvedSetImpl &OverloadSet) {
  ZeroArgCallReturnTy = QualType();
  OverloadSet.clear();

  const OverloadExpr *Overloads = nullptr;
  bool IsMemExpr = false;
  if (E.getType() == Context.OverloadTy) {
    OverloadExpr::FindResult FR = OverloadExpr::find(E: &E);

    // Ignore overloads that are pointer-to-member constants.
    if (FR.HasFormOfMemberPointer)
      return false;

    Overloads = FR.Expression;
  } else if (E.getType() == Context.BoundMemberTy) {
    Overloads = dyn_cast<UnresolvedMemberExpr>(Val: E.IgnoreParens());
    IsMemExpr = true;
  }

  bool Ambiguous = false;
  bool IsMV = false;

  if (Overloads) {
    for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
         DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
      OverloadSet.addDecl(D: *it);

      // Check whether the function is a non-template, non-member which takes no
      // arguments.
      if (IsMemExpr)
        continue;
      if (const FunctionDecl *OverloadDecl
            = dyn_cast<FunctionDecl>(Val: (*it)->getUnderlyingDecl())) {
        if (OverloadDecl->getMinRequiredArguments() == 0) {
          // A second zero-argument candidate makes the return type ambiguous,
          // unless the candidates are CPU-dispatch/CPU-specific versions of
          // the same function.
          if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
              (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
                          OverloadDecl->isCPUSpecificMultiVersion()))) {
            ZeroArgCallReturnTy = QualType();
            Ambiguous = true;
          } else {
            ZeroArgCallReturnTy = OverloadDecl->getReturnType();
            IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
                   OverloadDecl->isCPUSpecificMultiVersion();
          }
        }
      }
    }

    // If it's not a member, use better machinery to try to resolve the call
    if (!IsMemExpr)
      return !ZeroArgCallReturnTy.isNull();
  }

  // Attempt to call the member with no arguments - this will correctly handle
  // member templates with defaults/deduction of template arguments, overloads
  // with default arguments, etc.
  if (IsMemExpr && !E.isTypeDependent()) {
    // Build the call tentatively so that failures produce no diagnostics.
    Sema::TentativeAnalysisScope Trap(*this);
    ExprResult R = BuildCallToMemberFunction(S: nullptr, MemExpr: &E, LParenLoc: SourceLocation(), Args: {},
                                             RParenLoc: SourceLocation());
    if (R.isUsable()) {
      ZeroArgCallReturnTy = R.get()->getType();
      return true;
    }
    return false;
  }

  // A direct reference to a (non-overloaded) function declaration.
  if (const auto *DeclRef = dyn_cast<DeclRefExpr>(Val: E.IgnoreParens())) {
    if (const auto *Fun = dyn_cast<FunctionDecl>(Val: DeclRef->getDecl())) {
      if (Fun->getMinRequiredArguments() == 0)
        ZeroArgCallReturnTy = Fun->getReturnType();
      return true;
    }
  }

  // We don't have an expression that's convenient to get a FunctionDecl from,
  // but we can at least check if the type is "function of 0 arguments".
  QualType ExprTy = E.getType();
  const FunctionType *FunTy = nullptr;
  QualType PointeeTy = ExprTy->getPointeeType();
  if (!PointeeTy.isNull())
    FunTy = PointeeTy->getAs<FunctionType>();
  if (!FunTy)
    FunTy = ExprTy->getAs<FunctionType>();

  if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(Val: FunTy)) {
    if (FPT->getNumParams() == 0)
      ZeroArgCallReturnTy = FunTy->getReturnType();
    return true;
  }
  return false;
}
2753
2754/// Give notes for a set of overloads.
2755///
2756/// A companion to tryExprAsCall. In cases when the name that the programmer
2757/// wrote was an overloaded function, we may be able to make some guesses about
2758/// plausible overloads based on their return types; such guesses can be handed
2759/// off to this method to be emitted as notes.
2760///
2761/// \param Overloads - The overloads to note.
2762/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2763/// -fshow-overloads=best, this is the location to attach to the note about too
2764/// many candidates. Typically this will be the location of the original
2765/// ill-formed expression.
2766static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2767 const SourceLocation FinalNoteLoc) {
2768 unsigned ShownOverloads = 0;
2769 unsigned SuppressedOverloads = 0;
2770 for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2771 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2772 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2773 ++SuppressedOverloads;
2774 continue;
2775 }
2776
2777 const NamedDecl *Fn = (*It)->getUnderlyingDecl();
2778 // Don't print overloads for non-default multiversioned functions.
2779 if (const auto *FD = Fn->getAsFunction()) {
2780 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2781 !FD->getAttr<TargetAttr>()->isDefaultVersion())
2782 continue;
2783 if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2784 !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2785 continue;
2786 }
2787 S.Diag(Loc: Fn->getLocation(), DiagID: diag::note_possible_target_of_call);
2788 ++ShownOverloads;
2789 }
2790
2791 S.Diags.overloadCandidatesShown(N: ShownOverloads);
2792
2793 if (SuppressedOverloads)
2794 S.Diag(Loc: FinalNoteLoc, DiagID: diag::note_ovl_too_many_candidates)
2795 << SuppressedOverloads;
2796}
2797
2798static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2799 const UnresolvedSetImpl &Overloads,
2800 bool (*IsPlausibleResult)(QualType)) {
2801 if (!IsPlausibleResult)
2802 return noteOverloads(S, Overloads, FinalNoteLoc: Loc);
2803
2804 UnresolvedSet<2> PlausibleOverloads;
2805 for (OverloadExpr::decls_iterator It = Overloads.begin(),
2806 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2807 const auto *OverloadDecl = cast<FunctionDecl>(Val: *It);
2808 QualType OverloadResultTy = OverloadDecl->getReturnType();
2809 if (IsPlausibleResult(OverloadResultTy))
2810 PlausibleOverloads.addDecl(D: It.getDecl());
2811 }
2812 noteOverloads(S, Overloads: PlausibleOverloads, FinalNoteLoc: Loc);
2813}
2814
2815/// Determine whether the given expression can be called by just
2816/// putting parentheses after it. Notably, expressions with unary
2817/// operators can't be because the unary operator will start parsing
2818/// outside the call.
2819static bool IsCallableWithAppend(const Expr *E) {
2820 E = E->IgnoreImplicit();
2821 return (!isa<CStyleCastExpr>(Val: E) &&
2822 !isa<UnaryOperator>(Val: E) &&
2823 !isa<BinaryOperator>(Val: E) &&
2824 !isa<CXXOperatorCallExpr>(Val: E));
2825}
2826
2827static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2828 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2829 E = UO->getSubExpr();
2830
2831 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(Val: E)) {
2832 if (ULE->getNumDecls() == 0)
2833 return false;
2834
2835 const NamedDecl *ND = *ULE->decls_begin();
2836 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND))
2837 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2838 }
2839 return false;
2840}
2841
// Diagnose an expression that should have been a call. If E could be called
// with zero arguments yielding a plausible type, emit PD with a "()" fix-it
// and rewrite E into that call; otherwise, if ForceComplain is set, emit PD
// and mark E invalid. Returns true if a diagnostic was emitted.
bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                                bool ForceComplain,
                                bool (*IsPlausibleResult)(QualType)) {
  SourceLocation Loc = E.get()->getExprLoc();
  SourceRange Range = E.get()->getSourceRange();
  UnresolvedSet<4> Overloads;

  // If this is a SFINAE context, don't try anything that might trigger ADL
  // prematurely.
  if (!isSFINAEContext()) {
    QualType ZeroArgCallTy;
    if (tryExprAsCall(E&: *E.get(), ZeroArgCallReturnTy&: ZeroArgCallTy, OverloadSet&: Overloads) &&
        !ZeroArgCallTy.isNull() &&
        (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
      // At this point, we know E is potentially callable with 0
      // arguments and that it returns something of a reasonable type,
      // so we can emit a fixit and carry on pretending that E was
      // actually a CallExpr.
      SourceLocation ParenInsertionLoc = getLocForEndOfToken(Loc: Range.getEnd());
      bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E: E.get());
      // Only attach the "()" fix-it when appending parens would actually
      // parse as a call (see IsCallableWithAppend).
      Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
                    << (IsCallableWithAppend(E: E.get())
                            ? FixItHint::CreateInsertion(InsertionLoc: ParenInsertionLoc,
                                                         Code: "()")
                            : FixItHint());
      if (!IsMV)
        notePlausibleOverloads(S&: *this, Loc, Overloads, IsPlausibleResult);

      // FIXME: Try this before emitting the fixit, and suppress diagnostics
      // while doing so.
      E = BuildCallExpr(S: nullptr, Fn: E.get(), LParenLoc: Range.getEnd(), ArgExprs: {},
                        RParenLoc: Range.getEnd().getLocWithOffset(Offset: 1));
      return true;
    }
  }
  if (!ForceComplain) return false;

  // Recovery via a zero-argument call wasn't possible; complain without a
  // fix-it and invalidate the expression.
  bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E: E.get());
  Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
  if (!IsMV)
    notePlausibleOverloads(S&: *this, Loc, Overloads, IsPlausibleResult);
  E = ExprError();
  return true;
}
2886
2887IdentifierInfo *Sema::getSuperIdentifier() const {
2888 if (!Ident_super)
2889 Ident_super = &Context.Idents.get(Name: "super");
2890 return Ident_super;
2891}
2892
2893void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
2894 CapturedRegionKind K,
2895 unsigned OpenMPCaptureLevel) {
2896 auto *CSI = new CapturedRegionScopeInfo(
2897 getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
2898 (getLangOpts().OpenMP && K == CR_OpenMP)
2899 ? OpenMP().getOpenMPNestingLevel()
2900 : 0,
2901 OpenMPCaptureLevel);
2902 CSI->ReturnType = Context.VoidTy;
2903 FunctionScopes.push_back(Elt: CSI);
2904 CapturingFunctionScopes++;
2905}
2906
2907CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2908 if (FunctionScopes.empty())
2909 return nullptr;
2910
2911 return dyn_cast<CapturedRegionScopeInfo>(Val: FunctionScopes.back());
2912}
2913
// Accessor for the recorded mismatching delete-expression locations,
// keyed by field (see the DeleteExprs member).
const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
  return DeleteExprs;
}
2918
// RAII constructor: snapshot the current floating-point state (Sema's FP
// options, the FP pragma stack's current value, and the preprocessor's FP
// evaluation method and last pragma location) for restoration on destruction.
Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
    : S(S), OldFPFeaturesState(S.CurFPFeatures),
      OldOverrides(S.FpPragmaStack.CurrentValue),
      OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
      OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2924
// RAII destructor: restore the floating-point state captured by the
// constructor.
Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
  S.CurFPFeatures = OldFPFeaturesState;
  S.FpPragmaStack.CurrentValue = OldOverrides;
  S.PP.setCurrentFPEvalMethod(PragmaLoc: OldFPPragmaLocation, Val: OldEvalMethod);
}
2930
// Determine whether the qualified name in declarator D refers only to
// function-like entities (functions, function templates, or using
// declarations) when looked up in its nominated context.
bool Sema::isDeclaratorFunctionLike(Declarator &D) {
  assert(D.getCXXScopeSpec().isSet() &&
         "can only be called for qualified names");

  auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
                         LookupOrdinaryName, forRedeclarationInCurContext());
  // NOTE(review): EnteringContext is disabled for friend declarations here —
  // confirm the intended interaction with friend lookup.
  DeclContext *DC = computeDeclContext(SS: D.getCXXScopeSpec(),
                                       EnteringContext: !D.getDeclSpec().isFriendSpecified());
  if (!DC)
    return false;

  LookupQualifiedName(R&: LR, LookupCtx: DC);
  // Function-like iff every result, after resolving to its underlying
  // declaration, is a function, a function template, or a using declaration.
  bool Result = llvm::all_of(Range&: LR, P: [](Decl *Dcl) {
    if (NamedDecl *ND = dyn_cast<NamedDecl>(Val: Dcl)) {
      ND = ND->getUnderlyingDecl();
      return isa<FunctionDecl>(Val: ND) || isa<FunctionTemplateDecl>(Val: ND) ||
             isa<UsingDecl>(Val: ND);
    }
    return false;
  });
  return Result;
}
2953
2954Attr *Sema::CreateAnnotationAttr(const AttributeCommonInfo &CI, StringRef Annot,
2955 MutableArrayRef<Expr *> Args) {
2956
2957 auto *A = AnnotateAttr::Create(Ctx&: Context, Annotation: Annot, Args: Args.data(), ArgsSize: Args.size(), CommonInfo: CI);
2958 if (!ConstantFoldAttrArgs(
2959 CI, Args: MutableArrayRef<Expr *>(A->args_begin(), A->args_end()))) {
2960 return nullptr;
2961 }
2962 return A;
2963}
2964
2965Attr *Sema::CreateAnnotationAttr(const ParsedAttr &AL) {
2966 // Make sure that there is a string literal as the annotation's first
2967 // argument.
2968 StringRef Str;
2969 if (!checkStringLiteralArgumentAttr(Attr: AL, ArgNum: 0, Str))
2970 return nullptr;
2971
2972 llvm::SmallVector<Expr *, 4> Args;
2973 Args.reserve(N: AL.getNumArgs() - 1);
2974 for (unsigned Idx = 1; Idx < AL.getNumArgs(); Idx++) {
2975 assert(!AL.isArgIdent(Idx));
2976 Args.push_back(Elt: AL.getArgAsExpr(Arg: Idx));
2977 }
2978
2979 return CreateAnnotationAttr(CI: AL, Annot: Str, Args);
2980}
2981