1//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This coordinates the per-module state used while generating code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenModule.h"
14#include "ABIInfo.h"
15#include "CGBlocks.h"
16#include "CGCUDARuntime.h"
17#include "CGCXXABI.h"
18#include "CGCall.h"
19#include "CGDebugInfo.h"
20#include "CGHLSLRuntime.h"
21#include "CGObjCRuntime.h"
22#include "CGOpenCLRuntime.h"
23#include "CGOpenMPRuntime.h"
24#include "CGOpenMPRuntimeGPU.h"
25#include "CodeGenFunction.h"
26#include "CodeGenPGO.h"
27#include "ConstantEmitter.h"
28#include "CoverageMappingGen.h"
29#include "TargetInfo.h"
30#include "clang/AST/ASTContext.h"
31#include "clang/AST/ASTLambda.h"
32#include "clang/AST/CharUnits.h"
33#include "clang/AST/Decl.h"
34#include "clang/AST/DeclCXX.h"
35#include "clang/AST/DeclObjC.h"
36#include "clang/AST/DeclTemplate.h"
37#include "clang/AST/Mangle.h"
38#include "clang/AST/RecursiveASTVisitor.h"
39#include "clang/AST/StmtVisitor.h"
40#include "clang/Basic/Builtins.h"
41#include "clang/Basic/CodeGenOptions.h"
42#include "clang/Basic/Diagnostic.h"
43#include "clang/Basic/DiagnosticFrontend.h"
44#include "clang/Basic/Module.h"
45#include "clang/Basic/SourceManager.h"
46#include "clang/Basic/TargetInfo.h"
47#include "clang/Basic/Version.h"
48#include "clang/CodeGen/BackendUtil.h"
49#include "clang/CodeGen/ConstantInitBuilder.h"
50#include "llvm/ADT/STLExtras.h"
51#include "llvm/ADT/StringExtras.h"
52#include "llvm/ADT/StringSwitch.h"
53#include "llvm/Analysis/TargetLibraryInfo.h"
54#include "llvm/BinaryFormat/ELF.h"
55#include "llvm/IR/AttributeMask.h"
56#include "llvm/IR/CallingConv.h"
57#include "llvm/IR/DataLayout.h"
58#include "llvm/IR/Intrinsics.h"
59#include "llvm/IR/LLVMContext.h"
60#include "llvm/IR/Module.h"
61#include "llvm/IR/ProfileSummary.h"
62#include "llvm/ProfileData/InstrProfReader.h"
63#include "llvm/ProfileData/SampleProf.h"
64#include "llvm/Support/ARMBuildAttributes.h"
65#include "llvm/Support/CRC.h"
66#include "llvm/Support/CodeGen.h"
67#include "llvm/Support/CommandLine.h"
68#include "llvm/Support/ConvertUTF.h"
69#include "llvm/Support/ErrorHandling.h"
70#include "llvm/Support/Hash.h"
71#include "llvm/Support/TimeProfiler.h"
72#include "llvm/TargetParser/AArch64TargetParser.h"
73#include "llvm/TargetParser/RISCVISAInfo.h"
74#include "llvm/TargetParser/Triple.h"
75#include "llvm/TargetParser/X86TargetParser.h"
76#include "llvm/Transforms/Instrumentation/KCFI.h"
77#include "llvm/Transforms/Utils/BuildLibCalls.h"
78#include <optional>
79#include <set>
80
81using namespace clang;
82using namespace CodeGen;
83
// Hidden, experimental cl::opt: emit reduced coverage mapping information.
static llvm::cl::opt<bool> LimitedCoverage(
    "limited-coverage-experimental", llvm::cl::Hidden,
    llvm::cl::desc("Emit limited coverage mapping information (experimental)"));

// Section into which annotation globals are placed.
static const char AnnotationSection[] = "llvm.metadata";
// Name of the "llvm.errno.tbaa" named metadata node (errno TBAA information).
static constexpr auto ErrnoTBAAMDName = "llvm.errno.tbaa";
90
91static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
92 switch (CGM.getContext().getCXXABIKind()) {
93 case TargetCXXABI::AppleARM64:
94 case TargetCXXABI::Fuchsia:
95 case TargetCXXABI::GenericAArch64:
96 case TargetCXXABI::GenericARM:
97 case TargetCXXABI::iOS:
98 case TargetCXXABI::WatchOS:
99 case TargetCXXABI::GenericMIPS:
100 case TargetCXXABI::GenericItanium:
101 case TargetCXXABI::WebAssembly:
102 case TargetCXXABI::XL:
103 return CreateItaniumCXXABI(CGM);
104 case TargetCXXABI::Microsoft:
105 return CreateMicrosoftCXXABI(CGM);
106 }
107
108 llvm_unreachable("invalid C++ ABI kind");
109}
110
/// Select the TargetCodeGenInfo (per-target ABI lowering hooks) for the
/// module's target architecture. ABI variants are decoded from the target's
/// ABI string (Target.getABI()) and from codegen options (e.g. FloatABI)
/// where the architecture has more than one calling convention.
static std::unique_ptr<TargetCodeGenInfo>
createTargetCodeGenInfo(CodeGenModule &CGM) {
  const TargetInfo &Target = CGM.getTarget();
  const llvm::Triple &Triple = Target.getTriple();
  const CodeGenOptions &CodeGenOpts = CGM.getCodeGenOpts();

  switch (Triple.getArch()) {
  default:
    return createDefaultTargetCodeGenInfo(CGM);

  case llvm::Triple::m68k:
    return createM68kTargetCodeGenInfo(CGM);
  // 32-bit MIPS: Windows gets its own variant.
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::Win32)
      return createWindowsMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true);
    return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true);

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/false);

  case llvm::Triple::avr: {
    // For passing parameters, R8~R25 are used on avr, and R18~R25 are used
    // on avrtiny. For passing return value, R18~R25 are used on avr, and
    // R22~R25 are used on avrtiny.
    unsigned NPR = Target.getABI() == "avrtiny" ? 6 : 18;
    unsigned NRR = Target.getABI() == "avrtiny" ? 4 : 8;
    return createAVRTargetCodeGenInfo(CGM, NPR, NRR);
  }

  // AArch64: Windows uses Win64; otherwise the ABI string selects between
  // AAPCS (default), Darwin's darwinpcs, and soft-float AAPCS.
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be: {
    AArch64ABIKind Kind = AArch64ABIKind::AAPCS;
    if (Target.getABI() == "darwinpcs")
      Kind = AArch64ABIKind::DarwinPCS;
    else if (Triple.isOSWindows())
      return createWindowsAArch64TargetCodeGenInfo(CGM, K: AArch64ABIKind::Win64);
    else if (Target.getABI() == "aapcs-soft")
      Kind = AArch64ABIKind::AAPCSSoft;

    return createAArch64TargetCodeGenInfo(CGM, Kind);
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64: {
    WebAssemblyABIKind Kind = WebAssemblyABIKind::MVP;
    if (Target.getABI() == "experimental-mv")
      Kind = WebAssemblyABIKind::ExperimentalMV;
    return createWebAssemblyTargetCodeGenInfo(CGM, K: Kind);
  }

  // ARM: Windows is always AAPCS_VFP; otherwise pick by ABI string, with a
  // hard-float fallback driven by -mfloat-abi / the triple.
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32)
      return createWindowsARMTargetCodeGenInfo(CGM, K: ARMABIKind::AAPCS_VFP);

    ARMABIKind Kind = ARMABIKind::AAPCS;
    StringRef ABIStr = Target.getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIKind::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIKind::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" && Triple.isHardFloatABI()))
      Kind = ARMABIKind::AAPCS_VFP;

    return createARMTargetCodeGenInfo(CGM, Kind);
  }

  case llvm::Triple::ppc: {
    if (Triple.isOSAIX())
      return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/false);

    // SPE targets are soft-float for ABI purposes as well.
    bool IsSoftFloat =
        CodeGenOpts.FloatABI == "soft" || Target.hasFeature(Feature: "spe");
    return createPPC32TargetCodeGenInfo(CGM, SoftFloatABI: IsSoftFloat);
  }
  case llvm::Triple::ppcle: {
    bool IsSoftFloat =
        CodeGenOpts.FloatABI == "soft" || Target.hasFeature(Feature: "spe");
    return createPPC32TargetCodeGenInfo(CGM, SoftFloatABI: IsSoftFloat);
  }
  // PPC64: AIX, ELF (ELFv1 default, ELFv2 via ABI string), or generic.
  case llvm::Triple::ppc64:
    if (Triple.isOSAIX())
      return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/true);

    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv1;
      if (Target.getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIKind::ELFv2;
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, SoftFloatABI: IsSoftFloat);
    }
    return createPPC64TargetCodeGenInfo(CGM);
  // PPC64LE defaults to ELFv2 (ELFv1 selectable via ABI string).
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv2;
    if (Target.getABI() == "elfv1")
      Kind = PPC64_SVR4_ABIKind::ELFv1;
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, SoftFloatABI: IsSoftFloat);
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return createNVPTXTargetCodeGenInfo(CGM);

  case llvm::Triple::msp430:
    return createMSP430TargetCodeGenInfo(CGM);

  // RISC-V: XLen comes from the pointer width; the ABI string's suffix
  // encodes the FP register width ("f"/"d") and the embedded ABI ("e").
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
  case llvm::Triple::riscv32be:
  case llvm::Triple::riscv64be: {
    StringRef ABIStr = Target.getABI();
    unsigned XLen = Target.getPointerWidth(AddrSpace: LangAS::Default);
    unsigned ABIFLen = 0;
    if (ABIStr.ends_with(Suffix: "f"))
      ABIFLen = 32;
    else if (ABIStr.ends_with(Suffix: "d"))
      ABIFLen = 64;
    bool EABI = ABIStr.ends_with(Suffix: "e");
    return createRISCVTargetCodeGenInfo(CGM, XLen, FLen: ABIFLen, EABI);
  }

  case llvm::Triple::systemz: {
    bool SoftFloat = CodeGenOpts.FloatABI == "soft";
    bool HasVector = !SoftFloat && Target.getABI() == "vector";
    return createSystemZTargetCodeGenInfo(CGM, HasVector, SoftFloatABI: SoftFloat);
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return createTCETargetCodeGenInfo(CGM);

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return createWinX86_32TargetCodeGenInfo(
          CGM, DarwinVectorABI: IsDarwinVectorABI, Win32StructABI: IsWin32FloatStructABI,
          NumRegisterParameters: CodeGenOpts.NumRegisterParameters);
    }
    return createX86_32TargetCodeGenInfo(
        CGM, DarwinVectorABI: IsDarwinVectorABI, Win32StructABI: IsWin32FloatStructABI,
        NumRegisterParameters: CodeGenOpts.NumRegisterParameters, SoftFloatABI: CodeGenOpts.FloatABI == "soft");
  }

  case llvm::Triple::x86_64: {
    // The ABI string selects how AVX/AVX-512 vector arguments are passed.
    StringRef ABI = Target.getABI();
    X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512
                               : ABI == "avx"  ? X86AVXABILevel::AVX
                                               : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::UEFI:
    case llvm::Triple::Win32:
      return createWinX86_64TargetCodeGenInfo(CGM, AVXLevel);
    default:
      return createX86_64TargetCodeGenInfo(CGM, AVXLevel);
    }
  }
  case llvm::Triple::hexagon:
    return createHexagonTargetCodeGenInfo(CGM);
  case llvm::Triple::lanai:
    return createLanaiTargetCodeGenInfo(CGM);
  case llvm::Triple::r600:
    return createAMDGPUTargetCodeGenInfo(CGM);
  case llvm::Triple::amdgcn:
    return createAMDGPUTargetCodeGenInfo(CGM);
  case llvm::Triple::sparc:
    return createSparcV8TargetCodeGenInfo(CGM);
  case llvm::Triple::sparcv9:
    return createSparcV9TargetCodeGenInfo(CGM);
  case llvm::Triple::xcore:
    return createXCoreTargetCodeGenInfo(CGM);
  case llvm::Triple::arc:
    return createARCTargetCodeGenInfo(CGM);
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return createCommonSPIRTargetCodeGenInfo(CGM);
  case llvm::Triple::spirv32:
  case llvm::Triple::spirv64:
  case llvm::Triple::spirv:
    return createSPIRVTargetCodeGenInfo(CGM);
  case llvm::Triple::dxil:
    return createDirectXTargetCodeGenInfo(CGM);
  case llvm::Triple::ve:
    return createVETargetCodeGenInfo(CGM);
  // CSKY: FLen is 0 (soft-float), 64 (double-precision FPU), or 32.
  case llvm::Triple::csky: {
    bool IsSoftFloat = !Target.hasFeature(Feature: "hard-float-abi");
    bool hasFP64 =
        Target.hasFeature(Feature: "fpuv2_df") || Target.hasFeature(Feature: "fpuv3_df");
    return createCSKYTargetCodeGenInfo(CGM, FLen: IsSoftFloat ? 0
                                            : hasFP64    ? 64
                                                         : 32);
  }
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return createBPFTargetCodeGenInfo(CGM);
  // LoongArch: like RISC-V, GRLen comes from the pointer width and the ABI
  // string's suffix gives the FP register width.
  case llvm::Triple::loongarch32:
  case llvm::Triple::loongarch64: {
    StringRef ABIStr = Target.getABI();
    unsigned ABIFRLen = 0;
    if (ABIStr.ends_with(Suffix: "f"))
      ABIFRLen = 32;
    else if (ABIStr.ends_with(Suffix: "d"))
      ABIFRLen = 64;
    return createLoongArchTargetCodeGenInfo(
        CGM, GRLen: Target.getPointerWidth(AddrSpace: LangAS::Default), FLen: ABIFRLen);
  }
  }
}
331
332const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
333 if (!TheTargetCodeGenInfo)
334 TheTargetCodeGenInfo = createTargetCodeGenInfo(CGM&: *this);
335 return *TheTargetCodeGenInfo;
336}
337
338static void checkDataLayoutConsistency(const TargetInfo &Target,
339 llvm::LLVMContext &Context,
340 const LangOptions &Opts) {
341#ifndef NDEBUG
342 // Don't verify non-standard ABI configurations.
343 if (Opts.AlignDouble || Opts.OpenCL)
344 return;
345
346 llvm::Triple Triple = Target.getTriple();
347 llvm::DataLayout DL(Target.getDataLayoutString());
348 auto Check = [&](const char *Name, llvm::Type *Ty, unsigned Alignment) {
349 llvm::Align DLAlign = DL.getABITypeAlign(Ty);
350 llvm::Align ClangAlign(Alignment / 8);
351 if (DLAlign != ClangAlign) {
352 llvm::errs() << "For target " << Triple.str() << " type " << Name
353 << " mapping to " << *Ty << " has data layout alignment "
354 << DLAlign.value() << " while clang specifies "
355 << ClangAlign.value() << "\n";
356 abort();
357 }
358 };
359
360 Check("bool", llvm::Type::getIntNTy(Context, Target.BoolWidth),
361 Target.BoolAlign);
362 Check("short", llvm::Type::getIntNTy(Context, Target.ShortWidth),
363 Target.ShortAlign);
364 Check("int", llvm::Type::getIntNTy(Context, Target.IntWidth),
365 Target.IntAlign);
366 Check("long", llvm::Type::getIntNTy(Context, Target.LongWidth),
367 Target.LongAlign);
368 // FIXME: M68k specifies incorrect long long alignment in both LLVM and Clang.
369 if (Triple.getArch() != llvm::Triple::m68k)
370 Check("long long", llvm::Type::getIntNTy(Context, Target.LongLongWidth),
371 Target.LongLongAlign);
372 // FIXME: There are int128 alignment mismatches on multiple targets.
373 if (Target.hasInt128Type() && !Target.getTargetOpts().ForceEnableInt128 &&
374 !Triple.isAMDGPU() && !Triple.isSPIRV() &&
375 Triple.getArch() != llvm::Triple::ve)
376 Check("__int128", llvm::Type::getIntNTy(Context, 128), Target.Int128Align);
377
378 if (Target.hasFloat16Type())
379 Check("half", llvm::Type::getFloatingPointTy(Context, *Target.HalfFormat),
380 Target.HalfAlign);
381 if (Target.hasBFloat16Type())
382 Check("bfloat", llvm::Type::getBFloatTy(Context), Target.BFloat16Align);
383 Check("float", llvm::Type::getFloatingPointTy(Context, *Target.FloatFormat),
384 Target.FloatAlign);
385 Check("double", llvm::Type::getFloatingPointTy(Context, *Target.DoubleFormat),
386 Target.DoubleAlign);
387 Check("long double",
388 llvm::Type::getFloatingPointTy(Context, *Target.LongDoubleFormat),
389 Target.LongDoubleAlign);
390 if (Target.hasFloat128Type())
391 Check("__float128", llvm::Type::getFP128Ty(Context), Target.Float128Align);
392 if (Target.hasIbm128Type())
393 Check("__ibm128", llvm::Type::getPPC_FP128Ty(Context), Target.Ibm128Align);
394
395 Check("void*", llvm::PointerType::getUnqual(Context), Target.PointerAlign);
396
397 if (Target.vectorsAreElementAligned() != DL.vectorsAreElementAligned()) {
398 llvm::errs() << "Datalayout for target " << Triple.str()
399 << " sets element-aligned vectors to '"
400 << Target.vectorsAreElementAligned()
401 << "' but clang specifies '" << DL.vectorsAreElementAligned()
402 << "'\n";
403 abort();
404 }
405#endif
406}
407
/// Construct the per-module IRGen state: cached LLVM types, the C++ ABI
/// object, optional language runtimes (ObjC/OpenCL/OpenMP/CUDA/HLSL), TBAA,
/// debug info, the PGO profile reader, coverage mapping, and module flags.
CodeGenModule::CodeGenModule(ASTContext &C,
                             IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
                             const HeaderSearchOptions &HSO,
                             const PreprocessorOptions &PPO,
                             const CodeGenOptions &CGO, llvm::Module &M,
                             DiagnosticsEngine &diags,
                             CoverageSourceInfo *CoverageInfo)
    : Context(C), LangOpts(C.getLangOpts()), FS(FS), HeaderSearchOpts(HSO),
      PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
      Target(C.getTargetInfo()), ABI(createCXXABI(CGM&: *this)),
      VMContext(M.getContext()), VTables(*this), StackHandler(diags),
      SanitizerMD(new SanitizerMetadata(*this)),
      AtomicOpts(Target.getAtomicOpts()) {

  // Initialize the type cache.
  Types.reset(p: new CodeGenTypes(*this));
  llvm::LLVMContext &LLVMContext = M.getContext();
  VoidTy = llvm::Type::getVoidTy(C&: LLVMContext);
  Int8Ty = llvm::Type::getInt8Ty(C&: LLVMContext);
  Int16Ty = llvm::Type::getInt16Ty(C&: LLVMContext);
  Int32Ty = llvm::Type::getInt32Ty(C&: LLVMContext);
  Int64Ty = llvm::Type::getInt64Ty(C&: LLVMContext);
  HalfTy = llvm::Type::getHalfTy(C&: LLVMContext);
  BFloatTy = llvm::Type::getBFloatTy(C&: LLVMContext);
  FloatTy = llvm::Type::getFloatTy(C&: LLVMContext);
  DoubleTy = llvm::Type::getDoubleTy(C&: LLVMContext);
  // Widths/alignments below are derived from the target, converted from bits
  // to bytes/char units where the member name says so.
  PointerWidthInBits = C.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default);
  PointerAlignInBytes =
      C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getPointerAlign(AddrSpace: LangAS::Default))
          .getQuantity();
  SizeSizeInBytes =
      C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getMaxPointerWidth()).getQuantity();
  IntAlignInBytes =
      C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getIntAlign()).getQuantity();
  CharTy =
      llvm::IntegerType::get(C&: LLVMContext, NumBits: C.getTargetInfo().getCharWidth());
  IntTy = llvm::IntegerType::get(C&: LLVMContext, NumBits: C.getTargetInfo().getIntWidth());
  IntPtrTy = llvm::IntegerType::get(C&: LLVMContext,
                                    NumBits: C.getTargetInfo().getMaxPointerWidth());
  // Pointer types in the default, alloca, globals, and constant-globals
  // address spaces.
  Int8PtrTy = llvm::PointerType::get(C&: LLVMContext,
                                     AddressSpace: C.getTargetAddressSpace(AS: LangAS::Default));
  const llvm::DataLayout &DL = M.getDataLayout();
  AllocaInt8PtrTy =
      llvm::PointerType::get(C&: LLVMContext, AddressSpace: DL.getAllocaAddrSpace());
  GlobalsInt8PtrTy =
      llvm::PointerType::get(C&: LLVMContext, AddressSpace: DL.getDefaultGlobalsAddressSpace());
  ConstGlobalsPtrTy = llvm::PointerType::get(
      C&: LLVMContext, AddressSpace: C.getTargetAddressSpace(AS: GetGlobalConstantAddressSpace()));
  ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();

  // Build C++20 Module initializers.
  // TODO: Add Microsoft here once we know the mangling required for the
  // initializers.
  CXX20ModuleInits =
      LangOpts.CPlusPlusModules && getCXXABI().getMangleContext().getKind() ==
                                       ItaniumMangleContext::MK_Itanium;

  RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();

  // Create the language-specific runtime helpers that this compilation
  // actually needs.
  if (LangOpts.ObjC)
    createObjCRuntime();
  if (LangOpts.OpenCL)
    createOpenCLRuntime();
  if (LangOpts.OpenMP)
    createOpenMPRuntime();
  if (LangOpts.CUDA)
    createCUDARuntime();
  if (LangOpts.HLSL)
    createHLSLRuntime();

  // Enable TBAA unless it's suppressed. TSan and TySan need TBAA even at O0.
  if (LangOpts.Sanitize.hasOneOf(K: SanitizerKind::Thread | SanitizerKind::Type) ||
      (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
    TBAA.reset(p: new CodeGenTBAA(Context, getTypes(), TheModule, CodeGenOpts,
                                getLangOpts()));

  // If debug info or coverage generation is enabled, create the CGDebugInfo
  // object.
  if (CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo ||
      CodeGenOpts.CoverageNotesFile.size() ||
      CodeGenOpts.CoverageDataFile.size())
    DebugInfo.reset(p: new CGDebugInfo(*this));
  else if (getTriple().isOSWindows())
    // On Windows targets, we want to emit compiler info even if debug info is
    // otherwise disabled. Use a temporary CGDebugInfo instance to emit only
    // basic compiler metadata.
    CGDebugInfo(*this);

  Block.GlobalUniqueCount = 0;

  if (C.getLangOpts().ObjC)
    ObjCData.reset(p: new ObjCEntrypoints());

  // Open the instrumentation profile for profile-guided optimization, if one
  // was requested; a read failure is diagnosed and aborts construction early.
  if (CodeGenOpts.hasProfileClangUse()) {
    auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
        Path: CodeGenOpts.ProfileInstrumentUsePath, FS&: *FS,
        RemappingPath: CodeGenOpts.ProfileRemappingFile);
    if (auto E = ReaderOrErr.takeError()) {
      llvm::handleAllErrors(E: std::move(E), Handlers: [&](const llvm::ErrorInfoBase &EI) {
        Diags.Report(DiagID: diag::err_reading_profile)
            << CodeGenOpts.ProfileInstrumentUsePath << EI.message();
      });
      return;
    }
    PGOReader = std::move(ReaderOrErr.get());
  }

  // If coverage mapping generation is enabled, create the
  // CoverageMappingModuleGen object.
  if (CodeGenOpts.CoverageMapping)
    CoverageMapping.reset(p: new CoverageMappingModuleGen(*this, *CoverageInfo));

  // Generate the module name hash here if needed.
  if (CodeGenOpts.UniqueInternalLinkageNames &&
      !getModule().getSourceFileName().empty()) {
    std::string Path = getModule().getSourceFileName();
    // Check if a path substitution is needed from the MacroPrefixMap.
    for (const auto &Entry : LangOpts.MacroPrefixMap)
      // rfind(prefix, 0) is a prefix test: it yields 0 on match, npos otherwise.
      if (Path.rfind(str: Entry.first, pos: 0) != std::string::npos) {
        Path = Entry.second + Path.substr(pos: Entry.first.size());
        break;
      }
    ModuleNameHash = llvm::getUniqueInternalLinkagePostfix(FName: Path);
  }

  // Record mregparm value now so it is visible through all of codegen.
  if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
    getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "NumRegisterParameters",
                              Val: CodeGenOpts.NumRegisterParameters);

  // If there are any functions that are marked for Windows secure hot-patching,
  // then build the list of functions now.
  if (!CGO.MSSecureHotPatchFunctionsFile.empty() ||
      !CGO.MSSecureHotPatchFunctionsList.empty()) {
    if (!CGO.MSSecureHotPatchFunctionsFile.empty()) {
      // One function name per line; the file-open failure is diagnosed but
      // not fatal.
      auto BufOrErr = FS->getBufferForFile(Name: CGO.MSSecureHotPatchFunctionsFile);
      if (BufOrErr) {
        const llvm::MemoryBuffer &FileBuffer = **BufOrErr;
        for (llvm::line_iterator I(FileBuffer.getMemBufferRef(), true), E;
             I != E; ++I)
          this->MSHotPatchFunctions.push_back(x: std::string{*I});
      } else {
        auto &DE = Context.getDiagnostics();
        DE.Report(DiagID: diag::err_open_hotpatch_file_failed)
            << CGO.MSSecureHotPatchFunctionsFile
            << BufOrErr.getError().message();
      }
    }

    for (const auto &FuncName : CGO.MSSecureHotPatchFunctionsList)
      this->MSHotPatchFunctions.push_back(x: FuncName);

    // Sorted so later lookups can use binary search.
    llvm::sort(C&: this->MSHotPatchFunctions);
  }

  if (!Context.getAuxTargetInfo())
    checkDataLayoutConsistency(Target: Context.getTargetInfo(), Context&: LLVMContext, Opts: LangOpts);
}
566
567CodeGenModule::~CodeGenModule() {}
568
569void CodeGenModule::createObjCRuntime() {
570 // This is just isGNUFamily(), but we want to force implementors of
571 // new ABIs to decide how best to do this.
572 switch (LangOpts.ObjCRuntime.getKind()) {
573 case ObjCRuntime::GNUstep:
574 case ObjCRuntime::GCC:
575 case ObjCRuntime::ObjFW:
576 ObjCRuntime.reset(p: CreateGNUObjCRuntime(CGM&: *this));
577 return;
578
579 case ObjCRuntime::FragileMacOSX:
580 case ObjCRuntime::MacOSX:
581 case ObjCRuntime::iOS:
582 case ObjCRuntime::WatchOS:
583 ObjCRuntime.reset(p: CreateMacObjCRuntime(CGM&: *this));
584 return;
585 }
586 llvm_unreachable("bad runtime kind");
587}
588
/// Construct the OpenCL runtime helper; invoked from the constructor when
/// LangOpts.OpenCL is set.
void CodeGenModule::createOpenCLRuntime() {
  OpenCLRuntime.reset(p: new CGOpenCLRuntime(*this));
}
592
593void CodeGenModule::createOpenMPRuntime() {
594 if (!LangOpts.OMPHostIRFile.empty() && !FS->exists(Path: LangOpts.OMPHostIRFile))
595 Diags.Report(DiagID: diag::err_omp_host_ir_file_not_found)
596 << LangOpts.OMPHostIRFile;
597
598 // Select a specialized code generation class based on the target, if any.
599 // If it does not exist use the default implementation.
600 switch (getTriple().getArch()) {
601 case llvm::Triple::nvptx:
602 case llvm::Triple::nvptx64:
603 case llvm::Triple::amdgcn:
604 case llvm::Triple::spirv64:
605 assert(
606 getLangOpts().OpenMPIsTargetDevice &&
607 "OpenMP AMDGPU/NVPTX/SPIRV is only prepared to deal with device code.");
608 OpenMPRuntime.reset(p: new CGOpenMPRuntimeGPU(*this));
609 break;
610 default:
611 if (LangOpts.OpenMPSimd)
612 OpenMPRuntime.reset(p: new CGOpenMPSIMDRuntime(*this));
613 else
614 OpenMPRuntime.reset(p: new CGOpenMPRuntime(*this));
615 break;
616 }
617}
618
/// Construct the CUDA runtime helper; invoked from the constructor when
/// LangOpts.CUDA is set.
void CodeGenModule::createCUDARuntime() {
  CUDARuntime.reset(p: CreateNVCUDARuntime(CGM&: *this));
}
622
/// Construct the HLSL runtime helper; invoked from the constructor when
/// LangOpts.HLSL is set.
void CodeGenModule::createHLSLRuntime() {
  HLSLRuntime.reset(p: new CGHLSLRuntime(*this));
}
626
/// Register a constant that should replace the global named Name. The actual
/// replacement happens later, in applyReplacements().
void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
  Replacements[Name] = C;
}
630
/// Apply all replacements registered via addReplacement(): RAUW each old
/// function with its replacement, splicing a replacement function into the
/// old one's position so the module's function order is preserved.
void CodeGenModule::applyReplacements() {
  for (auto &I : Replacements) {
    StringRef MangledName = I.first;
    llvm::Constant *Replacement = I.second;
    llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName);
    if (!Entry)
      continue;
    auto *OldF = cast<llvm::Function>(Val: Entry);
    auto *NewF = dyn_cast<llvm::Function>(Val: Replacement);
    if (!NewF) {
      // The replacement may be an alias or a bitcast/GEP of a function; dig
      // out the underlying function (if any) for the re-ordering below.
      if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Val: Replacement)) {
        NewF = dyn_cast<llvm::Function>(Val: Alias->getAliasee());
      } else {
        auto *CE = cast<llvm::ConstantExpr>(Val: Replacement);
        assert(CE->getOpcode() == llvm::Instruction::BitCast ||
               CE->getOpcode() == llvm::Instruction::GetElementPtr);
        NewF = dyn_cast<llvm::Function>(Val: CE->getOperand(i_nocapture: 0));
      }
    }

    // Replace old with new, but keep the old order.
    OldF->replaceAllUsesWith(V: Replacement);
    if (NewF) {
      NewF->removeFromParent();
      OldF->getParent()->getFunctionList().insertAfter(where: OldF->getIterator(),
                                                       New: NewF);
    }
    OldF->eraseFromParent();
  }
}
661
662void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
663 GlobalValReplacements.push_back(Elt: std::make_pair(x&: GV, y&: C));
664}
665
666void CodeGenModule::applyGlobalValReplacements() {
667 for (auto &I : GlobalValReplacements) {
668 llvm::GlobalValue *GV = I.first;
669 llvm::Constant *C = I.second;
670
671 GV->replaceAllUsesWith(V: C);
672 GV->eraseFromParent();
673 }
674}
675
676// This is only used in aliases that we created and we know they have a
677// linear structure.
678static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) {
679 const llvm::Constant *C;
680 if (auto *GA = dyn_cast<llvm::GlobalAlias>(Val: GV))
681 C = GA->getAliasee();
682 else if (auto *GI = dyn_cast<llvm::GlobalIFunc>(Val: GV))
683 C = GI->getResolver();
684 else
685 return GV;
686
687 const auto *AliaseeGV = dyn_cast<llvm::GlobalValue>(Val: C->stripPointerCasts());
688 if (!AliaseeGV)
689 return nullptr;
690
691 const llvm::GlobalValue *FinalGV = AliaseeGV->getAliaseeObject();
692 if (FinalGV == GV)
693 return nullptr;
694
695 return FinalGV;
696}
697
/// Validate the target of an alias or ifunc: it must resolve (no cycles), be
/// a definition, not have common linkage on XCOFF, and for ifuncs be a
/// function whose resolver returns a pointer. On success GV is set to the
/// resolved aliasee and true is returned; otherwise a diagnostic is emitted
/// at Location and false is returned.
static bool checkAliasedGlobal(
    const ASTContext &Context, DiagnosticsEngine &Diags, SourceLocation Location,
    bool IsIFunc, const llvm::GlobalValue *Alias, const llvm::GlobalValue *&GV,
    const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames,
    SourceRange AliasRange) {
  // getAliasedGlobal returns null when the alias chain is cyclic.
  GV = getAliasedGlobal(GV: Alias);
  if (!GV) {
    Diags.Report(Loc: Location, DiagID: diag::err_cyclic_alias) << IsIFunc;
    return false;
  }

  // Aliases to common-linkage globals are rejected on XCOFF only.
  if (GV->hasCommonLinkage()) {
    const llvm::Triple &Triple = Context.getTargetInfo().getTriple();
    if (Triple.getObjectFormat() == llvm::Triple::XCOFF) {
      Diags.Report(Loc: Location, DiagID: diag::err_alias_to_common);
      return false;
    }
  }

  if (GV->isDeclaration()) {
    Diags.Report(Loc: Location, DiagID: diag::err_alias_to_undefined) << IsIFunc << IsIFunc;
    Diags.Report(Loc: Location, DiagID: diag::note_alias_requires_mangled_name)
        << IsIFunc << IsIFunc;
    // Provide a note if the given function is not found and exists as a
    // mangled name.
    for (const auto &[Decl, Name] : MangledDeclNames) {
      if (const auto *ND = dyn_cast<NamedDecl>(Val: Decl.getDecl())) {
        IdentifierInfo *II = ND->getIdentifier();
        if (II && II->getName() == GV->getName()) {
          // Suggest the mangled name with a fix-it that rewrites the
          // alias/ifunc attribute argument.
          Diags.Report(Loc: Location, DiagID: diag::note_alias_mangled_name_alternative)
              << Name
              << FixItHint::CreateReplacement(
                     RemoveRange: AliasRange,
                     Code: (Twine(IsIFunc ? "ifunc" : "alias") + "(\"" + Name + "\")")
                         .str());
        }
      }
    }
    return false;
  }

  if (IsIFunc) {
    // Check resolver function type.
    const auto *F = dyn_cast<llvm::Function>(Val: GV);
    if (!F) {
      Diags.Report(Loc: Location, DiagID: diag::err_alias_to_undefined)
          << IsIFunc << IsIFunc;
      return false;
    }

    llvm::FunctionType *FTy = F->getFunctionType();
    if (!FTy->getReturnType()->isPointerTy()) {
      Diags.Report(Loc: Location, DiagID: diag::err_ifunc_resolver_return);
      return false;
    }
  }

  return true;
}
757
758// Emit a warning if toc-data attribute is requested for global variables that
759// have aliases and remove the toc-data attribute.
760static void checkAliasForTocData(llvm::GlobalVariable *GVar,
761 const CodeGenOptions &CodeGenOpts,
762 DiagnosticsEngine &Diags,
763 SourceLocation Location) {
764 if (GVar->hasAttribute(Kind: "toc-data")) {
765 auto GVId = GVar->getName();
766 // Is this a global variable specified by the user as local?
767 if ((llvm::binary_search(Range: CodeGenOpts.TocDataVarsUserSpecified, Value&: GVId))) {
768 Diags.Report(Loc: Location, DiagID: diag::warn_toc_unsupported_type)
769 << GVId << "the variable has an alias";
770 }
771 llvm::AttributeSet CurrAttributes = GVar->getAttributes();
772 llvm::AttributeSet NewAttributes =
773 CurrAttributes.removeAttribute(C&: GVar->getContext(), Kind: "toc-data");
774 GVar->setAttributes(NewAttributes);
775 }
776}
777
/// Verify every alias and ifunc constructed during IRGen. Malformed ones are
/// diagnosed; aliases-to-weak-aliases are redirected to the ultimate aliasee
/// (with a warning); and if any error occurred, all aliases are removed from
/// the module so it stays well formed.
void CodeGenModule::checkAliases() {
  // Check if the constructed aliases are well formed. It is really unfortunate
  // that we have to do this in CodeGen, but we only construct mangled names
  // and aliases during codegen.
  bool Error = false;
  DiagnosticsEngine &Diags = getDiags();
  for (const GlobalDecl &GD : Aliases) {
    const auto *D = cast<ValueDecl>(Val: GD.getDecl());
    SourceLocation Location;
    SourceRange Range;
    bool IsIFunc = D->hasAttr<IFuncAttr>();
    // Diagnostics point at the alias/ifunc attribute itself.
    if (const Attr *A = D->getDefiningAttr()) {
      Location = A->getLocation();
      Range = A->getRange();
    } else
      llvm_unreachable("Not an alias or ifunc?");

    StringRef MangledName = getMangledName(GD);
    llvm::GlobalValue *Alias = GetGlobalValue(Ref: MangledName);
    const llvm::GlobalValue *GV = nullptr;
    if (!checkAliasedGlobal(Context: getContext(), Diags, Location, IsIFunc, Alias, GV,
                            MangledDeclNames, AliasRange: Range)) {
      Error = true;
      continue;
    }

    // On AIX, toc-data cannot be kept on an aliased variable; drop it.
    if (getContext().getTargetInfo().getTriple().isOSAIX())
      if (const llvm::GlobalVariable *GVar =
              dyn_cast<const llvm::GlobalVariable>(Val: GV))
        checkAliasForTocData(GVar: const_cast<llvm::GlobalVariable *>(GVar),
                             CodeGenOpts: getCodeGenOpts(), Diags, Location);

    llvm::Constant *Aliasee =
        IsIFunc ? cast<llvm::GlobalIFunc>(Val: Alias)->getResolver()
                : cast<llvm::GlobalAlias>(Val: Alias)->getAliasee();

    // Strip a possible constant-expression wrapper to reach the aliasee
    // global itself.
    llvm::GlobalValue *AliaseeGV;
    if (auto CE = dyn_cast<llvm::ConstantExpr>(Val: Aliasee))
      AliaseeGV = cast<llvm::GlobalValue>(Val: CE->getOperand(i_nocapture: 0));
    else
      AliaseeGV = cast<llvm::GlobalValue>(Val: Aliasee);

    // Warn when the alias's section attribute disagrees with the aliasee's.
    if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
      StringRef AliasSection = SA->getName();
      if (AliasSection != AliaseeGV->getSection())
        Diags.Report(Loc: SA->getLocation(), DiagID: diag::warn_alias_with_section)
            << AliasSection << IsIFunc << IsIFunc;
    }

    // We have to handle alias to weak aliases in here. LLVM itself disallows
    // this since the object semantics would not match the IL one. For
    // compatibility with gcc we implement it by just pointing the alias
    // to its aliasee's aliasee. We also warn, since the user is probably
    // expecting the link to be weak.
    if (auto *GA = dyn_cast<llvm::GlobalAlias>(Val: AliaseeGV)) {
      if (GA->isInterposable()) {
        Diags.Report(Loc: Location, DiagID: diag::warn_alias_to_weak_alias)
            << GV->getName() << GA->getName() << IsIFunc;
        Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
            C: GA->getAliasee(), Ty: Alias->getType());

        if (IsIFunc)
          cast<llvm::GlobalIFunc>(Val: Alias)->setResolver(Aliasee);
        else
          cast<llvm::GlobalAlias>(Val: Alias)->setAliasee(Aliasee);
      }
    }
    // ifunc resolvers are usually implemented to run before sanitizer
    // initialization. Disable instrumentation to prevent the ordering issue.
    if (IsIFunc)
      cast<llvm::Function>(Val: Aliasee)->addFnAttr(
          Kind: llvm::Attribute::DisableSanitizerInstrumentation);
  }
  if (!Error)
    return;

  // At least one alias was bad: delete all of them, redirecting any users to
  // poison so the module remains valid.
  for (const GlobalDecl &GD : Aliases) {
    StringRef MangledName = getMangledName(GD);
    llvm::GlobalValue *Alias = GetGlobalValue(Ref: MangledName);
    Alias->replaceAllUsesWith(V: llvm::PoisonValue::get(T: Alias->getType()));
    Alias->eraseFromParent();
  }
}
861
862void CodeGenModule::clear() {
863 DeferredDeclsToEmit.clear();
864 EmittedDeferredDecls.clear();
865 DeferredAnnotations.clear();
866 if (OpenMPRuntime)
867 OpenMPRuntime->clear();
868}
869
870void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
871 StringRef MainFile) {
872 if (!hasDiagnostics())
873 return;
874 if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
875 if (MainFile.empty())
876 MainFile = "<stdin>";
877 Diags.Report(DiagID: diag::warn_profile_data_unprofiled) << MainFile;
878 } else {
879 if (Mismatched > 0)
880 Diags.Report(DiagID: diag::warn_profile_data_out_of_date) << Visited << Mismatched;
881
882 if (Missing > 0)
883 Diags.Report(DiagID: diag::warn_profile_data_missing) << Visited << Missing;
884 }
885}
886
887static std::optional<llvm::GlobalValue::VisibilityTypes>
888getLLVMVisibility(clang::LangOptions::VisibilityFromDLLStorageClassKinds K) {
889 // Map to LLVM visibility.
890 switch (K) {
891 case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Keep:
892 return std::nullopt;
893 case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Default:
894 return llvm::GlobalValue::DefaultVisibility;
895 case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Hidden:
896 return llvm::GlobalValue::HiddenVisibility;
897 case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Protected:
898 return llvm::GlobalValue::ProtectedVisibility;
899 }
900 llvm_unreachable("unknown option value!");
901}
902
903static void
904setLLVMVisibility(llvm::GlobalValue &GV,
905 std::optional<llvm::GlobalValue::VisibilityTypes> V) {
906 if (!V)
907 return;
908
909 // Reset DSO locality before setting the visibility. This removes
910 // any effects that visibility options and annotations may have
911 // had on the DSO locality. Setting the visibility will implicitly set
912 // appropriate globals to DSO Local; however, this will be pessimistic
913 // w.r.t. to the normal compiler IRGen.
914 GV.setDSOLocal(false);
915 GV.setVisibility(*V);
916}
917
918static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
919 llvm::Module &M) {
920 if (!LO.VisibilityFromDLLStorageClass)
921 return;
922
923 std::optional<llvm::GlobalValue::VisibilityTypes> DLLExportVisibility =
924 getLLVMVisibility(K: LO.getDLLExportVisibility());
925
926 std::optional<llvm::GlobalValue::VisibilityTypes>
927 NoDLLStorageClassVisibility =
928 getLLVMVisibility(K: LO.getNoDLLStorageClassVisibility());
929
930 std::optional<llvm::GlobalValue::VisibilityTypes>
931 ExternDeclDLLImportVisibility =
932 getLLVMVisibility(K: LO.getExternDeclDLLImportVisibility());
933
934 std::optional<llvm::GlobalValue::VisibilityTypes>
935 ExternDeclNoDLLStorageClassVisibility =
936 getLLVMVisibility(K: LO.getExternDeclNoDLLStorageClassVisibility());
937
938 for (llvm::GlobalValue &GV : M.global_values()) {
939 if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
940 continue;
941
942 if (GV.isDeclarationForLinker())
943 setLLVMVisibility(GV, V: GV.getDLLStorageClass() ==
944 llvm::GlobalValue::DLLImportStorageClass
945 ? ExternDeclDLLImportVisibility
946 : ExternDeclNoDLLStorageClassVisibility);
947 else
948 setLLVMVisibility(GV, V: GV.getDLLStorageClass() ==
949 llvm::GlobalValue::DLLExportStorageClass
950 ? DLLExportVisibility
951 : NoDLLStorageClassVisibility);
952
953 GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
954 }
955}
956
957static bool isStackProtectorOn(const LangOptions &LangOpts,
958 const llvm::Triple &Triple,
959 clang::LangOptions::StackProtectorMode Mode) {
960 if (Triple.isGPU())
961 return false;
962 return LangOpts.getStackProtector() == Mode;
963}
964
965std::optional<llvm::Attribute::AttrKind>
966CodeGenModule::StackProtectorAttribute(const Decl *D) const {
967 if (D && D->hasAttr<NoStackProtectorAttr>())
968 ; // Do nothing.
969 else if (D && D->hasAttr<StrictGuardStackCheckAttr>() &&
970 isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPOn))
971 return llvm::Attribute::StackProtectStrong;
972 else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPOn))
973 return llvm::Attribute::StackProtect;
974 else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPStrong))
975 return llvm::Attribute::StackProtectStrong;
976 else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPReq))
977 return llvm::Attribute::StackProtectReq;
978 return std::nullopt;
979}
980
981void CodeGenModule::Release() {
982 Module *Primary = getContext().getCurrentNamedModule();
983 if (CXX20ModuleInits && Primary && !Primary->isHeaderLikeModule())
984 EmitModuleInitializers(Primary);
985 EmitDeferred();
986 DeferredDecls.insert_range(R&: EmittedDeferredDecls);
987 EmittedDeferredDecls.clear();
988 EmitVTablesOpportunistically();
989 applyGlobalValReplacements();
990 applyReplacements();
991 emitMultiVersionFunctions();
992 emitPFPFieldsWithEvaluatedOffset();
993
994 if (Context.getLangOpts().IncrementalExtensions &&
995 GlobalTopLevelStmtBlockInFlight.first) {
996 const TopLevelStmtDecl *TLSD = GlobalTopLevelStmtBlockInFlight.second;
997 GlobalTopLevelStmtBlockInFlight.first->FinishFunction(EndLoc: TLSD->getEndLoc());
998 GlobalTopLevelStmtBlockInFlight = {nullptr, nullptr};
999 }
1000
1001 // Module implementations are initialized the same way as a regular TU that
1002 // imports one or more modules.
1003 if (CXX20ModuleInits && Primary && Primary->isInterfaceOrPartition())
1004 EmitCXXModuleInitFunc(Primary);
1005 else
1006 EmitCXXGlobalInitFunc();
1007 EmitCXXGlobalCleanUpFunc();
1008 registerGlobalDtorsWithAtExit();
1009 EmitCXXThreadLocalInitFunc();
1010 if (ObjCRuntime)
1011 if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
1012 AddGlobalCtor(Ctor: ObjCInitFunction);
1013 if (Context.getLangOpts().CUDA && CUDARuntime) {
1014 if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule())
1015 AddGlobalCtor(Ctor: CudaCtorFunction);
1016 }
1017 if (OpenMPRuntime) {
1018 OpenMPRuntime->createOffloadEntriesAndInfoMetadata();
1019 OpenMPRuntime->clear();
1020 }
1021 if (PGOReader) {
1022 getModule().setProfileSummary(
1023 M: PGOReader->getSummary(/* UseCS */ false).getMD(Context&: VMContext),
1024 Kind: llvm::ProfileSummary::PSK_Instr);
1025 if (PGOStats.hasDiagnostics())
1026 PGOStats.reportDiagnostics(Diags&: getDiags(), MainFile: getCodeGenOpts().MainFileName);
1027 }
1028 llvm::stable_sort(Range&: GlobalCtors, C: [](const Structor &L, const Structor &R) {
1029 return L.LexOrder < R.LexOrder;
1030 });
1031 EmitCtorList(Fns&: GlobalCtors, GlobalName: "llvm.global_ctors");
1032 EmitCtorList(Fns&: GlobalDtors, GlobalName: "llvm.global_dtors");
1033 EmitGlobalAnnotations();
1034 EmitStaticExternCAliases();
1035 checkAliases();
1036 EmitDeferredUnusedCoverageMappings();
1037 CodeGenPGO(*this).setValueProfilingFlag(getModule());
1038 CodeGenPGO(*this).setProfileVersion(getModule());
1039 if (CoverageMapping)
1040 CoverageMapping->emit();
1041 if (CodeGenOpts.SanitizeCfiCrossDso) {
1042 CodeGenFunction(*this).EmitCfiCheckFail();
1043 CodeGenFunction(*this).EmitCfiCheckStub();
1044 }
1045 if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI))
1046 finalizeKCFITypes();
1047 emitAtAvailableLinkGuard();
1048 if (Context.getTargetInfo().getTriple().isWasm())
1049 EmitMainVoidAlias();
1050
1051 if (getTriple().isAMDGPU() ||
1052 (getTriple().isSPIRV() && getTriple().getVendor() == llvm::Triple::AMD)) {
1053 // Emit amdhsa_code_object_version module flag, which is code object version
1054 // times 100.
1055 if (getTarget().getTargetOpts().CodeObjectVersion !=
1056 llvm::CodeObjectVersionKind::COV_None) {
1057 getModule().addModuleFlag(Behavior: llvm::Module::Error,
1058 Key: "amdhsa_code_object_version",
1059 Val: getTarget().getTargetOpts().CodeObjectVersion);
1060 }
1061
1062 // Currently, "-mprintf-kind" option is only supported for HIP
1063 if (LangOpts.HIP) {
1064 auto *MDStr = llvm::MDString::get(
1065 Context&: getLLVMContext(), Str: (getTarget().getTargetOpts().AMDGPUPrintfKindVal ==
1066 TargetOptions::AMDGPUPrintfKind::Hostcall)
1067 ? "hostcall"
1068 : "buffered");
1069 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "amdgpu_printf_kind",
1070 Val: MDStr);
1071 }
1072 }
1073
1074 // Emit a global array containing all external kernels or device variables
1075 // used by host functions and mark it as used for CUDA/HIP. This is necessary
1076 // to get kernels or device variables in archives linked in even if these
1077 // kernels or device variables are only used in host functions.
1078 if (!Context.CUDAExternalDeviceDeclODRUsedByHost.empty()) {
1079 SmallVector<llvm::Constant *, 8> UsedArray;
1080 for (auto D : Context.CUDAExternalDeviceDeclODRUsedByHost) {
1081 GlobalDecl GD;
1082 if (auto *FD = dyn_cast<FunctionDecl>(Val: D))
1083 GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
1084 else
1085 GD = GlobalDecl(D);
1086 UsedArray.push_back(Elt: llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
1087 C: GetAddrOfGlobal(GD), Ty: Int8PtrTy));
1088 }
1089
1090 llvm::ArrayType *ATy = llvm::ArrayType::get(ElementType: Int8PtrTy, NumElements: UsedArray.size());
1091
1092 auto *GV = new llvm::GlobalVariable(
1093 getModule(), ATy, false, llvm::GlobalValue::InternalLinkage,
1094 llvm::ConstantArray::get(T: ATy, V: UsedArray), "__clang_gpu_used_external");
1095 addCompilerUsedGlobal(GV);
1096 }
1097 if (LangOpts.HIP) {
1098 // Emit a unique ID so that host and device binaries from the same
1099 // compilation unit can be associated.
1100 auto *GV = new llvm::GlobalVariable(
1101 getModule(), Int8Ty, false, llvm::GlobalValue::ExternalLinkage,
1102 llvm::Constant::getNullValue(Ty: Int8Ty),
1103 "__hip_cuid_" + getContext().getCUIDHash());
1104 getSanitizerMetadata()->disableSanitizerForGlobal(GV);
1105 addCompilerUsedGlobal(GV);
1106 }
1107 emitLLVMUsed();
1108 if (SanStats)
1109 SanStats->finish();
1110
1111 if (CodeGenOpts.Autolink &&
1112 (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
1113 EmitModuleLinkOptions();
1114 }
1115
1116 // On ELF we pass the dependent library specifiers directly to the linker
1117 // without manipulating them. This is in contrast to other platforms where
1118 // they are mapped to a specific linker option by the compiler. This
1119 // difference is a result of the greater variety of ELF linkers and the fact
1120 // that ELF linkers tend to handle libraries in a more complicated fashion
1121 // than on other platforms. This forces us to defer handling the dependent
1122 // libs to the linker.
1123 //
1124 // CUDA/HIP device and host libraries are different. Currently there is no
1125 // way to differentiate dependent libraries for host or device. Existing
1126 // usage of #pragma comment(lib, *) is intended for host libraries on
1127 // Windows. Therefore emit llvm.dependent-libraries only for host.
1128 if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
1129 auto *NMD = getModule().getOrInsertNamedMetadata(Name: "llvm.dependent-libraries");
1130 for (auto *MD : ELFDependentLibraries)
1131 NMD->addOperand(M: MD);
1132 }
1133
1134 if (CodeGenOpts.DwarfVersion) {
1135 getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "Dwarf Version",
1136 Val: CodeGenOpts.DwarfVersion);
1137 }
1138
1139 if (CodeGenOpts.Dwarf64)
1140 getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "DWARF64", Val: 1);
1141
1142 if (Context.getLangOpts().SemanticInterposition)
1143 // Require various optimization to respect semantic interposition.
1144 getModule().setSemanticInterposition(true);
1145
1146 if (CodeGenOpts.EmitCodeView) {
1147 // Indicate that we want CodeView in the metadata.
1148 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "CodeView", Val: 1);
1149 }
1150 if (CodeGenOpts.CodeViewGHash) {
1151 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "CodeViewGHash", Val: 1);
1152 }
1153 if (CodeGenOpts.ControlFlowGuard) {
1154 // Function ID tables and checks for Control Flow Guard.
1155 getModule().addModuleFlag(
1156 Behavior: llvm::Module::Warning, Key: "cfguard",
1157 Val: static_cast<unsigned>(llvm::ControlFlowGuardMode::Enabled));
1158 } else if (CodeGenOpts.ControlFlowGuardNoChecks) {
1159 // Function ID tables for Control Flow Guard.
1160 getModule().addModuleFlag(
1161 Behavior: llvm::Module::Warning, Key: "cfguard",
1162 Val: static_cast<unsigned>(llvm::ControlFlowGuardMode::TableOnly));
1163 }
1164 if (CodeGenOpts.EHContGuard) {
1165 // Function ID tables for EH Continuation Guard.
1166 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "ehcontguard", Val: 1);
1167 }
1168 if (Context.getLangOpts().Kernel) {
1169 // Note if we are compiling with /kernel.
1170 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "ms-kernel", Val: 1);
1171 }
1172 if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
1173 // We don't support LTO with 2 with different StrictVTablePointers
1174 // FIXME: we could support it by stripping all the information introduced
1175 // by StrictVTablePointers.
1176
1177 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "StrictVTablePointers",Val: 1);
1178
1179 llvm::Metadata *Ops[2] = {
1180 llvm::MDString::get(Context&: VMContext, Str: "StrictVTablePointers"),
1181 llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(
1182 Ty: llvm::Type::getInt32Ty(C&: VMContext), V: 1))};
1183
1184 getModule().addModuleFlag(Behavior: llvm::Module::Require,
1185 Key: "StrictVTablePointersRequirement",
1186 Val: llvm::MDNode::get(Context&: VMContext, MDs: Ops));
1187 }
1188 if (getModuleDebugInfo() || getTriple().isOSWindows())
1189 // We support a single version in the linked module. The LLVM
1190 // parser will drop debug info with a different version number
1191 // (and warn about it, too).
1192 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "Debug Info Version",
1193 Val: llvm::DEBUG_METADATA_VERSION);
1194
1195 // We need to record the widths of enums and wchar_t, so that we can generate
1196 // the correct build attributes in the ARM backend. wchar_size is also used by
1197 // TargetLibraryInfo.
1198 uint64_t WCharWidth =
1199 Context.getTypeSizeInChars(T: Context.getWideCharType()).getQuantity();
1200 if (WCharWidth != getTriple().getDefaultWCharSize())
1201 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "wchar_size", Val: WCharWidth);
1202
1203 if (getTriple().isOSzOS()) {
1204 getModule().addModuleFlag(Behavior: llvm::Module::Warning,
1205 Key: "zos_product_major_version",
1206 Val: uint32_t(CLANG_VERSION_MAJOR));
1207 getModule().addModuleFlag(Behavior: llvm::Module::Warning,
1208 Key: "zos_product_minor_version",
1209 Val: uint32_t(CLANG_VERSION_MINOR));
1210 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "zos_product_patchlevel",
1211 Val: uint32_t(CLANG_VERSION_PATCHLEVEL));
1212 std::string ProductId = getClangVendor() + "clang";
1213 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_product_id",
1214 Val: llvm::MDString::get(Context&: VMContext, Str: ProductId));
1215
1216 // Record the language because we need it for the PPA2.
1217 StringRef lang_str = languageToString(
1218 L: LangStandard::getLangStandardForKind(K: LangOpts.LangStd).Language);
1219 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_cu_language",
1220 Val: llvm::MDString::get(Context&: VMContext, Str: lang_str));
1221
1222 time_t TT = PreprocessorOpts.SourceDateEpoch
1223 ? *PreprocessorOpts.SourceDateEpoch
1224 : std::time(timer: nullptr);
1225 getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "zos_translation_time",
1226 Val: static_cast<uint64_t>(TT));
1227
1228 // Multiple modes will be supported here.
1229 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_le_char_mode",
1230 Val: llvm::MDString::get(Context&: VMContext, Str: "ascii"));
1231 }
1232
1233 llvm::Triple T = Context.getTargetInfo().getTriple();
1234 if (T.isARM() || T.isThumb()) {
1235 // The minimum width of an enum in bytes
1236 uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
1237 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "min_enum_size", Val: EnumWidth);
1238 }
1239
1240 if (T.isRISCV()) {
1241 StringRef ABIStr = Target.getABI();
1242 llvm::LLVMContext &Ctx = TheModule.getContext();
1243 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "target-abi",
1244 Val: llvm::MDString::get(Context&: Ctx, Str: ABIStr));
1245
1246 // Add the canonical ISA string as metadata so the backend can set the ELF
1247 // attributes correctly. We use AppendUnique so LTO will keep all of the
1248 // unique ISA strings that were linked together.
1249 const std::vector<std::string> &Features =
1250 getTarget().getTargetOpts().Features;
1251 auto ParseResult =
1252 llvm::RISCVISAInfo::parseFeatures(XLen: T.isRISCV64() ? 64 : 32, Features);
1253 if (!errorToBool(Err: ParseResult.takeError()))
1254 getModule().addModuleFlag(
1255 Behavior: llvm::Module::AppendUnique, Key: "riscv-isa",
1256 Val: llvm::MDNode::get(
1257 Context&: Ctx, MDs: llvm::MDString::get(Context&: Ctx, Str: (*ParseResult)->toString())));
1258 }
1259
1260 if (CodeGenOpts.SanitizeCfiCrossDso) {
1261 // Indicate that we want cross-DSO control flow integrity checks.
1262 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "Cross-DSO CFI", Val: 1);
1263 }
1264
1265 if (CodeGenOpts.WholeProgramVTables) {
1266 // Indicate whether VFE was enabled for this module, so that the
1267 // vcall_visibility metadata added under whole program vtables is handled
1268 // appropriately in the optimizer.
1269 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "Virtual Function Elim",
1270 Val: CodeGenOpts.VirtualFunctionElimination);
1271 }
1272
1273 if (LangOpts.Sanitize.has(K: SanitizerKind::CFIICall)) {
1274 getModule().addModuleFlag(Behavior: llvm::Module::Override,
1275 Key: "CFI Canonical Jump Tables",
1276 Val: CodeGenOpts.SanitizeCfiCanonicalJumpTables);
1277 }
1278
1279 if (CodeGenOpts.SanitizeCfiICallNormalizeIntegers) {
1280 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "cfi-normalize-integers",
1281 Val: 1);
1282 }
1283
1284 if (!CodeGenOpts.UniqueSourceFileIdentifier.empty()) {
1285 getModule().addModuleFlag(
1286 Behavior: llvm::Module::Append, Key: "Unique Source File Identifier",
1287 Val: llvm::MDTuple::get(
1288 Context&: TheModule.getContext(),
1289 MDs: llvm::MDString::get(Context&: TheModule.getContext(),
1290 Str: CodeGenOpts.UniqueSourceFileIdentifier)));
1291 }
1292
1293 if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI)) {
1294 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi", Val: 1);
1295 // KCFI assumes patchable-function-prefix is the same for all indirectly
1296 // called functions. Store the expected offset for code generation.
1297 if (CodeGenOpts.PatchableFunctionEntryOffset)
1298 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi-offset",
1299 Val: CodeGenOpts.PatchableFunctionEntryOffset);
1300 if (CodeGenOpts.SanitizeKcfiArity)
1301 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi-arity", Val: 1);
1302 // Store the hash algorithm choice for use in LLVM passes
1303 getModule().addModuleFlag(
1304 Behavior: llvm::Module::Override, Key: "kcfi-hash",
1305 Val: llvm::MDString::get(
1306 Context&: getLLVMContext(),
1307 Str: llvm::stringifyKCFIHashAlgorithm(Algorithm: CodeGenOpts.SanitizeKcfiHash)));
1308 }
1309
1310 if (CodeGenOpts.CFProtectionReturn &&
1311 Target.checkCFProtectionReturnSupported(Diags&: getDiags())) {
1312 // Indicate that we want to instrument return control flow protection.
1313 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "cf-protection-return",
1314 Val: 1);
1315 }
1316
1317 if (CodeGenOpts.CFProtectionBranch &&
1318 Target.checkCFProtectionBranchSupported(Diags&: getDiags())) {
1319 // Indicate that we want to instrument branch control flow protection.
1320 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "cf-protection-branch",
1321 Val: 1);
1322
1323 auto Scheme = CodeGenOpts.getCFBranchLabelScheme();
1324 if (Target.checkCFBranchLabelSchemeSupported(Scheme, Diags&: getDiags())) {
1325 if (Scheme == CFBranchLabelSchemeKind::Default)
1326 Scheme = Target.getDefaultCFBranchLabelScheme();
1327 getModule().addModuleFlag(
1328 Behavior: llvm::Module::Error, Key: "cf-branch-label-scheme",
1329 Val: llvm::MDString::get(Context&: getLLVMContext(),
1330 Str: getCFBranchLabelSchemeFlagVal(Scheme)));
1331 }
1332 }
1333
1334 if (CodeGenOpts.FunctionReturnThunks)
1335 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "function_return_thunk_extern", Val: 1);
1336
1337 if (CodeGenOpts.IndirectBranchCSPrefix)
1338 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "indirect_branch_cs_prefix", Val: 1);
1339
1340 // Add module metadata for return address signing (ignoring
1341 // non-leaf/all) and stack tagging. These are actually turned on by function
1342 // attributes, but we use module metadata to emit build attributes. This is
1343 // needed for LTO, where the function attributes are inside bitcode
1344 // serialised into a global variable by the time build attributes are
1345 // emitted, so we can't access them. LTO objects could be compiled with
1346 // different flags therefore module flags are set to "Min" behavior to achieve
1347 // the same end result of the normal build where e.g BTI is off if any object
1348 // doesn't support it.
1349 if (Context.getTargetInfo().hasFeature(Feature: "ptrauth") &&
1350 LangOpts.getSignReturnAddressScope() !=
1351 LangOptions::SignReturnAddressScopeKind::None)
1352 getModule().addModuleFlag(Behavior: llvm::Module::Override,
1353 Key: "sign-return-address-buildattr", Val: 1);
1354 if (LangOpts.Sanitize.has(K: SanitizerKind::MemtagStack))
1355 getModule().addModuleFlag(Behavior: llvm::Module::Override,
1356 Key: "tag-stack-memory-buildattr", Val: 1);
1357
1358 if (T.isARM() || T.isThumb() || T.isAArch64()) {
1359 // Previously 1 is used and meant for the backed to derive the function
1360 // attribute form it. 2 now means function attributes already set for all
1361 // functions in this module, so no need to propagate those from the module
1362 // flag. Value is only used in case of LTO module merge because the backend
1363 // will see all required function attribute set already. Value is used
1364 // before modules got merged. Any posive value means the feature is active
1365 // and required binary markings need to be emit accordingly.
1366 if (LangOpts.BranchTargetEnforcement)
1367 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "branch-target-enforcement",
1368 Val: 2);
1369 if (LangOpts.BranchProtectionPAuthLR)
1370 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "branch-protection-pauth-lr",
1371 Val: 2);
1372 if (LangOpts.GuardedControlStack)
1373 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "guarded-control-stack", Val: 2);
1374 if (LangOpts.hasSignReturnAddress())
1375 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "sign-return-address", Val: 2);
1376 if (LangOpts.isSignReturnAddressScopeAll())
1377 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "sign-return-address-all",
1378 Val: 2);
1379 if (!LangOpts.isSignReturnAddressWithAKey())
1380 getModule().addModuleFlag(Behavior: llvm::Module::Min,
1381 Key: "sign-return-address-with-bkey", Val: 2);
1382
1383 if (LangOpts.PointerAuthELFGOT)
1384 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "ptrauth-elf-got", Val: 1);
1385
1386 if (getTriple().isOSLinux()) {
1387 if (LangOpts.PointerAuthCalls)
1388 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "ptrauth-sign-personality",
1389 Val: 1);
1390 assert(getTriple().isOSBinFormatELF());
1391 using namespace llvm::ELF;
1392 uint64_t PAuthABIVersion =
1393 (LangOpts.PointerAuthIntrinsics
1394 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INTRINSICS) |
1395 (LangOpts.PointerAuthCalls
1396 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_CALLS) |
1397 (LangOpts.PointerAuthReturns
1398 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_RETURNS) |
1399 (LangOpts.PointerAuthAuthTraps
1400 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_AUTHTRAPS) |
1401 (LangOpts.PointerAuthVTPtrAddressDiscrimination
1402 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRADDRDISCR) |
1403 (LangOpts.PointerAuthVTPtrTypeDiscrimination
1404 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRTYPEDISCR) |
1405 (LangOpts.PointerAuthInitFini
1406 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI) |
1407 (LangOpts.PointerAuthInitFiniAddressDiscrimination
1408 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINIADDRDISC) |
1409 (LangOpts.PointerAuthELFGOT
1410 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT) |
1411 (LangOpts.PointerAuthIndirectGotos
1412 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOTOS) |
1413 (LangOpts.PointerAuthTypeInfoVTPtrDiscrimination
1414 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_TYPEINFOVPTRDISCR) |
1415 (LangOpts.PointerAuthFunctionTypeDiscrimination
1416 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_FPTRTYPEDISCR);
1417 static_assert(AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_FPTRTYPEDISCR ==
1418 AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_LAST,
1419 "Update when new enum items are defined");
1420 if (PAuthABIVersion != 0) {
1421 getModule().addModuleFlag(Behavior: llvm::Module::Error,
1422 Key: "aarch64-elf-pauthabi-platform",
1423 Val: AARCH64_PAUTH_PLATFORM_LLVM_LINUX);
1424 getModule().addModuleFlag(Behavior: llvm::Module::Error,
1425 Key: "aarch64-elf-pauthabi-version",
1426 Val: PAuthABIVersion);
1427 }
1428 }
1429 }
1430 if ((T.isARM() || T.isThumb()) && getTriple().isTargetAEABI() &&
1431 getTriple().isOSBinFormatELF()) {
1432 uint32_t TagVal = 0;
1433 llvm::Module::ModFlagBehavior DenormalTagBehavior = llvm::Module::Max;
1434 if (getCodeGenOpts().FPDenormalMode ==
1435 llvm::DenormalMode::getPositiveZero()) {
1436 TagVal = llvm::ARMBuildAttrs::PositiveZero;
1437 } else if (getCodeGenOpts().FPDenormalMode ==
1438 llvm::DenormalMode::getIEEE()) {
1439 TagVal = llvm::ARMBuildAttrs::IEEEDenormals;
1440 DenormalTagBehavior = llvm::Module::Override;
1441 } else if (getCodeGenOpts().FPDenormalMode ==
1442 llvm::DenormalMode::getPreserveSign()) {
1443 TagVal = llvm::ARMBuildAttrs::PreserveFPSign;
1444 }
1445 getModule().addModuleFlag(Behavior: DenormalTagBehavior, Key: "arm-eabi-fp-denormal",
1446 Val: TagVal);
1447
1448 if (getLangOpts().getDefaultExceptionMode() !=
1449 LangOptions::FPExceptionModeKind::FPE_Ignore)
1450 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "arm-eabi-fp-exceptions",
1451 Val: llvm::ARMBuildAttrs::Allowed);
1452
1453 if (getLangOpts().NoHonorNaNs && getLangOpts().NoHonorInfs)
1454 TagVal = llvm::ARMBuildAttrs::AllowIEEENormal;
1455 else
1456 TagVal = llvm::ARMBuildAttrs::AllowIEEE754;
1457 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "arm-eabi-fp-number-model",
1458 Val: TagVal);
1459 }
1460
1461 if (CodeGenOpts.StackClashProtector)
1462 getModule().addModuleFlag(
1463 Behavior: llvm::Module::Override, Key: "probe-stack",
1464 Val: llvm::MDString::get(Context&: TheModule.getContext(), Str: "inline-asm"));
1465
1466 if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096)
1467 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "stack-probe-size",
1468 Val: CodeGenOpts.StackProbeSize);
1469
1470 if (!CodeGenOpts.MemoryProfileOutput.empty()) {
1471 llvm::LLVMContext &Ctx = TheModule.getContext();
1472 getModule().addModuleFlag(
1473 Behavior: llvm::Module::Error, Key: "MemProfProfileFilename",
1474 Val: llvm::MDString::get(Context&: Ctx, Str: CodeGenOpts.MemoryProfileOutput));
1475 }
1476
1477 if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
1478 // Indicate whether __nvvm_reflect should be configured to flush denormal
1479 // floating point values to 0. (This corresponds to its "__CUDA_FTZ"
1480 // property.)
1481 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "nvvm-reflect-ftz",
1482 Val: CodeGenOpts.FP32DenormalMode.Output !=
1483 llvm::DenormalMode::IEEE);
1484 }
1485
1486 if (LangOpts.EHAsynch)
1487 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "eh-asynch", Val: 1);
1488
1489 // Emit Import Call section.
1490 if (CodeGenOpts.ImportCallOptimization)
1491 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "import-call-optimization",
1492 Val: 1);
1493
1494 // Enable unwind v2 (epilog).
1495 if (CodeGenOpts.getWinX64EHUnwindV2() != llvm::WinX64EHUnwindV2Mode::Disabled)
1496 getModule().addModuleFlag(
1497 Behavior: llvm::Module::Warning, Key: "winx64-eh-unwindv2",
1498 Val: static_cast<unsigned>(CodeGenOpts.getWinX64EHUnwindV2()));
1499
1500 // Indicate whether this Module was compiled with -fopenmp
1501 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
1502 getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "openmp", Val: LangOpts.OpenMP);
1503 if (getLangOpts().OpenMPIsTargetDevice)
1504 getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "openmp-device",
1505 Val: LangOpts.OpenMP);
1506
1507 // Emit OpenCL specific module metadata: OpenCL/SPIR version.
1508 if (LangOpts.OpenCL || (LangOpts.CUDAIsDevice && getTriple().isSPIRV())) {
1509 EmitOpenCLMetadata();
1510 // Emit SPIR version.
1511 if (getTriple().isSPIR()) {
1512 // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
1513 // opencl.spir.version named metadata.
1514 // C++ for OpenCL has a distinct mapping for version compatibility with
1515 // OpenCL.
1516 auto Version = LangOpts.getOpenCLCompatibleVersion();
1517 llvm::Metadata *SPIRVerElts[] = {
1518 llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(
1519 Ty: Int32Ty, V: Version / 100)),
1520 llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(
1521 Ty: Int32Ty, V: (Version / 100 > 1) ? 0 : 2))};
1522 llvm::NamedMDNode *SPIRVerMD =
1523 TheModule.getOrInsertNamedMetadata(Name: "opencl.spir.version");
1524 llvm::LLVMContext &Ctx = TheModule.getContext();
1525 SPIRVerMD->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: SPIRVerElts));
1526 }
1527 }
1528
1529 // HLSL related end of code gen work items.
1530 if (LangOpts.HLSL)
1531 getHLSLRuntime().finishCodeGen();
1532
1533 if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
1534 assert(PLevel < 3 && "Invalid PIC Level");
1535 getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
1536 if (Context.getLangOpts().PIE)
1537 getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
1538 }
1539
1540 if (getCodeGenOpts().CodeModel.size() > 0) {
1541 unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
1542 .Case(S: "tiny", Value: llvm::CodeModel::Tiny)
1543 .Case(S: "small", Value: llvm::CodeModel::Small)
1544 .Case(S: "kernel", Value: llvm::CodeModel::Kernel)
1545 .Case(S: "medium", Value: llvm::CodeModel::Medium)
1546 .Case(S: "large", Value: llvm::CodeModel::Large)
1547 .Default(Value: ~0u);
1548 if (CM != ~0u) {
1549 llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
1550 getModule().setCodeModel(codeModel);
1551
1552 if ((CM == llvm::CodeModel::Medium || CM == llvm::CodeModel::Large) &&
1553 Context.getTargetInfo().getTriple().getArch() ==
1554 llvm::Triple::x86_64) {
1555 getModule().setLargeDataThreshold(getCodeGenOpts().LargeDataThreshold);
1556 }
1557 }
1558 }
1559
1560 if (CodeGenOpts.NoPLT)
1561 getModule().setRtLibUseGOT();
1562 if (getTriple().isOSBinFormatELF() &&
1563 CodeGenOpts.DirectAccessExternalData !=
1564 getModule().getDirectAccessExternalData()) {
1565 getModule().setDirectAccessExternalData(
1566 CodeGenOpts.DirectAccessExternalData);
1567 }
1568 if (CodeGenOpts.UnwindTables)
1569 getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables));
1570
1571 switch (CodeGenOpts.getFramePointer()) {
1572 case CodeGenOptions::FramePointerKind::None:
1573 // 0 ("none") is the default.
1574 break;
1575 case CodeGenOptions::FramePointerKind::Reserved:
1576 getModule().setFramePointer(llvm::FramePointerKind::Reserved);
1577 break;
1578 case CodeGenOptions::FramePointerKind::NonLeafNoReserve:
1579 getModule().setFramePointer(llvm::FramePointerKind::NonLeafNoReserve);
1580 break;
1581 case CodeGenOptions::FramePointerKind::NonLeaf:
1582 getModule().setFramePointer(llvm::FramePointerKind::NonLeaf);
1583 break;
1584 case CodeGenOptions::FramePointerKind::All:
1585 getModule().setFramePointer(llvm::FramePointerKind::All);
1586 break;
1587 }
1588
1589 SimplifyPersonality();
1590
1591 if (getCodeGenOpts().EmitDeclMetadata)
1592 EmitDeclMetadata();
1593
1594 if (getCodeGenOpts().CoverageNotesFile.size() ||
1595 getCodeGenOpts().CoverageDataFile.size())
1596 EmitCoverageFile();
1597
1598 if (CGDebugInfo *DI = getModuleDebugInfo())
1599 DI->finalize();
1600
1601 if (getCodeGenOpts().EmitVersionIdentMetadata)
1602 EmitVersionIdentMetadata();
1603
1604 if (!getCodeGenOpts().RecordCommandLine.empty())
1605 EmitCommandLineMetadata();
1606
1607 if (!getCodeGenOpts().StackProtectorGuard.empty())
1608 getModule().setStackProtectorGuard(getCodeGenOpts().StackProtectorGuard);
1609 if (!getCodeGenOpts().StackProtectorGuardReg.empty())
1610 getModule().setStackProtectorGuardReg(
1611 getCodeGenOpts().StackProtectorGuardReg);
1612 if (!getCodeGenOpts().StackProtectorGuardSymbol.empty())
1613 getModule().setStackProtectorGuardSymbol(
1614 getCodeGenOpts().StackProtectorGuardSymbol);
1615 if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX)
1616 getModule().setStackProtectorGuardOffset(
1617 getCodeGenOpts().StackProtectorGuardOffset);
1618 if (getCodeGenOpts().StackAlignment)
1619 getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment);
1620 if (getCodeGenOpts().SkipRaxSetup)
1621 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "SkipRaxSetup", Val: 1);
1622 if (getLangOpts().RegCall4)
1623 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "RegCallv4", Val: 1);
1624
1625 if (getContext().getTargetInfo().getMaxTLSAlign())
1626 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "MaxTLSAlign",
1627 Val: getContext().getTargetInfo().getMaxTLSAlign());
1628
1629 getTargetCodeGenInfo().emitTargetGlobals(CGM&: *this);
1630
1631 getTargetCodeGenInfo().emitTargetMetadata(CGM&: *this, MangledDeclNames);
1632
1633 EmitBackendOptionsMetadata(CodeGenOpts: getCodeGenOpts());
1634
1635 // If there is device offloading code embed it in the host now.
1636 EmbedObject(M: &getModule(), CGOpts: CodeGenOpts, VFS&: *getFileSystem(), Diags&: getDiags());
1637
1638 // Set visibility from DLL storage class
1639 // We do this at the end of LLVM IR generation; after any operation
1640 // that might affect the DLL storage class or the visibility, and
1641 // before anything that might act on these.
1642 setVisibilityFromDLLStorageClass(LO: LangOpts, M&: getModule());
1643
1644 // Check the tail call symbols are truly undefined.
1645 if (!MustTailCallUndefinedGlobals.empty()) {
1646 if (getTriple().isPPC()) {
1647 for (auto &I : MustTailCallUndefinedGlobals) {
1648 if (!I.first->isDefined())
1649 getDiags().Report(Loc: I.second, DiagID: diag::err_ppc_impossible_musttail) << 2;
1650 else {
1651 StringRef MangledName = getMangledName(GD: GlobalDecl(I.first));
1652 llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName);
1653 if (!Entry || Entry->isWeakForLinker() ||
1654 Entry->isDeclarationForLinker())
1655 getDiags().Report(Loc: I.second, DiagID: diag::err_ppc_impossible_musttail) << 2;
1656 }
1657 }
1658 } else if (getTriple().isMIPS()) {
1659 for (auto &I : MustTailCallUndefinedGlobals) {
1660 const FunctionDecl *FD = I.first;
1661 StringRef MangledName = getMangledName(GD: GlobalDecl(FD));
1662 llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName);
1663
1664 if (!Entry)
1665 continue;
1666
1667 bool CalleeIsLocal;
1668 if (Entry->isDeclarationForLinker()) {
1669 // For declarations, only visibility can indicate locality.
1670 CalleeIsLocal =
1671 Entry->hasHiddenVisibility() || Entry->hasProtectedVisibility();
1672 } else {
1673 CalleeIsLocal = Entry->isDSOLocal();
1674 }
1675
1676 if (!CalleeIsLocal)
1677 getDiags().Report(Loc: I.second, DiagID: diag::err_mips_impossible_musttail) << 1;
1678 }
1679 }
1680 }
1681
1682 // Emit `!llvm.errno.tbaa`, a module-level metadata that specifies the TBAA
1683 // for an int access. This allows LLVM to reason about what memory can be
1684 // accessed by certain library calls that only touch errno.
1685 if (TBAA) {
1686 TBAAAccessInfo TBAAInfo = getTBAAAccessInfo(AccessType: Context.IntTy);
1687 if (llvm::MDNode *IntegerNode = getTBAAAccessTagInfo(Info: TBAAInfo)) {
1688 auto *ErrnoTBAAMD = TheModule.getOrInsertNamedMetadata(Name: ErrnoTBAAMDName);
1689 ErrnoTBAAMD->addOperand(M: IntegerNode);
1690 }
1691 }
1692}
1693
1694void CodeGenModule::EmitOpenCLMetadata() {
1695 // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
1696 // opencl.ocl.version named metadata node.
1697 // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL.
1698 auto CLVersion = LangOpts.getOpenCLCompatibleVersion();
1699
1700 auto EmitVersion = [this](StringRef MDName, int Version) {
1701 llvm::Metadata *OCLVerElts[] = {
1702 llvm::ConstantAsMetadata::get(
1703 C: llvm::ConstantInt::get(Ty: Int32Ty, V: Version / 100)),
1704 llvm::ConstantAsMetadata::get(
1705 C: llvm::ConstantInt::get(Ty: Int32Ty, V: (Version % 100) / 10))};
1706 llvm::NamedMDNode *OCLVerMD = TheModule.getOrInsertNamedMetadata(Name: MDName);
1707 llvm::LLVMContext &Ctx = TheModule.getContext();
1708 OCLVerMD->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: OCLVerElts));
1709 };
1710
1711 EmitVersion("opencl.ocl.version", CLVersion);
1712 if (LangOpts.OpenCLCPlusPlus) {
1713 // In addition to the OpenCL compatible version, emit the C++ version.
1714 EmitVersion("opencl.cxx.version", LangOpts.OpenCLCPlusPlusVersion);
1715 }
1716}
1717
1718void CodeGenModule::EmitBackendOptionsMetadata(
1719 const CodeGenOptions &CodeGenOpts) {
1720 if (getTriple().isRISCV()) {
1721 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "SmallDataLimit",
1722 Val: CodeGenOpts.SmallDataLimit);
1723 }
1724
1725 // Set AllocToken configuration for backend pipeline.
1726 if (LangOpts.AllocTokenMode) {
1727 StringRef S = llvm::getAllocTokenModeAsString(Mode: *LangOpts.AllocTokenMode);
1728 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "alloc-token-mode",
1729 Val: llvm::MDString::get(Context&: VMContext, Str: S));
1730 }
1731 if (LangOpts.AllocTokenMax)
1732 getModule().addModuleFlag(
1733 Behavior: llvm::Module::Error, Key: "alloc-token-max",
1734 Val: llvm::ConstantInt::get(Ty: llvm::Type::getInt64Ty(C&: VMContext),
1735 V: *LangOpts.AllocTokenMax));
1736 if (CodeGenOpts.SanitizeAllocTokenFastABI)
1737 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "alloc-token-fast-abi", Val: 1);
1738 if (CodeGenOpts.SanitizeAllocTokenExtended)
1739 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "alloc-token-extended", Val: 1);
1740}
1741
1742void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
1743 // Make sure that this type is translated.
1744 getTypes().UpdateCompletedType(TD);
1745}
1746
1747void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
1748 // Make sure that this type is translated.
1749 getTypes().RefreshTypeCacheForClass(RD);
1750}
1751
1752llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
1753 if (!TBAA)
1754 return nullptr;
1755 return TBAA->getTypeInfo(QTy);
1756}
1757
1758TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
1759 if (!TBAA)
1760 return TBAAAccessInfo();
1761 if (getLangOpts().CUDAIsDevice) {
1762 // As CUDA builtin surface/texture types are replaced, skip generating TBAA
1763 // access info.
1764 if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
1765 if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
1766 nullptr)
1767 return TBAAAccessInfo();
1768 } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
1769 if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
1770 nullptr)
1771 return TBAAAccessInfo();
1772 }
1773 }
1774 return TBAA->getAccessInfo(AccessType);
1775}
1776
1777TBAAAccessInfo
1778CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
1779 if (!TBAA)
1780 return TBAAAccessInfo();
1781 return TBAA->getVTablePtrAccessInfo(VTablePtrType);
1782}
1783
1784llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
1785 if (!TBAA)
1786 return nullptr;
1787 return TBAA->getTBAAStructInfo(QTy);
1788}
1789
1790llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) {
1791 if (!TBAA)
1792 return nullptr;
1793 return TBAA->getBaseTypeInfo(QTy);
1794}
1795
1796llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) {
1797 if (!TBAA)
1798 return nullptr;
1799 return TBAA->getAccessTagInfo(Info);
1800}
1801
1802TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
1803 TBAAAccessInfo TargetInfo) {
1804 if (!TBAA)
1805 return TBAAAccessInfo();
1806 return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo);
1807}
1808
1809TBAAAccessInfo
1810CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
1811 TBAAAccessInfo InfoB) {
1812 if (!TBAA)
1813 return TBAAAccessInfo();
1814 return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
1815}
1816
1817TBAAAccessInfo
1818CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
1819 TBAAAccessInfo SrcInfo) {
1820 if (!TBAA)
1821 return TBAAAccessInfo();
1822 return TBAA->mergeTBAAInfoForConditionalOperator(InfoA: DestInfo, InfoB: SrcInfo);
1823}
1824
1825void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
1826 TBAAAccessInfo TBAAInfo) {
1827 if (llvm::MDNode *Tag = getTBAAAccessTagInfo(Info: TBAAInfo))
1828 Inst->setMetadata(KindID: llvm::LLVMContext::MD_tbaa, Node: Tag);
1829}
1830
1831void CodeGenModule::DecorateInstructionWithInvariantGroup(
1832 llvm::Instruction *I, const CXXRecordDecl *RD) {
1833 I->setMetadata(KindID: llvm::LLVMContext::MD_invariant_group,
1834 Node: llvm::MDNode::get(Context&: getLLVMContext(), MDs: {}));
1835}
1836
1837void CodeGenModule::Error(SourceLocation loc, StringRef message) {
1838 unsigned diagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, FormatString: "%0");
1839 getDiags().Report(Loc: Context.getFullLoc(Loc: loc), DiagID: diagID) << message;
1840}
1841
1842/// ErrorUnsupported - Print out an error that codegen doesn't support the
1843/// specified stmt yet.
1844void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
1845 std::string Msg = Type;
1846 getDiags().Report(Loc: Context.getFullLoc(Loc: S->getBeginLoc()),
1847 DiagID: diag::err_codegen_unsupported)
1848 << Msg << S->getSourceRange();
1849}
1850
1851void CodeGenModule::ErrorUnsupported(const Stmt *S, llvm::StringRef Type) {
1852 getDiags().Report(Loc: Context.getFullLoc(Loc: S->getBeginLoc()),
1853 DiagID: diag::err_codegen_unsupported)
1854 << Type << S->getSourceRange();
1855}
1856
1857/// ErrorUnsupported - Print out an error that codegen doesn't support the
1858/// specified decl yet.
1859void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
1860 std::string Msg = Type;
1861 getDiags().Report(Loc: Context.getFullLoc(Loc: D->getLocation()),
1862 DiagID: diag::err_codegen_unsupported)
1863 << Msg;
1864}
1865
1866void CodeGenModule::runWithSufficientStackSpace(SourceLocation Loc,
1867 llvm::function_ref<void()> Fn) {
1868 StackHandler.runWithSufficientStackSpace(Loc, Fn);
1869}
1870
1871llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
1872 return llvm::ConstantInt::get(Ty: SizeTy, V: size.getQuantity());
1873}
1874
/// Compute and apply the IR-level visibility for \p GV from the declaration's
/// linkage/visibility plus OpenMP-offload, HLSL, and DLL-storage rules.
/// Earlier cases return immediately, so the order of checks is significant.
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
                                        const NamedDecl *D) const {
  // Internal definitions always have default visibility.
  if (GV->hasLocalLinkage()) {
    GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
    return;
  }
  // No declaration to consult (e.g. a compiler-synthesized global).
  if (!D)
    return;

  // Set visibility for definitions, and for declarations if requested globally
  // or set explicitly.
  LinkageInfo LV = D->getLinkageAndVisibility();

  // OpenMP declare target variables must be visible to the host so they can
  // be registered. We require protected visibility unless the variable has
  // the DT_nohost modifier and does not need to be registered.
  if (Context.getLangOpts().OpenMP &&
      Context.getLangOpts().OpenMPIsTargetDevice && isa<VarDecl>(Val: D) &&
      D->hasAttr<OMPDeclareTargetDeclAttr>() &&
      D->getAttr<OMPDeclareTargetDeclAttr>()->getDevType() !=
          OMPDeclareTargetDeclAttr::DT_NoHost &&
      LV.getVisibility() == HiddenVisibility) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    return;
  }

  // HLSL: everything outside an export block is hidden.
  if (Context.getLangOpts().HLSL && !D->isInExportDeclContext()) {
    GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
    return;
  }

  if (GV->hasDLLExportStorageClass() || GV->hasDLLImportStorageClass()) {
    // Reject incompatible dllstorage and visibility annotations.
    if (!LV.isVisibilityExplicit())
      return;
    if (GV->hasDLLExportStorageClass()) {
      if (LV.getVisibility() == HiddenVisibility)
        getDiags().Report(Loc: D->getLocation(),
                          DiagID: diag::err_hidden_visibility_dllexport);
    } else if (LV.getVisibility() != DefaultVisibility) {
      getDiags().Report(Loc: D->getLocation(),
                        DiagID: diag::err_non_default_visibility_dllimport);
    }
    // Visibility is implied by the DLL storage class; don't set it below.
    return;
  }

  // Apply the computed visibility for definitions, and for declarations when
  // it is explicit or -fvisibility-from-dllstorageclass-style globals ask for
  // it via SetVisibilityForExternDecls.
  if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
      !GV->isDeclarationForLinker())
    GV->setVisibility(GetLLVMVisibility(V: LV.getVisibility()));
}
1926
/// Decide whether \p GV may be assumed to resolve within this linkage unit
/// (dso_local). The answer depends on linkage, visibility, DLL storage, the
/// object-file format (COFF/ELF/Mach-O), and the relocation model; checks are
/// ordered from strongest guarantees to weakest heuristics.
static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
                                 llvm::GlobalValue *GV) {
  // Local linkage can never be preempted.
  if (GV->hasLocalLinkage())
    return true;

  // Non-default visibility prevents interposition; extern_weak is excluded
  // because it may still resolve to zero (i.e. outside any DSO).
  if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
    return true;

  // DLLImport explicitly marks the GV as external.
  if (GV->hasDLLImportStorageClass())
    return false;

  const llvm::Triple &TT = CGM.getTriple();
  const auto &CGOpts = CGM.getCodeGenOpts();
  if (TT.isOSCygMing()) {
    // In MinGW, variables without DLLImport can still be automatically
    // imported from a DLL by the linker; don't mark variables that
    // potentially could come from another DLL as DSO local.

    // With EmulatedTLS, TLS variables can be autoimported from other DLLs
    // (and this actually happens in the public interface of libstdc++), so
    // such variables can't be marked as DSO local. (Native TLS variables
    // can't be dllimported at all, though.)
    if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(Val: GV) &&
        (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS) &&
        CGOpts.AutoImport)
      return false;
  }

  // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
  // remain unresolved in the link, they can be resolved to zero, which is
  // outside the current DSO.
  if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
    return false;

  // Every other GV is local on COFF.
  // Make an exception for windows OS in the triple: Some firmware builds use
  // *-win32-macho triples. This (accidentally?) produced windows relocations
  // without GOT tables in older clang versions; Keep this behaviour.
  // FIXME: even thread local variables?
  if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
    return true;

  // Only handle COFF and ELF for now.
  if (!TT.isOSBinFormatELF())
    return false;

  // If this is not an executable, don't assume anything is local.
  llvm::Reloc::Model RM = CGOpts.RelocationModel;
  const auto &LOpts = CGM.getLangOpts();
  if (RM != llvm::Reloc::Static && !LOpts.PIE) {
    // On ELF, if -fno-semantic-interposition is specified and the target
    // supports local aliases, there will be neither CC1
    // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
    // dso_local on the function if using a local alias is preferable (can avoid
    // PLT indirection).
    if (!(isa<llvm::Function>(Val: GV) && GV->canBenefitFromLocalAlias()))
      return false;
    return !(CGM.getLangOpts().SemanticInterposition ||
             CGM.getLangOpts().HalfNoSemanticInterposition);
  }

  // A definition cannot be preempted from an executable.
  if (!GV->isDeclarationForLinker())
    return true;

  // Most PIC code sequences that assume that a symbol is local cannot produce a
  // 0 if it turns out the symbol is undefined. While this is ABI and relocation
  // depended, it seems worth it to handle it here.
  if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
    return false;

  // PowerPC64 prefers TOC indirection to avoid copy relocations.
  if (TT.isPPC64())
    return false;

  if (CGOpts.DirectAccessExternalData) {
    // If -fdirect-access-external-data (default for -fno-pic), set dso_local
    // for non-thread-local variables. If the symbol is not defined in the
    // executable, a copy relocation will be needed at link time. dso_local is
    // excluded for thread-local variables because they generally don't support
    // copy relocations.
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Val: GV))
      if (!Var->isThreadLocal())
        return true;

    // -fno-pic sets dso_local on a function declaration to allow direct
    // accesses when taking its address (similar to a data symbol). If the
    // function is not defined in the executable, a canonical PLT entry will be
    // needed at link time. -fno-direct-access-external-data can avoid the
    // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as
    // it could just cause trouble without providing perceptible benefits.
    if (isa<llvm::Function>(Val: GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
      return true;
  }

  // If we can use copy relocations we can assume it is local.

  // Otherwise don't assume it is local.
  return false;
}
2028
2029void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
2030 GV->setDSOLocal(shouldAssumeDSOLocal(CGM: *this, GV));
2031}
2032
2033void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
2034 GlobalDecl GD) const {
2035 const auto *D = dyn_cast<NamedDecl>(Val: GD.getDecl());
2036 // C++ destructors have a few C++ ABI specific special cases.
2037 if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(Val: D)) {
2038 getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, DT: GD.getDtorType());
2039 return;
2040 }
2041 setDLLImportDLLExport(GV, D);
2042}
2043
2044void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
2045 const NamedDecl *D) const {
2046 if (D && D->isExternallyVisible()) {
2047 if (D->hasAttr<DLLImportAttr>())
2048 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
2049 else if ((D->hasAttr<DLLExportAttr>() ||
2050 shouldMapVisibilityToDLLExport(D)) &&
2051 !GV->isDeclarationForLinker())
2052 GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
2053 }
2054}
2055
2056void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
2057 GlobalDecl GD) const {
2058 setDLLImportDLLExport(GV, GD);
2059 setGVPropertiesAux(GV, D: dyn_cast<NamedDecl>(Val: GD.getDecl()));
2060}
2061
2062void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
2063 const NamedDecl *D) const {
2064 setDLLImportDLLExport(GV, D);
2065 setGVPropertiesAux(GV, D);
2066}
2067
2068void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
2069 const NamedDecl *D) const {
2070 setGlobalVisibility(GV, D);
2071 setDSOLocal(GV);
2072 GV->setPartition(CodeGenOpts.SymbolPartition);
2073}
2074
/// Map a tls_model attribute spelling onto the corresponding IR TLS mode.
/// There is deliberately no Default case: the attribute value is presumably
/// restricted to these four spellings before reaching codegen (TODO confirm);
/// any other string would leave the StringSwitch without a result.
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
  return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
      .Case(S: "global-dynamic", Value: llvm::GlobalVariable::GeneralDynamicTLSModel)
      .Case(S: "local-dynamic", Value: llvm::GlobalVariable::LocalDynamicTLSModel)
      .Case(S: "initial-exec", Value: llvm::GlobalVariable::InitialExecTLSModel)
      .Case(S: "local-exec", Value: llvm::GlobalVariable::LocalExecTLSModel);
}
2082
2083llvm::GlobalVariable::ThreadLocalMode
2084CodeGenModule::GetDefaultLLVMTLSModel() const {
2085 switch (CodeGenOpts.getDefaultTLSModel()) {
2086 case CodeGenOptions::GeneralDynamicTLSModel:
2087 return llvm::GlobalVariable::GeneralDynamicTLSModel;
2088 case CodeGenOptions::LocalDynamicTLSModel:
2089 return llvm::GlobalVariable::LocalDynamicTLSModel;
2090 case CodeGenOptions::InitialExecTLSModel:
2091 return llvm::GlobalVariable::InitialExecTLSModel;
2092 case CodeGenOptions::LocalExecTLSModel:
2093 return llvm::GlobalVariable::LocalExecTLSModel;
2094 }
2095 llvm_unreachable("Invalid TLS model!");
2096}
2097
2098void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
2099 assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
2100
2101 llvm::GlobalValue::ThreadLocalMode TLM;
2102 TLM = GetDefaultLLVMTLSModel();
2103
2104 // Override the TLS model if it is explicitly specified.
2105 if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
2106 TLM = GetLLVMTLSModel(S: Attr->getModel());
2107 }
2108
2109 GV->setThreadLocalMode(TLM);
2110}
2111
2112static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
2113 StringRef Name) {
2114 const TargetInfo &Target = CGM.getTarget();
2115 return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
2116}
2117
2118static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
2119 const CPUSpecificAttr *Attr,
2120 unsigned CPUIndex,
2121 raw_ostream &Out) {
2122 // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
2123 // supported.
2124 if (Attr)
2125 Out << getCPUSpecificMangling(CGM, Name: Attr->getCPUName(Index: CPUIndex)->getName());
2126 else if (CGM.getTarget().supportsIFunc())
2127 Out << ".resolver";
2128}
2129
2130// Returns true if GD is a function decl with internal linkage and
2131// needs a unique suffix after the mangled name.
2132static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
2133 CodeGenModule &CGM) {
2134 const Decl *D = GD.getDecl();
2135 return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(Val: D) &&
2136 (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
2137}
2138
/// Compute the mangled name for \p GD (declared as \p ND): the base mangling
/// (or special unmangled spellings for regcall / CUDA stubs / OpenCL kernel
/// stubs), then the internal-linkage module hash, then any multiversion
/// suffix, then a device-side externalization postfix. The ordering of these
/// pieces is significant — see the comments below.
static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
                                      const NamedDecl *ND,
                                      bool OmitMultiVersionMangling = false) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  MangleContext &MC = CGM.getCXXABI().getMangleContext();
  // Tell the mangler up front when unique internal-linkage names are in use.
  if (!CGM.getModuleNameHash().empty())
    MC.needsUniqueInternalLinkageNames();
  bool ShouldMangle = MC.shouldMangleDeclName(D: ND);
  if (ShouldMangle)
    MC.mangleName(GD: GD.getWithDecl(D: ND), Out);
  else {
    // Unmangled path: C-style names, possibly with a special prefix.
    IdentifierInfo *II = ND->getIdentifier();
    assert(II && "Attempt to mangle unnamed decl.");
    const auto *FD = dyn_cast<FunctionDecl>(Val: ND);

    if (FD &&
        FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
      if (CGM.getLangOpts().RegCall4)
        Out << "__regcall4__" << II->getName();
      else
        Out << "__regcall3__" << II->getName();
    } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
               GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      Out << "__device_stub__" << II->getName();
    } else if (FD &&
               DeviceKernelAttr::isOpenCLSpelling(
                   A: FD->getAttr<DeviceKernelAttr>()) &&
               GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      Out << "__clang_ocl_kern_imp_" << II->getName();
    } else {
      Out << II->getName();
    }
  }

  // Check if the module name hash should be appended for internal linkage
  // symbols. This should come before multi-version target suffixes are
  // appended. This is to keep the name and module hash suffix of the
  // internal linkage function together. The unique suffix should only be
  // added when name mangling is done to make sure that the final name can
  // be properly demangled. For example, for C functions without prototypes,
  // name mangling is not done and the unique suffix should not be appended
  // then.
  if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) {
    assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames &&
           "Hash computed when not explicitly requested");
    Out << CGM.getModuleNameHash();
  }

  // Append the multiversion suffix for the variant selected by GD, unless the
  // caller asked for the base (pre-multiversion) name.
  if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND))
    if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
      switch (FD->getMultiVersionKind()) {
      case MultiVersionKind::CPUDispatch:
      case MultiVersionKind::CPUSpecific:
        AppendCPUSpecificCPUDispatchMangling(CGM,
                                             Attr: FD->getAttr<CPUSpecificAttr>(),
                                             CPUIndex: GD.getMultiVersionIndex(), Out);
        break;
      case MultiVersionKind::Target: {
        auto *Attr = FD->getAttr<TargetAttr>();
        assert(Attr && "Expected TargetAttr to be present "
                       "for attribute mangling");
        const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
        Info.appendAttributeMangling(Attr, Out);
        break;
      }
      case MultiVersionKind::TargetVersion: {
        auto *Attr = FD->getAttr<TargetVersionAttr>();
        assert(Attr && "Expected TargetVersionAttr to be present "
                       "for attribute mangling");
        const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
        Info.appendAttributeMangling(Attr, Out);
        break;
      }
      case MultiVersionKind::TargetClones: {
        auto *Attr = FD->getAttr<TargetClonesAttr>();
        assert(Attr && "Expected TargetClonesAttr to be present "
                       "for attribute mangling");
        unsigned Index = GD.getMultiVersionIndex();
        const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
        Info.appendAttributeMangling(Attr, Index, Out);
        break;
      }
      case MultiVersionKind::None:
        llvm_unreachable("None multiversion type isn't valid here");
      }
    }

  // Make unique name for device side static file-scope variable for HIP.
  if (CGM.getContext().shouldExternalize(D: ND) &&
      CGM.getLangOpts().GPURelocatableDeviceCode &&
      CGM.getLangOpts().CUDAIsDevice)
    CGM.printPostfixForExternalizedDecl(OS&: Out, D: ND);

  return std::string(Out.str());
}
2235
/// When \p FD becomes a multiversion function, a version of it may already
/// have been emitted under its plain (non-multiversion) mangled name. Re-key
/// that entry in the mangling maps under its new suffixed name, rename the
/// already-emitted IR value to match, and update \p CurName if it referred to
/// the stale name.
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
                                            const FunctionDecl *FD,
                                            StringRef &CurName) {
  if (!FD->isMultiVersion())
    return;

  // Get the name of what this would be without the 'target' attribute. This
  // allows us to lookup the version that was emitted when this wasn't a
  // multiversion function.
  std::string NonTargetName =
      getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);
  GlobalDecl OtherGD;
  if (lookupRepresentativeDecl(MangledName: NonTargetName, Result&: OtherGD)) {
    assert(OtherGD.getCanonicalDecl()
               .getDecl()
               ->getAsFunction()
               ->isMultiVersion() &&
           "Other GD should now be a multiversioned function");
    // OtherFD is the version of this function that was mangled BEFORE
    // becoming a MultiVersion function. It potentially needs to be updated.
    const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
                                      .getDecl()
                                      ->getAsFunction()
                                      ->getMostRecentDecl();
    std::string OtherName = getMangledNameImpl(CGM&: *this, GD: OtherGD, ND: OtherFD);
    // This is so that if the initial version was already the 'default'
    // version, we don't try to update it.
    if (OtherName != NonTargetName) {
      // Remove instead of erase, since others may have stored the StringRef
      // to this.
      const auto ExistingRecord = Manglings.find(Key: NonTargetName);
      if (ExistingRecord != std::end(cont&: Manglings))
        Manglings.remove(KeyValue: &(*ExistingRecord));
      auto Result = Manglings.insert(KV: std::make_pair(x&: OtherName, y&: OtherGD));
      StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] =
          Result.first->first();
      // If the current decl is the one being created, make sure we update the name.
      if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl())
        CurName = OtherNameRef;
      // Rename the IR global that was emitted under the old, plain name.
      if (llvm::GlobalValue *Entry = GetGlobalValue(Ref: NonTargetName))
        Entry->setName(OtherName);
    }
  }
}
2280
/// Return the mangled name for \p GD, computing it on first use and caching
/// it in MangledDeclNames (except in CUDA/HIP device compilations where the
/// name may depend on the referencing context and so cannot be cached).
StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
  GlobalDecl CanonicalGD = GD.getCanonicalDecl();

  // Some ABIs don't have constructor variants. Make sure that base and
  // complete constructors get mangled the same.
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: CanonicalGD.getDecl())) {
    if (!getTarget().getCXXABI().hasConstructorVariants()) {
      CXXCtorType OrigCtorType = GD.getCtorType();
      assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
      if (OrigCtorType == Ctor_Base)
        CanonicalGD = GlobalDecl(CD, Ctor_Complete);
    }
  }

  // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
  // static device variable depends on whether the variable is referenced by
  // a host or device host function. Therefore the mangled name cannot be
  // cached.
  if (!LangOpts.CUDAIsDevice || !getContext().mayExternalize(D: GD.getDecl())) {
    auto FoundName = MangledDeclNames.find(Key: CanonicalGD);
    if (FoundName != MangledDeclNames.end())
      return FoundName->second;
  }

  // Keep the first result in the case of a mangling collision.
  const auto *ND = cast<NamedDecl>(Val: GD.getDecl());
  std::string MangledName = getMangledNameImpl(CGM: *this, GD, ND);

  // Ensure either we have different ABIs between host and device compilations,
  // says host compilation following MSVC ABI but device compilation follows
  // Itanium C++ ABI or, if they follow the same ABI, kernel names after
  // mangling should be the same after name stubbing. The later checking is
  // very important as the device kernel name being mangled in host-compilation
  // is used to resolve the device binaries to be executed. Inconsistent naming
  // result in undefined behavior. Even though we cannot check that naming
  // directly between host- and device-compilations, the host- and
  // device-mangling in host compilation could help catching certain ones.
  assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
         getContext().shouldExternalize(ND) || getLangOpts().CUDAIsDevice ||
         (getContext().getAuxTargetInfo() &&
          (getContext().getAuxTargetInfo()->getCXXABI() !=
           getContext().getTargetInfo().getCXXABI())) ||
         getCUDARuntime().getDeviceSideName(ND) ==
             getMangledNameImpl(
                 *this,
                 GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
                 ND));

  // This invariant should hold true in the future.
  // Prior work:
  // https://discourse.llvm.org/t/rfc-clang-diagnostic-for-demangling-failures/82835/8
  // https://github.com/llvm/llvm-project/issues/111345
  // assert(!((StringRef(MangledName).starts_with("_Z") ||
  //           StringRef(MangledName).starts_with("?")) &&
  //          !GD.getDecl()->hasAttr<AsmLabelAttr>() &&
  //          llvm::demangle(MangledName) == MangledName) &&
  //        "LLVM demangler must demangle clang-generated names");

  // Manglings owns the string storage; cache and return a stable StringRef.
  auto Result = Manglings.insert(KV: std::make_pair(x&: MangledName, y&: GD));
  return MangledDeclNames[CanonicalGD] = Result.first->first();
}
2342
2343StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
2344 const BlockDecl *BD) {
2345 MangleContext &MangleCtx = getCXXABI().getMangleContext();
2346 const Decl *D = GD.getDecl();
2347
2348 SmallString<256> Buffer;
2349 llvm::raw_svector_ostream Out(Buffer);
2350 if (!D)
2351 MangleCtx.mangleGlobalBlock(BD,
2352 ID: dyn_cast_or_null<VarDecl>(Val: initializedGlobalDecl.getDecl()), Out);
2353 else if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: D))
2354 MangleCtx.mangleCtorBlock(CD, CT: GD.getCtorType(), BD, Out);
2355 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: D))
2356 MangleCtx.mangleDtorBlock(CD: DD, DT: GD.getDtorType(), BD, Out);
2357 else
2358 MangleCtx.mangleBlock(DC: cast<DeclContext>(Val: D), BD, Out);
2359
2360 auto Result = Manglings.insert(KV: std::make_pair(x: Out.str(), y&: BD));
2361 return Result.first->first();
2362}
2363
2364const GlobalDecl CodeGenModule::getMangledNameDecl(StringRef Name) {
2365 auto it = MangledDeclNames.begin();
2366 while (it != MangledDeclNames.end()) {
2367 if (it->second == Name)
2368 return it->first;
2369 it++;
2370 }
2371 return GlobalDecl();
2372}
2373
/// Look up an already-emitted global value (function or variable) in the
/// LLVM module by its mangled name; returns null if none exists yet.
llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
  return getModule().getNamedValue(Name);
}
2377
2378/// AddGlobalCtor - Add a function to the list that will be called before
2379/// main() runs.
2380void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
2381 unsigned LexOrder,
2382 llvm::Constant *AssociatedData) {
2383 // FIXME: Type coercion of void()* types.
2384 GlobalCtors.push_back(x: Structor(Priority, LexOrder, Ctor, AssociatedData));
2385}
2386
2387/// AddGlobalDtor - Add a function to the list that will be called
2388/// when the module is unloaded.
2389void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
2390 bool IsDtorAttrFunc) {
2391 if (CodeGenOpts.RegisterGlobalDtorsWithAtExit &&
2392 (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) {
2393 DtorsUsingAtExit[Priority].push_back(NewVal: Dtor);
2394 return;
2395 }
2396
2397 // FIXME: Type coercion of void()* types.
2398 GlobalDtors.push_back(x: Structor(Priority, ~0U, Dtor, nullptr));
2399}
2400
/// Emit the accumulated constructor/destructor entries in \p Fns as an
/// appending-linkage global array (e.g. @llvm.global_ctors), then clear the
/// list. Each entry is a { i32 priority, ptr function, ptr associated-data }
/// struct; when init/fini pointer authentication is configured, the function
/// pointer is emitted as a signed pointer.
void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
  if (Fns.empty()) return;

  const PointerAuthSchema &InitFiniAuthSchema =
      getCodeGenOpts().PointerAuth.InitFiniPointers;

  // Ctor function type is ptr.
  llvm::PointerType *PtrTy = llvm::PointerType::get(
      C&: getLLVMContext(), AddressSpace: TheModule.getDataLayout().getProgramAddressSpace());

  // Get the type of a ctor entry, { i32, ptr, ptr }.
  llvm::StructType *CtorStructTy = llvm::StructType::get(elt1: Int32Ty, elts: PtrTy, elts: PtrTy);

  // Construct the constructor and destructor arrays.
  ConstantInitBuilder Builder(*this);
  auto Ctors = Builder.beginArray(eltTy: CtorStructTy);
  for (const auto &I : Fns) {
    auto Ctor = Ctors.beginStruct(ty: CtorStructTy);
    Ctor.addInt(intTy: Int32Ty, value: I.Priority);
    if (InitFiniAuthSchema) {
      // Sign the initializer pointer. For an address-discriminated schema,
      // use the reserved ctors/dtors address discriminator constant as the
      // storage address.
      llvm::Constant *StorageAddress =
          (InitFiniAuthSchema.isAddressDiscriminated()
               ? llvm::ConstantExpr::getIntToPtr(
                     C: llvm::ConstantInt::get(
                         Ty: IntPtrTy,
                         V: llvm::ConstantPtrAuth::AddrDiscriminator_CtorsDtors),
                     Ty: PtrTy)
               : nullptr);
      llvm::Constant *SignedCtorPtr = getConstantSignedPointer(
          Pointer: I.Initializer, Key: InitFiniAuthSchema.getKey(), StorageAddress,
          OtherDiscriminator: llvm::ConstantInt::get(
              Ty: SizeTy, V: InitFiniAuthSchema.getConstantDiscrimination()));
      Ctor.add(value: SignedCtorPtr);
    } else {
      Ctor.add(value: I.Initializer);
    }
    // Third field: associated data, or a null pointer placeholder.
    if (I.AssociatedData)
      Ctor.add(value: I.AssociatedData);
    else
      Ctor.addNullPointer(ptrTy: PtrTy);
    Ctor.finishAndAddTo(parent&: Ctors);
  }

  auto List = Ctors.finishAndCreateGlobal(args&: GlobalName, args: getPointerAlign(),
                                          /*constant*/ args: false,
                                          args: llvm::GlobalValue::AppendingLinkage);

  // The LTO linker doesn't seem to like it when we set an alignment
  // on appending variables. Take it off as a workaround.
  List->setAlignment(std::nullopt);

  Fns.clear();
}
2454
2455llvm::GlobalValue::LinkageTypes
2456CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
2457 const auto *D = cast<FunctionDecl>(Val: GD.getDecl());
2458
2459 GVALinkage Linkage = getContext().GetGVALinkageForFunction(FD: D);
2460
2461 if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(Val: D))
2462 return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, DT: GD.getDtorType());
2463
2464 return getLLVMLinkageForDeclarator(D, Linkage);
2465}
2466
2467llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
2468 llvm::MDString *MDS = dyn_cast<llvm::MDString>(Val: MD);
2469 if (!MDS) return nullptr;
2470
2471 return llvm::ConstantInt::get(Ty: Int64Ty, V: llvm::MD5Hash(Str: MDS->getString()));
2472}
2473
2474static QualType GeneralizeTransparentUnion(QualType Ty) {
2475 const RecordType *UT = Ty->getAsUnionType();
2476 if (!UT)
2477 return Ty;
2478 const RecordDecl *UD = UT->getDecl()->getDefinitionOrSelf();
2479 if (!UD->hasAttr<TransparentUnionAttr>())
2480 return Ty;
2481 if (!UD->fields().empty())
2482 return UD->fields().begin()->getType();
2483 return Ty;
2484}
2485
2486// If `GeneralizePointers` is true, generalizes types to a void pointer with the
2487// qualifiers of the originally pointed-to type, e.g. 'const char *' and 'char *
2488// const *' generalize to 'const void *' while 'char *' and 'const char **'
2489// generalize to 'void *'.
2490static QualType GeneralizeType(ASTContext &Ctx, QualType Ty,
2491 bool GeneralizePointers) {
2492 Ty = GeneralizeTransparentUnion(Ty);
2493
2494 if (!GeneralizePointers || !Ty->isPointerType())
2495 return Ty;
2496
2497 return Ctx.getPointerType(
2498 T: QualType(Ctx.VoidTy)
2499 .withCVRQualifiers(CVR: Ty->getPointeeType().getCVRQualifiers()));
2500}
2501
2502// Apply type generalization to a FunctionType's return and argument types
2503static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty,
2504 bool GeneralizePointers) {
2505 if (auto *FnType = Ty->getAs<FunctionProtoType>()) {
2506 SmallVector<QualType, 8> GeneralizedParams;
2507 for (auto &Param : FnType->param_types())
2508 GeneralizedParams.push_back(
2509 Elt: GeneralizeType(Ctx, Ty: Param, GeneralizePointers));
2510
2511 return Ctx.getFunctionType(
2512 ResultTy: GeneralizeType(Ctx, Ty: FnType->getReturnType(), GeneralizePointers),
2513 Args: GeneralizedParams, EPI: FnType->getExtProtoInfo());
2514 }
2515
2516 if (auto *FnType = Ty->getAs<FunctionNoProtoType>())
2517 return Ctx.getFunctionNoProtoType(
2518 ResultTy: GeneralizeType(Ctx, Ty: FnType->getReturnType(), GeneralizePointers));
2519
2520 llvm_unreachable("Encountered unknown FunctionType");
2521}
2522
2523llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T, StringRef Salt) {
2524 T = GeneralizeFunctionType(
2525 Ctx&: getContext(), Ty: T, GeneralizePointers: getCodeGenOpts().SanitizeCfiICallGeneralizePointers);
2526 if (auto *FnType = T->getAs<FunctionProtoType>())
2527 T = getContext().getFunctionType(
2528 ResultTy: FnType->getReturnType(), Args: FnType->getParamTypes(),
2529 EPI: FnType->getExtProtoInfo().withExceptionSpec(ESI: EST_None));
2530
2531 std::string OutName;
2532 llvm::raw_string_ostream Out(OutName);
2533 getCXXABI().getMangleContext().mangleCanonicalTypeName(
2534 T, Out, NormalizeIntegers: getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);
2535
2536 if (!Salt.empty())
2537 Out << "." << Salt;
2538
2539 if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
2540 Out << ".normalized";
2541 if (getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
2542 Out << ".generalized";
2543
2544 return llvm::ConstantInt::get(
2545 Ty: Int32Ty, V: llvm::getKCFITypeID(MangledTypeName: OutName, Algorithm: getCodeGenOpts().SanitizeKcfiHash));
2546}
2547
2548void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
2549 const CGFunctionInfo &Info,
2550 llvm::Function *F, bool IsThunk) {
2551 unsigned CallingConv;
2552 llvm::AttributeList PAL;
2553 ConstructAttributeList(Name: F->getName(), Info, CalleeInfo: GD, Attrs&: PAL, CallingConv,
2554 /*AttrOnCallSite=*/false, IsThunk);
2555 if (CallingConv == llvm::CallingConv::X86_VectorCall &&
2556 getTarget().getTriple().isWindowsArm64EC()) {
2557 SourceLocation Loc;
2558 if (const Decl *D = GD.getDecl())
2559 Loc = D->getLocation();
2560
2561 Error(loc: Loc, message: "__vectorcall calling convention is not currently supported");
2562 }
2563 F->setAttributes(PAL);
2564 F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2565}
2566
// Strip the first OpenCL image access qualifier ("__read_only",
// "__write_only", or "__read_write") found in \p TyName, together with the
// single space that follows it. At most one qualifier is removed (checked in
// the order above, matching the original if/else cascade); the name is left
// untouched when none is present.
static void removeImageAccessQualifier(std::string &TyName) {
  static const char *const AccessQuals[] = {"__read_only", "__write_only",
                                            "__read_write"};
  for (const char *Qual : AccessQuals) {
    std::string::size_type Pos = TyName.find(Qual);
    if (Pos != std::string::npos) {
      // "+ 1" for the space after the access qualifier.
      TyName.erase(Pos, std::strlen(Qual) + 1);
      return;
    }
  }
}
2586
// Returns the address space id that should be produced to the
// kernel_arg_addr_space metadata. This is always fixed to the ids
// as specified in the SPIR 2.0 specification in order to differentiate
// for example in clGetKernelArgInfo() implementation between the address
// spaces with targets without unique mapping to the OpenCL address spaces
// (basically all single AS CPUs).
static unsigned ArgInfoAddressSpace(LangAS AS) {
  switch (AS) {
  case LangAS::opencl_global:
    return 1;
  case LangAS::opencl_constant:
    return 2;
  case LangAS::opencl_local:
    return 3;
  case LangAS::opencl_generic:
    return 4; // Not in SPIR 2.0 specs.
  case LangAS::opencl_global_device:
    return 5;
  case LangAS::opencl_global_host:
    return 6;
  default:
    // Anything else (including target-specific address spaces) is reported
    // as private (0).
    return 0; // Assume private.
  }
}
2611
/// Emit per-argument kernel metadata on \p Fn: address-space ids, image/pipe
/// access qualifiers, (base) type names, type qualifiers, and argument names.
/// Each metadata node holds one entry per kernel parameter. \p FD and \p CGF
/// must be both null (emit empty lists) or both non-null; the OpenCL-specific
/// nodes are attached only under -cl-* / OpenCL mode, while
/// "kernel_arg_name" is also emitted for HIP when HIPSaveKernelArgName is on.
void CodeGenModule::GenKernelArgMetadata(llvm::Function *Fn,
                                         const FunctionDecl *FD,
                                         CodeGenFunction *CGF) {
  assert(((FD && CGF) || (!FD && !CGF)) &&
         "Incorrect use - FD and CGF should either be both null or not!");
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key", N number of values which is
  // the same number of values as their are kernel arguments.

  const PrintingPolicy &Policy = Context.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  if (FD && CGF)
    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
      const ParmVarDecl *parm = FD->getParamDecl(i);
      // Get argument name.
      argNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: parm->getName()));

      // Everything below is OpenCL-only metadata.
      if (!getLangOpts().OpenCL)
        continue;
      QualType ty = parm->getType();
      std::string typeQuals;

      // Get image and pipe access qualifier:
      if (ty->isImageType() || ty->isPipeType()) {
        // The access qualifier may be attached to a typedef rather than the
        // parameter itself.
        const Decl *PDecl = parm;
        if (const auto *TD = ty->getAs<TypedefType>())
          PDecl = TD->getDecl();
        const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
        if (A && A->isWriteOnly())
          accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "write_only"));
        else if (A && A->isReadWrite())
          accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "read_write"));
        else
          accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "read_only"));
      } else
        accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "none"));

      // Print the unqualified type, rewriting "unsigned T" as "uT" and
      // "signed T" as "T" for canonical types.
      auto getTypeSpelling = [&](QualType Ty) {
        auto typeName = Ty.getUnqualifiedType().getAsString(Policy);

        if (Ty.isCanonical()) {
          StringRef typeNameRef = typeName;
          // Turn "unsigned type" to "utype"
          if (typeNameRef.consume_front(Prefix: "unsigned "))
            return std::string("u") + typeNameRef.str();
          if (typeNameRef.consume_front(Prefix: "signed "))
            return typeNameRef.str();
        }

        return typeName;
      };

      if (ty->isPointerType()) {
        QualType pointeeTy = ty->getPointeeType();

        // Get address qualifier.
        addressQuals.push_back(
            Elt: llvm::ConstantAsMetadata::get(C: CGF->Builder.getInt32(
                C: ArgInfoAddressSpace(AS: pointeeTy.getAddressSpace()))));

        // Get argument type name.
        std::string typeName = getTypeSpelling(pointeeTy) + "*";
        std::string baseTypeName =
            getTypeSpelling(pointeeTy.getCanonicalType()) + "*";
        argTypeNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeName));
        argBaseTypeNames.push_back(
            Elt: llvm::MDString::get(Context&: VMContext, Str: baseTypeName));

        // Get argument type qualifiers:
        if (ty.isRestrictQualified())
          typeQuals = "restrict";
        if (pointeeTy.isConstQualified() ||
            (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
          typeQuals += typeQuals.empty() ? "const" : " const";
        if (pointeeTy.isVolatileQualified())
          typeQuals += typeQuals.empty() ? "volatile" : " volatile";
      } else {
        uint32_t AddrSpc = 0;
        bool isPipe = ty->isPipeType();
        // Images and pipes are reported in the global address space.
        if (ty->isImageType() || isPipe)
          AddrSpc = ArgInfoAddressSpace(AS: LangAS::opencl_global);

        addressQuals.push_back(
            Elt: llvm::ConstantAsMetadata::get(C: CGF->Builder.getInt32(C: AddrSpc)));

        // Get argument type name.
        ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty;
        std::string typeName = getTypeSpelling(ty);
        std::string baseTypeName = getTypeSpelling(ty.getCanonicalType());

        // Remove access qualifiers on images
        // (as they are inseparable from type in clang implementation,
        // but OpenCL spec provides a special query to get access qualifier
        // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
        if (ty->isImageType()) {
          removeImageAccessQualifier(TyName&: typeName);
          removeImageAccessQualifier(TyName&: baseTypeName);
        }

        argTypeNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeName));
        argBaseTypeNames.push_back(
            Elt: llvm::MDString::get(Context&: VMContext, Str: baseTypeName));

        if (isPipe)
          typeQuals = "pipe";
      }
      argTypeQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeQuals));
    }

  // Attach the collected lists (possibly empty when FD/CGF are null).
  if (getLangOpts().OpenCL) {
    Fn->setMetadata(Kind: "kernel_arg_addr_space",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: addressQuals));
    Fn->setMetadata(Kind: "kernel_arg_access_qual",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: accessQuals));
    Fn->setMetadata(Kind: "kernel_arg_type",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: argTypeNames));
    Fn->setMetadata(Kind: "kernel_arg_base_type",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: argBaseTypeNames));
    Fn->setMetadata(Kind: "kernel_arg_type_qual",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: argTypeQuals));
  }
  if (getCodeGenOpts().EmitOpenCLArgMetadata ||
      getCodeGenOpts().HIPSaveKernelArgName)
    Fn->setMetadata(Kind: "kernel_arg_name",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: argNames));
}
2756
2757/// Determines whether the language options require us to model
2758/// unwind exceptions. We treat -fexceptions as mandating this
2759/// except under the fragile ObjC ABI with only ObjC exceptions
2760/// enabled. This means, for example, that C with -fexceptions
2761/// enables this.
2762static bool hasUnwindExceptions(const LangOptions &LangOpts) {
2763 // If exceptions are completely disabled, obviously this is false.
2764 if (!LangOpts.Exceptions) return false;
2765
2766 // If C++ exceptions are enabled, this is true.
2767 if (LangOpts.CXXExceptions) return true;
2768
2769 // If ObjC exceptions are enabled, this depends on the ABI.
2770 if (LangOpts.ObjCExceptions) {
2771 return LangOpts.ObjCRuntime.hasUnwindExceptions();
2772 }
2773
2774 return true;
2775}
2776
2777static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
2778 const CXXMethodDecl *MD) {
2779 // Check that the type metadata can ever actually be used by a call.
2780 if (!CGM.getCodeGenOpts().LTOUnit ||
2781 !CGM.HasHiddenLTOVisibility(RD: MD->getParent()))
2782 return false;
2783
2784 // Only functions whose address can be taken with a member function pointer
2785 // need this sort of type metadata.
2786 return MD->isImplicitObjectMemberFunction() && !MD->isVirtual() &&
2787 !isa<CXXConstructorDecl, CXXDestructorDecl>(Val: MD);
2788}
2789
2790SmallVector<const CXXRecordDecl *, 0>
2791CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
2792 llvm::SetVector<const CXXRecordDecl *> MostBases;
2793
2794 std::function<void (const CXXRecordDecl *)> CollectMostBases;
2795 CollectMostBases = [&](const CXXRecordDecl *RD) {
2796 if (RD->getNumBases() == 0)
2797 MostBases.insert(X: RD);
2798 for (const CXXBaseSpecifier &B : RD->bases())
2799 CollectMostBases(B.getType()->getAsCXXRecordDecl());
2800 };
2801 CollectMostBases(RD);
2802 return MostBases.takeVector();
2803}
2804
/// Set LLVM attributes that depend on having a function *definition* (rather
/// than just a prototype): unwind tables, stack probing/protection, the
/// inlining attribute lattice (alwaysinline / noinline / optnone /
/// inlinehint / noduplicate), optimization hints (cold/hot/minsize),
/// function alignment, and CFI type metadata. \p D may be null for
/// compiler-synthesized functions. The if/else ordering below encodes the
/// precedence between mutually-exclusive attributes and must not be
/// reordered.
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
                                                           llvm::Function *F) {
  llvm::AttrBuilder B(F->getContext());

  if ((!D || !D->hasAttr<NoUwtableAttr>()) && CodeGenOpts.UnwindTables)
    B.addUWTableAttr(Kind: llvm::UWTableKind(CodeGenOpts.UnwindTables));

  if (CodeGenOpts.StackClashProtector)
    B.addAttribute(A: "probe-stack", V: "inline-asm");

  // 4096 is the backend default, so only emit the attribute for other sizes.
  if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096)
    B.addAttribute(A: "stack-probe-size",
                   V: std::to_string(val: CodeGenOpts.StackProbeSize));

  if (!hasUnwindExceptions(LangOpts))
    B.addAttribute(Val: llvm::Attribute::NoUnwind);

  if (std::optional<llvm::Attribute::AttrKind> Attr =
          StackProtectorAttribute(D)) {
    B.addAttribute(Val: *Attr);
  }

  if (!D) {
    // Non-entry HLSL functions must always be inlined.
    if (getLangOpts().HLSL && !F->hasFnAttribute(Kind: llvm::Attribute::NoInline))
      B.addAttribute(Val: llvm::Attribute::AlwaysInline);
    // If we don't have a declaration to control inlining, the function isn't
    // explicitly marked as alwaysinline for semantic reasons, and inlining is
    // disabled, mark the function as noinline.
    else if (!F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline) &&
             CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
      B.addAttribute(Val: llvm::Attribute::NoInline);

    F->addFnAttrs(Attrs: B);
    return;
  }

  // Handle SME attributes that apply to function definitions,
  // rather than to function prototypes.
  if (D->hasAttr<ArmLocallyStreamingAttr>())
    B.addAttribute(A: "aarch64_pstate_sm_body");

  if (auto *Attr = D->getAttr<ArmNewAttr>()) {
    if (Attr->isNewZA())
      B.addAttribute(A: "aarch64_new_za");
    if (Attr->isNewZT0())
      B.addAttribute(A: "aarch64_new_zt0");
  }

  // Track whether we need to add the optnone LLVM attribute,
  // starting with the default for this optimization level.
  bool ShouldAddOptNone =
      !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0;
  // We can't add optnone in the following cases, it won't pass the verifier.
  ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>();
  ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>();

  // Non-entry HLSL functions must always be inlined.
  if (getLangOpts().HLSL && !F->hasFnAttribute(Kind: llvm::Attribute::NoInline) &&
      !D->hasAttr<NoInlineAttr>()) {
    B.addAttribute(Val: llvm::Attribute::AlwaysInline);
  } else if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) &&
             !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) {
    // Add optnone, but do so only if the function isn't always_inline.
    B.addAttribute(Val: llvm::Attribute::OptimizeNone);

    // OptimizeNone implies noinline; we should not be inlining such functions.
    B.addAttribute(Val: llvm::Attribute::NoInline);

    // We still need to handle naked functions even though optnone subsumes
    // much of their semantics.
    if (D->hasAttr<NakedAttr>())
      B.addAttribute(Val: llvm::Attribute::Naked);

    // OptimizeNone wins over OptimizeForSize and MinSize.
    F->removeFnAttr(Kind: llvm::Attribute::OptimizeForSize);
    F->removeFnAttr(Kind: llvm::Attribute::MinSize);
  } else if (D->hasAttr<NakedAttr>()) {
    // Naked implies noinline: we should not be inlining such functions.
    B.addAttribute(Val: llvm::Attribute::Naked);
    B.addAttribute(Val: llvm::Attribute::NoInline);
  } else if (D->hasAttr<NoDuplicateAttr>()) {
    B.addAttribute(Val: llvm::Attribute::NoDuplicate);
  } else if (D->hasAttr<NoInlineAttr>() &&
             !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) {
    // Add noinline if the function isn't always_inline.
    B.addAttribute(Val: llvm::Attribute::NoInline);
  } else if (D->hasAttr<AlwaysInlineAttr>() &&
             !F->hasFnAttribute(Kind: llvm::Attribute::NoInline)) {
    // (noinline wins over always_inline, and we can't specify both in IR)
    B.addAttribute(Val: llvm::Attribute::AlwaysInline);
  } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
    // If we're not inlining, then force everything that isn't always_inline to
    // carry an explicit noinline attribute.
    if (!F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline))
      B.addAttribute(Val: llvm::Attribute::NoInline);
  } else {
    // Otherwise, propagate the inline hint attribute and potentially use its
    // absence to mark things as noinline.
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
      // Search function and template pattern redeclarations for inline.
      auto CheckForInline = [](const FunctionDecl *FD) {
        auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
          return Redecl->isInlineSpecified();
        };
        if (any_of(Range: FD->redecls(), P: CheckRedeclForInline))
          return true;
        const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
        if (!Pattern)
          return false;
        return any_of(Range: Pattern->redecls(), P: CheckRedeclForInline);
      };
      if (CheckForInline(FD)) {
        B.addAttribute(Val: llvm::Attribute::InlineHint);
      } else if (CodeGenOpts.getInlining() ==
                     CodeGenOptions::OnlyHintInlining &&
                 !FD->isInlined() &&
                 !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) {
        B.addAttribute(Val: llvm::Attribute::NoInline);
      }
    }
  }

  // Add other optimization related attributes if we are optimizing this
  // function.
  if (!D->hasAttr<OptimizeNoneAttr>()) {
    if (D->hasAttr<ColdAttr>()) {
      if (!ShouldAddOptNone)
        B.addAttribute(Val: llvm::Attribute::OptimizeForSize);
      B.addAttribute(Val: llvm::Attribute::Cold);
    }
    if (D->hasAttr<HotAttr>())
      B.addAttribute(Val: llvm::Attribute::Hot);
    if (D->hasAttr<MinSizeAttr>())
      B.addAttribute(Val: llvm::Attribute::MinSize);
  }

  // Add `nooutline` if Outlining is disabled with a command-line flag or a
  // function attribute.
  if (CodeGenOpts.DisableOutlining || D->hasAttr<NoOutlineAttr>())
    B.addAttribute(Val: llvm::Attribute::NoOutline);

  F->addFnAttrs(Attrs: B);

  // An explicit alignment from the declaration or -ffunction-alignment wins;
  // otherwise only the preferred alignment (if requested) is applied.
  llvm::MaybeAlign ExplicitAlignment;
  if (unsigned alignment = D->getMaxAlignment() / Context.getCharWidth())
    ExplicitAlignment = llvm::Align(alignment);
  else if (LangOpts.FunctionAlignment)
    ExplicitAlignment = llvm::Align(1ull << LangOpts.FunctionAlignment);

  if (ExplicitAlignment) {
    F->setAlignment(ExplicitAlignment);
    F->setPreferredAlignment(ExplicitAlignment);
  } else if (LangOpts.PreferredFunctionAlignment) {
    F->setPreferredAlignment(llvm::Align(LangOpts.PreferredFunctionAlignment));
  }

  // Some C++ ABIs require 2-byte alignment for member functions, in order to
  // reserve a bit for differentiating between virtual and non-virtual member
  // functions. If the current target's C++ ABI requires this and this is a
  // member function, set its alignment accordingly.
  if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
    if (isa<CXXMethodDecl>(Val: D) && F->getPointerAlignment(DL: getDataLayout()) < 2)
      F->setAlignment(std::max(a: llvm::Align(2), b: F->getAlign().valueOrOne()));
  }

  // In the cross-dso CFI mode with canonical jump tables, we want !type
  // attributes on definitions only.
  if (CodeGenOpts.SanitizeCfiCrossDso &&
      CodeGenOpts.SanitizeCfiCanonicalJumpTables) {
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
      // Skip available_externally functions. They won't be codegen'ed in the
      // current module anyway.
      if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally)
        createFunctionTypeMetadataForIcall(FD, F);
    }
  }

  if (CodeGenOpts.CallGraphSection) {
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D))
      createIndirectFunctionTypeMD(FD, F);
  }

  // Emit type metadata on member functions for member function pointer checks.
  // These are only ever necessary on definitions; we're guaranteed that the
  // definition will be present in the LTO unit as a result of LTO visibility.
  auto *MD = dyn_cast<CXXMethodDecl>(Val: D);
  if (MD && requiresMemberFunctionPointerTypeMetadata(CGM&: *this, MD)) {
    for (const CXXRecordDecl *Base : getMostBaseClasses(RD: MD->getParent())) {
      llvm::Metadata *Id =
          CreateMetadataIdentifierForType(T: Context.getMemberPointerType(
              T: MD->getType(), /*Qualifier=*/std::nullopt, Cls: Base));
      F->addTypeMetadata(Offset: 0, TypeID: Id);
    }
  }
}
3001
3002void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
3003 const Decl *D = GD.getDecl();
3004 if (isa_and_nonnull<NamedDecl>(Val: D))
3005 setGVProperties(GV, GD);
3006 else
3007 GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
3008
3009 if (D && D->hasAttr<UsedAttr>())
3010 addUsedOrCompilerUsedGlobal(GV);
3011
3012 if (const auto *VD = dyn_cast_if_present<VarDecl>(Val: D);
3013 VD &&
3014 ((CodeGenOpts.KeepPersistentStorageVariables &&
3015 (VD->getStorageDuration() == SD_Static ||
3016 VD->getStorageDuration() == SD_Thread)) ||
3017 (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static &&
3018 VD->getType().isConstQualified())))
3019 addUsedOrCompilerUsedGlobal(GV);
3020}
3021
3022/// Get the feature delta from the default feature map for the given target CPU.
3023static std::vector<std::string>
3024getFeatureDeltaFromDefault(const CodeGenModule &CGM, StringRef TargetCPU,
3025 llvm::StringMap<bool> &FeatureMap) {
3026 llvm::StringMap<bool> DefaultFeatureMap;
3027 CGM.getTarget().initFeatureMap(
3028 Features&: DefaultFeatureMap, Diags&: CGM.getContext().getDiagnostics(), CPU: TargetCPU, FeatureVec: {});
3029
3030 std::vector<std::string> Delta;
3031 for (const auto &[K, V] : FeatureMap) {
3032 auto DefaultIt = DefaultFeatureMap.find(Key: K);
3033 if (DefaultIt == DefaultFeatureMap.end() || DefaultIt->getValue() != V)
3034 Delta.push_back(x: (V ? "+" : "-") + K.str());
3035 }
3036
3037 return Delta;
3038}
3039
/// Compute and add the "target-cpu", "tune-cpu", "target-features" and (for
/// AArch64 function multi-versioning) "fmv-features" string attributes for
/// \p GD into \p Attrs. Returns true if at least one attribute was added.
/// Target/CPU-specific attributes on the declaration (target, target_version,
/// cpu_specific, target_clones) override the module-level defaults.
bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
                                                llvm::AttrBuilder &Attrs,
                                                bool SetTargetFeatures) {
  // Add target-cpu and target-features attributes to functions. If
  // we have a decl for the function and it has a target attribute then
  // parse that and add it to the feature set.
  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
  StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU;
  std::vector<std::string> Features;
  const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: GD.getDecl());
  FD = FD ? FD->getMostRecentDecl() : FD;
  const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
  const auto *TV = FD ? FD->getAttr<TargetVersionAttr>() : nullptr;
  assert((!TD || !TV) && "both target_version and target specified");
  const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
  const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr;
  bool AddedAttr = false;
  if (TD || TV || SD || TC) {
    llvm::StringMap<bool> FeatureMap;
    getContext().getFunctionFeatureMap(FeatureMap, GD);

    // Now add the target-cpu and target-features to the function.
    // While we populated the feature map above, we still need to
    // get and parse the target attribute so we can get the cpu for
    // the function.
    if (TD) {
      ParsedTargetAttr ParsedAttr =
          Target.parseTargetAttr(Str: TD->getFeaturesStr());
      if (!ParsedAttr.CPU.empty() &&
          getTarget().isValidCPUName(Name: ParsedAttr.CPU)) {
        TargetCPU = ParsedAttr.CPU;
        TuneCPU = ""; // Clear the tune CPU.
      }
      if (!ParsedAttr.Tune.empty() &&
          getTarget().isValidCPUName(Name: ParsedAttr.Tune))
        TuneCPU = ParsedAttr.Tune;
    }

    if (SD) {
      // Apply the given CPU name as the 'tune-cpu' so that the optimizer can
      // favor this processor.
      TuneCPU = SD->getCPUName(Index: GD.getMultiVersionIndex())->getName();
    }

    // For AMDGPU, only emit delta features (features that differ from the
    // target CPU's defaults). Other targets might want to follow a similar
    // pattern.
    if (getTarget().getTriple().isAMDGPU()) {
      Features = getFeatureDeltaFromDefault(CGM: *this, TargetCPU, FeatureMap);
    } else {
      // Produce the canonical string for this set of features.
      for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
        Features.push_back(x: (Entry.getValue() ? "+" : "-") +
                           Entry.getKey().str());
    }
  } else {
    // Otherwise just add the existing target cpu and target features to the
    // function.
    if (SetTargetFeatures && getTarget().getTriple().isAMDGPU()) {
      llvm::StringMap<bool> FeatureMap;
      if (FD) {
        getContext().getFunctionFeatureMap(FeatureMap, GD);
      } else {
        getTarget().initFeatureMap(Features&: FeatureMap, Diags&: getContext().getDiagnostics(),
                                   CPU: TargetCPU,
                                   FeatureVec: getTarget().getTargetOpts().Features);
      }
      Features = getFeatureDeltaFromDefault(CGM: *this, TargetCPU, FeatureMap);
    } else {
      Features = getTarget().getTargetOpts().Features;
    }
  }

  if (!TargetCPU.empty()) {
    Attrs.addAttribute(A: "target-cpu", V: TargetCPU);
    AddedAttr = true;
  }
  if (!TuneCPU.empty()) {
    Attrs.addAttribute(A: "tune-cpu", V: TuneCPU);
    AddedAttr = true;
  }
  if (!Features.empty() && SetTargetFeatures) {
    // Read-only features cannot be toggled per-function; drop them before
    // emitting the sorted, comma-joined canonical feature string.
    llvm::erase_if(C&: Features, P: [&](const std::string& F) {
      return getTarget().isReadOnlyFeature(Feature: F.substr(pos: 1));
    });
    llvm::sort(C&: Features);
    Attrs.addAttribute(A: "target-features", V: llvm::join(R&: Features, Separator: ","));
    AddedAttr = true;
  }
  // Add metadata for AArch64 Function Multi Versioning.
  if (getTarget().getTriple().isAArch64()) {
    llvm::SmallVector<StringRef, 8> Feats;
    bool IsDefault = false;
    if (TV) {
      IsDefault = TV->isDefaultVersion();
      TV->getFeatures(Out&: Feats);
    } else if (TC) {
      IsDefault = TC->isDefaultVersion(Index: GD.getMultiVersionIndex());
      TC->getFeatures(Out&: Feats, Index: GD.getMultiVersionIndex());
    }
    if (IsDefault) {
      // The default version carries an empty "fmv-features" attribute.
      Attrs.addAttribute(A: "fmv-features");
      AddedAttr = true;
    } else if (!Feats.empty()) {
      // Sort features and remove duplicates.
      std::set<StringRef> OrderedFeats(Feats.begin(), Feats.end());
      std::string FMVFeatures;
      for (StringRef F : OrderedFeats)
        FMVFeatures.append(str: "," + F.str());
      Attrs.addAttribute(A: "fmv-features", V: FMVFeatures.substr(pos: 1));
      AddedAttr = true;
    }
  }
  return AddedAttr;
}
3155
void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
                                          llvm::GlobalObject *GO) {
  const Decl *D = GD.getDecl();
  // Apply the attributes common to all global objects first; the rest of this
  // function layers decl-specific sections and CPU/feature attributes on top.
  SetCommonAttributes(GD, GV: GO);

  if (D) {
    if (auto *GV = dyn_cast<llvm::GlobalVariable>(Val: GO)) {
      // The retain attribute forces the variable into llvm.used.
      if (D->hasAttr<RetainAttr>())
        addUsedGlobal(GV);
      // Honor the #pragma clang section directives for each section kind.
      if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
        GV->addAttribute(Kind: "bss-section", Val: SA->getName());
      if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
        GV->addAttribute(Kind: "data-section", Val: SA->getName());
      if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
        GV->addAttribute(Kind: "rodata-section", Val: SA->getName());
      if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>())
        GV->addAttribute(Kind: "relro-section", Val: SA->getName());
    }

    if (auto *F = dyn_cast<llvm::Function>(Val: GO)) {
      if (D->hasAttr<RetainAttr>())
        addUsedGlobal(GV: F);
      // #pragma clang section text applies only when no explicit section
      // attribute overrides it.
      if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
        if (!D->getAttr<SectionAttr>())
          F->setSection(SA->getName());

      llvm::AttrBuilder Attrs(F->getContext());
      if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
        // We know that GetCPUAndFeaturesAttributes will always have the
        // newest set, since it has the newest possible FunctionDecl, so the
        // new ones should replace the old.
        llvm::AttributeMask RemoveAttrs;
        RemoveAttrs.addAttribute(A: "target-cpu");
        RemoveAttrs.addAttribute(A: "target-features");
        RemoveAttrs.addAttribute(A: "fmv-features");
        RemoveAttrs.addAttribute(A: "tune-cpu");
        F->removeFnAttrs(Attrs: RemoveAttrs);
        F->addFnAttrs(Attrs);
      }
    }

    // An explicit code_seg or section attribute takes precedence over any
    // pragma-derived section set above; code_seg wins over section.
    if (const auto *CSA = D->getAttr<CodeSegAttr>())
      GO->setSection(CSA->getName());
    else if (const auto *SA = D->getAttr<SectionAttr>())
      GO->setSection(SA->getName());
  }

  // Finally give the target a chance to set its own attributes.
  getTargetCodeGenInfo().setTargetAttributes(D, GV: GO, M&: *this);
}
3205
void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
                                                  llvm::Function *F,
                                                  const CGFunctionInfo &FI) {
  const Decl *D = GD.getDecl();
  // Apply ABI/call attributes first, then the definition-only attributes.
  SetLLVMFunctionAttributes(GD, Info: FI, F, /*IsThunk=*/false);
  SetLLVMFunctionAttributesForDefinition(D, F);

  // Internal helpers are never visible outside this translation unit.
  F->setLinkage(llvm::Function::InternalLinkage);

  setNonAliasAttributes(GD, GO: F);
}
3217
3218static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
3219 // Set linkage and visibility in case we never see a definition.
3220 LinkageInfo LV = ND->getLinkageAndVisibility();
3221 // Don't set internal linkage on declarations.
3222 // "extern_weak" is overloaded in LLVM; we probably should have
3223 // separate linkage types for this.
3224 if (isExternallyVisible(L: LV.getLinkage()) &&
3225 (ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
3226 GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
3227}
3228
3229static bool hasExistingGeneralizedTypeMD(llvm::Function *F) {
3230 llvm::MDNode *MD = F->getMetadata(KindID: llvm::LLVMContext::MD_type);
3231 return MD && MD->hasGeneralizedMDString();
3232}
3233
void CodeGenModule::createIndirectFunctionTypeMD(const FunctionDecl *FD,
                                                 llvm::Function *F) {
  // Attach generalized type metadata identifying this function as a possible
  // indirect-call target (used by the call-graph section).
  // Return if generalized type metadata is already attached.
  if (hasExistingGeneralizedTypeMD(F))
    return;

  // All functions which are not internal linkage could be indirect targets.
  // Address taken functions with internal linkage could be indirect targets.
  // NOTE(review): `F->getFunction()` looks like it should simply be `*F`
  // (F is already the llvm::Function) — confirm against the llvm::Function
  // API that this member exists and returns *this.
  if (!F->hasLocalLinkage() ||
      F->getFunction().hasAddressTaken(nullptr, /*IgnoreCallbackUses=*/true,
                                       /*IgnoreAssumeLikeCalls=*/true,
                                       /*IgnoreLLVMUsed=*/IngoreLLVMUsed: false))
    F->addTypeMetadata(Offset: 0, TypeID: CreateMetadataIdentifierGeneralized(T: FD->getType()));
}
3248
void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
                                                       llvm::Function *F) {
  // Attach CFI !type metadata so indirect calls can be checked against this
  // function's type.
  // Only if we are checking indirect calls.
  if (!LangOpts.Sanitize.has(K: SanitizerKind::CFIICall))
    return;

  // Non-static class methods are handled via vtable or member function pointer
  // checks elsewhere.
  if (isa<CXXMethodDecl>(Val: FD) && !cast<CXXMethodDecl>(Val: FD)->isStatic())
    return;

  // The primary identifier: the function type with no pointer generalization.
  QualType FnType = GeneralizeFunctionType(Ctx&: getContext(), Ty: FD->getType(),
                                           /*GeneralizePointers=*/false);
  llvm::Metadata *MD = CreateMetadataIdentifierForType(T: FnType);
  F->addTypeMetadata(Offset: 0, TypeID: MD);
  // Add the generalized identifier if not added already.
  if (!hasExistingGeneralizedTypeMD(F)) {
    QualType GenPtrFnType = GeneralizeFunctionType(Ctx&: getContext(), Ty: FD->getType(),
                                                   /*GeneralizePointers=*/true);
    F->addTypeMetadata(Offset: 0, TypeID: CreateMetadataIdentifierGeneralized(T: GenPtrFnType));
  }

  // Emit a hash-based bit set entry for cross-DSO calls.
  if (CodeGenOpts.SanitizeCfiCrossDso)
    if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
      F->addTypeMetadata(Offset: 0, TypeID: llvm::ConstantAsMetadata::get(C: CrossDsoTypeId));
}
3276
void CodeGenModule::createCalleeTypeMetadataForIcall(const QualType &QT,
                                                     llvm::CallBase *CB) {
  // Attach !callee_type metadata describing the callee's generalized type to
  // an indirect call, for the call-graph section.
  // Only if needed for call graph section and only for indirect calls that are
  // visible externally.
  // TODO: Handle local linkage symbols so they are not left out of call graph
  // reducing precision.
  if (!CodeGenOpts.CallGraphSection || !CB->isIndirectCall() ||
      !isExternallyVisible(L: QT->getLinkage()))
    return;

  // The node shape is {{i64 0, !type-id}}: an (offset, type id) pair wrapped
  // in an outer tuple.
  llvm::Metadata *TypeIdMD = CreateMetadataIdentifierGeneralized(T: QT);
  llvm::MDTuple *TypeTuple = llvm::MDTuple::get(
      Context&: getLLVMContext(), MDs: {llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(
                            Ty: llvm::Type::getInt64Ty(C&: getLLVMContext()), V: 0)),
                        TypeIdMD});
  llvm::MDTuple *MDN = llvm::MDNode::get(Context&: getLLVMContext(), MDs: {TypeTuple});
  CB->setMetadata(KindID: llvm::LLVMContext::MD_callee_type, Node: MDN);
}
3295
void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) {
  // Attach the !kcfi_type metadata carrying the KCFI type-id constant for
  // this function's type (optionally salted via the function prototype's
  // extra attribute info).
  llvm::LLVMContext &Ctx = F->getContext();
  llvm::MDBuilder MDB(Ctx);
  llvm::StringRef Salt;

  if (const auto *FP = FD->getType()->getAs<FunctionProtoType>())
    if (const auto &Info = FP->getExtraAttributeInfo())
      Salt = Info.CFISalt;

  F->setMetadata(KindID: llvm::LLVMContext::MD_kcfi_type,
                 Node: llvm::MDNode::get(Context&: Ctx, MDs: MDB.createConstant(C: CreateKCFITypeId(
                                         T: FD->getType(), Salt))));
}
3309
3310static bool allowKCFIIdentifier(StringRef Name) {
3311 // KCFI type identifier constants are only necessary for external assembly
3312 // functions, which means it's safe to skip unusual names. Subset of
3313 // MCAsmInfo::isAcceptableChar() and MCAsmInfoXCOFF::isAcceptableChar().
3314 return llvm::all_of(Range&: Name, P: [](const char &C) {
3315 return llvm::isAlnum(C) || C == '_' || C == '.';
3316 });
3317}
3318
void CodeGenModule::finalizeKCFITypes() {
  // Post-process KCFI metadata once the whole module has been emitted.
  llvm::Module &M = getModule();
  for (auto &F : M.functions()) {
    // Remove KCFI type metadata from non-address-taken local functions.
    bool AddressTaken = F.hasAddressTaken();
    if (!AddressTaken && F.hasLocalLinkage())
      F.eraseMetadata(KindID: llvm::LLVMContext::MD_kcfi_type);

    // Generate a constant with the expected KCFI type identifier for all
    // address-taken function declarations to support annotating indirectly
    // called assembly functions.
    if (!AddressTaken || !F.isDeclaration())
      continue;

    const llvm::ConstantInt *Type;
    if (const llvm::MDNode *MD = F.getMetadata(KindID: llvm::LLVMContext::MD_kcfi_type))
      Type = llvm::mdconst::extract<llvm::ConstantInt>(MD: MD->getOperand(I: 0));
    else
      continue;

    // Skip symbol names the assembler may reject (see allowKCFIIdentifier).
    StringRef Name = F.getName();
    if (!allowKCFIIdentifier(Name))
      continue;

    // Emit a weak __kcfi_typeid_<name> absolute symbol holding the type id
    // (the signed value is included in a comment for readability).
    std::string Asm = (".weak __kcfi_typeid_" + Name + "\n.set __kcfi_typeid_" +
                       Name + ", " + Twine(Type->getZExtValue()) + " /* " +
                       Twine(Type->getSExtValue()) + " */\n")
                          .str();
    M.appendModuleInlineAsm(Asm);
  }
}
3350
void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
                                          bool IsIncompleteFunction,
                                          bool IsThunk) {

  if (F->getIntrinsicID() != llvm::Intrinsic::not_intrinsic) {
    // If this is an intrinsic function, the attributes will have been set
    // when the function was created.
    return;
  }

  const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());

  // Skip ABI attribute computation for "incomplete" functions, whose type
  // could not be fully lowered.
  if (!IsIncompleteFunction)
    SetLLVMFunctionAttributes(GD, Info: getTypes().arrangeGlobalDeclaration(GD), F,
                              IsThunk);

  // Add the Returned attribute for "this", except for iOS 5 and earlier
  // where substantial code, including the libstdc++ dylib, was compiled with
  // GCC and does not actually return "this".
  if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
      !(getTriple().isiOS() && getTriple().isOSVersionLT(Major: 6))) {
    assert(!F->arg_empty() &&
           F->arg_begin()->getType()
             ->canLosslesslyBitCastTo(F->getReturnType()) &&
           "unexpected this return");
    F->addParamAttr(ArgNo: 0, Kind: llvm::Attribute::Returned);
  }

  // Only a few attributes are set on declarations; these may later be
  // overridden by a definition.

  setLinkageForGV(GV: F, ND: FD);
  setGVProperties(GV: F, D: FD);

  // Setup target-specific attributes.
  if (!IsIncompleteFunction && F->isDeclaration())
    getTargetCodeGenInfo().setTargetAttributes(D: FD, GV: F, M&: *this);

  // code_seg takes precedence over a plain section attribute.
  if (const auto *CSA = FD->getAttr<CodeSegAttr>())
    F->setSection(CSA->getName());
  else if (const auto *SA = FD->getAttr<SectionAttr>())
    F->setSection(SA->getName());

  // Lower the error/warning attribute to "dontcall-*" IR attributes, which
  // the backend diagnoses when a call to this function survives optimization.
  if (const auto *EA = FD->getAttr<ErrorAttr>()) {
    if (EA->isError())
      F->addFnAttr(Kind: "dontcall-error", Val: EA->getUserDiagnostic());
    else if (EA->isWarning())
      F->addFnAttr(Kind: "dontcall-warn", Val: EA->getUserDiagnostic());
  }

  // If we plan on emitting this inline builtin, we can't treat it as a builtin.
  if (FD->isInlineBuiltinDeclaration()) {
    const FunctionDecl *FDBody;
    bool HasBody = FD->hasBody(Definition&: FDBody);
    (void)HasBody;
    assert(HasBody && "Inline builtin declarations should always have an "
                      "available body!");
    if (shouldEmitFunction(GD: FDBody))
      F->addFnAttr(Kind: llvm::Attribute::NoBuiltin);
  }

  if (FD->isReplaceableGlobalAllocationFunction()) {
    // A replaceable global allocation function does not act like a builtin by
    // default, only if it is invoked by a new-expression or delete-expression.
    F->addFnAttr(Kind: llvm::Attribute::NoBuiltin);
  }

  // Constructor/destructor and virtual-method addresses are not observable by
  // conforming programs, so their functions may be merged by the linker.
  if (isa<CXXConstructorDecl>(Val: FD) || isa<CXXDestructorDecl>(Val: FD))
    F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  else if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD))
    if (MD->isVirtual())
      F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Don't emit entries for function declarations in the cross-DSO mode. This
  // is handled with better precision by the receiving DSO. But if jump tables
  // are non-canonical then we need type metadata in order to produce the local
  // jump table.
  if (!CodeGenOpts.SanitizeCfiCrossDso ||
      !CodeGenOpts.SanitizeCfiCanonicalJumpTables)
    createFunctionTypeMetadataForIcall(FD, F);

  if (CodeGenOpts.CallGraphSection)
    createIndirectFunctionTypeMD(FD, F);

  if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI))
    setKCFIType(FD, F);

  if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
    getOpenMPRuntime().emitDeclareSimdFunction(FD, Fn: F);

  if (CodeGenOpts.InlineMaxStackSize != UINT_MAX)
    F->addFnAttr(Kind: "inline-max-stacksize", Val: llvm::utostr(X: CodeGenOpts.InlineMaxStackSize));

  if (const auto *CB = FD->getAttr<CallbackAttr>()) {
    // Annotate the callback behavior as metadata:
    //   - The callback callee (as argument number).
    //   - The callback payloads (as argument numbers).
    llvm::LLVMContext &Ctx = F->getContext();
    llvm::MDBuilder MDB(Ctx);

    // The payload indices are all but the first one in the encoding. The first
    // identifies the callback callee.
    int CalleeIdx = *CB->encoding_begin();
    ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
    F->addMetadata(KindID: llvm::LLVMContext::MD_callback,
                   MD&: *llvm::MDNode::get(Context&: Ctx, MDs: {MDB.createCallbackEncoding(
                                          CalleeArgNo: CalleeIdx, Arguments: PayloadIndices,
                                          /* VarArgsArePassed */ false)}));
  }
}
3461
3462void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
3463 assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
3464 "Only globals with definition can force usage.");
3465 LLVMUsed.emplace_back(args&: GV);
3466}
3467
3468void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
3469 assert(!GV->isDeclaration() &&
3470 "Only globals with definition can force usage.");
3471 LLVMCompilerUsed.emplace_back(args&: GV);
3472}
3473
3474void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) {
3475 assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
3476 "Only globals with definition can force usage.");
3477 if (getTriple().isOSBinFormatELF())
3478 LLVMCompilerUsed.emplace_back(args&: GV);
3479 else
3480 LLVMUsed.emplace_back(args&: GV);
3481}
3482
3483static void emitUsed(CodeGenModule &CGM, StringRef Name,
3484 std::vector<llvm::WeakTrackingVH> &List) {
3485 // Don't create llvm.used if there is no need.
3486 if (List.empty())
3487 return;
3488
3489 // Convert List to what ConstantArray needs.
3490 SmallVector<llvm::Constant*, 8> UsedArray;
3491 UsedArray.resize(N: List.size());
3492 for (unsigned i = 0, e = List.size(); i != e; ++i) {
3493 UsedArray[i] =
3494 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
3495 C: cast<llvm::Constant>(Val: &*List[i]), Ty: CGM.Int8PtrTy);
3496 }
3497
3498 if (UsedArray.empty())
3499 return;
3500 llvm::ArrayType *ATy = llvm::ArrayType::get(ElementType: CGM.Int8PtrTy, NumElements: UsedArray.size());
3501
3502 auto *GV = new llvm::GlobalVariable(
3503 CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
3504 llvm::ConstantArray::get(T: ATy, V: UsedArray), Name);
3505
3506 GV->setSection("llvm.metadata");
3507}
3508
void CodeGenModule::emitLLVMUsed() {
  // Materialize the accumulated retain lists as the llvm.used and
  // llvm.compiler.used module arrays.
  emitUsed(CGM&: *this, Name: "llvm.used", List&: LLVMUsed);
  emitUsed(CGM&: *this, Name: "llvm.compiler.used", List&: LLVMCompilerUsed);
}
3513
void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
  // Queue a raw linker option string; it is flushed to the
  // !llvm.linker.options named metadata in EmitModuleLinkOptions().
  auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opts);
  LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: getLLVMContext(), MDs: MDOpts));
}
3518
void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
  // Ask the target for its spelling of a detect-mismatch linker option
  // (e.g. "/FAILIFMISMATCH:..." for MSVC); targets that don't support it
  // return an empty string and nothing is recorded.
  llvm::SmallString<32> Opt;
  getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
  if (Opt.empty())
    return;
  auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opt);
  LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: getLLVMContext(), MDs: MDOpts));
}
3527
void CodeGenModule::AddDependentLib(StringRef Lib) {
  auto &C = getLLVMContext();
  // On ELF, dependent libraries are recorded separately (they end up in the
  // .deplibs section) rather than as linker options.
  if (getTarget().getTriple().isOSBinFormatELF()) {
    ELFDependentLibraries.push_back(
        Elt: llvm::MDNode::get(Context&: C, MDs: llvm::MDString::get(Context&: C, Str: Lib)));
    return;
  }

  // Elsewhere, let the target spell the option (e.g. "/DEFAULTLIB:" or "-l").
  llvm::SmallString<24> Opt;
  getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
  auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opt);
  LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: C, MDs: MDOpts));
}
3541
3542/// Add link options implied by the given module, including modules
3543/// it depends on, using a postorder walk.
static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
                                    SmallVectorImpl<llvm::MDNode *> &Metadata,
                                    llvm::SmallPtrSet<Module *, 16> &Visited) {
  // Import this module's parent.
  if (Mod->Parent && Visited.insert(Ptr: Mod->Parent).second) {
    addLinkOptionsPostorder(CGM, Mod: Mod->Parent, Metadata, Visited);
  }

  // Import this module's dependencies.
  for (Module *Import : llvm::reverse(C&: Mod->Imports)) {
    if (Visited.insert(Ptr: Import).second)
      addLinkOptionsPostorder(CGM, Mod: Import, Metadata, Visited);
  }

  // Add linker options to link against the libraries/frameworks
  // described by this module.
  llvm::LLVMContext &Context = CGM.getLLVMContext();
  bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();

  // For modules that use export_as for linking, use that module
  // name instead.
  if (Mod->UseExportAsModuleLinkName)
    return;

  for (const Module::LinkLibrary &LL : llvm::reverse(C&: Mod->LinkLibraries)) {
    // Link against a framework. Frameworks are currently Darwin only, so we
    // don't need to ask TargetCodeGenInfo for the spelling of the linker
    // option.
    if (LL.IsFramework) {
      llvm::Metadata *Args[2] = {llvm::MDString::get(Context, Str: "-framework"),
                                 llvm::MDString::get(Context, Str: LL.Library)};

      Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: Args));
      continue;
    }

    // Link against a library.
    if (IsELF) {
      // ELF encodes this as a ("lib", name) pair in !llvm.linker.options.
      llvm::Metadata *Args[2] = {
          llvm::MDString::get(Context, Str: "lib"),
          llvm::MDString::get(Context, Str: LL.Library),
      };
      Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: Args));
    } else {
      // Other formats use the target's spelling of the library option.
      llvm::SmallString<24> Opt;
      CGM.getTargetCodeGenInfo().getDependentLibraryOption(Lib: LL.Library, Opt);
      auto *OptString = llvm::MDString::get(Context, Str: Opt);
      Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: OptString));
    }
  }
}
3594
void CodeGenModule::EmitModuleInitializers(clang::Module *Primary) {
  assert(Primary->isNamedModuleUnit() &&
         "We should only emit module initializers for named modules.");

  // Emit the initializers in the order that sub-modules appear in the
  // source, first Global Module Fragments, if present.
  if (auto GMF = Primary->getGlobalModuleFragment()) {
    for (Decl *D : getContext().getModuleInitializers(M: GMF)) {
      // Skip import decls, the inits for those are called explicitly.
      if (isa<ImportDecl>(Val: D))
        continue;
      assert(isa<VarDecl>(D) && "GMF initializer decl is not a var?");
      EmitTopLevelDecl(D);
    }
  }
  // Second any associated with the module, itself.
  for (Decl *D : getContext().getModuleInitializers(M: Primary)) {
    // Skip import decls, the inits for those are called explicitly.
    if (isa<ImportDecl>(Val: D))
      continue;
    EmitTopLevelDecl(D);
  }
  // Third any associated with the Private Module Fragment, if present.
  if (auto PMF = Primary->getPrivateModuleFragment()) {
    for (Decl *D : getContext().getModuleInitializers(M: PMF)) {
      // Skip import decls, the inits for those are called explicitly.
      if (isa<ImportDecl>(Val: D))
        continue;
      assert(isa<VarDecl>(D) && "PMF initializer decl is not a var?");
      EmitTopLevelDecl(D);
    }
  }
}
3627
void CodeGenModule::EmitModuleLinkOptions() {
  // Collect the set of all of the modules we want to visit to emit link
  // options, which is essentially the imported modules and all of their
  // non-explicit child modules.
  llvm::SetVector<clang::Module *> LinkModules;
  llvm::SmallPtrSet<clang::Module *, 16> Visited;
  SmallVector<clang::Module *, 16> Stack;

  // Seed the stack with imported modules.
  for (Module *M : ImportedModules) {
    // Do not add any link flags when an implementation TU of a module imports
    // a header of that same module.
    if (M->getTopLevelModuleName() == getLangOpts().CurrentModule &&
        !getLangOpts().isCompilingModule())
      continue;
    if (Visited.insert(Ptr: M).second)
      Stack.push_back(Elt: M);
  }

  // Find all of the modules to import, making a little effort to prune
  // non-leaf modules.
  while (!Stack.empty()) {
    clang::Module *Mod = Stack.pop_back_val();

    bool AnyChildren = false;

    // Visit the submodules of this module.
    for (const auto &SM : Mod->submodules()) {
      // Skip explicit children; they need to be explicitly imported to be
      // linked against.
      if (SM->IsExplicit)
        continue;

      if (Visited.insert(Ptr: SM).second) {
        Stack.push_back(Elt: SM);
        AnyChildren = true;
      }
    }

    // We didn't find any children, so add this module to the list of
    // modules to link against.
    if (!AnyChildren) {
      LinkModules.insert(X: Mod);
    }
  }

  // Add link options for all of the imported modules in reverse topological
  // order. We don't do anything to try to order import link flags with respect
  // to linker options inserted by things like #pragma comment().
  SmallVector<llvm::MDNode *, 16> MetadataArgs;
  Visited.clear();
  for (Module *M : LinkModules)
    if (Visited.insert(Ptr: M).second)
      addLinkOptionsPostorder(CGM&: *this, Mod: M, Metadata&: MetadataArgs, Visited);
  // addLinkOptionsPostorder appends in postorder; reverse to get the
  // topological order the linker expects.
  std::reverse(first: MetadataArgs.begin(), last: MetadataArgs.end());
  LinkerOptionsMetadata.append(in_start: MetadataArgs.begin(), in_end: MetadataArgs.end());

  // Add the linker options metadata flag.
  if (!LinkerOptionsMetadata.empty()) {
    auto *NMD = getModule().getOrInsertNamedMetadata(Name: "llvm.linker.options");
    for (auto *MD : LinkerOptionsMetadata)
      NMD->addOperand(M: MD);
  }
}
3692
void CodeGenModule::EmitDeferred() {
  // Emit deferred declare target declarations.
  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
    getOpenMPRuntime().emitDeferredTargetDecls();

  // Emit code for any potentially referenced deferred decls. Since a
  // previously unused static decl may become used during the generation of code
  // for a static function, iterate until no changes are made.

  if (!DeferredVTables.empty()) {
    EmitDeferredVTables();

    // Emitting a vtable doesn't directly cause more vtables to
    // become deferred, although it can cause functions to be
    // emitted that then need those vtables.
    assert(DeferredVTables.empty());
  }

  // Emit CUDA/HIP static device variables referenced by host code only.
  // Note we should not clear CUDADeviceVarODRUsedByHost since it is still
  // needed for further handling.
  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
    llvm::append_range(C&: DeferredDeclsToEmit,
                       R&: getContext().CUDADeviceVarODRUsedByHost);

  // Stop if we're out of both deferred vtables and deferred declarations.
  if (DeferredDeclsToEmit.empty())
    return;

  // Grab the list of decls to emit. If EmitGlobalDefinition schedules more
  // work, it will not interfere with this.
  std::vector<GlobalDecl> CurDeclsToEmit;
  CurDeclsToEmit.swap(x&: DeferredDeclsToEmit);

  for (GlobalDecl &D : CurDeclsToEmit) {
    // Functions declared with the sycl_kernel_entry_point attribute are
    // emitted normally during host compilation. During device compilation,
    // a SYCL kernel caller offload entry point function is generated and
    // emitted in place of each of these functions.
    if (const auto *FD = D.getDecl()->getAsFunction()) {
      if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>() &&
          FD->isDefined()) {
        // Functions with an invalid sycl_kernel_entry_point attribute are
        // ignored during device compilation.
        if (!FD->getAttr<SYCLKernelEntryPointAttr>()->isInvalidAttr()) {
          // Generate and emit the SYCL kernel caller function.
          EmitSYCLKernelCaller(KernelEntryPointFn: FD, Ctx&: getContext());
          // Recurse to emit any symbols directly or indirectly referenced
          // by the SYCL kernel caller function.
          EmitDeferred();
        }
        // Do not emit the sycl_kernel_entry_point attributed function.
        continue;
      }
    }

    // We should call GetAddrOfGlobal with IsForDefinition set to true in order
    // to get GlobalValue with exactly the type we need, not something that
    // might had been created for another decl with the same mangled name but
    // different type.
    llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
        Val: GetAddrOfGlobal(GD: D, IsForDefinition: ForDefinition));

    // In case of different address spaces, we may still get a cast, even with
    // IsForDefinition equal to true. Query mangled names table to get
    // GlobalValue.
    if (!GV)
      GV = GetGlobalValue(Name: getMangledName(GD: D));

    // Make sure GetGlobalValue returned non-null.
    assert(GV);

    // Check to see if we've already emitted this. This is necessary
    // for a couple of reasons: first, decls can end up in the
    // deferred-decls queue multiple times, and second, decls can end
    // up with definitions in unusual ways (e.g. by an extern inline
    // function acquiring a strong function redefinition). Just
    // ignore these cases.
    if (!GV->isDeclaration())
      continue;

    // If this is OpenMP, check if it is legal to emit this global normally.
    if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD: D))
      continue;

    // Otherwise, emit the definition and move on to the next one.
    EmitGlobalDefinition(D, GV);

    // If we found out that we need to emit more decls, do that recursively.
    // This has the advantage that the decls are emitted in a DFS and related
    // ones are close together, which is convenient for testing.
    if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
      EmitDeferred();
      assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
    }
  }
}
3790
void CodeGenModule::EmitVTablesOpportunistically() {
  // Try to emit external vtables as available_externally if they have emitted
  // all inlined virtual functions. It runs after EmitDeferred() and therefore
  // is not allowed to create new references to things that need to be emitted
  // lazily. Note that it also relies on the fact that we eagerly emit RTTI.

  assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
         && "Only emit opportunistic vtables with optimizations");

  for (const CXXRecordDecl *RD : OpportunisticVTables) {
    assert(getVTables().isVTableExternal(RD) &&
           "This queue should only contain external vtables");
    // Only emit when the ABI guarantees speculation is safe for this class.
    if (getCXXABI().canSpeculativelyEmitVTable(RD))
      VTables.GenerateClassData(RD);
  }
  OpportunisticVTables.clear();
}
3808
void CodeGenModule::EmitGlobalAnnotations() {
  // Flush annotations that were deferred until their global was emitted.
  for (const auto& [MangledName, VD] : DeferredAnnotations) {
    llvm::GlobalValue *GV = GetGlobalValue(Name: MangledName);
    if (GV)
      AddGlobalAnnotations(D: VD, GV);
  }
  DeferredAnnotations.clear();

  if (Annotations.empty())
    return;

  // Create a new global variable for the ConstantStruct in the Module.
  llvm::Constant *Array = llvm::ConstantArray::get(T: llvm::ArrayType::get(
      ElementType: Annotations[0]->getType(), NumElements: Annotations.size()), V: Annotations);
  auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
                                      llvm::GlobalValue::AppendingLinkage,
                                      Array, "llvm.global.annotations");
  gv->setSection(AnnotationSection);
}
3828
llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
  // Annotation strings are uniqued per module; reuse an existing global.
  llvm::Constant *&AStr = AnnotationStrings[Str];
  if (AStr)
    return AStr;

  // Not found yet, create a new global.
  llvm::Constant *s = llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: Str);
  auto *gv = new llvm::GlobalVariable(
      getModule(), s->getType(), true, llvm::GlobalValue::PrivateLinkage, s,
      ".str", nullptr, llvm::GlobalValue::NotThreadLocal,
      ConstGlobalsPtrTy->getAddressSpace());
  gv->setSection(AnnotationSection);
  // The address of an annotation string is not significant; allow merging.
  gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  AStr = gv;
  return gv;
}
3845
3846llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
3847 SourceManager &SM = getContext().getSourceManager();
3848 PresumedLoc PLoc = SM.getPresumedLoc(Loc);
3849 if (PLoc.isValid())
3850 return EmitAnnotationString(Str: PLoc.getFilename());
3851 return EmitAnnotationString(Str: SM.getBufferName(Loc));
3852}
3853
3854llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
3855 SourceManager &SM = getContext().getSourceManager();
3856 PresumedLoc PLoc = SM.getPresumedLoc(Loc: L);
3857 unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
3858 SM.getExpansionLineNumber(Loc: L);
3859 return llvm::ConstantInt::get(Ty: Int32Ty, V: LineNo);
3860}
3861
llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) {
  // Build (and unique) the constant-argument tuple for an annotate attribute;
  // a null pointer stands in for "no arguments".
  ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()};
  if (Exprs.empty())
    return llvm::ConstantPointerNull::get(T: ConstGlobalsPtrTy);

  // Unique by the APValue results of the (already constant-evaluated) args.
  llvm::FoldingSetNodeID ID;
  for (Expr *E : Exprs) {
    ID.Add(x: cast<clang::ConstantExpr>(Val: E)->getAPValueResult());
  }
  llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()];
  if (Lookup)
    return Lookup;

  llvm::SmallVector<llvm::Constant *, 4> LLVMArgs;
  LLVMArgs.reserve(N: Exprs.size());
  ConstantEmitter ConstEmiter(*this);
  llvm::transform(Range&: Exprs, d_first: std::back_inserter(x&: LLVMArgs), F: [&](const Expr *E) {
    const auto *CE = cast<clang::ConstantExpr>(Val: E);
    return ConstEmiter.emitAbstract(loc: CE->getBeginLoc(), value: CE->getAPValueResult(),
                                    T: CE->getType());
  });
  auto *Struct = llvm::ConstantStruct::getAnon(V: LLVMArgs);
  auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true,
                                      llvm::GlobalValue::PrivateLinkage, Struct,
                                      ".args");
  GV->setSection(AnnotationSection);
  // The argument tuple's address is not significant; allow merging.
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  Lookup = GV;
  return GV;
}
3893
3894llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
3895 const AnnotateAttr *AA,
3896 SourceLocation L) {
3897 // Get the globals for file name, annotation, and the line number.
3898 llvm::Constant *AnnoGV = EmitAnnotationString(Str: AA->getAnnotation()),
3899 *UnitGV = EmitAnnotationUnit(Loc: L),
3900 *LineNoCst = EmitAnnotationLineNo(L),
3901 *Args = EmitAnnotationArgs(Attr: AA);
3902
3903 llvm::Constant *GVInGlobalsAS = GV;
3904 if (GV->getAddressSpace() !=
3905 getDataLayout().getDefaultGlobalsAddressSpace()) {
3906 GVInGlobalsAS = llvm::ConstantExpr::getAddrSpaceCast(
3907 C: GV,
3908 Ty: llvm::PointerType::get(
3909 C&: GV->getContext(), AddressSpace: getDataLayout().getDefaultGlobalsAddressSpace()));
3910 }
3911
3912 // Create the ConstantStruct for the global annotation.
3913 llvm::Constant *Fields[] = {
3914 GVInGlobalsAS, AnnoGV, UnitGV, LineNoCst, Args,
3915 };
3916 return llvm::ConstantStruct::getAnon(V: Fields);
3917}
3918
3919void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
3920 llvm::GlobalValue *GV) {
3921 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
3922 // Get the struct elements for these annotations.
3923 for (const auto *I : D->specific_attrs<AnnotateAttr>())
3924 Annotations.push_back(x: EmitAnnotateAttr(GV, AA: I, L: D->getLocation()));
3925}
3926
3927bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
3928 SourceLocation Loc) const {
3929 const auto &NoSanitizeL = getContext().getNoSanitizeList();
3930 // NoSanitize by function name.
3931 if (NoSanitizeL.containsFunction(Mask: Kind, FunctionName: Fn->getName()))
3932 return true;
3933 // NoSanitize by location. Check "mainfile" prefix.
3934 auto &SM = Context.getSourceManager();
3935 FileEntryRef MainFile = *SM.getFileEntryRefForID(FID: SM.getMainFileID());
3936 if (NoSanitizeL.containsMainFile(Mask: Kind, FileName: MainFile.getName()))
3937 return true;
3938
3939 // Check "src" prefix.
3940 if (Loc.isValid())
3941 return NoSanitizeL.containsLocation(Mask: Kind, Loc);
3942 // If location is unknown, this may be a compiler-generated function. Assume
3943 // it's located in the main file.
3944 return NoSanitizeL.containsFile(Mask: Kind, FileName: MainFile.getName());
3945}
3946
/// Query the -fsanitize-ignorelist rules for a global variable: by name
/// ("global:"), by main file ("mainfile:"), by location ("src:"), and by the
/// variable's record type ("type:"), with arrays of an ignored record type
/// also ignored.
bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind,
                                       llvm::GlobalVariable *GV,
                                       SourceLocation Loc, QualType Ty,
                                       StringRef Category) const {
  const auto &NoSanitizeL = getContext().getNoSanitizeList();
  // Check the global's name.
  if (NoSanitizeL.containsGlobal(Mask: Kind, GlobalName: GV->getName(), Category))
    return true;
  // Check the translation unit's main file.
  auto &SM = Context.getSourceManager();
  if (NoSanitizeL.containsMainFile(
          Mask: Kind, FileName: SM.getFileEntryRefForID(FID: SM.getMainFileID())->getName(),
          Category))
    return true;
  // Check the global's own source location.
  if (NoSanitizeL.containsLocation(Mask: Kind, Loc, Category))
    return true;

  // Check global type.
  if (!Ty.isNull()) {
    // Drill down the array types: if global variable of a fixed type is
    // not sanitized, we also don't instrument arrays of them.
    while (auto AT = dyn_cast<ArrayType>(Val: Ty.getTypePtr()))
      Ty = AT->getElementType();
    Ty = Ty.getCanonicalType().getUnqualifiedType();
    // Only record types (classes, structs etc.) are ignored.
    if (Ty->isRecordType()) {
      std::string TypeStr = Ty.getAsString(Policy: getContext().getPrintingPolicy());
      if (NoSanitizeL.containsType(Mask: Kind, MangledTypeName: TypeStr, Category))
        return true;
    }
  }
  return false;
}
3978
3979bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
3980 StringRef Category) const {
3981 const auto &XRayFilter = getContext().getXRayFilter();
3982 using ImbueAttr = XRayFunctionFilter::ImbueAttribute;
3983 auto Attr = ImbueAttr::NONE;
3984 if (Loc.isValid())
3985 Attr = XRayFilter.shouldImbueLocation(Loc, Category);
3986 if (Attr == ImbueAttr::NONE)
3987 Attr = XRayFilter.shouldImbueFunction(FunctionName: Fn->getName());
3988 switch (Attr) {
3989 case ImbueAttr::NONE:
3990 return false;
3991 case ImbueAttr::ALWAYS:
3992 Fn->addFnAttr(Kind: "function-instrument", Val: "xray-always");
3993 break;
3994 case ImbueAttr::ALWAYS_ARG1:
3995 Fn->addFnAttr(Kind: "function-instrument", Val: "xray-always");
3996 Fn->addFnAttr(Kind: "xray-log-args", Val: "1");
3997 break;
3998 case ImbueAttr::NEVER:
3999 Fn->addFnAttr(Kind: "function-instrument", Val: "xray-never");
4000 break;
4001 }
4002 return true;
4003}
4004
/// Consult the -fprofile-list file(s) to decide whether Fn should be
/// instrumented, checking (in order) the function name, its source location,
/// and the TU's main file, before falling back to the list's default.
ProfileList::ExclusionType
CodeGenModule::isFunctionBlockedByProfileList(llvm::Function *Fn,
                                              SourceLocation Loc) const {
  const auto &ProfileList = getContext().getProfileList();
  // If the profile list is empty, then instrument everything.
  if (ProfileList.isEmpty())
    return ProfileList::Allow;
  llvm::driver::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr();
  // First, check the function name.
  if (auto V = ProfileList.isFunctionExcluded(FunctionName: Fn->getName(), Kind))
    return *V;
  // Next, check the source location.
  if (Loc.isValid())
    if (auto V = ProfileList.isLocationExcluded(Loc, Kind))
      return *V;
  // If location is unknown, this may be a compiler-generated function. Assume
  // it's located in the main file.
  auto &SM = Context.getSourceManager();
  if (auto MainFile = SM.getFileEntryRefForID(FID: SM.getMainFileID()))
    if (auto V = ProfileList.isFileExcluded(FileName: MainFile->getName(), Kind))
      return *V;
  return ProfileList.getDefault(Kind);
}
4028
4029ProfileList::ExclusionType
4030CodeGenModule::isFunctionBlockedFromProfileInstr(llvm::Function *Fn,
4031 SourceLocation Loc) const {
4032 auto V = isFunctionBlockedByProfileList(Fn, Loc);
4033 if (V != ProfileList::Allow)
4034 return V;
4035
4036 auto NumGroups = getCodeGenOpts().ProfileTotalFunctionGroups;
4037 if (NumGroups > 1) {
4038 auto Group = llvm::crc32(Data: arrayRefFromStringRef(Input: Fn->getName())) % NumGroups;
4039 if (Group != getCodeGenOpts().ProfileSelectedFunctionGroup)
4040 return ProfileList::Skip;
4041 }
4042 return ProfileList::Allow;
4043}
4044
4045bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
4046 // Never defer when EmitAllDecls is specified.
4047 if (LangOpts.EmitAllDecls)
4048 return true;
4049
4050 const auto *VD = dyn_cast<VarDecl>(Val: Global);
4051 if (VD &&
4052 ((CodeGenOpts.KeepPersistentStorageVariables &&
4053 (VD->getStorageDuration() == SD_Static ||
4054 VD->getStorageDuration() == SD_Thread)) ||
4055 (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static &&
4056 VD->getType().isConstQualified())))
4057 return true;
4058
4059 return getContext().DeclMustBeEmitted(D: Global);
4060}
4061
/// Determine whether it is safe to emit this global now, or whether emission
/// must be deferred because later declarations could still change its
/// linkage, target, or initialization semantics.
bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
  // In OpenMP 5.0 variables and function may be marked as
  // device_type(host/nohost) and we should not emit them eagerly unless we sure
  // that they must be emitted on the host/device. To be sure we need to have
  // seen a declare target with an explicit mentioning of the function, we know
  // we have if the level of the declare target attribute is -1. Note that we
  // check somewhere else if we should emit this at all.
  if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) {
    std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
        OMPDeclareTargetDeclAttr::getActiveAttr(VD: Global);
    if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1)
      return false;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(Val: Global)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      // Implicit template instantiations may change linkage if they are later
      // explicitly instantiated, so they should not be emitted eagerly.
      return false;
    // Defer until all versions have been semantically checked.
    if (FD->hasAttr<TargetVersionAttr>() && !FD->isMultiVersion())
      return false;
    // Defer emission of SYCL kernel entry point functions during device
    // compilation.
    if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>())
      return false;
  }
  if (const auto *VD = dyn_cast<VarDecl>(Val: Global)) {
    if (Context.getInlineVariableDefinitionKind(VD) ==
        ASTContext::InlineVariableDefinitionKind::WeakUnknown)
      // A definition of an inline constexpr static data member may change
      // linkage later if it's redeclared outside the class.
      return false;
    if (CXX20ModuleInits && VD->getOwningModule() &&
        !VD->getOwningModule()->isModuleMapModule()) {
      // For CXX20, module-owned initializers need to be deferred, since it is
      // not known at this point if they will be run for the current module or
      // as part of the initializer for an imported one.
      return false;
    }
  }
  // If OpenMP is enabled and threadprivates must be generated like TLS, delay
  // codegen for global variables, because they may be marked as threadprivate.
  if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
      getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Val: Global) &&
      !Global->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: false, ExcludeDtor: false) &&
      !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD: Global))
    return false;

  // No reason to defer: the global may be emitted immediately.
  return true;
}
4113
/// Get or create the global variable holding the GUID constant for a
/// __uuidof-style MSGuidDecl, emitting it linkonce_odr so identical GUIDs
/// merge across translation units.
ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
  StringRef Name = getMangledName(GD);

  // The UUID descriptor should be pointer aligned.
  CharUnits Alignment = CharUnits::fromQuantity(Quantity: PointerAlignInBytes);

  // Look for an existing global.
  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
    return ConstantAddress(GV, GV->getValueType(), Alignment);

  ConstantEmitter Emitter(*this);
  llvm::Constant *Init;

  APValue &V = GD->getAsAPValue();
  if (!V.isAbsent()) {
    // If possible, emit the APValue version of the initializer. In particular,
    // this gets the type of the constant right.
    Init = Emitter.emitForInitializer(
        value: GD->getAsAPValue(), destAddrSpace: GD->getType().getAddressSpace(), destType: GD->getType());
  } else {
    // As a fallback, directly construct the constant.
    // FIXME: This may get padding wrong under esoteric struct layout rules.
    // MSVC appears to create a complete type 'struct __s_GUID' that it
    // presumably uses to represent these constants.
    MSGuidDecl::Parts Parts = GD->getParts();
    llvm::Constant *Fields[4] = {
        llvm::ConstantInt::get(Ty: Int32Ty, V: Parts.Part1),
        llvm::ConstantInt::get(Ty: Int16Ty, V: Parts.Part2),
        llvm::ConstantInt::get(Ty: Int16Ty, V: Parts.Part3),
        llvm::ConstantDataArray::getRaw(
            Data: StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), NumElements: 8,
            ElementTy: Int8Ty)};
    Init = llvm::ConstantStruct::getAnon(V: Fields);
  }

  auto *GV = new llvm::GlobalVariable(
      getModule(), Init->getType(),
      /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
  // Place in a COMDAT so duplicate definitions merge at link time.
  if (supportsCOMDAT())
    GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName()));
  setDSOLocal(GV);

  if (!V.isAbsent()) {
    Emitter.finalize(global: GV);
    return ConstantAddress(GV, GV->getValueType(), Alignment);
  }

  // In the fallback case the global's value type may not match the declared
  // type, so report the address with the converted declared type instead.
  llvm::Type *Ty = getTypes().ConvertTypeForMem(T: GD->getType());
  return ConstantAddress(GV, Ty, Alignment);
}
4164
4165ConstantAddress CodeGenModule::GetAddrOfUnnamedGlobalConstantDecl(
4166 const UnnamedGlobalConstantDecl *GCD) {
4167 CharUnits Alignment = getContext().getTypeAlignInChars(T: GCD->getType());
4168
4169 llvm::GlobalVariable **Entry = nullptr;
4170 Entry = &UnnamedGlobalConstantDeclMap[GCD];
4171 if (*Entry)
4172 return ConstantAddress(*Entry, (*Entry)->getValueType(), Alignment);
4173
4174 ConstantEmitter Emitter(*this);
4175 llvm::Constant *Init;
4176
4177 const APValue &V = GCD->getValue();
4178
4179 assert(!V.isAbsent());
4180 Init = Emitter.emitForInitializer(value: V, destAddrSpace: GCD->getType().getAddressSpace(),
4181 destType: GCD->getType());
4182
4183 auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
4184 /*isConstant=*/true,
4185 llvm::GlobalValue::PrivateLinkage, Init,
4186 ".constant");
4187 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4188 GV->setAlignment(Alignment.getAsAlign());
4189
4190 Emitter.finalize(global: GV);
4191
4192 *Entry = GV;
4193 return ConstantAddress(GV, GV->getValueType(), Alignment);
4194}
4195
/// Get or create the global variable backing a C++20 template parameter
/// object (a class-type non-type template argument).
ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
    const TemplateParamObjectDecl *TPO) {
  StringRef Name = getMangledName(GD: TPO);
  CharUnits Alignment = getNaturalTypeAlignment(T: TPO->getType());

  // Reuse a previously emitted global with the same mangled name.
  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
    return ConstantAddress(GV, GV->getValueType(), Alignment);

  ConstantEmitter Emitter(*this);
  llvm::Constant *Init = Emitter.emitForInitializer(
      value: TPO->getValue(), destAddrSpace: TPO->getType().getAddressSpace(), destType: TPO->getType());

  if (!Init) {
    ErrorUnsupported(D: TPO, Type: "template parameter object");
    return ConstantAddress::invalid();
  }

  // Externally visible objects get linkonce_odr so identical instantiations
  // merge across TUs; otherwise keep them internal.
  llvm::GlobalValue::LinkageTypes Linkage =
      isExternallyVisible(L: TPO->getLinkageAndVisibility().getLinkage())
          ? llvm::GlobalValue::LinkOnceODRLinkage
          : llvm::GlobalValue::InternalLinkage;
  auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
                                      /*isConstant=*/true, Linkage, Init, Name);
  setGVProperties(GV, D: TPO);
  if (supportsCOMDAT() && Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName()));
  Emitter.finalize(global: GV);

  return ConstantAddress(GV, GV->getValueType(), Alignment);
}
4226
/// Return the address of the aliasee for a declaration carrying
/// __attribute__((weakref, alias("target"))), creating an extern_weak
/// declaration of the target if it does not exist yet.
ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
  const AliasAttr *AA = VD->getAttr<AliasAttr>();
  assert(AA && "No alias?");

  CharUnits Alignment = getContext().getDeclAlign(D: VD);
  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: VD->getType());

  // See if there is already something with the target's name in the module.
  llvm::GlobalValue *Entry = GetGlobalValue(Name: AA->getAliasee());
  if (Entry)
    return ConstantAddress(Entry, DeclTy, Alignment);

  // Create a declaration of the aliasee with the alias's type: a function
  // declaration for function types, a global variable otherwise.
  llvm::Constant *Aliasee;
  if (isa<llvm::FunctionType>(Val: DeclTy))
    Aliasee = GetOrCreateLLVMFunction(MangledName: AA->getAliasee(), Ty: DeclTy,
                                      D: GlobalDecl(cast<FunctionDecl>(Val: VD)),
                                      /*ForVTable=*/false);
  else
    Aliasee = GetOrCreateLLVMGlobal(MangledName: AA->getAliasee(), Ty: DeclTy, AddrSpace: LangAS::Default,
                                    D: nullptr);

  // A weakref target may legitimately remain undefined; mark the declaration
  // extern_weak and remember it so later definitions can upgrade the linkage.
  auto *F = cast<llvm::GlobalValue>(Val: Aliasee);
  F->setLinkage(llvm::Function::ExternalWeakLinkage);
  WeakRefReferences.insert(Ptr: F);

  return ConstantAddress(Aliasee, DeclTy, Alignment);
}
4254
4255template <typename AttrT> static bool hasImplicitAttr(const ValueDecl *D) {
4256 if (!D)
4257 return false;
4258 if (auto *A = D->getAttr<AttrT>())
4259 return A->isImplicit();
4260 return D->isImplicit();
4261}
4262
/// Decide whether an alias definition should be suppressed in offloading
/// compilations (OpenMP target device, CUDA/HIP), where the aliasee may only
/// exist on one side of the host/device split.
static bool shouldSkipAliasEmission(const CodeGenModule &CGM,
                                    const ValueDecl *Global) {
  const LangOptions &LangOpts = CGM.getLangOpts();
  // Outside offloading modes, aliases are always emitted normally.
  if (!LangOpts.OpenMPIsTargetDevice && !LangOpts.CUDA)
    return false;

  const auto *AA = Global->getAttr<AliasAttr>();
  GlobalDecl AliaseeGD;

  // Check if the aliasee exists, if the aliasee is not found, skip the alias
  // emission. This is executed for both the host and device.
  if (!CGM.lookupRepresentativeDecl(MangledName: AA->getAliasee(), Result&: AliaseeGD))
    return true;

  const auto *AliaseeDecl = dyn_cast<ValueDecl>(Val: AliaseeGD.getDecl());
  // OpenMP device: only emit the alias when the aliasee is itself a declare
  // target declaration.
  if (LangOpts.OpenMPIsTargetDevice)
    return !AliaseeDecl ||
           !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD: AliaseeDecl);

  // CUDA / HIP
  const bool HasDeviceAttr = Global->hasAttr<CUDADeviceAttr>();
  const bool AliaseeHasDeviceAttr =
      AliaseeDecl && AliaseeDecl->hasAttr<CUDADeviceAttr>();

  // CUDA/HIP device: both the alias and the aliasee must be device entities.
  if (LangOpts.CUDAIsDevice)
    return !HasDeviceAttr || !AliaseeHasDeviceAttr;

  // CUDA / HIP Host
  // we know that the aliasee exists from above, so we know to emit
  return false;
}
4294
4295bool CodeGenModule::shouldEmitCUDAGlobalVar(const VarDecl *Global) const {
4296 assert(LangOpts.CUDA && "Should not be called by non-CUDA languages");
4297 // We need to emit host-side 'shadows' for all global
4298 // device-side variables because the CUDA runtime needs their
4299 // size and host-side address in order to provide access to
4300 // their device-side incarnations.
4301 return !LangOpts.CUDAIsDevice || Global->hasAttr<CUDADeviceAttr>() ||
4302 Global->hasAttr<CUDAConstantAttr>() ||
4303 Global->hasAttr<CUDASharedAttr>() ||
4304 Global->getType()->isCUDADeviceBuiltinSurfaceType() ||
4305 Global->getType()->isCUDADeviceBuiltinTextureType();
4306}
4307
/// Top-level dispatch for emitting a global declaration: handles weakrefs,
/// aliases, ifuncs, cpu_dispatch resolvers, CUDA/OpenMP filtering, then
/// either emits the definition eagerly or records it for deferred emission.
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
  const auto *Global = cast<ValueDecl>(Val: GD.getDecl());

  // Weak references don't produce any output by themselves.
  if (Global->hasAttr<WeakRefAttr>())
    return;

  // If this is an alias definition (which otherwise looks like a declaration)
  // emit it now.
  if (Global->hasAttr<AliasAttr>()) {
    if (shouldSkipAliasEmission(CGM: *this, Global))
      return;
    return EmitAliasDefinition(GD);
  }

  // IFunc like an alias whose value is resolved at runtime by calling resolver.
  if (Global->hasAttr<IFuncAttr>())
    return emitIFuncDefinition(GD);

  // If this is a cpu_dispatch multiversion function, emit the resolver.
  if (Global->hasAttr<CPUDispatchAttr>())
    return emitCPUDispatchDefinition(GD);

  // If this is CUDA, be selective about which declarations we emit.
  // Non-constexpr non-lambda implicit host device functions are not emitted
  // unless they are used on device side.
  if (LangOpts.CUDA) {
    assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) &&
           "Expected Variable or Function");
    if (const auto *VD = dyn_cast<VarDecl>(Val: Global)) {
      if (!shouldEmitCUDAGlobalVar(Global: VD))
        return;
    } else if (LangOpts.CUDAIsDevice) {
      const auto *FD = dyn_cast<FunctionDecl>(Val: Global);
      if ((!Global->hasAttr<CUDADeviceAttr>() ||
           (LangOpts.OffloadImplicitHostDeviceTemplates &&
            hasImplicitAttr<CUDAHostAttr>(D: FD) &&
            hasImplicitAttr<CUDADeviceAttr>(D: FD) && !FD->isConstexpr() &&
            !isLambdaCallOperator(DC: FD) &&
            !getContext().CUDAImplicitHostDeviceFunUsedByDevice.count(V: FD))) &&
          !Global->hasAttr<CUDAGlobalAttr>() &&
          !(LangOpts.HIPStdPar && isa<FunctionDecl>(Val: Global) &&
            !Global->hasAttr<CUDAHostAttr>()))
        return;
      // Device-only functions are the only things we skip.
    } else if (!Global->hasAttr<CUDAHostAttr>() &&
               Global->hasAttr<CUDADeviceAttr>())
      return;
  }

  if (LangOpts.OpenMP) {
    // If this is OpenMP, check if it is legal to emit this global normally.
    if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
      return;
    // Declare-reduction and declare-mapper declarations are emitted through
    // dedicated OpenMP paths, never as ordinary globals.
    if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Val: Global)) {
      if (MustBeEmitted(Global))
        EmitOMPDeclareReduction(D: DRD);
      return;
    }
    if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Val: Global)) {
      if (MustBeEmitted(Global))
        EmitOMPDeclareMapper(D: DMD);
      return;
    }
  }

  // Ignore declarations, they will be emitted on their first use.
  if (const auto *FD = dyn_cast<FunctionDecl>(Val: Global)) {
    // OpenCL-style kernels with a body also get a deferred host stub entry.
    if (DeviceKernelAttr::isOpenCLSpelling(A: FD->getAttr<DeviceKernelAttr>()) &&
        FD->doesThisDeclarationHaveABody())
      addDeferredDeclToEmit(GD: GlobalDecl(FD, KernelReferenceKind::Stub));

    // Update deferred annotations with the latest declaration if the function
    // function was already used or defined.
    if (FD->hasAttr<AnnotateAttr>()) {
      StringRef MangledName = getMangledName(GD);
      if (GetGlobalValue(Name: MangledName))
        DeferredAnnotations[MangledName] = FD;
    }

    // Forward declarations are emitted lazily on first use.
    if (!FD->doesThisDeclarationHaveABody()) {
      if (!FD->doesDeclarationForceExternallyVisibleDefinition() &&
          (!FD->isMultiVersion() || !getTarget().getTriple().isAArch64()))
        return;

      StringRef MangledName = getMangledName(GD);

      // Compute the function info and LLVM type.
      const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
      llvm::Type *Ty = getTypes().GetFunctionType(Info: FI);

      GetOrCreateLLVMFunction(MangledName, Ty, D: GD, /*ForVTable=*/false,
                              /*DontDefer=*/false);
      return;
    }
  } else {
    const auto *VD = cast<VarDecl>(Val: Global);
    assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
    if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
        !Context.isMSStaticDataMemberInlineDefinition(VD)) {
      if (LangOpts.OpenMP) {
        // Emit declaration of the must-be-emitted declare target variable.
        if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
                OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {

          // If this variable has external storage and doesn't require special
          // link handling we defer to its canonical definition.
          if (VD->hasExternalStorage() &&
              Res != OMPDeclareTargetDeclAttr::MT_Link)
            return;

          bool UnifiedMemoryEnabled =
              getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
          if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
               *Res == OMPDeclareTargetDeclAttr::MT_Enter ||
               *Res == OMPDeclareTargetDeclAttr::MT_Local) &&
              !UnifiedMemoryEnabled) {
            (void)GetAddrOfGlobalVar(D: VD);
          } else {
            assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
                    ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
                      *Res == OMPDeclareTargetDeclAttr::MT_Enter ||
                      *Res == OMPDeclareTargetDeclAttr::MT_Local) &&
                     UnifiedMemoryEnabled)) &&
                   "Link clause or to clause with unified memory expected.");
            (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
          }

          return;
        }
      }
      // If this declaration may have caused an inline variable definition to
      // change linkage, make sure that it's emitted.
      if (Context.getInlineVariableDefinitionKind(VD) ==
          ASTContext::InlineVariableDefinitionKind::Strong)
        GetAddrOfGlobalVar(D: VD);
      return;
    }
  }

  // Defer code generation to first use when possible, e.g. if this is an inline
  // function. If the global must always be emitted, do it eagerly if possible
  // to benefit from cache locality.
  if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
    // Emit the definition if it can't be deferred.
    EmitGlobalDefinition(D: GD);
    addEmittedDeferredDecl(GD);
    return;
  }

  // If we're deferring emission of a C++ variable with an
  // initializer, remember the order in which it appeared in the file.
  if (getLangOpts().CPlusPlus && isa<VarDecl>(Val: Global) &&
      cast<VarDecl>(Val: Global)->hasInit()) {
    DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
    CXXGlobalInits.push_back(x: nullptr);
  }

  StringRef MangledName = getMangledName(GD);
  if (GetGlobalValue(Name: MangledName) != nullptr) {
    // The value has already been used and should therefore be emitted.
    addDeferredDeclToEmit(GD);
  } else if (MustBeEmitted(Global)) {
    // The value must be emitted, but cannot be emitted eagerly.
    assert(!MayBeEmittedEagerly(Global));
    addDeferredDeclToEmit(GD);
  } else {
    // Otherwise, remember that we saw a deferred decl with this name. The
    // first use of the mangled name will cause it to move into
    // DeferredDeclsToEmit.
    DeferredDecls[MangledName] = GD;
  }
}
4482
4483// Check if T is a class type with a destructor that's not dllimport.
4484static bool HasNonDllImportDtor(QualType T) {
4485 if (const auto *RT =
4486 T->getBaseElementTypeUnsafe()->getAsCanonical<RecordType>())
4487 if (auto *RD = dyn_cast<CXXRecordDecl>(Val: RT->getDecl())) {
4488 RD = RD->getDefinitionOrSelf();
4489 if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
4490 return true;
4491 }
4492
4493 return false;
4494}
4495
namespace {
  /// Statement visitor that reports whether any call in a function body
  /// resolves (via an asm label or a __builtin_-prefixed library builtin) to
  /// the given emitted name — i.e. whether the function would call itself.
  struct FunctionIsDirectlyRecursive
      : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> {
    const StringRef Name;          // Emitted name of the function under test.
    const Builtin::Context &BI;    // Builtin table for library-builtin lookup.
    FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C)
        : Name(N), BI(C) {}

    bool VisitCallExpr(const CallExpr *E) {
      const FunctionDecl *FD = E->getDirectCallee();
      if (!FD)
        return false;
      // A callee whose asm label equals our emitted name is a self-call.
      AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
      if (Attr && Name == Attr->getLabel())
        return true;
      // Otherwise, only library builtins can alias our name: a call to
      // __builtin_foo lowers to foo, which may be us.
      unsigned BuiltinID = FD->getBuiltinID();
      if (!BuiltinID || !BI.isLibFunction(ID: BuiltinID))
        return false;
      std::string BuiltinNameStr = BI.getName(ID: BuiltinID);
      StringRef BuiltinName = BuiltinNameStr;
      return BuiltinName.consume_front(Prefix: "__builtin_") && Name == BuiltinName;
    }

    // Recurse into all children, short-circuiting on the first match.
    bool VisitStmt(const Stmt *S) {
      for (const Stmt *Child : S->children())
        if (Child && this->Visit(S: Child))
          return true;
      return false;
    }
  };

  // Make sure we're not referencing non-imported vars or functions.
  struct DLLImportFunctionVisitor
      : public RecursiveASTVisitor<DLLImportFunctionVisitor> {
    bool SafeToInline = true;

    bool shouldVisitImplicitCode() const { return true; }

    bool VisitVarDecl(VarDecl *VD) {
      if (VD->getTLSKind()) {
        // A thread-local variable cannot be imported.
        SafeToInline = false;
        return SafeToInline;
      }

      // A variable definition might imply a destructor call.
      if (VD->isThisDeclarationADefinition())
        SafeToInline = !HasNonDllImportDtor(T: VD->getType());

      return SafeToInline;
    }

    bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
      // Temporaries imply a destructor call at end of full-expression.
      if (const auto *D = E->getTemporary()->getDestructor())
        SafeToInline = D->hasAttr<DLLImportAttr>();
      return SafeToInline;
    }

    bool VisitDeclRefExpr(DeclRefExpr *E) {
      // Referenced functions and global variables must also be dllimport;
      // non-global variables (locals, parameters) are always safe.
      ValueDecl *VD = E->getDecl();
      if (isa<FunctionDecl>(Val: VD))
        SafeToInline = VD->hasAttr<DLLImportAttr>();
      else if (VarDecl *V = dyn_cast<VarDecl>(Val: VD))
        SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
      return SafeToInline;
    }

    bool VisitCXXConstructExpr(CXXConstructExpr *E) {
      SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>();
      return SafeToInline;
    }

    bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
      CXXMethodDecl *M = E->getMethodDecl();
      if (!M) {
        // Call through a pointer to member function. This is safe to inline.
        SafeToInline = true;
      } else {
        SafeToInline = M->hasAttr<DLLImportAttr>();
      }
      return SafeToInline;
    }

    bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
      SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
      return SafeToInline;
    }

    bool VisitCXXNewExpr(CXXNewExpr *E) {
      SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>();
      return SafeToInline;
    }
  };
}
4590
4591// isTriviallyRecursive - Check if this function calls another
4592// decl that, because of the asm attribute or the other decl being a builtin,
4593// ends up pointing to itself.
4594bool
4595CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
4596 StringRef Name;
4597 if (getCXXABI().getMangleContext().shouldMangleDeclName(D: FD)) {
4598 // asm labels are a special kind of mangling we have to support.
4599 AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
4600 if (!Attr)
4601 return false;
4602 Name = Attr->getLabel();
4603 } else {
4604 Name = FD->getName();
4605 }
4606
4607 FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
4608 const Stmt *Body = FD->getBody();
4609 return Body ? Walker.Visit(S: Body) : false;
4610}
4611
/// Decide whether a function with a body should actually be emitted. Only
/// available_externally functions may be skipped; several conditions (inline
/// builtins, -O0, named modules, noinline, unsafe dllimport, trivially
/// recursive asm-label tricks) force skipping or emission.
bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
  if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
    return true;

  const auto *F = cast<FunctionDecl>(Val: GD.getDecl());
  // Inline builtins declaration must be emitted. They often are fortified
  // functions.
  if (F->isInlineBuiltinDeclaration())
    return true;

  // At -O0 available_externally bodies are useless unless always_inline.
  if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
    return false;

  // We don't import function bodies from other named module units since that
  // behavior may break ABI compatibility of the current unit.
  if (const Module *M = F->getOwningModule();
      M && M->getTopLevelModule()->isNamedModule() &&
      getContext().getCurrentNamedModule() != M->getTopLevelModule()) {
    // There are practices to mark template member function as always-inline
    // and mark the template as extern explicit instantiation but not give
    // the definition for member function. So we have to emit the function
    // from explicitly instantiation with always-inline.
    //
    // See https://github.com/llvm/llvm-project/issues/86893 for details.
    //
    // TODO: Maybe it is better to give it a warning if we call a non-inline
    // function from other module units which is marked as always-inline.
    if (!F->isTemplateInstantiation() || !F->hasAttr<AlwaysInlineAttr>()) {
      return false;
    }
  }

  if (F->hasAttr<NoInlineAttr>())
    return false;

  if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) {
    // Check whether it would be safe to inline this dllimport function.
    DLLImportFunctionVisitor Visitor;
    Visitor.TraverseFunctionDecl(D: const_cast<FunctionDecl*>(F));
    if (!Visitor.SafeToInline)
      return false;

    if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(Val: F)) {
      // Implicit destructor invocations aren't captured in the AST, so the
      // check above can't see them. Check for them manually here.
      for (const Decl *Member : Dtor->getParent()->decls())
        if (isa<FieldDecl>(Val: Member))
          if (HasNonDllImportDtor(T: cast<FieldDecl>(Val: Member)->getType()))
            return false;
      for (const CXXBaseSpecifier &B : Dtor->getParent()->bases())
        if (HasNonDllImportDtor(T: B.getType()))
          return false;
    }
  }

  // PR9614. Avoid cases where the source code is lying to us. An available
  // externally function should have an equivalent function somewhere else,
  // but a function that calls itself through asm label/`__builtin_` trickery is
  // clearly not equivalent to the real implementation.
  // This happens in glibc's btowc and in some configure checks.
  return !isTriviallyRecursive(FD: F);
}
4674
4675bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
4676 return CodeGenOpts.OptimizationLevel > 0;
4677}
4678
/// Emit all variants of a multiversioned function: one definition per
/// cpu_specific CPU or target_clones version, plus the runtime resolver for
/// target_version / target_clones functions.
void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
                                                       llvm::GlobalValue *GV) {
  const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());

  if (FD->isCPUSpecificMultiVersion()) {
    // One definition per CPU listed in the cpu_specific attribute.
    auto *Spec = FD->getAttr<CPUSpecificAttr>();
    for (unsigned I = 0; I < Spec->cpus_size(); ++I)
      EmitGlobalFunctionDefinition(GD: GD.getWithMultiVersionIndex(Index: I), GV: nullptr);
  } else if (auto *TC = FD->getAttr<TargetClonesAttr>()) {
    // One definition per distinct target_clones feature string; duplicates
    // share the first version's definition.
    for (unsigned I = 0; I < TC->featuresStrs_size(); ++I)
      if (TC->isFirstOfVersion(Index: I))
        EmitGlobalFunctionDefinition(GD: GD.getWithMultiVersionIndex(Index: I), GV: nullptr);
  } else
    EmitGlobalFunctionDefinition(GD, GV);

  // Ensure that the resolver function is also emitted.
  if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) {
    // On AArch64 defer the resolver emission until the entire TU is processed.
    if (getTarget().getTriple().isAArch64())
      AddDeferredMultiVersionResolverToEmit(GD);
    else
      GetOrCreateMultiVersionResolver(GD);
  }
}
4703
/// Emit the IR definition for a global declaration (function or variable).
///
/// Dispatches to the C++ ABI for structors, to the multiversion path for
/// multiversioned functions, and to the plain function/variable emitters
/// otherwise. Virtual methods additionally get their thunks emitted after
/// the definition itself.
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
  const auto *D = cast<ValueDecl>(Val: GD.getDecl());

  // Improve crash reporting by naming the declaration being generated.
  PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
                                 Context.getSourceManager(),
                                 "Generating code for declaration");

  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // At -O0, don't generate IR for functions with available_externally
    // linkage.
    if (!shouldEmitFunction(GD))
      return;

    // Record a time-trace entry named after the (qualified) function.
    llvm::TimeTraceScope TimeScope("CodeGen Function", [&]() {
      std::string Name;
      llvm::raw_string_ostream OS(Name);
      FD->getNameForDiagnostic(OS, Policy: getContext().getPrintingPolicy(),
                               /*Qualified=*/true);
      return Name;
    });

    if (const auto *Method = dyn_cast<CXXMethodDecl>(Val: D)) {
      // Make sure to emit the definition(s) before we emit the thunks.
      // This is necessary for the generation of certain thunks.
      if (isa<CXXConstructorDecl>(Val: Method) || isa<CXXDestructorDecl>(Val: Method))
        ABI->emitCXXStructor(GD);
      else if (FD->isMultiVersion())
        EmitMultiVersionFunctionDefinition(GD, GV);
      else
        EmitGlobalFunctionDefinition(GD, GV);

      if (Method->isVirtual())
        getVTables().EmitThunks(GD);

      return;
    }

    if (FD->isMultiVersion())
      return EmitMultiVersionFunctionDefinition(GD, GV);
    return EmitGlobalFunctionDefinition(GD, GV);
  }

  // A variable without a definition here is emitted as a tentative one.
  if (const auto *VD = dyn_cast<VarDecl>(Val: D))
    return EmitGlobalVarDefinition(D: VD, IsTentative: !VD->hasDefinition());

  llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
}
4751
4752static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
4753 llvm::Function *NewFn);
4754
4755static llvm::APInt
4756getFMVPriority(const TargetInfo &TI,
4757 const CodeGenFunction::FMVResolverOption &RO) {
4758 llvm::SmallVector<StringRef, 8> Features{RO.Features};
4759 if (RO.Architecture)
4760 Features.push_back(Elt: *RO.Architecture);
4761 return TI.getFMVPriority(Features);
4762}
4763
4764// Multiversion functions should be at most 'WeakODRLinkage' so that a different
4765// TU can forward declare the function without causing problems. Particularly
4766// in the cases of CPUDispatch, this causes issues. This also makes sure we
4767// work with internal linkage functions, so that the same function name can be
4768// used with internal linkage in multiple TUs.
4769static llvm::GlobalValue::LinkageTypes
4770getMultiversionLinkage(CodeGenModule &CGM, GlobalDecl GD) {
4771 const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl());
4772 if (FD->getFormalLinkage() == Linkage::Internal)
4773 return llvm::GlobalValue::InternalLinkage;
4774 return llvm::GlobalValue::WeakODRLinkage;
4775}
4776
/// Emit all queued multiversion functions together with their resolvers.
///
/// Drains MultiVersionFuncs into a local list (emission below may queue new
/// entries), emits every version of every queued function, then builds and
/// sorts the resolver options by target priority before emitting the
/// resolver body. Finishes by flushing deferred decls and recursing if new
/// multiversion functions were discovered along the way.
void CodeGenModule::emitMultiVersionFunctions() {
  std::vector<GlobalDecl> MVFuncsToEmit;
  MultiVersionFuncs.swap(x&: MVFuncsToEmit);
  for (GlobalDecl GD : MVFuncsToEmit) {
    const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());
    assert(FD && "Expected a FunctionDecl");

    // Returns the llvm::Function for one version: emits the definition if
    // the version is defined in this TU, otherwise creates a declaration.
    auto createFunction = [&](const FunctionDecl *Decl, unsigned MVIdx = 0) {
      GlobalDecl CurGD{Decl->isDefined() ? Decl->getDefinition() : Decl, MVIdx};
      StringRef MangledName = getMangledName(GD: CurGD);
      llvm::Constant *Func = GetGlobalValue(Name: MangledName);
      if (!Func) {
        if (Decl->isDefined()) {
          EmitGlobalFunctionDefinition(GD: CurGD, GV: nullptr);
          Func = GetGlobalValue(Name: MangledName);
        } else {
          const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD: CurGD);
          llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI);
          Func = GetAddrOfFunction(GD: CurGD, Ty, /*ForVTable=*/false,
                                   /*DontDefer=*/false, IsForDefinition: ForDefinition);
        }
        assert(Func && "This should have just been created");
      }
      return cast<llvm::Function>(Val: Func);
    };

    // For AArch64, a resolver is only emitted if a function marked with
    // target_version("default")) or target_clones("default") is defined
    // in this TU. For other architectures it is always emitted.
    bool ShouldEmitResolver = !getTarget().getTriple().isAArch64();
    SmallVector<CodeGenFunction::FMVResolverOption, 10> Options;
    // Maps each emitted version back to its FunctionDecl so diagnostics
    // below can point at the right source location.
    llvm::DenseMap<llvm::Function *, const FunctionDecl *> DeclMap;

    // Walk every declared version of FD and build one resolver option per
    // version (per clone for target_clones).
    getContext().forEachMultiversionedFunctionVersion(
        FD, Pred: [&](const FunctionDecl *CurFD) {
          llvm::SmallVector<StringRef, 8> Feats;
          bool IsDefined = CurFD->getDefinition() != nullptr;

          if (const auto *TA = CurFD->getAttr<TargetAttr>()) {
            // target("...") multiversioning is only supported on X86 here.
            assert(getTarget().getTriple().isX86() && "Unsupported target");
            TA->getX86AddedFeatures(Out&: Feats);
            llvm::Function *Func = createFunction(CurFD);
            DeclMap.insert(KV: {Func, CurFD});
            Options.emplace_back(Args&: Func, Args&: Feats, Args: TA->getX86Architecture());
          } else if (const auto *TVA = CurFD->getAttr<TargetVersionAttr>()) {
            // A defined default version is what enables resolver emission
            // on AArch64 (see ShouldEmitResolver above).
            if (TVA->isDefaultVersion() && IsDefined)
              ShouldEmitResolver = true;
            llvm::Function *Func = createFunction(CurFD);
            DeclMap.insert(KV: {Func, CurFD});
            // Feature separator differs by target: '+' on AArch64, ',' else.
            char Delim = getTarget().getTriple().isAArch64() ? '+' : ',';
            TVA->getFeatures(Out&: Feats, Delim);
            Options.emplace_back(Args&: Func, Args&: Feats);
          } else if (const auto *TC = CurFD->getAttr<TargetClonesAttr>()) {
            // One option per distinct clone; skip duplicate feature strings.
            for (unsigned I = 0; I < TC->featuresStrs_size(); ++I) {
              if (!TC->isFirstOfVersion(Index: I))
                continue;
              if (TC->isDefaultVersion(Index: I) && IsDefined)
                ShouldEmitResolver = true;
              llvm::Function *Func = createFunction(CurFD, I);
              DeclMap.insert(KV: {Func, CurFD});
              Feats.clear();
              if (getTarget().getTriple().isX86()) {
                TC->getX86Feature(Out&: Feats, Index: I);
                Options.emplace_back(Args&: Func, Args&: Feats, Args: TC->getX86Architecture(Index: I));
              } else {
                char Delim = getTarget().getTriple().isAArch64() ? '+' : ',';
                TC->getFeatures(Out&: Feats, Index: I, Delim);
                Options.emplace_back(Args&: Func, Args&: Feats);
              }
            }
          } else
            llvm_unreachable("unexpected MultiVersionKind");
        });

    if (!ShouldEmitResolver)
      continue;

    llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD);
    if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: ResolverConstant)) {
      // The resolver body is emitted into the ifunc's resolver function.
      ResolverConstant = IFunc->getResolver();
      if (FD->isTargetClonesMultiVersion() &&
          !getTarget().getTriple().isAArch64()) {
        std::string MangledName = getMangledNameImpl(
            CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);
        if (!GetGlobalValue(Name: MangledName + ".ifunc")) {
          const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
          llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI);
          // In prior versions of Clang, the mangling for ifuncs incorrectly
          // included an .ifunc suffix. This alias is generated for backward
          // compatibility. It is deprecated, and may be removed in the future.
          auto *Alias = llvm::GlobalAlias::create(
              Ty: DeclTy, AddressSpace: 0, Linkage: getMultiversionLinkage(CGM&: *this, GD),
              Name: MangledName + ".ifunc", Aliasee: IFunc, Parent: &getModule());
          SetCommonAttributes(GD: FD, GV: Alias);
        }
      }
    }
    llvm::Function *ResolverFunc = cast<llvm::Function>(Val: ResolverConstant);

    // Order options from highest to lowest priority; the resolver checks
    // them in this order, so higher-priority versions win.
    const TargetInfo &TI = getTarget();
    llvm::stable_sort(
        Range&: Options, C: [&TI](const CodeGenFunction::FMVResolverOption &LHS,
                       const CodeGenFunction::FMVResolverOption &RHS) {
          return getFMVPriority(TI, RO: LHS).ugt(RHS: getFMVPriority(TI, RO: RHS));
        });

    // Diagnose unreachable function versions.
    if (getTarget().getTriple().isAArch64()) {
      for (auto I = Options.begin() + 1, E = Options.end(); I != E; ++I) {
        llvm::APInt RHS = llvm::AArch64::getCpuSupportsMask(Features: I->Features);
        // A version is unreachable when some earlier (higher priority)
        // option's feature mask already covers its features.
        if (std::any_of(first: Options.begin(), last: I, pred: [RHS](auto RO) {
              llvm::APInt LHS = llvm::AArch64::getCpuSupportsMask(Features: RO.Features);
              return LHS.isSubsetOf(RHS);
            })) {
          Diags.Report(Loc: DeclMap[I->Function]->getLocation(),
                       DiagID: diag::warn_unreachable_version)
              << I->Function->getName();
          assert(I->Function->user_empty() && "unexpected users");
          I->Function->eraseFromParent();
          I->Function = nullptr;
        }
      }
    }
    CodeGenFunction CGF(*this);
    CGF.EmitMultiVersionResolver(Resolver: ResolverFunc, Options);

    setMultiVersionResolverAttributes(Resolver: ResolverFunc, GD);
    if (!ResolverFunc->hasLocalLinkage() && supportsCOMDAT())
      ResolverFunc->setComdat(
          getModule().getOrInsertComdat(Name: ResolverFunc->getName()));
  }

  // Ensure that any additions to the deferred decls list caused by emitting a
  // variant are emitted. This can happen when the variant itself is inline and
  // calls a function without linkage.
  if (!MVFuncsToEmit.empty())
    EmitDeferred();

  // Ensure that any additions to the multiversion funcs list from either the
  // deferred decls or the multiversion functions themselves are emitted.
  if (!MultiVersionFuncs.empty())
    emitMultiVersionFunctions();
}
4920
// Symbols with this prefix are used as deactivation symbols for PFP fields.
// See clang/docs/StructureProtection.rst for more information.
static const char PFPDeactivationSymbolPrefix[] = "__pfp_ds_";

/// Return (creating on first use) the deactivation symbol for a PFP field.
///
/// The symbol is looked up by "__pfp_ds_" + field name; when absent, an
/// extern_weak, hidden i8 global of that name is created as a placeholder.
llvm::GlobalValue *
CodeGenModule::getPFPDeactivationSymbol(const FieldDecl *FD) {
  std::string DSName = PFPDeactivationSymbolPrefix + getPFPFieldName(FD);
  llvm::GlobalValue *DS = TheModule.getNamedValue(Name: DSName);
  if (!DS) {
    // extern_weak: the symbol may legitimately be absent at link time.
    DS = new llvm::GlobalVariable(TheModule, Int8Ty, false,
                                  llvm::GlobalVariable::ExternalWeakLinkage,
                                  nullptr, DSName);
    DS->setVisibility(llvm::GlobalValue::HiddenVisibility);
  }
  return DS;
}
4937
/// Define deactivation symbols for all PFP fields whose offsets were
/// evaluated in this TU, replacing any placeholder declarations created by
/// getPFPDeactivationSymbol with concrete aliases.
void CodeGenModule::emitPFPFieldsWithEvaluatedOffset() {
  // 0xd503201f is the AArch64 NOP instruction encoding; the alias target is
  // that constant cast to a pointer. NOTE(review): presumably patchable-NOP
  // machinery consumes this — confirm against StructureProtection.rst.
  llvm::Constant *Nop = llvm::ConstantExpr::getIntToPtr(
      C: llvm::ConstantInt::get(Ty: Int64Ty, V: 0xd503201f), Ty: VoidPtrTy);
  for (auto *FD : getContext().PFPFieldsWithEvaluatedOffset) {
    std::string DSName = PFPDeactivationSymbolPrefix + getPFPFieldName(FD);
    llvm::GlobalValue *OldDS = TheModule.getNamedValue(Name: DSName);
    // Create the definitive alias, then splice it in place of any earlier
    // extern_weak placeholder of the same name.
    llvm::GlobalValue *DS = llvm::GlobalAlias::create(
        Ty: Int8Ty, AddressSpace: 0, Linkage: llvm::GlobalValue::ExternalLinkage, Name: DSName, Aliasee: Nop, Parent: &TheModule);
    DS->setVisibility(llvm::GlobalValue::HiddenVisibility);
    if (OldDS) {
      DS->takeName(V: OldDS);
      OldDS->replaceAllUsesWith(V: DS);
      OldDS->eraseFromParent();
    }
  }
}
4954
4955static void replaceDeclarationWith(llvm::GlobalValue *Old,
4956 llvm::Constant *New) {
4957 assert(cast<llvm::Function>(Old)->isDeclaration() && "Not a declaration");
4958 New->takeName(V: Old);
4959 Old->replaceAllUsesWith(V: New);
4960 Old->eraseFromParent();
4961}
4962
/// Emit the dispatcher for a cpu_dispatch multiversion function.
///
/// Builds the resolver function (or the resolver behind an ifunc when the
/// target supports ifuncs), collects one option per CPU listed in the
/// cpu_dispatch attribute, orders them by CPU feature mask, prunes
/// redundant "default" entries, and emits the resolver body.
void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
  const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());
  assert(FD && "Not a FunctionDecl?");
  assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?");
  const auto *DD = FD->getAttr<CPUDispatchAttr>();
  assert(DD && "Not a cpu_dispatch Function?");

  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
  llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI);

  StringRef ResolverName = getMangledName(GD);
  UpdateMultiVersionNames(GD, FD, CurName&: ResolverName);

  // With ifunc support the resolver returns a pointer to the selected
  // version; without it, the resolver itself has the function's type.
  llvm::Type *ResolverType;
  GlobalDecl ResolverGD;
  if (getTarget().supportsIFunc()) {
    ResolverType = llvm::FunctionType::get(
        Result: llvm::PointerType::get(C&: getLLVMContext(),
                               AddressSpace: getTypes().getTargetAddressSpace(T: FD->getType())),
        isVarArg: false);
  }
  else {
    ResolverType = DeclTy;
    ResolverGD = GD;
  }

  auto *ResolverFunc = cast<llvm::Function>(Val: GetOrCreateLLVMFunction(
      MangledName: ResolverName, Ty: ResolverType, D: ResolverGD, /*ForVTable=*/false));

  if (supportsCOMDAT())
    ResolverFunc->setComdat(
        getModule().getOrInsertComdat(Name: ResolverFunc->getName()));

  SmallVector<CodeGenFunction::FMVResolverOption, 10> Options;
  const TargetInfo &Target = getTarget();
  unsigned Index = 0;
  for (const IdentifierInfo *II : DD->cpus()) {
    // Get the name of the target function so we can look it up/create it.
    std::string MangledName = getMangledNameImpl(CGM&: *this, GD, ND: FD, OmitMultiVersionMangling: true) +
                              getCPUSpecificMangling(CGM: *this, Name: II->getName());

    llvm::Constant *Func = GetGlobalValue(Name: MangledName);

    if (!Func) {
      // Emit the version's definition if one exists in this TU; otherwise
      // create (or reuse) a declaration for it.
      GlobalDecl ExistingDecl = Manglings.lookup(Key: MangledName);
      if (ExistingDecl.getDecl() &&
          ExistingDecl.getDecl()->getAsFunction()->isDefined()) {
        EmitGlobalFunctionDefinition(GD: ExistingDecl, GV: nullptr);
        Func = GetGlobalValue(Name: MangledName);
      } else {
        if (!ExistingDecl.getDecl())
          ExistingDecl = GD.getWithMultiVersionIndex(Index);

        Func = GetOrCreateLLVMFunction(
            MangledName, Ty: DeclTy, D: ExistingDecl,
            /*ForVTable=*/false, /*DontDefer=*/true,
            /*IsThunk=*/false, ExtraAttrs: llvm::AttributeList(), IsForDefinition: ForDefinition);
      }
    }

    // Translate the CPU name into its feature list, dropping each feature's
    // leading character and any feature the target cannot check at runtime.
    llvm::SmallVector<StringRef, 32> Features;
    Target.getCPUSpecificCPUDispatchFeatures(Name: II->getName(), Features);
    llvm::transform(Range&: Features, d_first: Features.begin(),
                    F: [](StringRef Str) { return Str.substr(Start: 1); });
    llvm::erase_if(C&: Features, P: [&Target](StringRef Feat) {
      return !Target.validateCpuSupports(Name: Feat);
    });
    Options.emplace_back(Args: cast<llvm::Function>(Val: Func), Args&: Features);
    ++Index;
  }

  // Highest feature mask first: the resolver tries options in order.
  llvm::stable_sort(Range&: Options, C: [](const CodeGenFunction::FMVResolverOption &LHS,
                               const CodeGenFunction::FMVResolverOption &RHS) {
    return llvm::X86::getCpuSupportsMask(FeatureStrs: LHS.Features) >
           llvm::X86::getCpuSupportsMask(FeatureStrs: RHS.Features);
  });

  // If the list contains multiple 'default' versions, such as when it contains
  // 'pentium' and 'generic', don't emit the call to the generic one (since we
  // always run on at least a 'pentium'). We do this by deleting the 'least
  // advanced' (read, lowest mangling letter).
  while (Options.size() > 1 && llvm::all_of(Range: llvm::X86::getCpuSupportsMask(
                                   FeatureStrs: (Options.end() - 2)->Features),
                               P: [](auto X) { return X == 0; })) {
    StringRef LHSName = (Options.end() - 2)->Function->getName();
    StringRef RHSName = (Options.end() - 1)->Function->getName();
    if (LHSName.compare(RHS: RHSName) < 0)
      Options.erase(CI: Options.end() - 2);
    else
      Options.erase(CI: Options.end() - 1);
  }

  CodeGenFunction CGF(*this);
  CGF.EmitMultiVersionResolver(Resolver: ResolverFunc, Options);
  setMultiVersionResolverAttributes(Resolver: ResolverFunc, GD);

  if (getTarget().supportsIFunc()) {
    llvm::GlobalValue::LinkageTypes Linkage = getMultiversionLinkage(CGM&: *this, GD);
    auto *IFunc = cast<llvm::GlobalValue>(Val: GetOrCreateMultiVersionResolver(GD));
    unsigned AS = IFunc->getType()->getPointerAddressSpace();

    // Fix up function declarations that were created for cpu_specific before
    // cpu_dispatch was known
    if (!isa<llvm::GlobalIFunc>(Val: IFunc)) {
      auto *GI = llvm::GlobalIFunc::create(Ty: DeclTy, AddressSpace: AS, Linkage, Name: "",
                                           Resolver: ResolverFunc, Parent: &getModule());
      replaceDeclarationWith(Old: IFunc, New: GI);
      IFunc = GI;
    }

    // Also expose the dispatcher under the unversioned mangled name.
    std::string AliasName = getMangledNameImpl(
        CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);
    llvm::Constant *AliasFunc = GetGlobalValue(Name: AliasName);
    if (!AliasFunc) {
      auto *GA = llvm::GlobalAlias::create(Ty: DeclTy, AddressSpace: AS, Linkage, Name: AliasName,
                                           Aliasee: IFunc, Parent: &getModule());
      SetCommonAttributes(GD, GV: GA);
    }
  }
}
5083
5084/// Adds a declaration to the list of multi version functions if not present.
5085void CodeGenModule::AddDeferredMultiVersionResolverToEmit(GlobalDecl GD) {
5086 const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());
5087 assert(FD && "Not a FunctionDecl?");
5088
5089 if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) {
5090 std::string MangledName =
5091 getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);
5092 if (!DeferredResolversToEmit.insert(key: MangledName).second)
5093 return;
5094 }
5095 MultiVersionFuncs.push_back(x: GD);
5096}
5097
/// If a dispatcher for the specified mangled name is not in the module, create
/// and return it. The dispatcher is either an llvm Function with the specified
/// type, or a global ifunc.
llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
  const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());
  assert(FD && "Not a FunctionDecl?");

  std::string MangledName =
      getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);

  // Holds the name of the resolver, in ifunc mode this is the ifunc (which has
  // a separate resolver).
  std::string ResolverName = MangledName;
  if (getTarget().supportsIFunc()) {
    // Kind-dependent suffix: Target/CPUSpecific/CPUDispatch ifuncs carry an
    // ".ifunc" suffix; TargetClones/TargetVersion use the bare name.
    switch (FD->getMultiVersionKind()) {
    case MultiVersionKind::None:
      llvm_unreachable("unexpected MultiVersionKind::None for resolver");
    case MultiVersionKind::Target:
    case MultiVersionKind::CPUSpecific:
    case MultiVersionKind::CPUDispatch:
      ResolverName += ".ifunc";
      break;
    case MultiVersionKind::TargetClones:
    case MultiVersionKind::TargetVersion:
      break;
    }
  } else if (FD->isTargetMultiVersion()) {
    ResolverName += ".resolver";
  }

  // cpu_specific never gets an ifunc here (see the comment further below).
  bool ShouldReturnIFunc =
      getTarget().supportsIFunc() && !FD->isCPUSpecificMultiVersion();

  // If the resolver has already been created, just return it. This lookup may
  // yield a function declaration instead of a resolver on AArch64. That is
  // because we didn't know whether a resolver will be generated when we first
  // encountered a use of the symbol named after this resolver. Therefore,
  // targets which support ifuncs should not return here unless we actually
  // found an ifunc.
  llvm::GlobalValue *ResolverGV = GetGlobalValue(Name: ResolverName);
  if (ResolverGV && (isa<llvm::GlobalIFunc>(Val: ResolverGV) || !ShouldReturnIFunc))
    return ResolverGV;

  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
  llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI);

  // The resolver needs to be created. For target and target_clones, defer
  // creation until the end of the TU.
  if (FD->isTargetMultiVersion() || FD->isTargetClonesMultiVersion())
    AddDeferredMultiVersionResolverToEmit(GD);

  // For cpu_specific, don't create an ifunc yet because we don't know if the
  // cpu_dispatch will be emitted in this translation unit.
  if (ShouldReturnIFunc) {
    // Create the ifunc and its ".resolver" function, then splice the ifunc
    // in place of any pre-existing plain declaration of the same name.
    unsigned AS = getTypes().getTargetAddressSpace(T: FD->getType());
    llvm::Type *ResolverType = llvm::FunctionType::get(
        Result: llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: AS), isVarArg: false);
    llvm::Constant *Resolver = GetOrCreateLLVMFunction(
        MangledName: MangledName + ".resolver", Ty: ResolverType, D: GlobalDecl{},
        /*ForVTable=*/false);
    llvm::GlobalIFunc *GIF =
        llvm::GlobalIFunc::create(Ty: DeclTy, AddressSpace: AS, Linkage: getMultiversionLinkage(CGM&: *this, GD),
                                  Name: "", Resolver, Parent: &getModule());
    GIF->setName(ResolverName);
    SetCommonAttributes(GD: FD, GV: GIF);
    if (ResolverGV)
      replaceDeclarationWith(Old: ResolverGV, New: GIF);
    return GIF;
  }

  // Non-ifunc path: the resolver is a plain function of the decl's type.
  llvm::Constant *Resolver = GetOrCreateLLVMFunction(
      MangledName: ResolverName, Ty: DeclTy, D: GlobalDecl{}, /*ForVTable=*/false);
  assert(isa<llvm::GlobalValue>(Resolver) && !ResolverGV &&
         "Resolver should be created for the first time");
  SetCommonAttributes(GD: FD, GV: cast<llvm::GlobalValue>(Val: Resolver));
  return Resolver;
}
5175
/// Apply linkage, visibility, DSO-locality, sanitizer exemption and default
/// target attributes to a freshly emitted multiversion resolver.
void CodeGenModule::setMultiVersionResolverAttributes(llvm::Function *Resolver,
                                                      GlobalDecl GD) {
  const NamedDecl *D = dyn_cast_or_null<NamedDecl>(Val: GD.getDecl());
  Resolver->setLinkage(getMultiversionLinkage(CGM&: *this, GD));

  // Function body has to be emitted before calling setGlobalVisibility
  // for Resolver to be considered as definition.
  setGlobalVisibility(GV: Resolver, D);

  setDSOLocal(Resolver);

  // The resolver must be exempt from sanitizer instrumentation, as it can run
  // before the sanitizer is initialized.
  // (https://github.com/llvm/llvm-project/issues/163369)
  Resolver->addFnAttr(Kind: llvm::Attribute::DisableSanitizerInstrumentation);

  // Set the default target-specific attributes, such as PAC and BTI ones on
  // AArch64. Not passing Decl to prevent setting unrelated attributes,
  // as Resolver can be shared by multiple declarations.
  // FIXME Some targets may require a non-null D to set some attributes
  // (such as "stackrealign" on X86, even when it is requested via
  // "-mstackrealign" command line option).
  getTargetCodeGenInfo().setTargetAttributes(/*D=*/nullptr, GV: Resolver, M&: *this);
}
5200
5201bool CodeGenModule::shouldDropDLLAttribute(const Decl *D,
5202 const llvm::GlobalValue *GV) const {
5203 auto SC = GV->getDLLStorageClass();
5204 if (SC == llvm::GlobalValue::DefaultStorageClass)
5205 return false;
5206 const Decl *MRD = D->getMostRecentDecl();
5207 return (((SC == llvm::GlobalValue::DLLImportStorageClass &&
5208 !MRD->hasAttr<DLLImportAttr>()) ||
5209 (SC == llvm::GlobalValue::DLLExportStorageClass &&
5210 !MRD->hasAttr<DLLExportAttr>())) &&
5211 !shouldMapVisibilityToDLLExport(D: cast<NamedDecl>(Val: MRD)));
5212}
5213
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
/// module, create and return an llvm Function with the specified type. If there
/// is something in the module with the specified name, return it potentially
/// bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that correspond to this. This is used
/// to set the attributes on the function when it is first created.
llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
    StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable,
    bool DontDefer, bool IsThunk, llvm::AttributeList ExtraAttrs,
    ForDefinition_t IsForDefinition) {
  const Decl *D = GD.getDecl();

  std::string NameWithoutMultiVersionMangling;
  if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(Val: D)) {
    // For the device mark the function as one that should be emitted.
    if (getLangOpts().OpenMPIsTargetDevice && OpenMPRuntime &&
        !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
        !DontDefer && !IsForDefinition) {
      if (const FunctionDecl *FDDef = FD->getDefinition()) {
        // Preserve the structor kind when switching to the definition decl.
        GlobalDecl GDDef;
        if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: FDDef))
          GDDef = GlobalDecl(CD, GD.getCtorType());
        else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: FDDef))
          GDDef = GlobalDecl(DD, GD.getDtorType());
        else
          GDDef = GlobalDecl(FDDef);
        EmitGlobal(GD: GDDef);
      }
    }

    // Any attempts to use a MultiVersion function should result in retrieving
    // the iFunc instead. Name Mangling will handle the rest of the changes.
    if (FD->isMultiVersion()) {
      UpdateMultiVersionNames(GD, FD, CurName&: MangledName);
      if (!IsForDefinition) {
        // On AArch64 we do not immediatelly emit an ifunc resolver when a
        // function is used. Instead we defer the emission until we see a
        // default definition. In the meantime we just reference the symbol
        // without FMV mangling (it may or may not be replaced later).
        if (getTarget().getTriple().isAArch64()) {
          AddDeferredMultiVersionResolverToEmit(GD);
          NameWithoutMultiVersionMangling = getMangledNameImpl(
              CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);
        } else
          return GetOrCreateMultiVersionResolver(GD);
      }
    }
  }

  if (!NameWithoutMultiVersionMangling.empty())
    MangledName = NameWithoutMultiVersionMangling;

  // Lookup the entry, lazily creating it if necessary.
  llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName);
  if (Entry) {
    // A symbol previously only weakly referenced regains external linkage
    // once a real (non-weakref) declaration shows up.
    if (WeakRefReferences.erase(Ptr: Entry)) {
      const FunctionDecl *FD = cast_or_null<FunctionDecl>(Val: D);
      if (FD && !FD->hasAttr<WeakAttr>())
        Entry->setLinkage(llvm::Function::ExternalLinkage);
    }

    // Handle dropped DLL attributes.
    if (D && shouldDropDLLAttribute(D, GV: Entry)) {
      Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
      setDSOLocal(Entry);
    }

    // If there are two attempts to define the same mangled name, issue an
    // error.
    if (IsForDefinition && !Entry->isDeclaration()) {
      GlobalDecl OtherGD;
      // Check that GD is not yet in DiagnosedConflictingDefinitions is required
      // to make sure that we issue an error only once.
      if (lookupRepresentativeDecl(MangledName, Result&: OtherGD) &&
          (GD.getCanonicalDecl().getDecl() !=
           OtherGD.getCanonicalDecl().getDecl()) &&
          DiagnosedConflictingDefinitions.insert(V: GD).second) {
        getDiags().Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name)
            << MangledName;
        getDiags().Report(Loc: OtherGD.getDecl()->getLocation(),
                          DiagID: diag::note_previous_definition);
      }
    }

    // The existing entry can be reused as-is when its value type matches.
    if ((isa<llvm::Function>(Val: Entry) || isa<llvm::GlobalAlias>(Val: Entry)) &&
        (Entry->getValueType() == Ty)) {
      return Entry;
    }

    // Make sure the result is of the correct type.
    // (If function is requested for a definition, we always need to create a new
    // function, not just return a bitcast.)
    if (!IsForDefinition)
      return Entry;
  }

  // This function doesn't have a complete type (for example, the return
  // type is an incomplete struct). Use a fake type instead, and make
  // sure not to try to set attributes.
  bool IsIncompleteFunction = false;

  llvm::FunctionType *FTy;
  if (isa<llvm::FunctionType>(Val: Ty)) {
    FTy = cast<llvm::FunctionType>(Val: Ty);
  } else {
    FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false);
    IsIncompleteFunction = true;
  }

  // When replacing an existing Entry, create the new function unnamed; it
  // takes over the name below.
  llvm::Function *F =
      llvm::Function::Create(Ty: FTy, Linkage: llvm::Function::ExternalLinkage,
                             N: Entry ? StringRef() : MangledName, M: &getModule());

  // Store the declaration associated with this function so it is potentially
  // updated by further declarations or definitions and emitted at the end.
  if (D && D->hasAttr<AnnotateAttr>())
    DeferredAnnotations[MangledName] = cast<ValueDecl>(Val: D);

  // If we already created a function with the same mangled name (but different
  // type) before, take its name and add it to the list of functions to be
  // replaced with F at the end of CodeGen.
  //
  // This happens if there is a prototype for a function (e.g. "int f()") and
  // then a definition of a different type (e.g. "int f(int x)").
  if (Entry) {
    F->takeName(V: Entry);

    // This might be an implementation of a function without a prototype, in
    // which case, try to do special replacement of calls which match the new
    // prototype. The really key thing here is that we also potentially drop
    // arguments from the call site so as to make a direct call, which makes the
    // inliner happier and suppresses a number of optimizer warnings (!) about
    // dropping arguments.
    if (!Entry->use_empty()) {
      ReplaceUsesOfNonProtoTypeWithRealFunction(Old: Entry, NewFn: F);
      Entry->removeDeadConstantUsers();
    }

    addGlobalValReplacement(GV: Entry, C: F);
  }

  assert(F->getName() == MangledName && "name was uniqued!");
  if (D)
    SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
  if (ExtraAttrs.hasFnAttrs()) {
    llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs());
    F->addFnAttrs(Attrs: B);
  }

  if (!DontDefer) {
    // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
    // each other bottoming out with the base dtor. Therefore we emit non-base
    // dtors on usage, even if there is no dtor definition in the TU.
    if (isa_and_nonnull<CXXDestructorDecl>(Val: D) &&
        getCXXABI().useThunkForDtorVariant(Dtor: cast<CXXDestructorDecl>(Val: D),
                                           DT: GD.getDtorType()))
      addDeferredDeclToEmit(GD);

    // This is the first use or definition of a mangled name. If there is a
    // deferred decl with this name, remember that we need to emit it at the end
    // of the file.
    auto DDI = DeferredDecls.find(Val: MangledName);
    if (DDI != DeferredDecls.end()) {
      // Move the potentially referenced deferred decl to the
      // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
      // don't need it anymore).
      addDeferredDeclToEmit(GD: DDI->second);
      DeferredDecls.erase(I: DDI);

      // Otherwise, there are cases we have to worry about where we're
      // using a declaration for which we must emit a definition but where
      // we might not find a top-level definition:
      //   - member functions defined inline in their classes
      //   - friend functions defined inline in some class
      //   - special member functions with implicit definitions
      // If we ever change our AST traversal to walk into class methods,
      // this will be unnecessary.
      //
      // We also don't emit a definition for a function if it's going to be an
      // entry in a vtable, unless it's already marked as used.
    } else if (getLangOpts().CPlusPlus && D) {
      // Look for a declaration that's lexically in a record.
      for (const auto *FD = cast<FunctionDecl>(Val: D)->getMostRecentDecl(); FD;
           FD = FD->getPreviousDecl()) {
        if (isa<CXXRecordDecl>(Val: FD->getLexicalDeclContext())) {
          if (FD->doesThisDeclarationHaveABody()) {
            addDeferredDeclToEmit(GD: GD.getWithDecl(D: FD));
            break;
          }
        }
      }
    }
  }

  // Make sure the result is of the requested type.
  if (!IsIncompleteFunction) {
    assert(F->getFunctionType() == Ty);
    return F;
  }

  return F;
}
5417
/// GetAddrOfFunction - Return the address of the given function. If Ty is
/// non-null, then this function will use the specified type if it has to
/// create it (this occurs when we see a definition of the function).
///
/// \param GD the function, together with its ctor/dtor variant or kernel
///        reference kind where applicable.
/// \param Ty the LLVM function type to create the declaration with; when
///        null it is derived from the declaration's AST type.
/// \param ForVTable true when the address is being taken to fill a vtable
///        slot.
/// \param DontDefer if true, skip the deferred-emission bookkeeping.
/// \param IsForDefinition whether the caller is about to emit a definition.
llvm::Constant *
CodeGenModule::GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty, bool ForVTable,
                                 bool DontDefer,
                                 ForDefinition_t IsForDefinition) {
  // If there was no specific requested type, just convert it now.
  if (!Ty) {
    const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());
    Ty = getTypes().ConvertType(T: FD->getType());
    // An OpenCL kernel referenced through its stub uses the arranged
    // CGFunctionInfo type rather than the plain converted AST type.
    if (DeviceKernelAttr::isOpenCLSpelling(A: FD->getAttr<DeviceKernelAttr>()) &&
        GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
      Ty = getTypes().GetFunctionType(Info: FI);
    }
  }

  // Devirtualized destructor calls may come through here instead of via
  // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead
  // of the complete destructor when necessary.
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: GD.getDecl())) {
    if (getTarget().getCXXABI().isMicrosoft() &&
        GD.getDtorType() == Dtor_Complete &&
        DD->getParent()->getNumVBases() == 0)
      GD = GlobalDecl(DD, Dtor_Base);
  }

  StringRef MangledName = getMangledName(GD);
  auto *F = GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
                                    /*IsThunk=*/false, ExtraAttrs: llvm::AttributeList(),
                                    IsForDefinition);
  // Returns kernel handle for HIP kernel stub function.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice &&
      cast<FunctionDecl>(Val: GD.getDecl())->hasAttr<CUDAGlobalAttr>()) {
    auto *Handle = getCUDARuntime().getKernelHandle(
        Stub: cast<llvm::Function>(Val: F->stripPointerCasts()), GD);
    // When defining the stub itself, the function (not the handle) is needed.
    if (IsForDefinition)
      return F;
    return Handle;
  }
  return F;
}
5461
5462llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) {
5463 llvm::GlobalValue *F =
5464 cast<llvm::GlobalValue>(Val: GetAddrOfFunction(GD: Decl)->stripPointerCasts());
5465
5466 return llvm::NoCFIValue::get(GV: F);
5467}
5468
/// Look up the AST declaration (if any) of a runtime function with the given
/// (possibly pre-mangled) name so its attributes, e.g. dllimport, can be
/// consulted. Searches translation-unit scope first; in C++ it additionally
/// searches the __cxxabiv1 and std namespaces, looking through linkage-spec
/// wrappers. Returns null when no FunctionDecl is found.
static const FunctionDecl *
GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
  TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
  DeclContext *DC = TranslationUnitDecl::castToDeclContext(D: TUDecl);

  // First: a TU-scope lookup using the literal name.
  IdentifierInfo &CII = C.Idents.get(Name);
  for (const auto *Result : DC->lookup(Name: &CII))
    if (const auto *FD = dyn_cast<FunctionDecl>(Val: Result))
      return FD;

  if (!C.getLangOpts().CPlusPlus)
    return nullptr;

  // Demangle the premangled name from getTerminateFn()
  IdentifierInfo &CXXII =
      (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ")
          ? C.Idents.get(Name: "terminate")
          : C.Idents.get(Name);

  for (const auto &N : {"__cxxabiv1", "std"}) {
    IdentifierInfo &NS = C.Idents.get(Name: N);
    for (const auto *Result : DC->lookup(Name: &NS)) {
      const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Val: Result);
      // When the namespace is wrapped in a linkage spec, re-find the
      // namespace decl inside it (the inner loop rebinds ND).
      if (auto *LSD = dyn_cast<LinkageSpecDecl>(Val: Result))
        for (const auto *Result : LSD->lookup(Name: &NS))
          if ((ND = dyn_cast<NamespaceDecl>(Val: Result)))
            break;

      if (ND)
        for (const auto *Result : ND->lookup(Name: &CXXII))
          if (const auto *FD = dyn_cast<FunctionDecl>(Val: Result))
            return FD;
    }
  }

  return nullptr;
}
5506
5507static void setWindowsItaniumDLLImport(CodeGenModule &CGM, bool Local,
5508 llvm::Function *F, StringRef Name) {
5509 // In Windows Itanium environments, try to mark runtime functions
5510 // dllimport. For Mingw and MSVC, don't. We don't really know if the user
5511 // will link their standard library statically or dynamically. Marking
5512 // functions imported when they are not imported can cause linker errors
5513 // and warnings.
5514 if (!Local && CGM.getTriple().isWindowsItaniumEnvironment() &&
5515 !CGM.getCodeGenOpts().LTOVisibilityPublicStd) {
5516 const FunctionDecl *FD = GetRuntimeFunctionDecl(C&: CGM.getContext(), Name);
5517 if (!FD || FD->hasAttr<DLLImportAttr>()) {
5518 F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
5519 F->setLinkage(llvm::GlobalValue::ExternalLinkage);
5520 }
5521 }
5522}
5523
/// Create (or find) a runtime function whose signature is given in AST terms,
/// so that its LLVM attributes can be derived from the proper CGFunctionInfo
/// arrangement rather than approximated from the IR type.
///
/// \param AssumeConvergent add the 'convergent' attribute; used for runtime
///        calls that may involve cross-lane communication on GPU targets.
llvm::FunctionCallee CodeGenModule::CreateRuntimeFunction(
    QualType ReturnTy, ArrayRef<QualType> ArgTys, StringRef Name,
    llvm::AttributeList ExtraAttrs, bool Local, bool AssumeConvergent) {
  if (AssumeConvergent) {
    ExtraAttrs =
        ExtraAttrs.addFnAttribute(C&: VMContext, Kind: llvm::Attribute::Convergent);
  }

  // Build a prototype from the AST types and arrange it as a free function.
  QualType FTy = Context.getFunctionType(ResultTy: ReturnTy, Args: ArgTys,
                                         EPI: FunctionProtoType::ExtProtoInfo());
  const CGFunctionInfo &Info = getTypes().arrangeFreeFunctionType(
      Ty: Context.getCanonicalType(T: FTy).castAs<FunctionProtoType>());
  auto *ConvTy = getTypes().GetFunctionType(Info);
  llvm::Constant *C = GetOrCreateLLVMFunction(
      MangledName: Name, Ty: ConvTy, GD: GlobalDecl(), /*ForVTable=*/false,
      /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);

  if (auto *F = dyn_cast<llvm::Function>(Val: C)) {
    // Only configure the function the first time it is created (no body yet).
    if (F->empty()) {
      SetLLVMFunctionAttributes(GD: GlobalDecl(), Info, F, /*IsThunk*/ false);
      // FIXME: Set calling-conv properly in ExtProtoInfo
      F->setCallingConv(getRuntimeCC());
      setWindowsItaniumDLLImport(CGM&: *this, Local, F, Name);
      setDSOLocal(F);
    }
  }
  return {ConvTy, C};
}
5552
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
///
/// \param FTy the raw LLVM function type to declare the function with.
/// \param ExtraAttrs additional attributes to attach to the declaration.
/// \param Local when false, the function may be marked dllimport on Windows
///        Itanium environments.
/// \param AssumeConvergent add the 'convergent' attribute to the function.
llvm::FunctionCallee
CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
                                     llvm::AttributeList ExtraAttrs, bool Local,
                                     bool AssumeConvergent) {
  if (AssumeConvergent) {
    ExtraAttrs =
        ExtraAttrs.addFnAttribute(C&: VMContext, Kind: llvm::Attribute::Convergent);
  }

  llvm::Constant *C =
      GetOrCreateLLVMFunction(MangledName: Name, Ty: FTy, GD: GlobalDecl(), /*ForVTable=*/false,
                              /*DontDefer=*/false, /*IsThunk=*/false,
                              ExtraAttrs);

  if (auto *F = dyn_cast<llvm::Function>(Val: C)) {
    // Only configure the function the first time it is created (no body yet).
    if (F->empty()) {
      F->setCallingConv(getRuntimeCC());
      setWindowsItaniumDLLImport(CGM&: *this, Local, F, Name);
      setDSOLocal(F);
      // FIXME: We should use CodeGenModule::SetLLVMFunctionAttributes() instead
      // of trying to approximate the attributes using the LLVM function
      // signature. The other overload of CreateRuntimeFunction does this; it
      // should be used for new code.
      markRegisterParameterAttributes(F);
    }
  }

  return {FTy, C};
}
5584
/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
/// create and return an llvm GlobalVariable with the specified type and address
/// space. If there is something in the module with the specified name, return
/// it potentially bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that correspond to this. This is used
/// to set the attributes on the global when it is first created.
///
/// If IsForDefinition is true, it is guaranteed that an actual global with
/// type Ty will be returned, not conversion of a variable with the same
/// mangled name but some other type.
llvm::Constant *
CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
                                     LangAS AddrSpace, const VarDecl *D,
                                     ForDefinition_t IsForDefinition) {
  // Lookup the entry, lazily creating it if necessary.
  llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS: AddrSpace);
  if (Entry) {
    // Promote a previously weak reference to a strong one once a non-weak
    // declaration is seen.
    if (WeakRefReferences.erase(Ptr: Entry)) {
      if (D && !D->hasAttr<WeakAttr>())
        Entry->setLinkage(llvm::Function::ExternalLinkage);
    }

    // Handle dropped DLL attributes.
    if (D && shouldDropDLLAttribute(D, GV: Entry))
      Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);

    if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
      getOpenMPRuntime().registerTargetGlobalVariable(VD: D, Addr: Entry);

    // Fast path: the existing global already has the requested type and
    // address space.
    if (Entry->getValueType() == Ty && Entry->getAddressSpace() == TargetAS)
      return Entry;

    // If there are two attempts to define the same mangled name, issue an
    // error.
    if (IsForDefinition && !Entry->isDeclaration()) {
      GlobalDecl OtherGD;
      const VarDecl *OtherD;

      // Check that D is not yet in DiagnosedConflictingDefinitions is required
      // to make sure that we issue an error only once.
      if (D && lookupRepresentativeDecl(MangledName, Result&: OtherGD) &&
          (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) &&
          (OtherD = dyn_cast<VarDecl>(Val: OtherGD.getDecl())) &&
          OtherD->hasInit() &&
          DiagnosedConflictingDefinitions.insert(V: D).second) {
        getDiags().Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name)
            << MangledName;
        getDiags().Report(Loc: OtherGD.getDecl()->getLocation(),
                          DiagID: diag::note_previous_definition);
      }
    }

    // Make sure the result is of the correct type.
    if (Entry->getType()->getAddressSpace() != TargetAS)
      return llvm::ConstantExpr::getAddrSpaceCast(
          C: Entry, Ty: llvm::PointerType::get(C&: Ty->getContext(), AddressSpace: TargetAS));

    // (If global is requested for a definition, we always need to create a new
    // global, not just return a bitcast.)
    if (!IsForDefinition)
      return Entry;
  }

  auto DAddrSpace = GetGlobalVarAddressSpace(D);

  // Create a fresh global in the address space the declaration calls for.
  auto *GV = new llvm::GlobalVariable(
      getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr,
      MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal,
      getContext().getTargetAddressSpace(AS: DAddrSpace));

  // If we already created a global with the same mangled name (but different
  // type) before, take its name and remove it from its parent.
  if (Entry) {
    GV->takeName(V: Entry);

    if (!Entry->use_empty()) {
      Entry->replaceAllUsesWith(V: GV);
    }

    Entry->eraseFromParent();
  }

  // This is the first use or definition of a mangled name. If there is a
  // deferred decl with this name, remember that we need to emit it at the end
  // of the file.
  auto DDI = DeferredDecls.find(Val: MangledName);
  if (DDI != DeferredDecls.end()) {
    // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
    // list, and remove it from DeferredDecls (since we don't need it anymore).
    addDeferredDeclToEmit(GD: DDI->second);
    DeferredDecls.erase(I: DDI);
  }

  // Handle things which are present even on external declarations.
  if (D) {
    if (LangOpts.OpenMP && !LangOpts.OpenMPSimd)
      getOpenMPRuntime().registerTargetGlobalVariable(VD: D, Addr: GV);

    // FIXME: This code is overly simple and should be merged with other global
    // handling.
    GV->setConstant(D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: false, ExcludeDtor: false));

    GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());

    setLinkageForGV(GV, ND: D);

    if (D->getTLSKind()) {
      // Dynamically-initialized TLS variables need their initializers run by
      // the C++ runtime; record them for that.
      if (D->getTLSKind() == VarDecl::TLS_Dynamic)
        CXXThreadLocals.push_back(x: D);
      setTLSMode(GV, D: *D);
    }

    setGVProperties(GV, D);

    // If required by the ABI, treat declarations of static data members with
    // inline initializers as definitions.
    if (getContext().isMSStaticDataMemberInlineDefinition(VD: D)) {
      EmitGlobalVarDefinition(D);
    }

    // Emit section information for extern variables.
    if (D->hasExternalStorage()) {
      if (const SectionAttr *SA = D->getAttr<SectionAttr>())
        GV->setSection(SA->getName());
    }

    // Handle XCore specific ABI requirements.
    if (getTriple().getArch() == llvm::Triple::xcore &&
        D->getLanguageLinkage() == CLanguageLinkage &&
        D->getType().isConstant(Ctx: Context) &&
        isExternallyVisible(L: D->getLinkageAndVisibility().getLinkage()))
      GV->setSection(".cp.rodata");

    // Handle code model attribute
    if (const auto *CMA = D->getAttr<CodeModelAttr>())
      GV->setCodeModel(CMA->getModel());

    // Check if we a have a const declaration with an initializer, we may be
    // able to emit it as available_externally to expose it's value to the
    // optimizer.
    if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() &&
        D->getType().isConstQualified() && !GV->hasInitializer() &&
        !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) {
      // Mutable fields would make the constant-folded value unreliable.
      const auto *Record =
          Context.getBaseElementType(QT: D->getType())->getAsCXXRecordDecl();
      bool HasMutableFields = Record && Record->hasMutableFields();
      if (!HasMutableFields) {
        const VarDecl *InitDecl;
        const Expr *InitExpr = D->getAnyInitializer(D&: InitDecl);
        if (InitExpr) {
          ConstantEmitter emitter(*this);
          llvm::Constant *Init = emitter.tryEmitForInitializer(D: *InitDecl);
          if (Init) {
            auto *InitType = Init->getType();
            if (GV->getValueType() != InitType) {
              // The type of the initializer does not match the definition.
              // This happens when an initializer has a different type from
              // the type of the global (because of padding at the end of a
              // structure for instance).
              GV->setName(StringRef());
              // Make a new global with the correct type, this is now guaranteed
              // to work.
              auto *NewGV = cast<llvm::GlobalVariable>(
                  Val: GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition)
                      ->stripPointerCasts());

              // Erase the old global, since it is no longer used.
              GV->eraseFromParent();
              GV = NewGV;
            } else {
              GV->setInitializer(Init);
              GV->setConstant(true);
              GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
            }
            emitter.finalize(global: GV);
          }
        }
      }
    }
  }

  if (D &&
      D->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly) {
    getTargetCodeGenInfo().setTargetAttributes(D, GV, M&: *this);
    // External HIP managed variables needed to be recorded for transformation
    // in both device and host compilations.
    if (getLangOpts().CUDA && D && D->hasAttr<HIPManagedAttr>() &&
        D->hasExternalStorage())
      getCUDARuntime().handleVarRegistration(VD: D, Var&: *GV);
  }

  if (D)
    SanitizerMD->reportGlobal(GV, D: *D);

  // If the global was emitted in a different address space than the one the
  // declaration expects, return an addrspace-cast of it instead.
  LangAS ExpectedAS =
      D ? D->getType().getAddressSpace()
        : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default);
  assert(getContext().getTargetAddressSpace(ExpectedAS) == TargetAS);
  if (DAddrSpace != ExpectedAS)
    return performAddrSpaceCast(
        Src: GV, DestTy: llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: TargetAS));

  return GV;
}
5791
5792llvm::Constant *
5793CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
5794 const Decl *D = GD.getDecl();
5795
5796 if (isa<CXXConstructorDecl>(Val: D) || isa<CXXDestructorDecl>(Val: D))
5797 return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
5798 /*DontDefer=*/false, IsForDefinition);
5799
5800 if (isa<CXXMethodDecl>(Val: D)) {
5801 auto FInfo =
5802 &getTypes().arrangeCXXMethodDeclaration(MD: cast<CXXMethodDecl>(Val: D));
5803 auto Ty = getTypes().GetFunctionType(Info: *FInfo);
5804 return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
5805 IsForDefinition);
5806 }
5807
5808 if (isa<FunctionDecl>(Val: D)) {
5809 const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
5810 llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI);
5811 return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
5812 IsForDefinition);
5813 }
5814
5815 return GetAddrOfGlobalVar(D: cast<VarDecl>(Val: D), /*Ty=*/nullptr, IsForDefinition);
5816}
5817
5818llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
5819 StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage,
5820 llvm::Align Alignment) {
5821 llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
5822 llvm::GlobalVariable *OldGV = nullptr;
5823
5824 if (GV) {
5825 // Check if the variable has the right type.
5826 if (GV->getValueType() == Ty)
5827 return GV;
5828
5829 // Because C++ name mangling, the only way we can end up with an already
5830 // existing global with the same name is if it has been declared extern "C".
5831 assert(GV->isDeclaration() && "Declaration has wrong type!");
5832 OldGV = GV;
5833 }
5834
5835 // Create a new variable.
5836 GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
5837 Linkage, nullptr, Name);
5838
5839 if (OldGV) {
5840 // Replace occurrences of the old variable if needed.
5841 GV->takeName(V: OldGV);
5842
5843 if (!OldGV->use_empty()) {
5844 OldGV->replaceAllUsesWith(V: GV);
5845 }
5846
5847 OldGV->eraseFromParent();
5848 }
5849
5850 if (supportsCOMDAT() && GV->isWeakForLinker() &&
5851 !GV->hasAvailableExternallyLinkage())
5852 GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName()));
5853
5854 GV->setAlignment(Alignment);
5855
5856 return GV;
5857}
5858
5859/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
5860/// given global variable. If Ty is non-null and if the global doesn't exist,
5861/// then it will be created with the specified type instead of whatever the
5862/// normal requested type would be. If IsForDefinition is true, it is guaranteed
5863/// that an actual global with type Ty will be returned, not conversion of a
5864/// variable with the same mangled name but some other type.
5865llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
5866 llvm::Type *Ty,
5867 ForDefinition_t IsForDefinition) {
5868 assert(D->hasGlobalStorage() && "Not a global variable");
5869 QualType ASTTy = D->getType();
5870 if (!Ty)
5871 Ty = getTypes().ConvertTypeForMem(T: ASTTy);
5872
5873 StringRef MangledName = getMangledName(GD: D);
5874 return GetOrCreateLLVMGlobal(MangledName, Ty, AddrSpace: ASTTy.getAddressSpace(), D,
5875 IsForDefinition);
5876}
5877
5878/// CreateRuntimeVariable - Create a new runtime global variable with the
5879/// specified type and name.
5880llvm::Constant *
5881CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
5882 StringRef Name) {
5883 LangAS AddrSpace = getContext().getLangOpts().OpenCL ? LangAS::opencl_global
5884 : LangAS::Default;
5885 auto *Ret = GetOrCreateLLVMGlobal(MangledName: Name, Ty, AddrSpace, D: nullptr);
5886 setDSOLocal(cast<llvm::GlobalValue>(Val: Ret->stripPointerCasts()));
5887 return Ret;
5888}
5889
5890void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
5891 assert(!D->getInit() && "Cannot emit definite definitions here!");
5892
5893 StringRef MangledName = getMangledName(GD: D);
5894 llvm::GlobalValue *GV = GetGlobalValue(Name: MangledName);
5895
5896 // We already have a definition, not declaration, with the same mangled name.
5897 // Emitting of declaration is not required (and actually overwrites emitted
5898 // definition).
5899 if (GV && !GV->isDeclaration())
5900 return;
5901
5902 // If we have not seen a reference to this variable yet, place it into the
5903 // deferred declarations table to be emitted if needed later.
5904 if (!MustBeEmitted(Global: D) && !GV) {
5905 DeferredDecls[MangledName] = D;
5906 return;
5907 }
5908
5909 // The tentative definition is the only definition.
5910 EmitGlobalVarDefinition(D);
5911}
5912
5913// Return a GlobalDecl. Use the base variants for destructors and constructors.
5914static GlobalDecl getBaseVariantGlobalDecl(const NamedDecl *D) {
5915 if (auto const *CD = dyn_cast<const CXXConstructorDecl>(Val: D))
5916 return GlobalDecl(CD, CXXCtorType::Ctor_Base);
5917 else if (auto const *DD = dyn_cast<const CXXDestructorDecl>(Val: D))
5918 return GlobalDecl(DD, CXXDtorType::Dtor_Base);
5919 return GlobalDecl(D);
5920}
5921
/// Emit debug information for an external declaration (variable or function)
/// when full (non-line-tables-only) debug info is enabled. Functions only get
/// debug info if they don't already carry a DISubprogram.
void CodeGenModule::EmitExternalDeclaration(const DeclaratorDecl *D) {
  CGDebugInfo *DI = getModuleDebugInfo();
  if (!DI || !getCodeGenOpts().hasReducedDebugInfo())
    return;

  // Ctors/dtors are represented by their base variants here.
  GlobalDecl GD = getBaseVariantGlobalDecl(D);
  if (!GD)
    return;

  llvm::Constant *Addr = GetAddrOfGlobal(GD)->stripPointerCasts();
  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    DI->EmitExternalVariable(
        GV: cast<llvm::GlobalVariable>(Val: Addr->stripPointerCasts()), Decl: VD);
  } else if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    llvm::Function *Fn = cast<llvm::Function>(Val: Addr);
    if (!Fn->getSubprogram())
      DI->EmitFunctionDecl(GD, Loc: FD->getLocation(), FnType: FD->getType(), Fn);
  }
}
5941
5942CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
5943 return Context.toCharUnitsFromBits(
5944 BitSize: getDataLayout().getTypeStoreSizeInBits(Ty));
5945}
5946
/// Decide which language address space a global variable should be emitted
/// in, based on the active language/offload model and (when non-null) the
/// declaration's attributes; falls back to the target hook when no language
/// rule applies. \p D may be null (e.g. for compiler-synthesized globals).
LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
  if (LangOpts.OpenCL) {
    // OpenCL globals carry an explicit address space in their type already.
    LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
    assert(AS == LangAS::opencl_global ||
           AS == LangAS::opencl_global_device ||
           AS == LangAS::opencl_global_host ||
           AS == LangAS::opencl_constant ||
           AS == LangAS::opencl_local ||
           AS >= LangAS::FirstTargetAddressSpace);
    return AS;
  }

  if (LangOpts.SYCLIsDevice &&
      (!D || D->getType().getAddressSpace() == LangAS::Default))
    return LangAS::sycl_global;

  if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
    // Explicit attributes take priority; a const-qualified type also maps to
    // constant memory; everything else is plain device memory.
    if (D) {
      if (D->hasAttr<CUDAConstantAttr>())
        return LangAS::cuda_constant;
      if (D->hasAttr<CUDASharedAttr>())
        return LangAS::cuda_shared;
      if (D->hasAttr<CUDADeviceAttr>())
        return LangAS::cuda_device;
      if (D->getType().isConstQualified())
        return LangAS::cuda_constant;
    }
    return LangAS::cuda_device;
  }

  if (LangOpts.OpenMP) {
    // An OpenMP 'allocate' clause/directive can pin the variable to a
    // specific address space.
    LangAS AS;
    if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(VD: D, AS))
      return AS;
  }
  return getTargetCodeGenInfo().getGlobalVarAddressSpace(CGM&: *this, D);
}
5984
5985LangAS CodeGenModule::GetGlobalConstantAddressSpace() const {
5986 // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
5987 if (LangOpts.OpenCL)
5988 return LangAS::opencl_constant;
5989 if (LangOpts.SYCLIsDevice)
5990 return LangAS::sycl_global;
5991 if (LangOpts.HIP && LangOpts.CUDAIsDevice && getTriple().isSPIRV())
5992 // For HIPSPV map literals to cuda_device (maps to CrossWorkGroup in SPIR-V)
5993 // instead of default AS (maps to Generic in SPIR-V). Otherwise, we end up
5994 // with OpVariable instructions with Generic storage class which is not
5995 // allowed (SPIR-V V1.6 s3.42.8). Also, mapping literals to SPIR-V
5996 // UniformConstant storage class is not viable as pointers to it may not be
5997 // casted to Generic pointers which are used to model HIP's "flat" pointers.
5998 return LangAS::cuda_device;
5999 if (auto AS = getTarget().getConstantAddressSpace())
6000 return *AS;
6001 return LangAS::Default;
6002}
6003
6004// In address space agnostic languages, string literals are in default address
6005// space in AST. However, certain targets (e.g. amdgcn) request them to be
6006// emitted in constant address space in LLVM IR. To be consistent with other
6007// parts of AST, string literal global variables in constant address space
6008// need to be casted to default address space before being put into address
6009// map and referenced by other part of CodeGen.
6010// In OpenCL, string literals are in constant address space in AST, therefore
6011// they should not be casted to default address space.
6012static llvm::Constant *
6013castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
6014 llvm::GlobalVariable *GV) {
6015 llvm::Constant *Cast = GV;
6016 if (!CGM.getLangOpts().OpenCL) {
6017 auto AS = CGM.GetGlobalConstantAddressSpace();
6018 if (AS != LangAS::Default)
6019 Cast = CGM.performAddrSpaceCast(
6020 Src: GV, DestTy: llvm::PointerType::get(
6021 C&: CGM.getLLVMContext(),
6022 AddressSpace: CGM.getContext().getTargetAddressSpace(AS: LangAS::Default)));
6023 }
6024 return Cast;
6025}
6026
6027template<typename SomeDecl>
6028void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
6029 llvm::GlobalValue *GV) {
6030 if (!getLangOpts().CPlusPlus)
6031 return;
6032
6033 // Must have 'used' attribute, or else inline assembly can't rely on
6034 // the name existing.
6035 if (!D->template hasAttr<UsedAttr>())
6036 return;
6037
6038 // Must have internal linkage and an ordinary name.
6039 if (!D->getIdentifier() || D->getFormalLinkage() != Linkage::Internal)
6040 return;
6041
6042 // Must be in an extern "C" context. Entities declared directly within
6043 // a record are not extern "C" even if the record is in such a context.
6044 const SomeDecl *First = D->getFirstDecl();
6045 if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
6046 return;
6047
6048 // OK, this is an internal linkage entity inside an extern "C" linkage
6049 // specification. Make a note of that so we can give it the "expected"
6050 // mangled name if nothing else is using that name.
6051 std::pair<StaticExternCMap::iterator, bool> R =
6052 StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));
6053
6054 // If we have multiple internal linkage entities with the same name
6055 // in extern "C" regions, none of them gets that name.
6056 if (!R.second)
6057 R.first->second = nullptr;
6058}
6059
6060static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
6061 if (!CGM.supportsCOMDAT())
6062 return false;
6063
6064 if (D.hasAttr<SelectAnyAttr>())
6065 return true;
6066
6067 GVALinkage Linkage;
6068 if (auto *VD = dyn_cast<VarDecl>(Val: &D))
6069 Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
6070 else
6071 Linkage = CGM.getContext().GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: &D));
6072
6073 switch (Linkage) {
6074 case GVA_Internal:
6075 case GVA_AvailableExternally:
6076 case GVA_StrongExternal:
6077 return false;
6078 case GVA_DiscardableODR:
6079 case GVA_StrongODR:
6080 return true;
6081 }
6082 llvm_unreachable("No such linkage");
6083}
6084
/// Whether the target object-file format supports COMDAT groups/sections.
bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}
6088
6089void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
6090 llvm::GlobalObject &GO) {
6091 if (!shouldBeInCOMDAT(CGM&: *this, D))
6092 return;
6093 GO.setComdat(TheModule.getOrInsertComdat(Name: GO.getName()));
6094}
6095
/// Convenience accessor for the target's ABI lowering information.
const ABIInfo &CodeGenModule::getABIInfo() {
  return getTargetCodeGenInfo().getABIInfo();
}
6099
6100/// Pass IsTentative as true if you want to create a tentative definition.
6101void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
6102 bool IsTentative) {
6103 // OpenCL global variables of sampler type are translated to function calls,
6104 // therefore no need to be translated.
6105 QualType ASTTy = D->getType();
6106 if (getLangOpts().OpenCL && ASTTy->isSamplerT())
6107 return;
6108
6109 // HLSL default buffer constants will be emitted during HLSLBufferDecl codegen
6110 if (getLangOpts().HLSL &&
6111 D->getType().getAddressSpace() == LangAS::hlsl_constant)
6112 return;
6113
6114 // If this is OpenMP device, check if it is legal to emit this global
6115 // normally.
6116 if (LangOpts.OpenMPIsTargetDevice && OpenMPRuntime &&
6117 OpenMPRuntime->emitTargetGlobalVariable(GD: D))
6118 return;
6119
6120 llvm::TrackingVH<llvm::Constant> Init;
6121 bool NeedsGlobalCtor = false;
6122 // Whether the definition of the variable is available externally.
6123 // If yes, we shouldn't emit the GloablCtor and GlobalDtor for the variable
6124 // since this is the job for its original source.
6125 bool IsDefinitionAvailableExternally =
6126 getContext().GetGVALinkageForVariable(VD: D) == GVA_AvailableExternally;
6127 bool NeedsGlobalDtor =
6128 !IsDefinitionAvailableExternally &&
6129 D->needsDestruction(Ctx: getContext()) == QualType::DK_cxx_destructor;
6130
6131 // It is helpless to emit the definition for an available_externally variable
6132 // which can't be marked as const.
6133 // We don't need to check if it needs global ctor or dtor. See the above
6134 // comment for ideas.
6135 if (IsDefinitionAvailableExternally &&
6136 (!D->hasConstantInitialization() ||
6137 // TODO: Update this when we have interface to check constexpr
6138 // destructor.
6139 D->needsDestruction(Ctx: getContext()) ||
6140 !D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: true)))
6141 return;
6142
6143 const VarDecl *InitDecl;
6144 const Expr *InitExpr = D->getAnyInitializer(D&: InitDecl);
6145
6146 std::optional<ConstantEmitter> emitter;
6147
6148 // CUDA E.2.4.1 "__shared__ variables cannot have an initialization
6149 // as part of their declaration." Sema has already checked for
6150 // error cases, so we just need to set Init to UndefValue.
6151 bool IsCUDASharedVar =
6152 getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
6153 // Shadows of initialized device-side global variables are also left
6154 // undefined.
6155 // Managed Variables should be initialized on both host side and device side.
6156 bool IsCUDAShadowVar =
6157 !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
6158 (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
6159 D->hasAttr<CUDASharedAttr>());
6160 bool IsCUDADeviceShadowVar =
6161 getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
6162 (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
6163 D->getType()->isCUDADeviceBuiltinTextureType());
6164 if (getLangOpts().CUDA &&
6165 (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar)) {
6166 Init = llvm::UndefValue::get(T: getTypes().ConvertTypeForMem(T: ASTTy));
6167 } else if (getLangOpts().HLSL &&
6168 (D->getType()->isHLSLResourceRecord() ||
6169 D->getType()->isHLSLResourceRecordArray())) {
6170 Init = llvm::PoisonValue::get(T: getTypes().ConvertType(T: ASTTy));
6171 NeedsGlobalCtor = D->getType()->isHLSLResourceRecord() ||
6172 D->getStorageClass() == SC_Static;
6173 } else if (D->hasAttr<LoaderUninitializedAttr>()) {
6174 Init = llvm::UndefValue::get(T: getTypes().ConvertTypeForMem(T: ASTTy));
6175 } else if (!InitExpr) {
6176 // This is a tentative definition; tentative definitions are
6177 // implicitly initialized with { 0 }.
6178 //
6179 // Note that tentative definitions are only emitted at the end of
6180 // a translation unit, so they should never have incomplete
6181 // type. In addition, EmitTentativeDefinition makes sure that we
6182 // never attempt to emit a tentative definition if a real one
6183 // exists. A use may still exists, however, so we still may need
6184 // to do a RAUW.
6185 assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
6186 Init = EmitNullConstant(T: D->getType());
6187 } else {
6188 initializedGlobalDecl = GlobalDecl(D);
6189 emitter.emplace(args&: *this);
6190 llvm::Constant *Initializer = emitter->tryEmitForInitializer(D: *InitDecl);
6191 if (!Initializer) {
6192 QualType T = InitExpr->getType();
6193 if (D->getType()->isReferenceType())
6194 T = D->getType();
6195
6196 if (getLangOpts().CPlusPlus) {
6197 Init = EmitNullConstant(T);
6198 if (!IsDefinitionAvailableExternally)
6199 NeedsGlobalCtor = true;
6200 if (InitDecl->hasFlexibleArrayInit(Ctx: getContext())) {
6201 ErrorUnsupported(D, Type: "flexible array initializer");
6202 // We cannot create ctor for flexible array initializer
6203 NeedsGlobalCtor = false;
6204 }
6205 } else {
6206 ErrorUnsupported(D, Type: "static initializer");
6207 Init = llvm::PoisonValue::get(T: getTypes().ConvertType(T));
6208 }
6209 } else {
6210 Init = Initializer;
6211 // We don't need an initializer, so remove the entry for the delayed
6212 // initializer position (just in case this entry was delayed) if we
6213 // also don't need to register a destructor.
6214 if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
6215 DelayedCXXInitPosition.erase(Val: D);
6216
6217#ifndef NDEBUG
6218 CharUnits VarSize = getContext().getTypeSizeInChars(ASTTy) +
6219 InitDecl->getFlexibleArrayInitChars(getContext());
6220 CharUnits CstSize = CharUnits::fromQuantity(
6221 getDataLayout().getTypeAllocSize(Init->getType()));
6222 assert(VarSize == CstSize && "Emitted constant has unexpected size");
6223#endif
6224 }
6225 }
6226
6227 llvm::Type* InitType = Init->getType();
6228 llvm::Constant *Entry =
6229 GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition: ForDefinition_t(!IsTentative));
6230
6231 // Strip off pointer casts if we got them.
6232 Entry = Entry->stripPointerCasts();
6233
6234 // Entry is now either a Function or GlobalVariable.
6235 auto *GV = dyn_cast<llvm::GlobalVariable>(Val: Entry);
6236
6237 // We have a definition after a declaration with the wrong type.
6238 // We must make a new GlobalVariable* and update everything that used OldGV
6239 // (a declaration or tentative definition) with the new GlobalVariable*
6240 // (which will be a definition).
6241 //
6242 // This happens if there is a prototype for a global (e.g.
6243 // "extern int x[];") and then a definition of a different type (e.g.
6244 // "int x[10];"). This also happens when an initializer has a different type
6245 // from the type of the global (this happens with unions).
6246 if (!GV || GV->getValueType() != InitType ||
6247 GV->getType()->getAddressSpace() !=
6248 getContext().getTargetAddressSpace(AS: GetGlobalVarAddressSpace(D))) {
6249
6250 // Move the old entry aside so that we'll create a new one.
6251 Entry->setName(StringRef());
6252
6253 // Make a new global with the correct type, this is now guaranteed to work.
6254 GV = cast<llvm::GlobalVariable>(
6255 Val: GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition: ForDefinition_t(!IsTentative))
6256 ->stripPointerCasts());
6257
6258 // Replace all uses of the old global with the new global
6259 llvm::Constant *NewPtrForOldDecl =
6260 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(C: GV,
6261 Ty: Entry->getType());
6262 Entry->replaceAllUsesWith(V: NewPtrForOldDecl);
6263
6264 // Erase the old global, since it is no longer used.
6265 cast<llvm::GlobalValue>(Val: Entry)->eraseFromParent();
6266 }
6267
6268 MaybeHandleStaticInExternC(D, GV);
6269
6270 if (D->hasAttr<AnnotateAttr>())
6271 AddGlobalAnnotations(D, GV);
6272
6273 // Set the llvm linkage type as appropriate.
6274 llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD: D);
6275
6276 // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
6277 // the device. [...]"
6278 // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
6279 // __device__, declares a variable that: [...]
6280 // Is accessible from all the threads within the grid and from the host
6281 // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
6282 // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
6283 if (LangOpts.CUDA) {
6284 if (LangOpts.CUDAIsDevice) {
6285 if (Linkage != llvm::GlobalValue::InternalLinkage && !D->isConstexpr() &&
6286 !D->getType().isConstQualified() &&
6287 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
6288 D->getType()->isCUDADeviceBuiltinSurfaceType() ||
6289 D->getType()->isCUDADeviceBuiltinTextureType()))
6290 GV->setExternallyInitialized(true);
6291 } else {
6292 getCUDARuntime().internalizeDeviceSideVar(D, Linkage);
6293 }
6294 getCUDARuntime().handleVarRegistration(VD: D, Var&: *GV);
6295 }
6296
6297 if (LangOpts.HLSL &&
6298 hlsl::isInitializedByPipeline(AS: GetGlobalVarAddressSpace(D))) {
6299 // HLSL Input variables are considered to be set by the driver/pipeline, but
6300 // only visible to a single thread/wave. Push constants are also externally
6301 // initialized, but constant, hence cross-wave visibility is not relevant.
6302 GV->setExternallyInitialized(true);
6303 } else {
6304 GV->setInitializer(Init);
6305 }
6306
6307 if (LangOpts.HLSL)
6308 getHLSLRuntime().handleGlobalVarDefinition(VD: D, Var: GV);
6309
6310 if (emitter)
6311 emitter->finalize(global: GV);
6312
6313 // If it is safe to mark the global 'constant', do so now.
6314 GV->setConstant((D->hasAttr<CUDAConstantAttr>() && LangOpts.CUDAIsDevice) ||
6315 (!NeedsGlobalCtor && !NeedsGlobalDtor &&
6316 D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: true)));
6317
6318 // If it is in a read-only section, mark it 'constant'.
6319 if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
6320 const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
6321 if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
6322 GV->setConstant(true);
6323 }
6324
6325 CharUnits AlignVal = getContext().getDeclAlign(D);
6326 // Check for alignment specifed in an 'omp allocate' directive.
6327 if (std::optional<CharUnits> AlignValFromAllocate =
6328 getOMPAllocateAlignment(VD: D))
6329 AlignVal = *AlignValFromAllocate;
6330 GV->setAlignment(AlignVal.getAsAlign());
6331
6332 // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
6333 // function is only defined alongside the variable, not also alongside
6334 // callers. Normally, all accesses to a thread_local go through the
6335 // thread-wrapper in order to ensure initialization has occurred, underlying
6336 // variable will never be used other than the thread-wrapper, so it can be
6337 // converted to internal linkage.
6338 //
6339 // However, if the variable has the 'constinit' attribute, it _can_ be
6340 // referenced directly, without calling the thread-wrapper, so the linkage
6341 // must not be changed.
6342 //
6343 // Additionally, if the variable isn't plain external linkage, e.g. if it's
6344 // weak or linkonce, the de-duplication semantics are important to preserve,
6345 // so we don't change the linkage.
6346 if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
6347 Linkage == llvm::GlobalValue::ExternalLinkage &&
6348 Context.getTargetInfo().getTriple().isOSDarwin() &&
6349 !D->hasAttr<ConstInitAttr>())
6350 Linkage = llvm::GlobalValue::InternalLinkage;
6351
6352 // HLSL variables in the input or push-constant address space maps are like
6353 // memory-mapped variables. Even if they are 'static', they are externally
6354 // initialized and read/write by the hardware/driver/pipeline.
6355 if (LangOpts.HLSL &&
6356 hlsl::isInitializedByPipeline(AS: GetGlobalVarAddressSpace(D)))
6357 Linkage = llvm::GlobalValue::ExternalLinkage;
6358
6359 GV->setLinkage(Linkage);
6360 if (D->hasAttr<DLLImportAttr>())
6361 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
6362 else if (D->hasAttr<DLLExportAttr>())
6363 GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
6364 else
6365 GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
6366
6367 if (Linkage == llvm::GlobalVariable::CommonLinkage) {
6368 // common vars aren't constant even if declared const.
6369 GV->setConstant(false);
6370 // Tentative definition of global variables may be initialized with
6371 // non-zero null pointers. In this case they should have weak linkage
6372 // since common linkage must have zero initializer and must not have
6373 // explicit section therefore cannot have non-zero initial value.
6374 if (!GV->getInitializer()->isNullValue())
6375 GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
6376 }
6377
6378 setNonAliasAttributes(GD: D, GO: GV);
6379
6380 if (D->getTLSKind() && !GV->isThreadLocal()) {
6381 if (D->getTLSKind() == VarDecl::TLS_Dynamic)
6382 CXXThreadLocals.push_back(x: D);
6383 setTLSMode(GV, D: *D);
6384 }
6385
6386 maybeSetTrivialComdat(D: *D, GO&: *GV);
6387
6388 // Emit the initializer function if necessary.
6389 if (NeedsGlobalCtor || NeedsGlobalDtor)
6390 EmitCXXGlobalVarDeclInitFunc(D, Addr: GV, PerformInit: NeedsGlobalCtor);
6391
6392 SanitizerMD->reportGlobal(GV, D: *D, IsDynInit: NeedsGlobalCtor);
6393
6394 // Emit global variable debug information.
6395 if (CGDebugInfo *DI = getModuleDebugInfo())
6396 if (getCodeGenOpts().hasReducedDebugInfo())
6397 DI->EmitGlobalVariable(GV, Decl: D);
6398}
6399
6400static bool isVarDeclStrongDefinition(const ASTContext &Context,
6401 CodeGenModule &CGM, const VarDecl *D,
6402 bool NoCommon) {
6403 // Don't give variables common linkage if -fno-common was specified unless it
6404 // was overridden by a NoCommon attribute.
6405 if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
6406 return true;
6407
6408 // C11 6.9.2/2:
6409 // A declaration of an identifier for an object that has file scope without
6410 // an initializer, and without a storage-class specifier or with the
6411 // storage-class specifier static, constitutes a tentative definition.
6412 if (D->getInit() || D->hasExternalStorage())
6413 return true;
6414
6415 // A variable cannot be both common and exist in a section.
6416 if (D->hasAttr<SectionAttr>())
6417 return true;
6418
6419 // A variable cannot be both common and exist in a section.
6420 // We don't try to determine which is the right section in the front-end.
6421 // If no specialized section name is applicable, it will resort to default.
6422 if (D->hasAttr<PragmaClangBSSSectionAttr>() ||
6423 D->hasAttr<PragmaClangDataSectionAttr>() ||
6424 D->hasAttr<PragmaClangRelroSectionAttr>() ||
6425 D->hasAttr<PragmaClangRodataSectionAttr>())
6426 return true;
6427
6428 // Thread local vars aren't considered common linkage.
6429 if (D->getTLSKind())
6430 return true;
6431
6432 // Tentative definitions marked with WeakImportAttr are true definitions.
6433 if (D->hasAttr<WeakImportAttr>())
6434 return true;
6435
6436 // A variable cannot be both common and exist in a comdat.
6437 if (shouldBeInCOMDAT(CGM, D: *D))
6438 return true;
6439
6440 // Declarations with a required alignment do not have common linkage in MSVC
6441 // mode.
6442 if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
6443 if (D->hasAttr<AlignedAttr>())
6444 return true;
6445 QualType VarType = D->getType();
6446 if (Context.isAlignmentRequired(T: VarType))
6447 return true;
6448
6449 if (const auto *RD = VarType->getAsRecordDecl()) {
6450 for (const FieldDecl *FD : RD->fields()) {
6451 if (FD->isBitField())
6452 continue;
6453 if (FD->hasAttr<AlignedAttr>())
6454 return true;
6455 if (Context.isAlignmentRequired(T: FD->getType()))
6456 return true;
6457 }
6458 }
6459 }
6460
6461 // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
6462 // common symbols, so symbols with greater alignment requirements cannot be
6463 // common.
6464 // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
6465 // alignments for common symbols via the aligncomm directive, so this
6466 // restriction only applies to MSVC environments.
6467 if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
6468 Context.getTypeAlignIfKnown(T: D->getType()) >
6469 Context.toBits(CharSize: CharUnits::fromQuantity(Quantity: 32)))
6470 return true;
6471
6472 return false;
6473}
6474
6475llvm::GlobalValue::LinkageTypes
6476CodeGenModule::getLLVMLinkageForDeclarator(const DeclaratorDecl *D,
6477 GVALinkage Linkage) {
6478 if (Linkage == GVA_Internal)
6479 return llvm::Function::InternalLinkage;
6480
6481 if (D->hasAttr<WeakAttr>())
6482 return llvm::GlobalVariable::WeakAnyLinkage;
6483
6484 if (const auto *FD = D->getAsFunction())
6485 if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
6486 return llvm::GlobalVariable::LinkOnceAnyLinkage;
6487
6488 // We are guaranteed to have a strong definition somewhere else,
6489 // so we can use available_externally linkage.
6490 if (Linkage == GVA_AvailableExternally)
6491 return llvm::GlobalValue::AvailableExternallyLinkage;
6492
6493 // Note that Apple's kernel linker doesn't support symbol
6494 // coalescing, so we need to avoid linkonce and weak linkages there.
6495 // Normally, this means we just map to internal, but for explicit
6496 // instantiations we'll map to external.
6497
6498 // In C++, the compiler has to emit a definition in every translation unit
6499 // that references the function. We should use linkonce_odr because
6500 // a) if all references in this translation unit are optimized away, we
6501 // don't need to codegen it. b) if the function persists, it needs to be
6502 // merged with other definitions. c) C++ has the ODR, so we know the
6503 // definition is dependable.
6504 if (Linkage == GVA_DiscardableODR)
6505 return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage
6506 : llvm::Function::InternalLinkage;
6507
6508 // An explicit instantiation of a template has weak linkage, since
6509 // explicit instantiations can occur in multiple translation units
6510 // and must all be equivalent. However, we are not allowed to
6511 // throw away these explicit instantiations.
6512 //
6513 // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU,
6514 // so say that CUDA templates are either external (for kernels) or internal.
6515 // This lets llvm perform aggressive inter-procedural optimizations. For
6516 // -fgpu-rdc case, device function calls across multiple TU's are allowed,
6517 // therefore we need to follow the normal linkage paradigm.
6518 if (Linkage == GVA_StrongODR) {
6519 if (getLangOpts().AppleKext)
6520 return llvm::Function::ExternalLinkage;
6521 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
6522 !getLangOpts().GPURelocatableDeviceCode)
6523 return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage
6524 : llvm::Function::InternalLinkage;
6525 return llvm::Function::WeakODRLinkage;
6526 }
6527
6528 // C++ doesn't have tentative definitions and thus cannot have common
6529 // linkage.
6530 if (!getLangOpts().CPlusPlus && isa<VarDecl>(Val: D) &&
6531 !isVarDeclStrongDefinition(Context, CGM&: *this, D: cast<VarDecl>(Val: D),
6532 NoCommon: CodeGenOpts.NoCommon))
6533 return llvm::GlobalVariable::CommonLinkage;
6534
6535 // selectany symbols are externally visible, so use weak instead of
6536 // linkonce. MSVC optimizes away references to const selectany globals, so
6537 // all definitions should be the same and ODR linkage should be used.
6538 // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
6539 if (D->hasAttr<SelectAnyAttr>())
6540 return llvm::GlobalVariable::WeakODRLinkage;
6541
6542 // Otherwise, we have strong external linkage.
6543 assert(Linkage == GVA_StrongExternal);
6544 return llvm::GlobalVariable::ExternalLinkage;
6545}
6546
/// Compute the LLVM linkage to use when emitting the definition of the
/// variable \p VD: derive its GVA linkage from the AST context, then map
/// that to an LLVM linkage via getLLVMLinkageForDeclarator.
llvm::GlobalValue::LinkageTypes
CodeGenModule::getLLVMLinkageVarDefinition(const VarDecl *VD) {
  GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
  return getLLVMLinkageForDeclarator(D: VD, Linkage);
}
6552
6553/// Replace the uses of a function that was declared with a non-proto type.
6554/// We want to silently drop extra arguments from call sites
6555static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
6556 llvm::Function *newFn) {
6557 // Fast path.
6558 if (old->use_empty())
6559 return;
6560
6561 llvm::Type *newRetTy = newFn->getReturnType();
6562 SmallVector<llvm::Value *, 4> newArgs;
6563
6564 SmallVector<llvm::CallBase *> callSitesToBeRemovedFromParent;
6565
6566 for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
6567 ui != ue; ui++) {
6568 llvm::User *user = ui->getUser();
6569
6570 // Recognize and replace uses of bitcasts. Most calls to
6571 // unprototyped functions will use bitcasts.
6572 if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(Val: user)) {
6573 if (bitcast->getOpcode() == llvm::Instruction::BitCast)
6574 replaceUsesOfNonProtoConstant(old: bitcast, newFn);
6575 continue;
6576 }
6577
6578 // Recognize calls to the function.
6579 llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(Val: user);
6580 if (!callSite)
6581 continue;
6582 if (!callSite->isCallee(U: &*ui))
6583 continue;
6584
6585 // If the return types don't match exactly, then we can't
6586 // transform this call unless it's dead.
6587 if (callSite->getType() != newRetTy && !callSite->use_empty())
6588 continue;
6589
6590 // Get the call site's attribute list.
6591 SmallVector<llvm::AttributeSet, 8> newArgAttrs;
6592 llvm::AttributeList oldAttrs = callSite->getAttributes();
6593
6594 // If the function was passed too few arguments, don't transform.
6595 unsigned newNumArgs = newFn->arg_size();
6596 if (callSite->arg_size() < newNumArgs)
6597 continue;
6598
6599 // If extra arguments were passed, we silently drop them.
6600 // If any of the types mismatch, we don't transform.
6601 unsigned argNo = 0;
6602 bool dontTransform = false;
6603 for (llvm::Argument &A : newFn->args()) {
6604 if (callSite->getArgOperand(i: argNo)->getType() != A.getType()) {
6605 dontTransform = true;
6606 break;
6607 }
6608
6609 // Add any parameter attributes.
6610 newArgAttrs.push_back(Elt: oldAttrs.getParamAttrs(ArgNo: argNo));
6611 argNo++;
6612 }
6613 if (dontTransform)
6614 continue;
6615
6616 // Okay, we can transform this. Create the new call instruction and copy
6617 // over the required information.
6618 newArgs.append(in_start: callSite->arg_begin(), in_end: callSite->arg_begin() + argNo);
6619
6620 // Copy over any operand bundles.
6621 SmallVector<llvm::OperandBundleDef, 1> newBundles;
6622 callSite->getOperandBundlesAsDefs(Defs&: newBundles);
6623
6624 llvm::CallBase *newCall;
6625 if (isa<llvm::CallInst>(Val: callSite)) {
6626 newCall = llvm::CallInst::Create(Func: newFn, Args: newArgs, Bundles: newBundles, NameStr: "",
6627 InsertBefore: callSite->getIterator());
6628 } else {
6629 auto *oldInvoke = cast<llvm::InvokeInst>(Val: callSite);
6630 newCall = llvm::InvokeInst::Create(
6631 Func: newFn, IfNormal: oldInvoke->getNormalDest(), IfException: oldInvoke->getUnwindDest(),
6632 Args: newArgs, Bundles: newBundles, NameStr: "", InsertBefore: callSite->getIterator());
6633 }
6634 newArgs.clear(); // for the next iteration
6635
6636 if (!newCall->getType()->isVoidTy())
6637 newCall->takeName(V: callSite);
6638 newCall->setAttributes(
6639 llvm::AttributeList::get(C&: newFn->getContext(), FnAttrs: oldAttrs.getFnAttrs(),
6640 RetAttrs: oldAttrs.getRetAttrs(), ArgAttrs: newArgAttrs));
6641 newCall->setCallingConv(callSite->getCallingConv());
6642
6643 // Finally, remove the old call, replacing any uses with the new one.
6644 if (!callSite->use_empty())
6645 callSite->replaceAllUsesWith(V: newCall);
6646
6647 // Copy debug location attached to CI.
6648 if (callSite->getDebugLoc())
6649 newCall->setDebugLoc(callSite->getDebugLoc());
6650
6651 callSitesToBeRemovedFromParent.push_back(Elt: callSite);
6652 }
6653
6654 for (auto *callSite : callSitesToBeRemovedFromParent) {
6655 callSite->eraseFromParent();
6656 }
6657}
6658
6659/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
6660/// implement a function with no prototype, e.g. "int foo() {}". If there are
6661/// existing call uses of the old function in the module, this adjusts them to
6662/// call the new function directly.
6663///
6664/// This is not just a cleanup: the always_inline pass requires direct calls to
6665/// functions to be able to inline them. If there is a bitcast in the way, it
6666/// won't inline them. Instcombine normally deletes these calls, but it isn't
6667/// run at -O0.
6668static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
6669 llvm::Function *NewFn) {
6670 // If we're redefining a global as a function, don't transform it.
6671 if (!isa<llvm::Function>(Val: Old)) return;
6672
6673 replaceUsesOfNonProtoConstant(old: Old, newFn: NewFn);
6674}
6675
6676void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
6677 auto DK = VD->isThisDeclarationADefinition();
6678 if ((DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>()) ||
6679 (LangOpts.CUDA && !shouldEmitCUDAGlobalVar(Global: VD)))
6680 return;
6681
6682 TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
6683 // If we have a definition, this might be a deferred decl. If the
6684 // instantiation is explicit, make sure we emit it at the end.
6685 if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
6686 GetAddrOfGlobalVar(D: VD);
6687
6688 EmitTopLevelDecl(D: VD);
6689}
6690
/// Emit the IR definition for the function declaration \p GD into the
/// (possibly pre-existing) global value \p GV, then apply linkage,
/// attributes, and ctor/dtor registration.
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
                                                 llvm::GlobalValue *GV) {
  const auto *D = cast<FunctionDecl>(Val: GD.getDecl());

  // Compute the function info and LLVM type.
  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
  llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI);

  // Get or create the prototype for the function.
  if (!GV || (GV->getValueType() != Ty))
    GV = cast<llvm::GlobalValue>(Val: GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
                                                   /*DontDefer=*/true,
                                                   IsForDefinition: ForDefinition));

  // Already emitted.
  if (!GV->isDeclaration())
    return;

  // We need to set linkage and visibility on the function before
  // generating code for it because various parts of IR generation
  // want to propagate this information down (e.g. to local static
  // declarations).
  auto *Fn = cast<llvm::Function>(Val: GV);
  setFunctionLinkage(GD, F: Fn);

  // FIXME: this is redundant with part of setFunctionDefinitionAttributes
  setGVProperties(GV: Fn, GD);

  MaybeHandleStaticInExternC(D, GV: Fn);

  maybeSetTrivialComdat(D: *D, GO&: *Fn);

  // Generate the function body.
  CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo: FI);

  setNonAliasAttributes(GD, GO: Fn);

  // At -O0 (without -disable-O0-optnone or minsize), definitions get optnone;
  // computed here so the always_inline marking below can respect it.
  bool ShouldAddOptNone = !CodeGenOpts.DisableO0ImplyOptNone &&
                          (CodeGenOpts.OptimizationLevel == 0) &&
                          !D->hasAttr<MinSizeAttr>();

  // For an OpenCL kernel, mark the stub reference always_inline — but only
  // when nothing (attribute or optnone state) forbids inlining.
  if (DeviceKernelAttr::isOpenCLSpelling(A: D->getAttr<DeviceKernelAttr>())) {
    if (GD.getKernelReferenceKind() == KernelReferenceKind::Stub &&
        !D->hasAttr<NoInlineAttr>() &&
        !Fn->hasFnAttribute(Kind: llvm::Attribute::NoInline) &&
        !D->hasAttr<OptimizeNoneAttr>() &&
        !Fn->hasFnAttribute(Kind: llvm::Attribute::OptimizeNone) &&
        !ShouldAddOptNone) {
      Fn->addFnAttr(Kind: llvm::Attribute::AlwaysInline);
    }
  }

  SetLLVMFunctionAttributesForDefinition(D, F: Fn);

  // Evaluate the priority argument of a constructor/destructor attribute,
  // falling back to the attribute's default priority when none is given.
  auto GetPriority = [this](const auto *Attr) -> int {
    Expr *E = Attr->getPriority();
    if (E) {
      return E->EvaluateKnownConstInt(Ctx: this->getContext()).getExtValue();
    }
    return Attr->DefaultPriority;
  };

  // Register __attribute__((constructor/destructor)) functions with the
  // global ctor/dtor lists.
  if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
    AddGlobalCtor(Ctor: Fn, Priority: GetPriority(CA));
  if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
    AddGlobalDtor(Dtor: Fn, Priority: GetPriority(DA), IsDtorAttrFunc: true);
  if (getLangOpts().OpenMP && D->hasAttr<OMPDeclareTargetDeclAttr>())
    getOpenMPRuntime().emitDeclareTargetFunction(FD: D, GV);
}
6759
/// Emit a llvm::GlobalAlias for a declaration carrying an alias attribute,
/// diagnosing cyclic aliases and replacing any earlier declaration of the
/// same mangled name with the new alias.
void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
  const auto *D = cast<ValueDecl>(Val: GD.getDecl());
  const AliasAttr *AA = D->getAttr<AliasAttr>();
  assert(AA && "Not an alias?");

  StringRef MangledName = getMangledName(GD);

  // An alias whose target is its own mangled name is trivially cyclic.
  if (AA->getAliasee() == MangledName) {
    Diags.Report(Loc: AA->getLocation(), DiagID: diag::err_cyclic_alias) << 0;
    return;
  }

  // If there is a definition in the module, then it wins over the alias.
  // This is dubious, but allow it to be safe. Just ignore the alias.
  llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  // Remember the alias for later whole-module processing.
  Aliases.push_back(x: GD);

  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: D->getType());

  // Create a reference to the named value. This ensures that it is emitted
  // if a deferred decl.
  llvm::Constant *Aliasee;
  llvm::GlobalValue::LinkageTypes LT;
  if (isa<llvm::FunctionType>(Val: DeclTy)) {
    // Function aliasee.
    Aliasee = GetOrCreateLLVMFunction(MangledName: AA->getAliasee(), Ty: DeclTy, GD,
                                      /*ForVTable=*/false);
    LT = getFunctionLinkage(GD);
  } else {
    // Variable aliasee; no decl is available here, so LangAS::Default is used.
    Aliasee = GetOrCreateLLVMGlobal(MangledName: AA->getAliasee(), Ty: DeclTy, AddrSpace: LangAS::Default,
                                    /*D=*/nullptr);
    if (const auto *VD = dyn_cast<VarDecl>(Val: GD.getDecl()))
      LT = getLLVMLinkageVarDefinition(VD);
    else
      LT = getFunctionLinkage(GD);
  }

  // Create the new alias itself, but don't set a name yet.
  unsigned AS = Aliasee->getType()->getPointerAddressSpace();
  auto *GA =
      llvm::GlobalAlias::create(Ty: DeclTy, AddressSpace: AS, Linkage: LT, Name: "", Aliasee, Parent: &getModule());

  if (Entry) {
    // If the aliasee resolved to the existing declaration of this very name,
    // the alias would refer to itself — another cyclic-alias case.
    if (GA->getAliasee() == Entry) {
      Diags.Report(Loc: AA->getLocation(), DiagID: diag::err_cyclic_alias) << 0;
      return;
    }

    assert(Entry->isDeclaration());

    // If there is a declaration in the module, then we had an extern followed
    // by the alias, as in:
    //   extern int test6();
    //   ...
    //   int test6() __attribute__((alias("test7")));
    //
    // Remove it and replace uses of it with the alias.
    GA->takeName(V: Entry);

    Entry->replaceAllUsesWith(V: GA);
    Entry->eraseFromParent();
  } else {
    GA->setName(MangledName);
  }

  // Set attributes which are particular to an alias; this is a
  // specialization of the attributes which may be set on a global
  // variable/function.
  if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
      D->isWeakImported()) {
    GA->setLinkage(llvm::Function::WeakAnyLinkage);
  }

  // Propagate the TLS model of a thread-local aliased variable.
  if (const auto *VD = dyn_cast<VarDecl>(Val: D))
    if (VD->getTLSKind())
      setTLSMode(GV: GA, D: *VD);

  SetCommonAttributes(GD, GV: GA);

  // Emit global alias debug information.
  if (isa<VarDecl>(Val: D))
    if (CGDebugInfo *DI = getModuleDebugInfo())
      DI->EmitGlobalAlias(GV: cast<llvm::GlobalValue>(Val: GA->getAliasee()->stripPointerCasts()), Decl: GD);
}
6846
/// Emit a llvm::GlobalIFunc for a declaration carrying an ifunc attribute,
/// diagnosing cyclic resolvers and conflicting definitions, and replacing
/// any earlier declaration of the same mangled name with the new ifunc.
void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
  const auto *D = cast<ValueDecl>(Val: GD.getDecl());
  const IFuncAttr *IFA = D->getAttr<IFuncAttr>();
  assert(IFA && "Not an ifunc?");

  StringRef MangledName = getMangledName(GD);

  // An ifunc whose resolver is its own mangled name is trivially cyclic.
  if (IFA->getResolver() == MangledName) {
    Diags.Report(Loc: IFA->getLocation(), DiagID: diag::err_cyclic_alias) << 1;
    return;
  }

  // Report an error if some definition overrides ifunc.
  llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName);
  if (Entry && !Entry->isDeclaration()) {
    GlobalDecl OtherGD;
    // Diagnose each conflicting pair only once.
    if (lookupRepresentativeDecl(MangledName, Result&: OtherGD) &&
        DiagnosedConflictingDefinitions.insert(V: GD).second) {
      Diags.Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name)
          << MangledName;
      Diags.Report(Loc: OtherGD.getDecl()->getLocation(),
                   DiagID: diag::note_previous_definition);
    }
    return;
  }

  // Remember the ifunc for later whole-module processing.
  Aliases.push_back(x: GD);

  // The resolver might not be visited yet. Specify a dummy non-function type to
  // indicate IsIncompleteFunction. Either the type is ignored (if the resolver
  // was emitted) or the whole function will be replaced (if the resolver has
  // not been emitted).
  llvm::Constant *Resolver =
      GetOrCreateLLVMFunction(MangledName: IFA->getResolver(), Ty: VoidTy, GD: {},
                              /*ForVTable=*/false);
  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: D->getType());
  unsigned AS = getTypes().getTargetAddressSpace(T: D->getType());
  llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create(
      Ty: DeclTy, AddressSpace: AS, Linkage: llvm::Function::ExternalLinkage, Name: "", Resolver, Parent: &getModule());
  if (Entry) {
    // If the resolver resolved to the existing declaration of this very name,
    // the ifunc would refer to itself — another cyclic case.
    if (GIF->getResolver() == Entry) {
      Diags.Report(Loc: IFA->getLocation(), DiagID: diag::err_cyclic_alias) << 1;
      return;
    }
    assert(Entry->isDeclaration());

    // If there is a declaration in the module, then we had an extern followed
    // by the ifunc, as in:
    //   extern int test();
    //   ...
    //   int test() __attribute__((ifunc("resolver")));
    //
    // Remove it and replace uses of it with the ifunc.
    GIF->takeName(V: Entry);

    Entry->replaceAllUsesWith(V: GIF);
    Entry->eraseFromParent();
  } else
    GIF->setName(MangledName);
  SetCommonAttributes(GD, GV: GIF);
}
6908
6909llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
6910 ArrayRef<llvm::Type*> Tys) {
6911 return llvm::Intrinsic::getOrInsertDeclaration(M: &getModule(),
6912 id: (llvm::Intrinsic::ID)IID, Tys);
6913}
6914
6915static llvm::StringMapEntry<llvm::GlobalVariable *> &
6916GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
6917 const StringLiteral *Literal, bool TargetIsLSB,
6918 bool &IsUTF16, unsigned &StringLength) {
6919 StringRef String = Literal->getString();
6920 unsigned NumBytes = String.size();
6921
6922 // Check for simple case.
6923 if (!Literal->containsNonAsciiOrNull()) {
6924 StringLength = NumBytes;
6925 return *Map.insert(KV: std::make_pair(x&: String, y: nullptr)).first;
6926 }
6927
6928 // Otherwise, convert the UTF8 literals into a string of shorts.
6929 IsUTF16 = true;
6930
6931 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
6932 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
6933 llvm::UTF16 *ToPtr = &ToBuf[0];
6934
6935 (void)llvm::ConvertUTF8toUTF16(sourceStart: &FromPtr, sourceEnd: FromPtr + NumBytes, targetStart: &ToPtr,
6936 targetEnd: ToPtr + NumBytes, flags: llvm::strictConversion);
6937
6938 // ConvertUTF8toUTF16 returns the length in ToPtr.
6939 StringLength = ToPtr - &ToBuf[0];
6940
6941 // Add an explicit null.
6942 *ToPtr = 0;
6943 return *Map.insert(KV: std::make_pair(
6944 x: StringRef(reinterpret_cast<const char *>(ToBuf.data()),
6945 (StringLength + 1) * 2),
6946 y: nullptr)).first;
6947}
6948
/// Emit (or reuse) the constant CFString / _NSCFConstantString object for the
/// given string literal. The emitted struct has four fields: the class "isa"
/// pointer, a flags word, a pointer to the backing character data, and the
/// character count. Swift-based CoreFoundation runtimes use a different class
/// symbol name and slightly different field widths, handled below.
ConstantAddress
CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
  unsigned StringLength = 0;
  bool isUTF16 = false;
  // Canonicalize the literal's bytes (UTF-16 in target byte order when
  // needed) and look them up in CFConstantStringMap. Entry.second caches the
  // finished CFString global for this byte sequence.
  llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
      GetConstantCFStringEntry(Map&: CFConstantStringMap, Literal,
                               TargetIsLSB: getDataLayout().isLittleEndian(), IsUTF16&: isUTF16,
                               StringLength);

  // Already emitted for this module: return the cached global.
  if (auto *C = Entry.second)
    return ConstantAddress(
        C, C->getValueType(), CharUnits::fromQuantity(Quantity: C->getAlignment()));

  const ASTContext &Context = getContext();
  const llvm::Triple &Triple = getTriple();

  // All Swift CF ABIs (Swift4_1 and later, plus the open-ended "Swift" value)
  // compare >= Swift in the enum; Swift4_1 additionally changes the flags
  // word and length width.
  const auto CFRuntime = getLangOpts().CFRuntime;
  const bool IsSwiftABI =
      static_cast<unsigned>(CFRuntime) >=
      static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift);
  const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1;

  // If we don't already have it, get __CFConstantStringClassReference.
  if (!CFConstantStringClassRef) {
    const char *CFConstantStringClassName = "__CFConstantStringClassReference";
    llvm::Type *Ty = getTypes().ConvertType(T: getContext().IntTy);
    Ty = llvm::ArrayType::get(ElementType: Ty, NumElements: 0);

    // Swift runtimes expose the class under a mangled Foundation symbol. The
    // mangling prefix differs per Swift ABI version ($s / $S / __T0), and
    // Darwin links SwiftFoundation rather than Foundation.
    switch (CFRuntime) {
    default: break;
    case LangOptions::CoreFoundationABI::Swift: [[fallthrough]];
    case LangOptions::CoreFoundationABI::Swift5_0:
      CFConstantStringClassName =
          Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
                              : "$s10Foundation19_NSCFConstantStringCN";
      Ty = IntPtrTy;
      break;
    case LangOptions::CoreFoundationABI::Swift4_2:
      CFConstantStringClassName =
          Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN"
                              : "$S10Foundation19_NSCFConstantStringCN";
      Ty = IntPtrTy;
      break;
    case LangOptions::CoreFoundationABI::Swift4_1:
      CFConstantStringClassName =
          Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN"
                              : "__T010Foundation19_NSCFConstantStringCN";
      Ty = IntPtrTy;
      break;
    }

    llvm::Constant *C = CreateRuntimeVariable(Ty, Name: CFConstantStringClassName);

    // On ELF and COFF the class symbol may actually be defined in this TU; if
    // a matching VarDecl is visible, adjust the linkage (and, on COFF, the
    // DLL storage class) accordingly.
    if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) {
      llvm::GlobalValue *GV = nullptr;

      if ((GV = dyn_cast<llvm::GlobalValue>(Val: C))) {
        IdentifierInfo &II = Context.Idents.get(Name: GV->getName());
        TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl();
        DeclContext *DC = TranslationUnitDecl::castToDeclContext(D: TUDecl);

        // Look for a translation-unit-scope variable with the same name.
        const VarDecl *VD = nullptr;
        for (const auto *Result : DC->lookup(Name: &II))
          if ((VD = dyn_cast<VarDecl>(Val: Result)))
            break;

        if (Triple.isOSBinFormatELF()) {
          if (!VD)
            GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
        } else {
          // COFF: import the symbol unless a dllexport'd definition exists
          // in this TU.
          GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
          if (!VD || !VD->hasAttr<DLLExportAttr>())
            GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
          else
            GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
        }

        setDSOLocal(GV);
      }
    }

    // Decay array -> ptr
    CFConstantStringClassRef =
        IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty) : C;
  }

  QualType CFTy = Context.getCFConstantStringType();

  auto *STy = cast<llvm::StructType>(Val: getTypes().ConvertType(T: CFTy));

  ConstantInitBuilder Builder(*this);
  auto Fields = Builder.beginStruct(structTy: STy);

  // Class pointer.
  Fields.addSignedPointer(Pointer: cast<llvm::Constant>(Val&: CFConstantStringClassRef),
                          Schema: getCodeGenOpts().PointerAuth.ObjCIsaPointers,
                          CalleeDecl: GlobalDecl(), CalleeType: QualType());

  // Flags. 0x07c8 / 0x07d0 distinguish an 8-bit vs. UTF-16 backing store;
  // the Swift ABI carries an additional leading word (0x05 for Swift 4.1).
  if (IsSwiftABI) {
    Fields.addInt(intTy: IntPtrTy, value: IsSwift4_1 ? 0x05 : 0x01);
    Fields.addInt(intTy: Int64Ty, value: isUTF16 ? 0x07d0 : 0x07c8);
  } else {
    Fields.addInt(intTy: IntTy, value: isUTF16 ? 0x07d0 : 0x07C8);
  }

  // String pointer. The map key stores UTF-16 data as raw bytes, so
  // reinterpret it as code units when building the constant array.
  llvm::Constant *C = nullptr;
  if (isUTF16) {
    auto Arr = llvm::ArrayRef(
        reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
        Entry.first().size() / 2);
    C = llvm::ConstantDataArray::get(Context&: VMContext, Elts: Arr);
  } else {
    C = llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Entry.first());
  }

  // Note: -fwritable-strings doesn't make the backing store strings of
  // CFStrings writable.
  auto *GV =
      new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
                               llvm::GlobalValue::PrivateLinkage, C, ".str");
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  // Don't enforce the target's minimum global alignment, since the only use
  // of the string is via this class initializer.
  CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(T: Context.ShortTy)
                            : Context.getTypeAlignInChars(T: Context.CharTy);
  GV->setAlignment(Align.getAsAlign());

  // FIXME: We set the section explicitly to avoid a bug in ld64 224.1.
  // Without it LLVM can merge the string with a non unnamed_addr one during
  // LTO. Doing that changes the section it ends in, which surprises ld64.
  if (Triple.isOSBinFormatMachO())
    GV->setSection(isUTF16 ? "__TEXT,__ustring"
                           : "__TEXT,__cstring,cstring_literals");
  // Make sure the literal ends up in .rodata to allow for safe ICF and for
  // the static linker to adjust permissions to read-only later on.
  else if (Triple.isOSBinFormatELF())
    GV->setSection(".rodata");

  // String.
  Fields.add(value: GV);

  // String length. Classic CF uses 'long'; Swift 4.x uses 32 bits; later
  // Swift ABIs use a pointer-sized integer.
  llvm::IntegerType *LengthTy =
      llvm::IntegerType::get(C&: getModule().getContext(),
                             NumBits: Context.getTargetInfo().getLongWidth());
  if (IsSwiftABI) {
    if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
        CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
      LengthTy = Int32Ty;
    else
      LengthTy = IntPtrTy;
  }
  Fields.addInt(intTy: LengthTy, value: StringLength);

  // Swift ABI requires 8-byte alignment to ensure that the _Atomic(uint64_t) is
  // properly aligned on 32-bit platforms.
  CharUnits Alignment =
      IsSwiftABI ? Context.toCharUnitsFromBits(BitSize: 64) : getPointerAlign();

  // The struct.
  GV = Fields.finishAndCreateGlobal(args: "_unnamed_cfstring_", args&: Alignment,
                                    /*isConstant=*/args: false,
                                    args: llvm::GlobalVariable::PrivateLinkage);
  GV->addAttribute(Kind: "objc_arc_inert");
  // Place the object in the per-format cfstring section so the runtime /
  // linker can find and unique constant strings.
  switch (Triple.getObjectFormat()) {
  case llvm::Triple::UnknownObjectFormat:
    llvm_unreachable("unknown file format");
  case llvm::Triple::DXContainer:
  case llvm::Triple::GOFF:
  case llvm::Triple::SPIRV:
  case llvm::Triple::XCOFF:
    llvm_unreachable("unimplemented");
  case llvm::Triple::COFF:
  case llvm::Triple::ELF:
  case llvm::Triple::Wasm:
    GV->setSection("cfstring");
    break;
  case llvm::Triple::MachO:
    GV->setSection("__DATA,__cfstring");
    break;
  }
  // Cache for subsequent uses of the same literal bytes.
  Entry.second = GV;

  return ConstantAddress(GV, GV->getValueType(), Alignment);
}
7136
7137bool CodeGenModule::getExpressionLocationsEnabled() const {
7138 return !CodeGenOpts.EmitCodeView || CodeGenOpts.DebugColumnInfo;
7139}
7140
7141QualType CodeGenModule::getObjCFastEnumerationStateType() {
7142 if (ObjCFastEnumerationStateType.isNull()) {
7143 RecordDecl *D = Context.buildImplicitRecord(Name: "__objcFastEnumerationState");
7144 D->startDefinition();
7145
7146 QualType FieldTypes[] = {
7147 Context.UnsignedLongTy, Context.getPointerType(T: Context.getObjCIdType()),
7148 Context.getPointerType(T: Context.UnsignedLongTy),
7149 Context.getConstantArrayType(EltTy: Context.UnsignedLongTy, ArySize: llvm::APInt(32, 5),
7150 SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0)};
7151
7152 for (size_t i = 0; i < 4; ++i) {
7153 FieldDecl *Field = FieldDecl::Create(C: Context,
7154 DC: D,
7155 StartLoc: SourceLocation(),
7156 IdLoc: SourceLocation(), Id: nullptr,
7157 T: FieldTypes[i], /*TInfo=*/nullptr,
7158 /*BitWidth=*/BW: nullptr,
7159 /*Mutable=*/false,
7160 InitStyle: ICIS_NoInit);
7161 Field->setAccess(AS_public);
7162 D->addDecl(D: Field);
7163 }
7164
7165 D->completeDefinition();
7166 ObjCFastEnumerationStateType = Context.getCanonicalTagType(TD: D);
7167 }
7168
7169 return ObjCFastEnumerationStateType;
7170}
7171
7172llvm::Constant *
7173CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
7174 assert(!E->getType()->isPointerType() && "Strings are always arrays");
7175
7176 // Don't emit it as the address of the string, emit the string data itself
7177 // as an inline array.
7178 if (E->getCharByteWidth() == 1) {
7179 SmallString<64> Str(E->getString());
7180
7181 // Resize the string to the right size, which is indicated by its type.
7182 const ConstantArrayType *CAT = Context.getAsConstantArrayType(T: E->getType());
7183 assert(CAT && "String literal not of constant array type!");
7184 Str.resize(N: CAT->getZExtSize());
7185 return llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Str, AddNull: false);
7186 }
7187
7188 auto *AType = cast<llvm::ArrayType>(Val: getTypes().ConvertType(T: E->getType()));
7189 llvm::Type *ElemTy = AType->getElementType();
7190 unsigned NumElements = AType->getNumElements();
7191
7192 // Wide strings have either 2-byte or 4-byte elements.
7193 if (ElemTy->getPrimitiveSizeInBits() == 16) {
7194 SmallVector<uint16_t, 32> Elements;
7195 Elements.reserve(N: NumElements);
7196
7197 for(unsigned i = 0, e = E->getLength(); i != e; ++i)
7198 Elements.push_back(Elt: E->getCodeUnit(i));
7199 Elements.resize(N: NumElements);
7200 return llvm::ConstantDataArray::get(Context&: VMContext, Elts&: Elements);
7201 }
7202
7203 assert(ElemTy->getPrimitiveSizeInBits() == 32);
7204 SmallVector<uint32_t, 32> Elements;
7205 Elements.reserve(N: NumElements);
7206
7207 for(unsigned i = 0, e = E->getLength(); i != e; ++i)
7208 Elements.push_back(Elt: E->getCodeUnit(i));
7209 Elements.resize(N: NumElements);
7210 return llvm::ConstantDataArray::get(Context&: VMContext, Elts&: Elements);
7211}
7212
7213static llvm::GlobalVariable *
7214GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
7215 CodeGenModule &CGM, StringRef GlobalName,
7216 CharUnits Alignment) {
7217 unsigned AddrSpace = CGM.getContext().getTargetAddressSpace(
7218 AS: CGM.GetGlobalConstantAddressSpace());
7219
7220 llvm::Module &M = CGM.getModule();
7221 // Create a global variable for this string
7222 auto *GV = new llvm::GlobalVariable(
7223 M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
7224 nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
7225 GV->setAlignment(Alignment.getAsAlign());
7226 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
7227 if (GV->isWeakForLinker()) {
7228 assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
7229 GV->setComdat(M.getOrInsertComdat(Name: GV->getName()));
7230 }
7231 CGM.setDSOLocal(GV);
7232
7233 return GV;
7234}
7235
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
/// constant array for the given string literal. Identical read-only literals
/// are deduplicated through ConstantStringMap; writable strings always get
/// fresh storage.
ConstantAddress
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
                                                  StringRef Name) {
  CharUnits Alignment =
      getContext().getAlignOfGlobalVarInChars(T: S->getType(), /*VD=*/nullptr);

  llvm::Constant *C = GetConstantArrayFromStringLiteral(E: S);
  // Look for a previously-emitted global with the same initializer; Entry
  // stays null when deduplication is disabled (-fwritable-strings).
  llvm::GlobalVariable **Entry = nullptr;
  if (!LangOpts.WritableStrings) {
    Entry = &ConstantStringMap[C];
    if (auto GV = *Entry) {
      // Raise the cached global's alignment if this use needs more.
      if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
        GV->setAlignment(Alignment.getAsAlign());
      return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV),
                             GV->getValueType(), Alignment);
    }
  }

  SmallString<256> MangledNameBuffer;
  StringRef GlobalVariableName;
  llvm::GlobalValue::LinkageTypes LT;

  // Mangle the string literal if that's how the ABI merges duplicate strings.
  // Don't do it if they are writable, since we don't want writes in one TU to
  // affect strings in another.
  if (getCXXABI().getMangleContext().shouldMangleStringLiteral(SL: S) &&
      !LangOpts.WritableStrings) {
    llvm::raw_svector_ostream Out(MangledNameBuffer);
    getCXXABI().getMangleContext().mangleStringLiteral(SL: S, Out);
    // Mangled names are merged across TUs via linkonce_odr.
    LT = llvm::GlobalValue::LinkOnceODRLinkage;
    GlobalVariableName = MangledNameBuffer;
  } else {
    LT = llvm::GlobalValue::PrivateLinkage;
    GlobalVariableName = Name;
  }

  auto GV = GenerateStringLiteral(C, LT, CGM&: *this, GlobalName: GlobalVariableName, Alignment);

  // Attach debug info so tools can map the global back to the literal.
  CGDebugInfo *DI = getModuleDebugInfo();
  if (DI && getCodeGenOpts().hasReducedDebugInfo())
    DI->AddStringLiteralDebugInfo(GV, S);

  if (Entry)
    *Entry = GV;

  SanitizerMD->reportGlobal(GV, Loc: S->getStrTokenLoc(TokNum: 0), Name: "<string literal>");

  return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV),
                         GV->getValueType(), Alignment);
}
7288
7289/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
7290/// array for the given ObjCEncodeExpr node.
7291ConstantAddress
7292CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
7293 std::string Str;
7294 getContext().getObjCEncodingForType(T: E->getEncodedType(), S&: Str);
7295
7296 return GetAddrOfConstantCString(Str);
7297}
7298
7299/// GetAddrOfConstantCString - Returns a pointer to a character array containing
7300/// the literal and a terminating '\0' character.
7301/// The result has pointer to array type.
7302ConstantAddress CodeGenModule::GetAddrOfConstantCString(const std::string &Str,
7303 StringRef GlobalName) {
7304 StringRef StrWithNull(Str.c_str(), Str.size() + 1);
7305 CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(
7306 T: getContext().CharTy, /*VD=*/nullptr);
7307
7308 llvm::Constant *C =
7309 llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: StrWithNull, AddNull: false);
7310
7311 // Don't share any string literals if strings aren't constant.
7312 llvm::GlobalVariable **Entry = nullptr;
7313 if (!LangOpts.WritableStrings) {
7314 Entry = &ConstantStringMap[C];
7315 if (auto GV = *Entry) {
7316 if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
7317 GV->setAlignment(Alignment.getAsAlign());
7318 return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV),
7319 GV->getValueType(), Alignment);
7320 }
7321 }
7322
7323 // Create a global variable for this.
7324 auto GV = GenerateStringLiteral(C, LT: llvm::GlobalValue::PrivateLinkage, CGM&: *this,
7325 GlobalName, Alignment);
7326 if (Entry)
7327 *Entry = GV;
7328
7329 return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV),
7330 GV->getValueType(), Alignment);
7331}
7332
/// Return the address of the global backing a lifetime-extended temporary
/// (static or thread storage duration). Handles recursive re-entry while the
/// initializer is being emitted by installing a placeholder global that is
/// RAUW'd with the real one at the end.
ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
    const MaterializeTemporaryExpr *E, const Expr *Init) {
  assert((E->getStorageDuration() == SD_Static ||
          E->getStorageDuration() == SD_Thread) && "not a global temporary");
  const auto *VD = cast<VarDecl>(Val: E->getExtendingDecl());

  // If we're not materializing a subobject of the temporary, keep the
  // cv-qualifiers from the type of the MaterializeTemporaryExpr.
  QualType MaterializedType = Init->getType();
  if (Init == E->getSubExpr())
    MaterializedType = E->getType();

  CharUnits Align = getContext().getTypeAlignInChars(T: MaterializedType);

  auto InsertResult = MaterializedGlobalTemporaryMap.insert(KV: {E, nullptr});
  if (!InsertResult.second) {
    // We've seen this before: either we already created it or we're in the
    // process of doing so.
    if (!InsertResult.first->second) {
      // We recursively re-entered this function, probably during emission of
      // the initializer. Create a placeholder. We'll clean this up in the
      // outer call, at the end of this function.
      llvm::Type *Type = getTypes().ConvertTypeForMem(T: MaterializedType);
      InsertResult.first->second = new llvm::GlobalVariable(
          getModule(), Type, false, llvm::GlobalVariable::InternalLinkage,
          nullptr);
    }
    // The cached entry may be an addrspace-cast of the real global; strip
    // casts to recover its value type.
    return ConstantAddress(InsertResult.first->second,
                           llvm::cast<llvm::GlobalVariable>(
                               Val: InsertResult.first->second->stripPointerCasts())
                               ->getValueType(),
                           Align);
  }

  // FIXME: If an externally-visible declaration extends multiple temporaries,
  // we need to give each temporary the same name in every translation unit (and
  // we also need to make the temporaries externally-visible).
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getCXXABI().getMangleContext().mangleReferenceTemporary(
      D: VD, ManglingNumber: E->getManglingNumber(), Out);

  APValue *Value = nullptr;
  if (E->getStorageDuration() == SD_Static && VD->evaluateValue()) {
    // If the initializer of the extending declaration is a constant
    // initializer, we should have a cached constant initializer for this
    // temporary. Note that this might have a different value from the value
    // computed by evaluating the initializer if the surrounding constant
    // expression modifies the temporary.
    Value = E->getOrCreateValue(MayCreate: false);
  }

  // Try evaluating it now, it might have a constant initializer.
  Expr::EvalResult EvalResult;
  if (!Value && Init->EvaluateAsRValue(Result&: EvalResult, Ctx: getContext()) &&
      !EvalResult.hasSideEffects())
    Value = &EvalResult.Val;

  LangAS AddrSpace = GetGlobalVarAddressSpace(D: VD);

  std::optional<ConstantEmitter> emitter;
  llvm::Constant *InitialValue = nullptr;
  bool Constant = false;
  llvm::Type *Type;
  if (Value) {
    // The temporary has a constant initializer, use it.
    emitter.emplace(args&: *this);
    InitialValue = emitter->emitForInitializer(value: *Value, destAddrSpace: AddrSpace,
                                               destType: MaterializedType);
    // The global can be marked constant only if the type has no mutable
    // state and a trivial destructor is not required.
    Constant =
        MaterializedType.isConstantStorage(Ctx: getContext(), /*ExcludeCtor*/ Value,
                                           /*ExcludeDtor*/ false);
    Type = InitialValue->getType();
  } else {
    // No initializer, the initialization will be provided when we
    // initialize the declaration which performed lifetime extension.
    Type = getTypes().ConvertTypeForMem(T: MaterializedType);
  }

  // Create a global variable for this lifetime-extended temporary.
  llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD);
  if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
    const VarDecl *InitVD;
    if (VD->isStaticDataMember() && VD->getAnyInitializer(D&: InitVD) &&
        isa<CXXRecordDecl>(Val: InitVD->getLexicalDeclContext())) {
      // Temporaries defined inside a class get linkonce_odr linkage because the
      // class can be defined in multiple translation units.
      Linkage = llvm::GlobalVariable::LinkOnceODRLinkage;
    } else {
      // There is no need for this temporary to have external linkage if the
      // VarDecl has external linkage.
      Linkage = llvm::GlobalVariable::InternalLinkage;
    }
  }
  auto TargetAS = getContext().getTargetAddressSpace(AS: AddrSpace);
  auto *GV = new llvm::GlobalVariable(
      getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(),
      /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  if (emitter) emitter->finalize(global: GV);
  // Don't assign dllimport or dllexport to local linkage globals.
  if (!llvm::GlobalValue::isLocalLinkage(Linkage)) {
    setGVProperties(GV, D: VD);
    if (GV->getDLLStorageClass() == llvm::GlobalVariable::DLLExportStorageClass)
      // The reference temporary should never be dllexport.
      GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
  }
  GV->setAlignment(Align.getAsAlign());
  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName()));
  if (VD->getTLSKind())
    setTLSMode(GV, D: *VD);
  // Cast into the default (generic) address space when callers expect it.
  llvm::Constant *CV = GV;
  if (AddrSpace != LangAS::Default)
    CV = performAddrSpaceCast(
        Src: GV, DestTy: llvm::PointerType::get(
                C&: getLLVMContext(),
                AddressSpace: getContext().getTargetAddressSpace(AS: LangAS::Default)));

  // Update the map with the new temporary. If we created a placeholder above,
  // replace it with the new global now.
  llvm::Constant *&Entry = MaterializedGlobalTemporaryMap[E];
  if (Entry) {
    Entry->replaceAllUsesWith(V: CV);
    llvm::cast<llvm::GlobalVariable>(Val: Entry)->eraseFromParent();
  }
  Entry = CV;

  return ConstantAddress(CV, Type, Align);
}
7462
7463/// EmitObjCPropertyImplementations - Emit information for synthesized
7464/// properties for an implementation.
7465void CodeGenModule::EmitObjCPropertyImplementations(const
7466 ObjCImplementationDecl *D) {
7467 for (const auto *PID : D->property_impls()) {
7468 // Dynamic is just for type-checking.
7469 if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
7470 ObjCPropertyDecl *PD = PID->getPropertyDecl();
7471
7472 // Determine which methods need to be implemented, some may have
7473 // been overridden. Note that ::isPropertyAccessor is not the method
7474 // we want, that just indicates if the decl came from a
7475 // property. What we want to know is if the method is defined in
7476 // this implementation.
7477 auto *Getter = PID->getGetterMethodDecl();
7478 if (!Getter || Getter->isSynthesizedAccessorStub())
7479 CodeGenFunction(*this).GenerateObjCGetter(
7480 IMP: const_cast<ObjCImplementationDecl *>(D), PID);
7481 auto *Setter = PID->getSetterMethodDecl();
7482 if (!PD->isReadOnly() && (!Setter || Setter->isSynthesizedAccessorStub()))
7483 CodeGenFunction(*this).GenerateObjCSetter(
7484 IMP: const_cast<ObjCImplementationDecl *>(D), PID);
7485 }
7486 }
7487}
7488
7489static bool needsDestructMethod(ObjCImplementationDecl *impl) {
7490 const ObjCInterfaceDecl *iface = impl->getClassInterface();
7491 for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
7492 ivar; ivar = ivar->getNextIvar())
7493 if (ivar->getType().isDestructedType())
7494 return true;
7495
7496 return false;
7497}
7498
7499static bool AllTrivialInitializers(CodeGenModule &CGM,
7500 ObjCImplementationDecl *D) {
7501 CodeGenFunction CGF(CGM);
7502 for (ObjCImplementationDecl::init_iterator B = D->init_begin(),
7503 E = D->init_end(); B != E; ++B) {
7504 CXXCtorInitializer *CtorInitExp = *B;
7505 Expr *Init = CtorInitExp->getInit();
7506 if (!CGF.isTrivialInitializer(Init))
7507 return false;
7508 }
7509 return true;
7510}
7511
/// EmitObjCIvarInitializations - Emit information for ivar initialization
/// for an implementation. Synthesizes the implicit .cxx_destruct and
/// .cxx_construct methods when ivars require non-trivial destruction or
/// initialization.
void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
  // We might need a .cxx_destruct even if we don't have any ivar initializers.
  if (needsDestructMethod(impl: D)) {
    // Build an implicit, required instance method `- (void).cxx_destruct`
    // and attach it to the implementation.
    const IdentifierInfo *II = &getContext().Idents.get(Name: ".cxx_destruct");
    Selector cxxSelector = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II);
    ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(
        C&: getContext(), beginLoc: D->getLocation(), endLoc: D->getLocation(), SelInfo: cxxSelector,
        T: getContext().VoidTy, ReturnTInfo: nullptr, contextDecl: D,
        /*isInstance=*/true, /*isVariadic=*/false,
        /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
        /*isImplicitlyDeclared=*/true,
        /*isDefined=*/false, impControl: ObjCImplementationControl::Required);
    D->addInstanceMethod(method: DTORMethod);
    CodeGenFunction(*this).GenerateObjCCtorDtorMethod(IMP: D, MD: DTORMethod, ctor: false);
    D->setHasDestructors(true);
  }

  // If the implementation doesn't have any ivar initializers, we don't need
  // a .cxx_construct.
  if (D->getNumIvarInitializers() == 0 ||
      AllTrivialInitializers(CGM&: *this, D))
    return;

  // Build the implicit, required instance method `- (id).cxx_construct`.
  const IdentifierInfo *II = &getContext().Idents.get(Name: ".cxx_construct");
  Selector cxxSelector = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II);
  // The constructor returns 'self'.
  ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(
      C&: getContext(), beginLoc: D->getLocation(), endLoc: D->getLocation(), SelInfo: cxxSelector,
      T: getContext().getObjCIdType(), ReturnTInfo: nullptr, contextDecl: D, /*isInstance=*/true,
      /*isVariadic=*/false,
      /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
      /*isImplicitlyDeclared=*/true,
      /*isDefined=*/false, impControl: ObjCImplementationControl::Required);
  D->addInstanceMethod(method: CTORMethod);
  CodeGenFunction(*this).GenerateObjCCtorDtorMethod(IMP: D, MD: CTORMethod, ctor: true);
  D->setHasNonZeroConstructors(true);
}
7551
7552// EmitLinkageSpec - Emit all declarations in a linkage spec.
7553void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
7554 if (LSD->getLanguage() != LinkageSpecLanguageIDs::C &&
7555 LSD->getLanguage() != LinkageSpecLanguageIDs::CXX) {
7556 ErrorUnsupported(D: LSD, Type: "linkage spec");
7557 return;
7558 }
7559
7560 EmitDeclContext(DC: LSD);
7561}
7562
/// Emit a C "top-level statement" (e.g. from the REPL / -fincremental-extensions
/// mode). Consecutive top-level statements are squashed into a single synthetic
/// function that is registered as a global initializer.
void CodeGenModule::EmitTopLevelStmt(const TopLevelStmtDecl *D) {
  // Device code should not be at top level.
  if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
    return;

  // The in-flight function accumulating top-level statements, if any.
  std::unique_ptr<CodeGenFunction> &CurCGF =
      GlobalTopLevelStmtBlockInFlight.first;

  // We emitted a top-level stmt but after it there is initialization.
  // Stop squashing the top-level stmts into a single function.
  if (CurCGF && CXXGlobalInits.back() != CurCGF->CurFn) {
    CurCGF->FinishFunction(EndLoc: D->getEndLoc());
    CurCGF = nullptr;
  }

  if (!CurCGF) {
    // void __stmts__N(void)
    // FIXME: Ask the ABI name mangler to pick a name.
    std::string Name = "__stmts__" + llvm::utostr(X: CXXGlobalInits.size());
    FunctionArgList Args;
    QualType RetTy = getContext().VoidTy;
    const CGFunctionInfo &FnInfo =
        getTypes().arrangeBuiltinFunctionDeclaration(resultType: RetTy, args: Args);
    llvm::FunctionType *FnTy = getTypes().GetFunctionType(Info: FnInfo);
    llvm::Function *Fn = llvm::Function::Create(
        Ty: FnTy, Linkage: llvm::GlobalValue::InternalLinkage, N: Name, M: &getModule());

    // Start a fresh function and register it so it runs with the other
    // global initializers.
    CurCGF.reset(p: new CodeGenFunction(*this));
    GlobalTopLevelStmtBlockInFlight.second = D;
    CurCGF->StartFunction(GD: GlobalDecl(), RetTy, Fn, FnInfo, Args,
                          Loc: D->getBeginLoc(), StartLoc: D->getBeginLoc());
    CXXGlobalInits.push_back(x: Fn);
  }

  // Append this statement's code to the in-flight function.
  CurCGF->EmitStmt(S: D->getStmt());
}
7599
7600void CodeGenModule::EmitDeclContext(const DeclContext *DC) {
7601 for (auto *I : DC->decls()) {
7602 // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
7603 // are themselves considered "top-level", so EmitTopLevelDecl on an
7604 // ObjCImplDecl does not recursively visit them. We need to do that in
7605 // case they're nested inside another construct (LinkageSpecDecl /
7606 // ExportDecl) that does stop them from being considered "top-level".
7607 if (auto *OID = dyn_cast<ObjCImplDecl>(Val: I)) {
7608 for (auto *M : OID->methods())
7609 EmitTopLevelDecl(D: M);
7610 }
7611
7612 EmitTopLevelDecl(D: I);
7613 }
7614}
7615
7616/// EmitTopLevelDecl - Emit code for a single top level declaration.
7617void CodeGenModule::EmitTopLevelDecl(Decl *D) {
7618 // Ignore dependent declarations.
7619 if (D->isTemplated())
7620 return;
7621
7622 // Consteval function shouldn't be emitted.
7623 if (auto *FD = dyn_cast<FunctionDecl>(Val: D); FD && FD->isImmediateFunction())
7624 return;
7625
7626 switch (D->getKind()) {
7627 case Decl::CXXConversion:
7628 case Decl::CXXMethod:
7629 case Decl::Function:
7630 EmitGlobal(GD: cast<FunctionDecl>(Val: D));
7631 // Always provide some coverage mapping
7632 // even for the functions that aren't emitted.
7633 AddDeferredUnusedCoverageMapping(D);
7634 break;
7635
7636 case Decl::CXXDeductionGuide:
7637 // Function-like, but does not result in code emission.
7638 break;
7639
7640 case Decl::Var:
7641 case Decl::Decomposition:
7642 case Decl::VarTemplateSpecialization:
7643 EmitGlobal(GD: cast<VarDecl>(Val: D));
7644 if (auto *DD = dyn_cast<DecompositionDecl>(Val: D))
7645 for (auto *B : DD->flat_bindings())
7646 if (auto *HD = B->getHoldingVar())
7647 EmitGlobal(GD: HD);
7648
7649 break;
7650
7651 // Indirect fields from global anonymous structs and unions can be
7652 // ignored; only the actual variable requires IR gen support.
7653 case Decl::IndirectField:
7654 break;
7655
7656 // C++ Decls
7657 case Decl::Namespace:
7658 EmitDeclContext(DC: cast<NamespaceDecl>(Val: D));
7659 break;
7660 case Decl::ClassTemplateSpecialization: {
7661 const auto *Spec = cast<ClassTemplateSpecializationDecl>(Val: D);
7662 if (CGDebugInfo *DI = getModuleDebugInfo())
7663 if (Spec->getSpecializationKind() ==
7664 TSK_ExplicitInstantiationDefinition &&
7665 Spec->hasDefinition())
7666 DI->completeTemplateDefinition(SD: *Spec);
7667 } [[fallthrough]];
7668 case Decl::CXXRecord: {
7669 CXXRecordDecl *CRD = cast<CXXRecordDecl>(Val: D);
7670 if (CGDebugInfo *DI = getModuleDebugInfo()) {
7671 if (CRD->hasDefinition())
7672 DI->EmitAndRetainType(
7673 Ty: getContext().getCanonicalTagType(TD: cast<RecordDecl>(Val: D)));
7674 if (auto *ES = D->getASTContext().getExternalSource())
7675 if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
7676 DI->completeUnusedClass(D: *CRD);
7677 }
7678 // Emit any static data members, they may be definitions.
7679 for (auto *I : CRD->decls())
7680 if (isa<VarDecl>(Val: I) || isa<CXXRecordDecl>(Val: I) || isa<EnumDecl>(Val: I))
7681 EmitTopLevelDecl(D: I);
7682 break;
7683 }
7684 // No code generation needed.
7685 case Decl::UsingShadow:
7686 case Decl::ClassTemplate:
7687 case Decl::VarTemplate:
7688 case Decl::Concept:
7689 case Decl::VarTemplatePartialSpecialization:
7690 case Decl::FunctionTemplate:
7691 case Decl::TypeAliasTemplate:
7692 case Decl::Block:
7693 case Decl::Empty:
7694 case Decl::Binding:
7695 break;
7696 case Decl::Using: // using X; [C++]
7697 if (CGDebugInfo *DI = getModuleDebugInfo())
7698 DI->EmitUsingDecl(UD: cast<UsingDecl>(Val&: *D));
7699 break;
7700 case Decl::UsingEnum: // using enum X; [C++]
7701 if (CGDebugInfo *DI = getModuleDebugInfo())
7702 DI->EmitUsingEnumDecl(UD: cast<UsingEnumDecl>(Val&: *D));
7703 break;
7704 case Decl::NamespaceAlias:
7705 if (CGDebugInfo *DI = getModuleDebugInfo())
7706 DI->EmitNamespaceAlias(NA: cast<NamespaceAliasDecl>(Val&: *D));
7707 break;
7708 case Decl::UsingDirective: // using namespace X; [C++]
7709 if (CGDebugInfo *DI = getModuleDebugInfo())
7710 DI->EmitUsingDirective(UD: cast<UsingDirectiveDecl>(Val&: *D));
7711 break;
7712 case Decl::CXXConstructor:
7713 getCXXABI().EmitCXXConstructors(D: cast<CXXConstructorDecl>(Val: D));
7714 break;
7715 case Decl::CXXDestructor:
7716 getCXXABI().EmitCXXDestructors(D: cast<CXXDestructorDecl>(Val: D));
7717 break;
7718
7719 case Decl::StaticAssert:
7720 // Nothing to do.
7721 break;
7722
7723 // Objective-C Decls
7724
7725 // Forward declarations, no (immediate) code generation.
7726 case Decl::ObjCInterface:
7727 case Decl::ObjCCategory:
7728 break;
7729
7730 case Decl::ObjCProtocol: {
7731 auto *Proto = cast<ObjCProtocolDecl>(Val: D);
7732 if (Proto->isThisDeclarationADefinition())
7733 ObjCRuntime->GenerateProtocol(OPD: Proto);
7734 break;
7735 }
7736
7737 case Decl::ObjCCategoryImpl:
7738 // Categories have properties but don't support synthesize so we
7739 // can ignore them here.
7740 ObjCRuntime->GenerateCategory(OCD: cast<ObjCCategoryImplDecl>(Val: D));
7741 break;
7742
7743 case Decl::ObjCImplementation: {
7744 auto *OMD = cast<ObjCImplementationDecl>(Val: D);
7745 EmitObjCPropertyImplementations(D: OMD);
7746 EmitObjCIvarInitializations(D: OMD);
7747 ObjCRuntime->GenerateClass(OID: OMD);
7748 // Emit global variable debug information.
7749 if (CGDebugInfo *DI = getModuleDebugInfo())
7750 if (getCodeGenOpts().hasReducedDebugInfo())
7751 DI->getOrCreateInterfaceType(Ty: getContext().getObjCInterfaceType(
7752 Decl: OMD->getClassInterface()), Loc: OMD->getLocation());
7753 break;
7754 }
7755 case Decl::ObjCMethod: {
7756 auto *OMD = cast<ObjCMethodDecl>(Val: D);
7757 // If this is not a prototype, emit the body.
7758 if (OMD->getBody())
7759 CodeGenFunction(*this).GenerateObjCMethod(OMD);
7760 break;
7761 }
7762 case Decl::ObjCCompatibleAlias:
7763 ObjCRuntime->RegisterAlias(OAD: cast<ObjCCompatibleAliasDecl>(Val: D));
7764 break;
7765
7766 case Decl::PragmaComment: {
7767 const auto *PCD = cast<PragmaCommentDecl>(Val: D);
7768 switch (PCD->getCommentKind()) {
7769 case PCK_Unknown:
7770 llvm_unreachable("unexpected pragma comment kind");
7771 case PCK_Linker:
7772 AppendLinkerOptions(Opts: PCD->getArg());
7773 break;
7774 case PCK_Lib:
7775 AddDependentLib(Lib: PCD->getArg());
7776 break;
7777 case PCK_Compiler:
7778 case PCK_ExeStr:
7779 case PCK_User:
7780 break; // We ignore all of these.
7781 }
7782 break;
7783 }
7784
7785 case Decl::PragmaDetectMismatch: {
7786 const auto *PDMD = cast<PragmaDetectMismatchDecl>(Val: D);
7787 AddDetectMismatch(Name: PDMD->getName(), Value: PDMD->getValue());
7788 break;
7789 }
7790
7791 case Decl::LinkageSpec:
7792 EmitLinkageSpec(LSD: cast<LinkageSpecDecl>(Val: D));
7793 break;
7794
7795 case Decl::FileScopeAsm: {
7796 // File-scope asm is ignored during device-side CUDA compilation.
7797 if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
7798 break;
7799 // File-scope asm is ignored during device-side OpenMP compilation.
7800 if (LangOpts.OpenMPIsTargetDevice)
7801 break;
7802 // File-scope asm is ignored during device-side SYCL compilation.
7803 if (LangOpts.SYCLIsDevice)
7804 break;
7805 auto *AD = cast<FileScopeAsmDecl>(Val: D);
7806 getModule().appendModuleInlineAsm(Asm: AD->getAsmString());
7807 break;
7808 }
7809
7810 case Decl::TopLevelStmt:
7811 EmitTopLevelStmt(D: cast<TopLevelStmtDecl>(Val: D));
7812 break;
7813
7814 case Decl::Import: {
7815 auto *Import = cast<ImportDecl>(Val: D);
7816
7817 // If we've already imported this module, we're done.
7818 if (!ImportedModules.insert(X: Import->getImportedModule()))
7819 break;
7820
7821 // Emit debug information for direct imports.
7822 if (!Import->getImportedOwningModule()) {
7823 if (CGDebugInfo *DI = getModuleDebugInfo())
7824 DI->EmitImportDecl(ID: *Import);
7825 }
7826
7827 // For C++ standard modules we are done - we will call the module
7828 // initializer for imported modules, and that will likewise call those for
7829 // any imports it has.
7830 if (CXX20ModuleInits && Import->getImportedModule() &&
7831 Import->getImportedModule()->isNamedModule())
7832 break;
7833
7834 // For clang C++ module map modules the initializers for sub-modules are
7835 // emitted here.
7836
7837 // Find all of the submodules and emit the module initializers.
7838 llvm::SmallPtrSet<clang::Module *, 16> Visited;
7839 SmallVector<clang::Module *, 16> Stack;
7840 Visited.insert(Ptr: Import->getImportedModule());
7841 Stack.push_back(Elt: Import->getImportedModule());
7842
7843 while (!Stack.empty()) {
7844 clang::Module *Mod = Stack.pop_back_val();
7845 if (!EmittedModuleInitializers.insert(Ptr: Mod).second)
7846 continue;
7847
7848 for (auto *D : Context.getModuleInitializers(M: Mod))
7849 EmitTopLevelDecl(D);
7850
7851 // Visit the submodules of this module.
7852 for (auto *Submodule : Mod->submodules()) {
7853 // Skip explicit children; they need to be explicitly imported to emit
7854 // the initializers.
7855 if (Submodule->IsExplicit)
7856 continue;
7857
7858 if (Visited.insert(Ptr: Submodule).second)
7859 Stack.push_back(Elt: Submodule);
7860 }
7861 }
7862 break;
7863 }
7864
7865 case Decl::Export:
7866 EmitDeclContext(DC: cast<ExportDecl>(Val: D));
7867 break;
7868
7869 case Decl::OMPThreadPrivate:
7870 EmitOMPThreadPrivateDecl(D: cast<OMPThreadPrivateDecl>(Val: D));
7871 break;
7872
7873 case Decl::OMPAllocate:
7874 EmitOMPAllocateDecl(D: cast<OMPAllocateDecl>(Val: D));
7875 break;
7876
7877 case Decl::OMPDeclareReduction:
7878 EmitOMPDeclareReduction(D: cast<OMPDeclareReductionDecl>(Val: D));
7879 break;
7880
7881 case Decl::OMPDeclareMapper:
7882 EmitOMPDeclareMapper(D: cast<OMPDeclareMapperDecl>(Val: D));
7883 break;
7884
7885 case Decl::OMPRequires:
7886 EmitOMPRequiresDecl(D: cast<OMPRequiresDecl>(Val: D));
7887 break;
7888
7889 case Decl::Typedef:
7890 case Decl::TypeAlias: // using foo = bar; [C++11]
7891 if (CGDebugInfo *DI = getModuleDebugInfo())
7892 DI->EmitAndRetainType(Ty: getContext().getTypedefType(
7893 Keyword: ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
7894 Decl: cast<TypedefNameDecl>(Val: D)));
7895 break;
7896
7897 case Decl::Record:
7898 if (CGDebugInfo *DI = getModuleDebugInfo())
7899 if (cast<RecordDecl>(Val: D)->getDefinition())
7900 DI->EmitAndRetainType(
7901 Ty: getContext().getCanonicalTagType(TD: cast<RecordDecl>(Val: D)));
7902 break;
7903
7904 case Decl::Enum:
7905 if (CGDebugInfo *DI = getModuleDebugInfo())
7906 if (cast<EnumDecl>(Val: D)->getDefinition())
7907 DI->EmitAndRetainType(
7908 Ty: getContext().getCanonicalTagType(TD: cast<EnumDecl>(Val: D)));
7909 break;
7910
7911 case Decl::HLSLRootSignature:
7912 getHLSLRuntime().addRootSignature(D: cast<HLSLRootSignatureDecl>(Val: D));
7913 break;
7914 case Decl::HLSLBuffer:
7915 getHLSLRuntime().addBuffer(D: cast<HLSLBufferDecl>(Val: D));
7916 break;
7917
7918 case Decl::OpenACCDeclare:
7919 EmitOpenACCDeclare(D: cast<OpenACCDeclareDecl>(Val: D));
7920 break;
7921 case Decl::OpenACCRoutine:
7922 EmitOpenACCRoutine(D: cast<OpenACCRoutineDecl>(Val: D));
7923 break;
7924
7925 default:
7926 // Make sure we handled everything we should, every other kind is a
7927 // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
7928 // function. Need to recode Decl::Kind to do that easily.
7929 assert(isa<TypeDecl>(D) && "Unsupported decl kind");
7930 break;
7931 }
7932}
7933
/// Remember a function-like decl that has a body but may never be emitted,
/// so an empty coverage mapping can be produced for it later (see
/// EmitDeferredUnusedCoverageMappings).
void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
  // Do we need to generate coverage mapping?
  if (!CodeGenOpts.CoverageMapping)
    return;
  switch (D->getKind()) {
  case Decl::CXXConversion:
  case Decl::CXXMethod:
  case Decl::Function:
  case Decl::ObjCMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor: {
    // Only declarations that actually carry a body can need a mapping.
    // NOTE(review): the ObjCMethod case also reaches this cast<FunctionDecl>,
    // which would assert for an ObjCMethodDecl — confirm ObjC methods cannot
    // actually get here.
    if (!cast<FunctionDecl>(Val: D)->doesThisDeclarationHaveABody())
      break;
    SourceManager &SM = getContext().getSourceManager();
    // In limited-coverage mode, only decls in the main file are mapped.
    if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(SpellingLoc: D->getBeginLoc()))
      break;
    // Skip system headers unless system-header coverage was requested.
    if (!llvm::coverage::SystemHeadersCoverage &&
        SM.isInSystemHeader(Loc: D->getBeginLoc()))
      break;
    // 'true' = still needs an empty mapping; ClearUnusedCoverageMapping
    // flips it to false if the body does get emitted.
    DeferredEmptyCoverageMappingDecls.try_emplace(Key: D, Args: true);
    break;
  }
  default:
    break;
  };
}
7960
/// Mark \p D as emitted so no empty coverage mapping is produced for it.
void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
  // Do we need to generate coverage mapping?
  if (!CodeGenOpts.CoverageMapping)
    return;
  // For template instantiations, clear the instantiation pattern's entry as
  // well, since the pattern was deferred alongside it.
  if (const auto *Fn = dyn_cast<FunctionDecl>(Val: D)) {
    if (Fn->isTemplateInstantiation())
      ClearUnusedCoverageMapping(D: Fn->getTemplateInstantiationPattern());
  }
  // 'false' = this decl no longer needs an empty coverage mapping.
  DeferredEmptyCoverageMappingDecls.insert_or_assign(Key: D, Val: false);
}
7971
/// Emit empty ("unused") coverage mappings for every deferred decl whose
/// body was never emitted, so coverage reports still account for those
/// functions.
void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
  // We call takeVector() here to avoid use-after-free.
  // FIXME: DeferredEmptyCoverageMappingDecls is getting mutated because
  // we deserialize function bodies to emit coverage info for them, and that
  // deserializes more declarations. How should we handle that case?
  for (const auto &Entry : DeferredEmptyCoverageMappingDecls.takeVector()) {
    // Entries flipped to false by ClearUnusedCoverageMapping were emitted
    // normally and need no empty mapping.
    if (!Entry.second)
      continue;
    const Decl *D = Entry.first;
    switch (D->getKind()) {
    case Decl::CXXConversion:
    case Decl::CXXMethod:
    case Decl::Function:
    case Decl::ObjCMethod: {
      CodeGenPGO PGO(*this);
      GlobalDecl GD(cast<FunctionDecl>(Val: D));
      PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD),
                                  Linkage: getFunctionLinkage(GD));
      break;
    }
    case Decl::CXXConstructor: {
      // Use the base-object constructor variant for the mangled name.
      CodeGenPGO PGO(*this);
      GlobalDecl GD(cast<CXXConstructorDecl>(Val: D), Ctor_Base);
      PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD),
                                  Linkage: getFunctionLinkage(GD));
      break;
    }
    case Decl::CXXDestructor: {
      // Likewise, the base-object destructor variant.
      CodeGenPGO PGO(*this);
      GlobalDecl GD(cast<CXXDestructorDecl>(Val: D), Dtor_Base);
      PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD),
                                  Linkage: getFunctionLinkage(GD));
      break;
    }
    default:
      break;
    };
  }
}
8011
/// If this module defines a no-argument, int-returning "main", emit a
/// hidden-visibility "__main_void" alias to it.
void CodeGenModule::EmitMainVoidAlias() {
  // In order to transition away from "__original_main" gracefully, emit an
  // alias for "main" in the no-argument case so that libc can detect when
  // new-style no-argument main is in use.
  if (llvm::Function *F = getModule().getFunction(Name: "main")) {
    // Only a defined main() taking no arguments and returning 'int' (at the
    // target's int width) qualifies.
    if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() &&
        F->getReturnType()->isIntegerTy(Bitwidth: Context.getTargetInfo().getIntWidth())) {
      auto *GA = llvm::GlobalAlias::create(Name: "__main_void", Aliasee: F);
      GA->setVisibility(llvm::GlobalValue::HiddenVisibility);
    }
  }
}
8024
8025/// Turns the given pointer into a constant.
8026static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
8027 const void *Ptr) {
8028 uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
8029 llvm::Type *i64 = llvm::Type::getInt64Ty(C&: Context);
8030 return llvm::ConstantInt::get(Ty: i64, V: PtrInt);
8031}
8032
/// Append an (Addr, Decl pointer) pair to the module-level
/// "clang.global.decl.ptrs" named metadata node, creating the node lazily on
/// first use via the \p GlobalMetadata in/out parameter.
static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
                                   llvm::NamedMDNode *&GlobalMetadata,
                                   GlobalDecl D,
                                   llvm::GlobalValue *Addr) {
  if (!GlobalMetadata)
    GlobalMetadata =
      CGM.getModule().getOrInsertNamedMetadata(Name: "clang.global.decl.ptrs");

  // TODO: should we report variant information for ctors/dtors?
  // Operand layout: { global value, decl pointer encoded as i64 }.
  llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(C: Addr),
                           llvm::ConstantAsMetadata::get(C: GetPointerConstant(
                               Context&: CGM.getLLVMContext(), Ptr: D.getDecl()))};
  GlobalMetadata->addOperand(M: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: Ops));
}
8047
/// Check whether every use of \p Elem is an ifunc (possibly through a
/// bitcast) and, if so, repoint those ifuncs' resolvers at \p CppFunc and
/// erase \p Elem. Returns true when the replacement was performed; returns
/// false (leaving the IR untouched) if \p Elem == \p CppFunc or any use is
/// not an ifunc.
bool CodeGenModule::CheckAndReplaceExternCIFuncs(llvm::GlobalValue *Elem,
                                                 llvm::GlobalValue *CppFunc) {
  // Store the list of ifuncs we need to replace uses in.
  llvm::SmallVector<llvm::GlobalIFunc *> IFuncs;
  // List of ConstantExprs that we should be able to delete when we're done
  // here.
  llvm::SmallVector<llvm::ConstantExpr *> CEs;

  // It isn't valid to replace the extern-C ifuncs if all we find is itself!
  if (Elem == CppFunc)
    return false;

  // First make sure that all users of this are ifuncs (or ifuncs via a
  // bitcast), and collect the list of ifuncs and CEs so we can work on them
  // later.
  for (llvm::User *User : Elem->users()) {
    // Users can either be a bitcast ConstExpr that is used by the ifuncs, OR an
    // ifunc directly. In any other case, just give up, as we don't know what we
    // could break by changing those.
    if (auto *ConstExpr = dyn_cast<llvm::ConstantExpr>(Val: User)) {
      if (ConstExpr->getOpcode() != llvm::Instruction::BitCast)
        return false;

      for (llvm::User *CEUser : ConstExpr->users()) {
        if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: CEUser)) {
          IFuncs.push_back(Elt: IFunc);
        } else {
          return false;
        }
      }
      CEs.push_back(Elt: ConstExpr);
    } else if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: User)) {
      IFuncs.push_back(Elt: IFunc);
    } else {
      // This user is one we don't know how to handle, so fail redirection. This
      // will result in an ifunc retaining a resolver name that will ultimately
      // fail to be resolved to a defined function.
      return false;
    }
  }

  // Now we know this is a valid case where we can do this alias replacement, we
  // need to remove all of the references to Elem (and the bitcasts!) so we can
  // delete it.
  for (llvm::GlobalIFunc *IFunc : IFuncs)
    IFunc->setResolver(nullptr);
  for (llvm::ConstantExpr *ConstExpr : CEs)
    ConstExpr->destroyConstant();

  // We should now be out of uses for the 'old' version of this function, so we
  // can erase it as well.
  Elem->eraseFromParent();

  for (llvm::GlobalIFunc *IFunc : IFuncs) {
    // The type of the resolver is always just a function-type that returns the
    // type of the IFunc, so create that here. If the type of the actual
    // resolver doesn't match, it just gets bitcast to the right thing.
    auto *ResolverTy =
        llvm::FunctionType::get(Result: IFunc->getType(), /*isVarArg*/ false);
    llvm::Constant *Resolver = GetOrCreateLLVMFunction(
        MangledName: CppFunc->getName(), Ty: ResolverTy, GD: {}, /*ForVTable*/ false);
    IFunc->setResolver(Resolver);
  }
  return true;
}
8113
8114/// For each function which is declared within an extern "C" region and marked
8115/// as 'used', but has internal linkage, create an alias from the unmangled
8116/// name to the mangled name if possible. People expect to be able to refer
8117/// to such functions with an unmangled name from inline assembly within the
8118/// same translation unit.
8119void CodeGenModule::EmitStaticExternCAliases() {
8120 if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases())
8121 return;
8122 for (auto &I : StaticExternCValues) {
8123 const IdentifierInfo *Name = I.first;
8124 llvm::GlobalValue *Val = I.second;
8125
8126 // If Val is null, that implies there were multiple declarations that each
8127 // had a claim to the unmangled name. In this case, generation of the alias
8128 // is suppressed. See CodeGenModule::MaybeHandleStaticInExternC.
8129 if (!Val)
8130 break;
8131
8132 llvm::GlobalValue *ExistingElem =
8133 getModule().getNamedValue(Name: Name->getName());
8134
8135 // If there is either not something already by this name, or we were able to
8136 // replace all uses from IFuncs, create the alias.
8137 if (!ExistingElem || CheckAndReplaceExternCIFuncs(Elem: ExistingElem, CppFunc: Val))
8138 addCompilerUsedGlobal(GV: llvm::GlobalAlias::create(Name: Name->getName(), Aliasee: Val));
8139 }
8140}
8141
8142bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName,
8143 GlobalDecl &Result) const {
8144 auto Res = Manglings.find(Key: MangledName);
8145 if (Res == Manglings.end())
8146 return false;
8147 Result = Res->getValue();
8148 return true;
8149}
8150
/// Emits metadata nodes associating all the global values in the
/// current module with the Decls they came from. This is useful for
/// projects using IR gen as a subroutine.
///
/// Since there's currently no way to associate an MDNode directly
/// with an llvm::GlobalValue, we create a global named metadata
/// with the name 'clang.global.decl.ptrs'.
void CodeGenModule::EmitDeclMetadata() {
  // Created lazily by EmitGlobalDeclMetadata on first use.
  llvm::NamedMDNode *GlobalMetadata = nullptr;

  for (auto &I : MangledDeclNames) {
    llvm::GlobalValue *Addr = getModule().getNamedValue(Name: I.second);
    // Some mangled names don't necessarily have an associated GlobalValue
    // in this module, e.g. if we mangled it for DebugInfo.
    if (Addr)
      EmitGlobalDeclMetadata(CGM&: *this, GlobalMetadata, D: I.first, Addr);
  }
}
8169
/// Emits metadata nodes for all the local variables in the current
/// function.
void CodeGenFunction::EmitDeclMetadata() {
  if (LocalDeclMap.empty()) return;

  llvm::LLVMContext &Context = getLLVMContext();

  // Find the unique metadata ID for this name.
  unsigned DeclPtrKind = Context.getMDKindID(Name: "clang.decl.ptr");

  llvm::NamedMDNode *GlobalMetadata = nullptr;

  for (auto &I : LocalDeclMap) {
    const Decl *D = I.first;
    llvm::Value *Addr = I.second.emitRawPointer(CGF&: *this);
    if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Val: Addr)) {
      // Stack variables: attach the decl pointer directly to the alloca as
      // instruction-level "clang.decl.ptr" metadata.
      llvm::Value *DAddr = GetPointerConstant(Context&: getLLVMContext(), Ptr: D);
      Alloca->setMetadata(
          KindID: DeclPtrKind, Node: llvm::MDNode::get(
                            Context, MDs: llvm::ValueAsMetadata::getConstant(C: DAddr)));
    } else if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: Addr)) {
      // Locals backed by a global value go through the module-level named
      // metadata instead, like other globals.
      GlobalDecl GD = GlobalDecl(cast<VarDecl>(Val: D));
      EmitGlobalDeclMetadata(CGM, GlobalMetadata, D: GD, Addr: GV);
    }
  }
}
8196
8197void CodeGenModule::EmitVersionIdentMetadata() {
8198 llvm::NamedMDNode *IdentMetadata =
8199 TheModule.getOrInsertNamedMetadata(Name: "llvm.ident");
8200 std::string Version = getClangFullVersion();
8201 llvm::LLVMContext &Ctx = TheModule.getContext();
8202
8203 llvm::Metadata *IdentNode[] = {llvm::MDString::get(Context&: Ctx, Str: Version)};
8204 IdentMetadata->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: IdentNode));
8205}
8206
8207void CodeGenModule::EmitCommandLineMetadata() {
8208 llvm::NamedMDNode *CommandLineMetadata =
8209 TheModule.getOrInsertNamedMetadata(Name: "llvm.commandline");
8210 std::string CommandLine = getCodeGenOpts().RecordCommandLine;
8211 llvm::LLVMContext &Ctx = TheModule.getContext();
8212
8213 llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Context&: Ctx, Str: CommandLine)};
8214 CommandLineMetadata->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: CommandLineNode));
8215}
8216
/// Attach gcov file names to the module: for every debug-info compile unit,
/// add a (notes file, data file, CU) triple to the "llvm.gcov" named
/// metadata.
void CodeGenModule::EmitCoverageFile() {
  llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata(Name: "llvm.dbg.cu");
  // Without debug-info compile units there is nothing to annotate.
  if (!CUNode)
    return;

  llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata(Name: "llvm.gcov");
  llvm::LLVMContext &Ctx = TheModule.getContext();
  auto *CoverageDataFile =
      llvm::MDString::get(Context&: Ctx, Str: getCodeGenOpts().CoverageDataFile);
  auto *CoverageNotesFile =
      llvm::MDString::get(Context&: Ctx, Str: getCodeGenOpts().CoverageNotesFile);
  // One operand per compile unit, all sharing the same file names.
  for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
    llvm::MDNode *CU = CUNode->getOperand(i);
    llvm::Metadata *Elts[] = {CoverageNotesFile, CoverageDataFile, CU};
    GCov->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: Elts));
  }
}
8234
/// Return the address of the RTTI descriptor for \p Ty, or a null pointer
/// constant when RTTI emission is disabled and the request is not for
/// exception handling.
llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
                                                       bool ForEH) {
  // Return a bogus pointer if RTTI is disabled, unless it's for EH.
  // FIXME: should we even be calling this method if RTTI is disabled
  // and it's not for EH?
  if (!shouldEmitRTTI(ForEH))
    return llvm::Constant::getNullValue(Ty: GlobalsInt8PtrTy);

  // GNU-family ObjC runtimes supply their own EH type descriptors for ObjC
  // object pointers.
  if (ForEH && Ty->isObjCObjectPointerType() &&
      LangOpts.ObjCRuntime.isGNUFamily())
    return ObjCRuntime->GetEHType(T: Ty);

  // Otherwise the C++ ABI provides the descriptor.
  return getCXXABI().getAddrOfRTTIDescriptor(Ty);
}
8249
/// Emit OpenMP threadprivate definitions for every variable referenced by
/// \p D, queueing any runtime-produced initialization functions with the
/// global initializers.
void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
  // Do not emit threadprivates in simd-only mode.
  if (LangOpts.OpenMP && LangOpts.OpenMPSimd)
    return;
  for (auto RefExpr : D->varlist()) {
    auto *VD = cast<VarDecl>(Val: cast<DeclRefExpr>(Val: RefExpr)->getDecl());
    // Dynamic initialization is only needed when an initializer exists and
    // is not a constant expression.
    bool PerformInit =
        VD->getAnyInitializer() &&
        !VD->getAnyInitializer()->isConstantInitializer(Ctx&: getContext(),
                                                        /*ForRef=*/false);

    Address Addr(GetAddrOfGlobalVar(D: VD),
                 getTypes().ConvertTypeForMem(T: VD->getType()),
                 getContext().getDeclAlign(D: VD));
    // If the OpenMP runtime produced an init function, run it alongside the
    // other C++ global initializers.
    if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
            VD, VDAddr: Addr, Loc: RefExpr->getBeginLoc(), PerformInit))
      CXXGlobalInits.push_back(x: InitFunction);
  }
}
8269
/// Create (or return the cached) CFI type identifier for \p T in \p Map,
/// appending \p Suffix to distinguish the identifier namespace.
llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
                                            StringRef Suffix) {
  // Exception specifications are not part of the CFI type identity, so strip
  // them from function prototypes before mangling.
  if (auto *FnType = T->getAs<FunctionProtoType>())
    T = getContext().getFunctionType(
        ResultTy: FnType->getReturnType(), Args: FnType->getParamTypes(),
        EPI: FnType->getExtProtoInfo().withExceptionSpec(ESI: EST_None));

  // One cached identifier per canonical type.
  llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
  if (InternalId)
    return InternalId;

  if (isExternallyVisible(L: T->getLinkage())) {
    // Externally visible types get a string identifier derived from the
    // mangled type name, so it matches across translation units.
    std::string OutName;
    llvm::raw_string_ostream Out(OutName);
    getCXXABI().getMangleContext().mangleCanonicalTypeName(
        T, Out, NormalizeIntegers: getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);

    if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
      Out << ".normalized";

    Out << Suffix;

    InternalId = llvm::MDString::get(Context&: getLLVMContext(), Str: Out.str());
  } else {
    // Internal-linkage types cannot collide across TUs; a distinct unnamed
    // node suffices.
    InternalId = llvm::MDNode::getDistinct(Context&: getLLVMContext(),
                                           MDs: llvm::ArrayRef<llvm::Metadata *>());
  }

  return InternalId;
}
8301
8302llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForFnType(QualType T) {
8303 assert(isa<FunctionType>(T));
8304 T = GeneralizeFunctionType(
8305 Ctx&: getContext(), Ty: T, GeneralizePointers: getCodeGenOpts().SanitizeCfiICallGeneralizePointers);
8306 if (getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
8307 return CreateMetadataIdentifierGeneralized(T);
8308 return CreateMetadataIdentifierForType(T);
8309}
8310
/// Plain CFI type identifier: no suffix, cached in MetadataIdMap.
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
  return CreateMetadataIdentifierImpl(T, Map&: MetadataIdMap, Suffix: "");
}
8314
/// CFI identifier for virtual member-function-pointer checks; uses the
/// distinct ".virtual" suffix and its own cache.
llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) {
  return CreateMetadataIdentifierImpl(T, Map&: VirtualMetadataIdMap, Suffix: ".virtual");
}
8319
/// CFI identifier for pointer-generalized function types; uses the
/// ".generalized" suffix and its own cache.
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) {
  return CreateMetadataIdentifierImpl(T, Map&: GeneralizedMetadataIdMap,
                                      Suffix: ".generalized");
}
8324
8325/// Returns whether this module needs the "all-vtables" type identifier.
8326bool CodeGenModule::NeedAllVtablesTypeId() const {
8327 // Returns true if at least one of vtable-based CFI checkers is enabled and
8328 // is not in the trapping mode.
8329 return ((LangOpts.Sanitize.has(K: SanitizerKind::CFIVCall) &&
8330 !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFIVCall)) ||
8331 (LangOpts.Sanitize.has(K: SanitizerKind::CFINVCall) &&
8332 !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFINVCall)) ||
8333 (LangOpts.Sanitize.has(K: SanitizerKind::CFIDerivedCast) &&
8334 !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFIDerivedCast)) ||
8335 (LangOpts.Sanitize.has(K: SanitizerKind::CFIUnrelatedCast) &&
8336 !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFIUnrelatedCast)));
8337}
8338
/// Attach CFI type metadata to \p VTable for class \p RD at the given
/// \p Offset (the address point within the vtable).
void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
                                          CharUnits Offset,
                                          const CXXRecordDecl *RD) {
  CanQualType T = getContext().getCanonicalTagType(TD: RD);
  llvm::Metadata *MD = CreateMetadataIdentifierForType(T);
  VTable->addTypeMetadata(Offset: Offset.getQuantity(), TypeID: MD);

  // Cross-DSO CFI additionally needs a numeric type id usable across module
  // boundaries.
  if (CodeGenOpts.SanitizeCfiCrossDso)
    if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
      VTable->addTypeMetadata(Offset: Offset.getQuantity(),
                              TypeID: llvm::ConstantAsMetadata::get(C: CrossDsoTypeId));

  // Some CFI modes match against any vtable; tag this one with the shared
  // "all-vtables" identifier when required.
  if (NeedAllVtablesTypeId()) {
    llvm::Metadata *MD = llvm::MDString::get(Context&: getLLVMContext(), Str: "all-vtables");
    VTable->addTypeMetadata(Offset: Offset.getQuantity(), TypeID: MD);
  }
}
8356
8357llvm::SanitizerStatReport &CodeGenModule::getSanStats() {
8358 if (!SanStats)
8359 SanStats = std::make_unique<llvm::SanitizerStatReport>(args: &getModule());
8360
8361 return *SanStats;
8362}
8363
/// Lower an OpenCL sampler initializer: evaluate \p E to a constant and emit
/// a call to __translate_sampler_initializer to produce the sampler value.
llvm::Value *
CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
                                                  CodeGenFunction &CGF) {
  llvm::Constant *C = ConstantEmitter(CGF).emitAbstract(E, T: E->getType());
  auto *SamplerT = getOpenCLRuntime().getSamplerType(T: E->getType().getTypePtr());
  // The translator takes the evaluated constant and returns a sampler.
  auto *FTy = llvm::FunctionType::get(Result: SamplerT, Params: {C->getType()}, isVarArg: false);
  auto *Call = CGF.EmitRuntimeCall(
      callee: CreateRuntimeFunction(FTy, Name: "__translate_sampler_initializer"), args: {C});
  return Call;
}
8374
/// Alignment of \p T's pointee, computed with pointee-type rules (see
/// getNaturalTypeAlignment with forPointeeType = true).
CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
    QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T: T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}
8380
/// Compute the "natural" alignment of \p T for forming l-values, optionally
/// reporting the alignment's provenance (\p BaseInfo) and TBAA access info.
/// When \p forPointeeType is set, the query is for the pointee of a pointer,
/// where a C++ class object may be a base subobject and only the non-virtual
/// alignment can be assumed.
CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo,
                                                 bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = getTBAAAccessInfo(AccessType: T);

  // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
  // that doesn't return the information we need to compute BaseInfo.

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(BitSize: Align);
    }
  }

  // Remember whether T itself was an array before stripping down to the
  // element type: array pointees always use full element alignment below.
  bool AlignForArray = T->isArrayType();

  // Analyze the base element type, so we don't get confused by incomplete
  // array types.
  T = getContext().getBaseElementType(QT: T);

  if (T->isIncompleteType()) {
    // We could try to replicate the logic from
    // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
    // type is incomplete, so it's impossible to test. We could try to reuse
    // getTypeAlignIfKnown, but that doesn't return the information we need
    // to set BaseInfo. So just ignore the possibility that the alignment is
    // greater than one.
    if (BaseInfo)
      *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
    return CharUnits::One();
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  const CXXRecordDecl *RD;
  if (T.getQualifiers().hasUnaligned()) {
    // __unaligned-qualified types get byte alignment.
    Alignment = CharUnits::One();
  } else if (forPointeeType && !AlignForArray &&
             (RD = T->getAsCXXRecordDecl())) {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    Alignment = getClassPointerAlignment(CD: RD);
  } else {
    Alignment = getContext().getTypeAlignInChars(T);
  }

  // Cap to the global maximum type alignment unless the alignment
  // was somehow explicit on the type.
  if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
    if (Alignment.getQuantity() > MaxAlign &&
        !getContext().isAlignmentRequired(T))
      Alignment = CharUnits::fromQuantity(Quantity: MaxAlign);
  }
  return Alignment;
}
8446
8447bool CodeGenModule::stopAutoInit() {
8448 unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter;
8449 if (StopAfter) {
8450 // This number is positive only when -ftrivial-auto-var-init-stop-after=* is
8451 // used
8452 if (NumAutoVarInit >= StopAfter) {
8453 return true;
8454 }
8455 if (!NumAutoVarInit) {
8456 getDiags().Report(DiagID: diag::warn_trivial_auto_var_limit)
8457 << StopAfter
8458 << (getContext().getLangOpts().getTrivialAutoVarInit() ==
8459 LangOptions::TrivialAutoVarInitKind::Zero
8460 ? "zero"
8461 : "pattern");
8462 }
8463 ++NumAutoVarInit;
8464 }
8465 return false;
8466}
8467
/// Print the postfix appended to the name of an externalized static device
/// variable or internal device-side function (CUDA/HIP): a static/intern
/// marker followed by either the user-provided CUID hash or a locally
/// computed file-and-macro hash.
void CodeGenModule::printPostfixForExternalizedDecl(llvm::raw_ostream &OS,
                                                    const Decl *D) const {
  // ptxas does not allow '.' in symbol names. On the other hand, HIP prefers
  // postfix beginning with '.' since the symbol name can be demangled.
  if (LangOpts.HIP)
    OS << (isa<VarDecl>(Val: D) ? ".static." : ".intern.");
  else
    OS << (isa<VarDecl>(Val: D) ? "__static__" : "__intern__");

  // If the CUID is not specified we try to generate a unique postfix.
  if (getLangOpts().CUID.empty()) {
    SourceManager &SM = getContext().getSourceManager();
    PresumedLoc PLoc = SM.getPresumedLoc(Loc: D->getLocation());
    assert(PLoc.isValid() && "Source location is expected to be valid.");

    // Get the hash of the user defined macros.
    llvm::MD5 Hash;
    llvm::MD5::MD5Result Result;
    for (const auto &Arg : PreprocessorOpts.Macros)
      Hash.update(Str: Arg.first);
    Hash.final(Result);

    // Get the UniqueID for the file containing the decl.
    llvm::sys::fs::UniqueID ID;
    auto Status = FS->status(Path: PLoc.getFilename());
    if (!Status) {
      // The presumed location may name a nonexistent file via a #line
      // directive; retry with line directives ignored.
      PLoc = SM.getPresumedLoc(Loc: D->getLocation(), /*UseLineDirectives=*/false);
      assert(PLoc.isValid() && "Source location is expected to be valid.");
      Status = FS->status(Path: PLoc.getFilename());
    }
    if (!Status) {
      // Diagnose but keep going; ID stays default-constructed.
      SM.getDiagnostics().Report(DiagID: diag::err_cannot_open_file)
          << PLoc.getFilename() << Status.getError().message();
    } else {
      ID = Status->getUniqueID();
    }
    // Postfix layout: <file id><device id>_<8 hex digits of macro hash>.
    OS << llvm::format(Fmt: "%x", Vals: ID.getFile()) << llvm::format(Fmt: "%x", Vals: ID.getDevice())
       << "_" << llvm::utohexstr(X: Result.low(), /*LowerCase=*/true, /*Width=*/8);
  } else {
    OS << getContext().getCUIDHash();
  }
}
8510
/// Move all state that drives lazy (deferred) emission into \p NewBuilder so
/// a subsequent CodeGenModule can continue emitting decls this one deferred.
/// The asserts verify that this builder's state has been fully processed and
/// that the receiving builder is still pristine.
void CodeGenModule::moveLazyEmissionStates(CodeGenModule *NewBuilder) {
  assert(DeferredDeclsToEmit.empty() &&
         "Should have emitted all decls deferred to emit.");
  assert(NewBuilder->DeferredDecls.empty() &&
         "Newly created module should not have deferred decls");
  NewBuilder->DeferredDecls = std::move(DeferredDecls);
  assert(EmittedDeferredDecls.empty() &&
         "Still have (unmerged) EmittedDeferredDecls deferred decls");

  assert(NewBuilder->DeferredVTables.empty() &&
         "Newly created module should not have deferred vtables");
  NewBuilder->DeferredVTables = std::move(DeferredVTables);

  assert(NewBuilder->MangledDeclNames.empty() &&
         "Newly created module should not have mangled decl names");
  assert(NewBuilder->Manglings.empty() &&
         "Newly created module should not have manglings");
  NewBuilder->Manglings = std::move(Manglings);

  NewBuilder->WeakRefReferences = std::move(WeakRefReferences);

  // Keep the mangling context so names stay consistent across modules.
  NewBuilder->ABI->MangleCtx = std::move(ABI->MangleCtx);
}
8534
8535std::string CodeGenModule::getPFPFieldName(const FieldDecl *FD) {
8536 std::string OutName;
8537 llvm::raw_string_ostream Out(OutName);
8538 getCXXABI().getMangleContext().mangleCanonicalTypeName(
8539 T: getContext().getCanonicalTagType(TD: FD->getParent()), Out, NormalizeIntegers: false);
8540 Out << "." << FD->getName();
8541 return OutName;
8542}
8543
/// Return true if \p RD must get a vector deleting destructor definition
/// (array-delete support) on targets that emit them.
bool CodeGenModule::classNeedsVectorDestructor(const CXXRecordDecl *RD) {
  // Only relevant when the target emits vector deleting dtors at all.
  if (!Context.getTargetInfo().emitVectorDeletingDtors(Context.getLangOpts()))
    return false;
  CXXDestructorDecl *Dtor = RD->getDestructor();
  // The compiler can't know if new[]/delete[] will be used outside of the DLL,
  // so just force vector deleting destructor emission if dllexport is present.
  // This matches MSVC behavior.
  if (Dtor && Dtor->isVirtual() && Dtor->hasAttr<DLLExportAttr>())
    return true;

  // Otherwise, only when requireVectorDestructorDefinition recorded a need.
  return RequireVectorDeletingDtor.count(Ptr: RD);
}
8556
/// Record that \p RD needs a real vector deleting destructor body, undoing
/// the lazy scalar-dtor-plus-alias scheme if it was already emitted, and
/// queue the vector deleting destructor for emission.
void CodeGenModule::requireVectorDestructorDefinition(const CXXRecordDecl *RD) {
  if (!Context.getTargetInfo().emitVectorDeletingDtors(Context.getLangOpts()))
    return;
  RequireVectorDeletingDtor.insert(Ptr: RD);

  // To reduce code size in general case we lazily emit scalar deleting
  // destructor definition and an alias from vector deleting destructor to
  // scalar deleting destructor. It may happen that we first emitted the scalar
  // deleting destructor definition and the alias and then discovered that the
  // definition of the vector deleting destructor is required. Then we need to
  // remove the alias and the scalar deleting destructor and queue vector
  // deleting destructor body for emission. Check if that is the case.
  CXXDestructorDecl *DtorD = RD->getDestructor();
  GlobalDecl ScalarDtorGD(DtorD, Dtor_Deleting);
  StringRef MangledName = getMangledName(GD: ScalarDtorGD);
  llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName);
  GlobalDecl VectorDtorGD(DtorD, Dtor_VectorDeleting);
  if (Entry && !Entry->isDeclaration()) {
    // The scalar deleting dtor was already defined, so the vector one must
    // currently be an alias to it; replace both with a fresh declaration of
    // the vector deleting destructor.
    StringRef VDName = getMangledName(GD: VectorDtorGD);
    llvm::GlobalValue *VDEntry = GetGlobalValue(Name: VDName);
    // It exists and it should be an alias.
    assert(VDEntry && isa<llvm::GlobalAlias>(VDEntry));
    auto *NewFn = llvm::Function::Create(
        Ty: cast<llvm::FunctionType>(Val: VDEntry->getValueType()),
        Linkage: llvm::Function::ExternalLinkage, N: VDName, M: &getModule());
    SetFunctionAttributes(GD: VectorDtorGD, F: NewFn, /*IsIncompleteFunction*/ false,
                          /*IsThunk*/ false);
    NewFn->takeName(V: VDEntry);
    VDEntry->replaceAllUsesWith(V: NewFn);
    VDEntry->eraseFromParent();
    Entry->replaceAllUsesWith(V: NewFn);
    Entry->eraseFromParent();
  }
  // Always add a deferred decl to emit once we confirmed that vector deleting
  // destructor definition is required. That helps to enforce its generation
  // even if destructor is only declared.
  addDeferredDeclToEmit(GD: VectorDtorGD);
}
8595