1//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This coordinates the per-module state used while generating code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenModule.h"
14#include "ABIInfo.h"
15#include "CGBlocks.h"
16#include "CGCUDARuntime.h"
17#include "CGCXXABI.h"
18#include "CGCall.h"
19#include "CGDebugInfo.h"
20#include "CGHLSLRuntime.h"
21#include "CGObjCRuntime.h"
22#include "CGOpenCLRuntime.h"
23#include "CGOpenMPRuntime.h"
24#include "CGOpenMPRuntimeGPU.h"
25#include "CodeGenFunction.h"
26#include "CodeGenPGO.h"
27#include "ConstantEmitter.h"
28#include "CoverageMappingGen.h"
29#include "TargetInfo.h"
30#include "clang/AST/ASTContext.h"
31#include "clang/AST/ASTLambda.h"
32#include "clang/AST/CharUnits.h"
33#include "clang/AST/Decl.h"
34#include "clang/AST/DeclCXX.h"
35#include "clang/AST/DeclObjC.h"
36#include "clang/AST/DeclTemplate.h"
37#include "clang/AST/Mangle.h"
38#include "clang/AST/RecursiveASTVisitor.h"
39#include "clang/AST/StmtVisitor.h"
40#include "clang/Basic/Builtins.h"
41#include "clang/Basic/CodeGenOptions.h"
42#include "clang/Basic/Diagnostic.h"
43#include "clang/Basic/DiagnosticFrontend.h"
44#include "clang/Basic/Module.h"
45#include "clang/Basic/SourceManager.h"
46#include "clang/Basic/TargetInfo.h"
47#include "clang/Basic/Version.h"
48#include "clang/CodeGen/BackendUtil.h"
49#include "clang/CodeGen/ConstantInitBuilder.h"
50#include "llvm/ADT/STLExtras.h"
51#include "llvm/ADT/StringExtras.h"
52#include "llvm/ADT/StringSwitch.h"
53#include "llvm/Analysis/TargetLibraryInfo.h"
54#include "llvm/BinaryFormat/ELF.h"
55#include "llvm/IR/AttributeMask.h"
56#include "llvm/IR/CallingConv.h"
57#include "llvm/IR/DataLayout.h"
58#include "llvm/IR/Intrinsics.h"
59#include "llvm/IR/LLVMContext.h"
60#include "llvm/IR/Module.h"
61#include "llvm/IR/ProfileSummary.h"
62#include "llvm/ProfileData/InstrProfReader.h"
63#include "llvm/ProfileData/SampleProf.h"
64#include "llvm/Support/ARMBuildAttributes.h"
65#include "llvm/Support/CRC.h"
66#include "llvm/Support/CodeGen.h"
67#include "llvm/Support/CommandLine.h"
68#include "llvm/Support/ConvertUTF.h"
69#include "llvm/Support/ErrorHandling.h"
70#include "llvm/Support/Hash.h"
71#include "llvm/Support/TimeProfiler.h"
72#include "llvm/TargetParser/AArch64TargetParser.h"
73#include "llvm/TargetParser/RISCVISAInfo.h"
74#include "llvm/TargetParser/Triple.h"
75#include "llvm/TargetParser/X86TargetParser.h"
76#include "llvm/Transforms/Instrumentation/KCFI.h"
77#include "llvm/Transforms/Utils/BuildLibCalls.h"
78#include <optional>
79#include <set>
80
81using namespace clang;
82using namespace CodeGen;
83
// Hidden, experimental flag: when set, only a reduced amount of coverage
// mapping information is emitted (consumed by coverage-mapping generation).
static llvm::cl::opt<bool> LimitedCoverage(
    "limited-coverage-experimental", llvm::cl::Hidden,
    llvm::cl::desc("Emit limited coverage mapping information (experimental)"));

// Section name into which global annotation metadata is placed.
static const char AnnotationSection[] = "llvm.metadata";
// Name of the module-level named metadata used for errno TBAA information.
static constexpr auto ErrnoTBAAMDName = "llvm.errno.tbaa";
90
/// Construct the C++ ABI implementation matching the target's configured
/// C++ ABI kind. All Itanium-family variants (including ARM/AArch64, MIPS,
/// WebAssembly and XL flavors) share one implementation; only the Microsoft
/// ABI is handled separately.
static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
  switch (CGM.getContext().getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(CGM);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(CGM);
  }

  llvm_unreachable("invalid C++ ABI kind");
}
110
111static std::unique_ptr<TargetCodeGenInfo>
112createTargetCodeGenInfo(CodeGenModule &CGM) {
113 const TargetInfo &Target = CGM.getTarget();
114 const llvm::Triple &Triple = Target.getTriple();
115 const CodeGenOptions &CodeGenOpts = CGM.getCodeGenOpts();
116
117 switch (Triple.getArch()) {
118 default:
119 return createDefaultTargetCodeGenInfo(CGM);
120
121 case llvm::Triple::m68k:
122 return createM68kTargetCodeGenInfo(CGM);
123 case llvm::Triple::mips:
124 case llvm::Triple::mipsel:
125 if (Triple.getOS() == llvm::Triple::Win32)
126 return createWindowsMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true);
127 return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true);
128
129 case llvm::Triple::mips64:
130 case llvm::Triple::mips64el:
131 return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/false);
132
133 case llvm::Triple::avr: {
134 // For passing parameters, R8~R25 are used on avr, and R18~R25 are used
135 // on avrtiny. For passing return value, R18~R25 are used on avr, and
136 // R22~R25 are used on avrtiny.
137 unsigned NPR = Target.getABI() == "avrtiny" ? 6 : 18;
138 unsigned NRR = Target.getABI() == "avrtiny" ? 4 : 8;
139 return createAVRTargetCodeGenInfo(CGM, NPR, NRR);
140 }
141
142 case llvm::Triple::aarch64:
143 case llvm::Triple::aarch64_32:
144 case llvm::Triple::aarch64_be: {
145 AArch64ABIKind Kind = AArch64ABIKind::AAPCS;
146 if (Target.getABI() == "darwinpcs")
147 Kind = AArch64ABIKind::DarwinPCS;
148 else if (Triple.isOSWindows())
149 return createWindowsAArch64TargetCodeGenInfo(CGM, K: AArch64ABIKind::Win64);
150 else if (Target.getABI() == "aapcs-soft")
151 Kind = AArch64ABIKind::AAPCSSoft;
152
153 return createAArch64TargetCodeGenInfo(CGM, Kind);
154 }
155
156 case llvm::Triple::wasm32:
157 case llvm::Triple::wasm64: {
158 WebAssemblyABIKind Kind = WebAssemblyABIKind::MVP;
159 if (Target.getABI() == "experimental-mv")
160 Kind = WebAssemblyABIKind::ExperimentalMV;
161 return createWebAssemblyTargetCodeGenInfo(CGM, K: Kind);
162 }
163
164 case llvm::Triple::arm:
165 case llvm::Triple::armeb:
166 case llvm::Triple::thumb:
167 case llvm::Triple::thumbeb: {
168 if (Triple.getOS() == llvm::Triple::Win32)
169 return createWindowsARMTargetCodeGenInfo(CGM, K: ARMABIKind::AAPCS_VFP);
170
171 ARMABIKind Kind = ARMABIKind::AAPCS;
172 StringRef ABIStr = Target.getABI();
173 if (ABIStr == "apcs-gnu")
174 Kind = ARMABIKind::APCS;
175 else if (ABIStr == "aapcs16")
176 Kind = ARMABIKind::AAPCS16_VFP;
177 else if (CodeGenOpts.FloatABI == "hard" ||
178 (CodeGenOpts.FloatABI != "soft" && Triple.isHardFloatABI()))
179 Kind = ARMABIKind::AAPCS_VFP;
180
181 return createARMTargetCodeGenInfo(CGM, Kind);
182 }
183
184 case llvm::Triple::ppc: {
185 if (Triple.isOSAIX())
186 return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/false);
187
188 bool IsSoftFloat =
189 CodeGenOpts.FloatABI == "soft" || Target.hasFeature(Feature: "spe");
190 return createPPC32TargetCodeGenInfo(CGM, SoftFloatABI: IsSoftFloat);
191 }
192 case llvm::Triple::ppcle: {
193 bool IsSoftFloat =
194 CodeGenOpts.FloatABI == "soft" || Target.hasFeature(Feature: "spe");
195 return createPPC32TargetCodeGenInfo(CGM, SoftFloatABI: IsSoftFloat);
196 }
197 case llvm::Triple::ppc64:
198 if (Triple.isOSAIX())
199 return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/true);
200
201 if (Triple.isOSBinFormatELF()) {
202 PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv1;
203 if (Target.getABI() == "elfv2")
204 Kind = PPC64_SVR4_ABIKind::ELFv2;
205 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
206
207 return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, SoftFloatABI: IsSoftFloat);
208 }
209 return createPPC64TargetCodeGenInfo(CGM);
210 case llvm::Triple::ppc64le: {
211 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
212 PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv2;
213 if (Target.getABI() == "elfv1")
214 Kind = PPC64_SVR4_ABIKind::ELFv1;
215 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
216
217 return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, SoftFloatABI: IsSoftFloat);
218 }
219
220 case llvm::Triple::nvptx:
221 case llvm::Triple::nvptx64:
222 return createNVPTXTargetCodeGenInfo(CGM);
223
224 case llvm::Triple::msp430:
225 return createMSP430TargetCodeGenInfo(CGM);
226
227 case llvm::Triple::riscv32:
228 case llvm::Triple::riscv64:
229 case llvm::Triple::riscv32be:
230 case llvm::Triple::riscv64be: {
231 StringRef ABIStr = Target.getABI();
232 unsigned XLen = Target.getPointerWidth(AddrSpace: LangAS::Default);
233 unsigned ABIFLen = 0;
234 if (ABIStr.ends_with(Suffix: "f"))
235 ABIFLen = 32;
236 else if (ABIStr.ends_with(Suffix: "d"))
237 ABIFLen = 64;
238 bool EABI = ABIStr.ends_with(Suffix: "e");
239 return createRISCVTargetCodeGenInfo(CGM, XLen, FLen: ABIFLen, EABI);
240 }
241
242 case llvm::Triple::systemz: {
243 bool SoftFloat = CodeGenOpts.FloatABI == "soft";
244 bool HasVector = !SoftFloat && Target.getABI() == "vector";
245 return createSystemZTargetCodeGenInfo(CGM, HasVector, SoftFloatABI: SoftFloat);
246 }
247
248 case llvm::Triple::tce:
249 case llvm::Triple::tcele:
250 return createTCETargetCodeGenInfo(CGM);
251
252 case llvm::Triple::x86: {
253 bool IsDarwinVectorABI = Triple.isOSDarwin();
254 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
255
256 if (Triple.getOS() == llvm::Triple::Win32) {
257 return createWinX86_32TargetCodeGenInfo(
258 CGM, DarwinVectorABI: IsDarwinVectorABI, Win32StructABI: IsWin32FloatStructABI,
259 NumRegisterParameters: CodeGenOpts.NumRegisterParameters);
260 }
261 return createX86_32TargetCodeGenInfo(
262 CGM, DarwinVectorABI: IsDarwinVectorABI, Win32StructABI: IsWin32FloatStructABI,
263 NumRegisterParameters: CodeGenOpts.NumRegisterParameters, SoftFloatABI: CodeGenOpts.FloatABI == "soft");
264 }
265
266 case llvm::Triple::x86_64: {
267 StringRef ABI = Target.getABI();
268 X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512
269 : ABI == "avx" ? X86AVXABILevel::AVX
270 : X86AVXABILevel::None);
271
272 switch (Triple.getOS()) {
273 case llvm::Triple::UEFI:
274 case llvm::Triple::Win32:
275 return createWinX86_64TargetCodeGenInfo(CGM, AVXLevel);
276 default:
277 return createX86_64TargetCodeGenInfo(CGM, AVXLevel);
278 }
279 }
280 case llvm::Triple::hexagon:
281 return createHexagonTargetCodeGenInfo(CGM);
282 case llvm::Triple::lanai:
283 return createLanaiTargetCodeGenInfo(CGM);
284 case llvm::Triple::r600:
285 return createAMDGPUTargetCodeGenInfo(CGM);
286 case llvm::Triple::amdgcn:
287 return createAMDGPUTargetCodeGenInfo(CGM);
288 case llvm::Triple::sparc:
289 return createSparcV8TargetCodeGenInfo(CGM);
290 case llvm::Triple::sparcv9:
291 return createSparcV9TargetCodeGenInfo(CGM);
292 case llvm::Triple::xcore:
293 return createXCoreTargetCodeGenInfo(CGM);
294 case llvm::Triple::arc:
295 return createARCTargetCodeGenInfo(CGM);
296 case llvm::Triple::spir:
297 case llvm::Triple::spir64:
298 return createCommonSPIRTargetCodeGenInfo(CGM);
299 case llvm::Triple::spirv32:
300 case llvm::Triple::spirv64:
301 case llvm::Triple::spirv:
302 return createSPIRVTargetCodeGenInfo(CGM);
303 case llvm::Triple::dxil:
304 return createDirectXTargetCodeGenInfo(CGM);
305 case llvm::Triple::ve:
306 return createVETargetCodeGenInfo(CGM);
307 case llvm::Triple::csky: {
308 bool IsSoftFloat = !Target.hasFeature(Feature: "hard-float-abi");
309 bool hasFP64 =
310 Target.hasFeature(Feature: "fpuv2_df") || Target.hasFeature(Feature: "fpuv3_df");
311 return createCSKYTargetCodeGenInfo(CGM, FLen: IsSoftFloat ? 0
312 : hasFP64 ? 64
313 : 32);
314 }
315 case llvm::Triple::bpfeb:
316 case llvm::Triple::bpfel:
317 return createBPFTargetCodeGenInfo(CGM);
318 case llvm::Triple::loongarch32:
319 case llvm::Triple::loongarch64: {
320 StringRef ABIStr = Target.getABI();
321 unsigned ABIFRLen = 0;
322 if (ABIStr.ends_with(Suffix: "f"))
323 ABIFRLen = 32;
324 else if (ABIStr.ends_with(Suffix: "d"))
325 ABIFRLen = 64;
326 return createLoongArchTargetCodeGenInfo(
327 CGM, GRLen: Target.getPointerWidth(AddrSpace: LangAS::Default), FLen: ABIFRLen);
328 }
329 }
330}
331
332const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
333 if (!TheTargetCodeGenInfo)
334 TheTargetCodeGenInfo = createTargetCodeGenInfo(CGM&: *this);
335 return *TheTargetCodeGenInfo;
336}
337
/// Asserts-only sanity check: verify that the ABI alignments Clang's
/// TargetInfo reports for fundamental types agree with the alignments
/// implied by the target's LLVM data layout string. On any mismatch this
/// prints a diagnostic to stderr and calls abort(). A no-op in NDEBUG
/// builds and for non-standard ABI configurations.
static void checkDataLayoutConsistency(const TargetInfo &Target,
                                       llvm::LLVMContext &Context,
                                       const LangOptions &Opts) {
#ifndef NDEBUG
  // Don't verify non-standard ABI configurations.
  if (Opts.AlignDouble || Opts.OpenCL || Opts.HLSL)
    return;

  llvm::Triple Triple = Target.getTriple();
  llvm::DataLayout DL(Target.getDataLayoutString());
  // Compare the data layout's ABI alignment for Ty against Clang's
  // bit-based alignment; abort on disagreement.
  auto Check = [&](const char *Name, llvm::Type *Ty, unsigned Alignment) {
    llvm::Align DLAlign = DL.getABITypeAlign(Ty);
    llvm::Align ClangAlign(Alignment / 8);
    if (DLAlign != ClangAlign) {
      llvm::errs() << "For target " << Triple.str() << " type " << Name
                   << " mapping to " << *Ty << " has data layout alignment "
                   << DLAlign.value() << " while clang specifies "
                   << ClangAlign.value() << "\n";
      abort();
    }
  };

  Check("bool", llvm::Type::getIntNTy(Context, Target.BoolWidth),
        Target.BoolAlign);
  Check("short", llvm::Type::getIntNTy(Context, Target.ShortWidth),
        Target.ShortAlign);
  Check("int", llvm::Type::getIntNTy(Context, Target.IntWidth),
        Target.IntAlign);
  Check("long", llvm::Type::getIntNTy(Context, Target.LongWidth),
        Target.LongAlign);
  // FIXME: M68k specifies incorrect long long alignment in both LLVM and Clang.
  if (Triple.getArch() != llvm::Triple::m68k)
    Check("long long", llvm::Type::getIntNTy(Context, Target.LongLongWidth),
          Target.LongLongAlign);
  // FIXME: There are int128 alignment mismatches on multiple targets.
  if (Target.hasInt128Type() && !Target.getTargetOpts().ForceEnableInt128 &&
      !Triple.isAMDGPU() && !Triple.isSPIRV() &&
      Triple.getArch() != llvm::Triple::ve)
    Check("__int128", llvm::Type::getIntNTy(Context, 128), Target.Int128Align);

  // Floating-point types are only checked when the target supports them.
  if (Target.hasFloat16Type())
    Check("half", llvm::Type::getFloatingPointTy(Context, *Target.HalfFormat),
          Target.HalfAlign);
  if (Target.hasBFloat16Type())
    Check("bfloat", llvm::Type::getBFloatTy(Context), Target.BFloat16Align);
  Check("float", llvm::Type::getFloatingPointTy(Context, *Target.FloatFormat),
        Target.FloatAlign);
  Check("double", llvm::Type::getFloatingPointTy(Context, *Target.DoubleFormat),
        Target.DoubleAlign);
  Check("long double",
        llvm::Type::getFloatingPointTy(Context, *Target.LongDoubleFormat),
        Target.LongDoubleAlign);
  if (Target.hasFloat128Type())
    Check("__float128", llvm::Type::getFP128Ty(Context), Target.Float128Align);
  if (Target.hasIbm128Type())
    Check("__ibm128", llvm::Type::getPPC_FP128Ty(Context), Target.Ibm128Align);

  Check("void*", llvm::PointerType::getUnqual(Context), Target.PointerAlign);
#endif
}
398
399CodeGenModule::CodeGenModule(ASTContext &C,
400 IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
401 const HeaderSearchOptions &HSO,
402 const PreprocessorOptions &PPO,
403 const CodeGenOptions &CGO, llvm::Module &M,
404 DiagnosticsEngine &diags,
405 CoverageSourceInfo *CoverageInfo)
406 : Context(C), LangOpts(C.getLangOpts()), FS(FS), HeaderSearchOpts(HSO),
407 PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
408 Target(C.getTargetInfo()), ABI(createCXXABI(CGM&: *this)),
409 VMContext(M.getContext()), VTables(*this), StackHandler(diags),
410 SanitizerMD(new SanitizerMetadata(*this)),
411 AtomicOpts(Target.getAtomicOpts()) {
412
413 // Initialize the type cache.
414 Types.reset(p: new CodeGenTypes(*this));
415 llvm::LLVMContext &LLVMContext = M.getContext();
416 VoidTy = llvm::Type::getVoidTy(C&: LLVMContext);
417 Int8Ty = llvm::Type::getInt8Ty(C&: LLVMContext);
418 Int16Ty = llvm::Type::getInt16Ty(C&: LLVMContext);
419 Int32Ty = llvm::Type::getInt32Ty(C&: LLVMContext);
420 Int64Ty = llvm::Type::getInt64Ty(C&: LLVMContext);
421 HalfTy = llvm::Type::getHalfTy(C&: LLVMContext);
422 BFloatTy = llvm::Type::getBFloatTy(C&: LLVMContext);
423 FloatTy = llvm::Type::getFloatTy(C&: LLVMContext);
424 DoubleTy = llvm::Type::getDoubleTy(C&: LLVMContext);
425 PointerWidthInBits = C.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default);
426 PointerAlignInBytes =
427 C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getPointerAlign(AddrSpace: LangAS::Default))
428 .getQuantity();
429 SizeSizeInBytes =
430 C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getMaxPointerWidth()).getQuantity();
431 IntAlignInBytes =
432 C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getIntAlign()).getQuantity();
433 CharTy =
434 llvm::IntegerType::get(C&: LLVMContext, NumBits: C.getTargetInfo().getCharWidth());
435 IntTy = llvm::IntegerType::get(C&: LLVMContext, NumBits: C.getTargetInfo().getIntWidth());
436 IntPtrTy = llvm::IntegerType::get(C&: LLVMContext,
437 NumBits: C.getTargetInfo().getMaxPointerWidth());
438 Int8PtrTy = llvm::PointerType::get(C&: LLVMContext,
439 AddressSpace: C.getTargetAddressSpace(AS: LangAS::Default));
440 const llvm::DataLayout &DL = M.getDataLayout();
441 AllocaInt8PtrTy =
442 llvm::PointerType::get(C&: LLVMContext, AddressSpace: DL.getAllocaAddrSpace());
443 GlobalsInt8PtrTy =
444 llvm::PointerType::get(C&: LLVMContext, AddressSpace: DL.getDefaultGlobalsAddressSpace());
445 ConstGlobalsPtrTy = llvm::PointerType::get(
446 C&: LLVMContext, AddressSpace: C.getTargetAddressSpace(AS: GetGlobalConstantAddressSpace()));
447 ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();
448
449 // Build C++20 Module initializers.
450 // TODO: Add Microsoft here once we know the mangling required for the
451 // initializers.
452 CXX20ModuleInits =
453 LangOpts.CPlusPlusModules && getCXXABI().getMangleContext().getKind() ==
454 ItaniumMangleContext::MK_Itanium;
455
456 RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
457
458 if (LangOpts.ObjC)
459 createObjCRuntime();
460 if (LangOpts.OpenCL)
461 createOpenCLRuntime();
462 if (LangOpts.OpenMP)
463 createOpenMPRuntime();
464 if (LangOpts.CUDA)
465 createCUDARuntime();
466 if (LangOpts.HLSL)
467 createHLSLRuntime();
468
469 // Enable TBAA unless it's suppressed. TSan and TySan need TBAA even at O0.
470 if (LangOpts.Sanitize.hasOneOf(K: SanitizerKind::Thread | SanitizerKind::Type) ||
471 (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
472 TBAA.reset(p: new CodeGenTBAA(Context, getTypes(), TheModule, CodeGenOpts,
473 getLangOpts()));
474
475 // If debug info or coverage generation is enabled, create the CGDebugInfo
476 // object.
477 if (CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo ||
478 CodeGenOpts.CoverageNotesFile.size() ||
479 CodeGenOpts.CoverageDataFile.size())
480 DebugInfo.reset(p: new CGDebugInfo(*this));
481 else if (getTriple().isOSWindows())
482 // On Windows targets, we want to emit compiler info even if debug info is
483 // otherwise disabled. Use a temporary CGDebugInfo instance to emit only
484 // basic compiler metadata.
485 CGDebugInfo(*this);
486
487 Block.GlobalUniqueCount = 0;
488
489 if (C.getLangOpts().ObjC)
490 ObjCData.reset(p: new ObjCEntrypoints());
491
492 if (CodeGenOpts.hasProfileClangUse()) {
493 auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
494 Path: CodeGenOpts.ProfileInstrumentUsePath, FS&: *FS,
495 RemappingPath: CodeGenOpts.ProfileRemappingFile);
496 if (auto E = ReaderOrErr.takeError()) {
497 llvm::handleAllErrors(E: std::move(E), Handlers: [&](const llvm::ErrorInfoBase &EI) {
498 Diags.Report(DiagID: diag::err_reading_profile)
499 << CodeGenOpts.ProfileInstrumentUsePath << EI.message();
500 });
501 return;
502 }
503 PGOReader = std::move(ReaderOrErr.get());
504 }
505
506 // If coverage mapping generation is enabled, create the
507 // CoverageMappingModuleGen object.
508 if (CodeGenOpts.CoverageMapping)
509 CoverageMapping.reset(p: new CoverageMappingModuleGen(*this, *CoverageInfo));
510
511 // Generate the module name hash here if needed.
512 if (CodeGenOpts.UniqueInternalLinkageNames &&
513 !getModule().getSourceFileName().empty()) {
514 std::string Path = getModule().getSourceFileName();
515 // Check if a path substitution is needed from the MacroPrefixMap.
516 for (const auto &Entry : LangOpts.MacroPrefixMap)
517 if (Path.rfind(str: Entry.first, pos: 0) != std::string::npos) {
518 Path = Entry.second + Path.substr(pos: Entry.first.size());
519 break;
520 }
521 ModuleNameHash = llvm::getUniqueInternalLinkagePostfix(FName: Path);
522 }
523
524 // Record mregparm value now so it is visible through all of codegen.
525 if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
526 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "NumRegisterParameters",
527 Val: CodeGenOpts.NumRegisterParameters);
528
529 // If there are any functions that are marked for Windows secure hot-patching,
530 // then build the list of functions now.
531 if (!CGO.MSSecureHotPatchFunctionsFile.empty() ||
532 !CGO.MSSecureHotPatchFunctionsList.empty()) {
533 if (!CGO.MSSecureHotPatchFunctionsFile.empty()) {
534 auto BufOrErr = FS->getBufferForFile(Name: CGO.MSSecureHotPatchFunctionsFile);
535 if (BufOrErr) {
536 const llvm::MemoryBuffer &FileBuffer = **BufOrErr;
537 for (llvm::line_iterator I(FileBuffer.getMemBufferRef(), true), E;
538 I != E; ++I)
539 this->MSHotPatchFunctions.push_back(x: std::string{*I});
540 } else {
541 auto &DE = Context.getDiagnostics();
542 DE.Report(DiagID: diag::err_open_hotpatch_file_failed)
543 << CGO.MSSecureHotPatchFunctionsFile
544 << BufOrErr.getError().message();
545 }
546 }
547
548 for (const auto &FuncName : CGO.MSSecureHotPatchFunctionsList)
549 this->MSHotPatchFunctions.push_back(x: FuncName);
550
551 llvm::sort(C&: this->MSHotPatchFunctions);
552 }
553
554 if (!Context.getAuxTargetInfo())
555 checkDataLayoutConsistency(Target: Context.getTargetInfo(), Context&: LLVMContext, Opts: LangOpts);
556}
557
558CodeGenModule::~CodeGenModule() {}
559
560void CodeGenModule::createObjCRuntime() {
561 // This is just isGNUFamily(), but we want to force implementors of
562 // new ABIs to decide how best to do this.
563 switch (LangOpts.ObjCRuntime.getKind()) {
564 case ObjCRuntime::GNUstep:
565 case ObjCRuntime::GCC:
566 case ObjCRuntime::ObjFW:
567 ObjCRuntime.reset(p: CreateGNUObjCRuntime(CGM&: *this));
568 return;
569
570 case ObjCRuntime::FragileMacOSX:
571 case ObjCRuntime::MacOSX:
572 case ObjCRuntime::iOS:
573 case ObjCRuntime::WatchOS:
574 ObjCRuntime.reset(p: CreateMacObjCRuntime(CGM&: *this));
575 return;
576 }
577 llvm_unreachable("bad runtime kind");
578}
579
580void CodeGenModule::createOpenCLRuntime() {
581 OpenCLRuntime.reset(p: new CGOpenCLRuntime(*this));
582}
583
584void CodeGenModule::createOpenMPRuntime() {
585 if (!LangOpts.OMPHostIRFile.empty() && !FS->exists(Path: LangOpts.OMPHostIRFile))
586 Diags.Report(DiagID: diag::err_omp_host_ir_file_not_found)
587 << LangOpts.OMPHostIRFile;
588
589 // Select a specialized code generation class based on the target, if any.
590 // If it does not exist use the default implementation.
591 switch (getTriple().getArch()) {
592 case llvm::Triple::nvptx:
593 case llvm::Triple::nvptx64:
594 case llvm::Triple::amdgcn:
595 case llvm::Triple::spirv64:
596 assert(
597 getLangOpts().OpenMPIsTargetDevice &&
598 "OpenMP AMDGPU/NVPTX/SPIRV is only prepared to deal with device code.");
599 OpenMPRuntime.reset(p: new CGOpenMPRuntimeGPU(*this));
600 break;
601 default:
602 if (LangOpts.OpenMPSimd)
603 OpenMPRuntime.reset(p: new CGOpenMPSIMDRuntime(*this));
604 else
605 OpenMPRuntime.reset(p: new CGOpenMPRuntime(*this));
606 break;
607 }
608}
609
610void CodeGenModule::createCUDARuntime() {
611 CUDARuntime.reset(p: CreateNVCUDARuntime(CGM&: *this));
612}
613
614void CodeGenModule::createHLSLRuntime() {
615 HLSLRuntime.reset(p: new CGHLSLRuntime(*this));
616}
617
// Record that the global with the given mangled name should be replaced by
// C; the actual replacement is performed later by applyReplacements().
void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
  Replacements[Name] = C;
}
621
622void CodeGenModule::applyReplacements() {
623 for (auto &I : Replacements) {
624 StringRef MangledName = I.first;
625 llvm::Constant *Replacement = I.second;
626 llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName);
627 if (!Entry)
628 continue;
629 auto *OldF = cast<llvm::Function>(Val: Entry);
630 auto *NewF = dyn_cast<llvm::Function>(Val: Replacement);
631 if (!NewF) {
632 if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Val: Replacement)) {
633 NewF = dyn_cast<llvm::Function>(Val: Alias->getAliasee());
634 } else {
635 auto *CE = cast<llvm::ConstantExpr>(Val: Replacement);
636 assert(CE->getOpcode() == llvm::Instruction::BitCast ||
637 CE->getOpcode() == llvm::Instruction::GetElementPtr);
638 NewF = dyn_cast<llvm::Function>(Val: CE->getOperand(i_nocapture: 0));
639 }
640 }
641
642 // Replace old with new, but keep the old order.
643 OldF->replaceAllUsesWith(V: Replacement);
644 if (NewF) {
645 NewF->removeFromParent();
646 OldF->getParent()->getFunctionList().insertAfter(where: OldF->getIterator(),
647 New: NewF);
648 }
649 OldF->eraseFromParent();
650 }
651}
652
653void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
654 GlobalValReplacements.push_back(Elt: std::make_pair(x&: GV, y&: C));
655}
656
657void CodeGenModule::applyGlobalValReplacements() {
658 for (auto &I : GlobalValReplacements) {
659 llvm::GlobalValue *GV = I.first;
660 llvm::Constant *C = I.second;
661
662 GV->replaceAllUsesWith(V: C);
663 GV->eraseFromParent();
664 }
665}
666
667// This is only used in aliases that we created and we know they have a
668// linear structure.
669static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) {
670 const llvm::Constant *C;
671 if (auto *GA = dyn_cast<llvm::GlobalAlias>(Val: GV))
672 C = GA->getAliasee();
673 else if (auto *GI = dyn_cast<llvm::GlobalIFunc>(Val: GV))
674 C = GI->getResolver();
675 else
676 return GV;
677
678 const auto *AliaseeGV = dyn_cast<llvm::GlobalValue>(Val: C->stripPointerCasts());
679 if (!AliaseeGV)
680 return nullptr;
681
682 const llvm::GlobalValue *FinalGV = AliaseeGV->getAliaseeObject();
683 if (FinalGV == GV)
684 return nullptr;
685
686 return FinalGV;
687}
688
689static bool checkAliasedGlobal(
690 const ASTContext &Context, DiagnosticsEngine &Diags, SourceLocation Location,
691 bool IsIFunc, const llvm::GlobalValue *Alias, const llvm::GlobalValue *&GV,
692 const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames,
693 SourceRange AliasRange) {
694 GV = getAliasedGlobal(GV: Alias);
695 if (!GV) {
696 Diags.Report(Loc: Location, DiagID: diag::err_cyclic_alias) << IsIFunc;
697 return false;
698 }
699
700 if (GV->hasCommonLinkage()) {
701 const llvm::Triple &Triple = Context.getTargetInfo().getTriple();
702 if (Triple.getObjectFormat() == llvm::Triple::XCOFF) {
703 Diags.Report(Loc: Location, DiagID: diag::err_alias_to_common);
704 return false;
705 }
706 }
707
708 if (GV->isDeclaration()) {
709 Diags.Report(Loc: Location, DiagID: diag::err_alias_to_undefined) << IsIFunc << IsIFunc;
710 Diags.Report(Loc: Location, DiagID: diag::note_alias_requires_mangled_name)
711 << IsIFunc << IsIFunc;
712 // Provide a note if the given function is not found and exists as a
713 // mangled name.
714 for (const auto &[Decl, Name] : MangledDeclNames) {
715 if (const auto *ND = dyn_cast<NamedDecl>(Val: Decl.getDecl())) {
716 IdentifierInfo *II = ND->getIdentifier();
717 if (II && II->getName() == GV->getName()) {
718 Diags.Report(Loc: Location, DiagID: diag::note_alias_mangled_name_alternative)
719 << Name
720 << FixItHint::CreateReplacement(
721 RemoveRange: AliasRange,
722 Code: (Twine(IsIFunc ? "ifunc" : "alias") + "(\"" + Name + "\")")
723 .str());
724 }
725 }
726 }
727 return false;
728 }
729
730 if (IsIFunc) {
731 // Check resolver function type.
732 const auto *F = dyn_cast<llvm::Function>(Val: GV);
733 if (!F) {
734 Diags.Report(Loc: Location, DiagID: diag::err_alias_to_undefined)
735 << IsIFunc << IsIFunc;
736 return false;
737 }
738
739 llvm::FunctionType *FTy = F->getFunctionType();
740 if (!FTy->getReturnType()->isPointerTy()) {
741 Diags.Report(Loc: Location, DiagID: diag::err_ifunc_resolver_return);
742 return false;
743 }
744 }
745
746 return true;
747}
748
749// Emit a warning if toc-data attribute is requested for global variables that
750// have aliases and remove the toc-data attribute.
751static void checkAliasForTocData(llvm::GlobalVariable *GVar,
752 const CodeGenOptions &CodeGenOpts,
753 DiagnosticsEngine &Diags,
754 SourceLocation Location) {
755 if (GVar->hasAttribute(Kind: "toc-data")) {
756 auto GVId = GVar->getName();
757 // Is this a global variable specified by the user as local?
758 if ((llvm::binary_search(Range: CodeGenOpts.TocDataVarsUserSpecified, Value&: GVId))) {
759 Diags.Report(Loc: Location, DiagID: diag::warn_toc_unsupported_type)
760 << GVId << "the variable has an alias";
761 }
762 llvm::AttributeSet CurrAttributes = GVar->getAttributes();
763 llvm::AttributeSet NewAttributes =
764 CurrAttributes.removeAttribute(C&: GVar->getContext(), Kind: "toc-data");
765 GVar->setAttributes(NewAttributes);
766 }
767}
768
769void CodeGenModule::checkAliases() {
770 // Check if the constructed aliases are well formed. It is really unfortunate
771 // that we have to do this in CodeGen, but we only construct mangled names
772 // and aliases during codegen.
773 bool Error = false;
774 DiagnosticsEngine &Diags = getDiags();
775 for (const GlobalDecl &GD : Aliases) {
776 const auto *D = cast<ValueDecl>(Val: GD.getDecl());
777 SourceLocation Location;
778 SourceRange Range;
779 bool IsIFunc = D->hasAttr<IFuncAttr>();
780 if (const Attr *A = D->getDefiningAttr()) {
781 Location = A->getLocation();
782 Range = A->getRange();
783 } else
784 llvm_unreachable("Not an alias or ifunc?");
785
786 StringRef MangledName = getMangledName(GD);
787 llvm::GlobalValue *Alias = GetGlobalValue(Ref: MangledName);
788 const llvm::GlobalValue *GV = nullptr;
789 if (!checkAliasedGlobal(Context: getContext(), Diags, Location, IsIFunc, Alias, GV,
790 MangledDeclNames, AliasRange: Range)) {
791 Error = true;
792 continue;
793 }
794
795 if (getContext().getTargetInfo().getTriple().isOSAIX())
796 if (const llvm::GlobalVariable *GVar =
797 dyn_cast<const llvm::GlobalVariable>(Val: GV))
798 checkAliasForTocData(GVar: const_cast<llvm::GlobalVariable *>(GVar),
799 CodeGenOpts: getCodeGenOpts(), Diags, Location);
800
801 llvm::Constant *Aliasee =
802 IsIFunc ? cast<llvm::GlobalIFunc>(Val: Alias)->getResolver()
803 : cast<llvm::GlobalAlias>(Val: Alias)->getAliasee();
804
805 llvm::GlobalValue *AliaseeGV;
806 if (auto CE = dyn_cast<llvm::ConstantExpr>(Val: Aliasee))
807 AliaseeGV = cast<llvm::GlobalValue>(Val: CE->getOperand(i_nocapture: 0));
808 else
809 AliaseeGV = cast<llvm::GlobalValue>(Val: Aliasee);
810
811 if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
812 StringRef AliasSection = SA->getName();
813 if (AliasSection != AliaseeGV->getSection())
814 Diags.Report(Loc: SA->getLocation(), DiagID: diag::warn_alias_with_section)
815 << AliasSection << IsIFunc << IsIFunc;
816 }
817
818 // We have to handle alias to weak aliases in here. LLVM itself disallows
819 // this since the object semantics would not match the IL one. For
820 // compatibility with gcc we implement it by just pointing the alias
821 // to its aliasee's aliasee. We also warn, since the user is probably
822 // expecting the link to be weak.
823 if (auto *GA = dyn_cast<llvm::GlobalAlias>(Val: AliaseeGV)) {
824 if (GA->isInterposable()) {
825 Diags.Report(Loc: Location, DiagID: diag::warn_alias_to_weak_alias)
826 << GV->getName() << GA->getName() << IsIFunc;
827 Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
828 C: GA->getAliasee(), Ty: Alias->getType());
829
830 if (IsIFunc)
831 cast<llvm::GlobalIFunc>(Val: Alias)->setResolver(Aliasee);
832 else
833 cast<llvm::GlobalAlias>(Val: Alias)->setAliasee(Aliasee);
834 }
835 }
836 // ifunc resolvers are usually implemented to run before sanitizer
837 // initialization. Disable instrumentation to prevent the ordering issue.
838 if (IsIFunc)
839 cast<llvm::Function>(Val: Aliasee)->addFnAttr(
840 Kind: llvm::Attribute::DisableSanitizerInstrumentation);
841 }
842 if (!Error)
843 return;
844
845 for (const GlobalDecl &GD : Aliases) {
846 StringRef MangledName = getMangledName(GD);
847 llvm::GlobalValue *Alias = GetGlobalValue(Ref: MangledName);
848 Alias->replaceAllUsesWith(V: llvm::PoisonValue::get(T: Alias->getType()));
849 Alias->eraseFromParent();
850 }
851}
852
853void CodeGenModule::clear() {
854 DeferredDeclsToEmit.clear();
855 EmittedDeferredDecls.clear();
856 DeferredAnnotations.clear();
857 if (OpenMPRuntime)
858 OpenMPRuntime->clear();
859}
860
861void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
862 StringRef MainFile) {
863 if (!hasDiagnostics())
864 return;
865 if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
866 if (MainFile.empty())
867 MainFile = "<stdin>";
868 Diags.Report(DiagID: diag::warn_profile_data_unprofiled) << MainFile;
869 } else {
870 if (Mismatched > 0)
871 Diags.Report(DiagID: diag::warn_profile_data_out_of_date) << Visited << Mismatched;
872
873 if (Missing > 0)
874 Diags.Report(DiagID: diag::warn_profile_data_missing) << Visited << Missing;
875 }
876}
877
878static std::optional<llvm::GlobalValue::VisibilityTypes>
879getLLVMVisibility(clang::LangOptions::VisibilityFromDLLStorageClassKinds K) {
880 // Map to LLVM visibility.
881 switch (K) {
882 case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Keep:
883 return std::nullopt;
884 case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Default:
885 return llvm::GlobalValue::DefaultVisibility;
886 case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Hidden:
887 return llvm::GlobalValue::HiddenVisibility;
888 case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Protected:
889 return llvm::GlobalValue::ProtectedVisibility;
890 }
891 llvm_unreachable("unknown option value!");
892}
893
894static void
895setLLVMVisibility(llvm::GlobalValue &GV,
896 std::optional<llvm::GlobalValue::VisibilityTypes> V) {
897 if (!V)
898 return;
899
900 // Reset DSO locality before setting the visibility. This removes
901 // any effects that visibility options and annotations may have
902 // had on the DSO locality. Setting the visibility will implicitly set
903 // appropriate globals to DSO Local; however, this will be pessimistic
904 // w.r.t. to the normal compiler IRGen.
905 GV.setDSOLocal(false);
906 GV.setVisibility(*V);
907}
908
909static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
910 llvm::Module &M) {
911 if (!LO.VisibilityFromDLLStorageClass)
912 return;
913
914 std::optional<llvm::GlobalValue::VisibilityTypes> DLLExportVisibility =
915 getLLVMVisibility(K: LO.getDLLExportVisibility());
916
917 std::optional<llvm::GlobalValue::VisibilityTypes>
918 NoDLLStorageClassVisibility =
919 getLLVMVisibility(K: LO.getNoDLLStorageClassVisibility());
920
921 std::optional<llvm::GlobalValue::VisibilityTypes>
922 ExternDeclDLLImportVisibility =
923 getLLVMVisibility(K: LO.getExternDeclDLLImportVisibility());
924
925 std::optional<llvm::GlobalValue::VisibilityTypes>
926 ExternDeclNoDLLStorageClassVisibility =
927 getLLVMVisibility(K: LO.getExternDeclNoDLLStorageClassVisibility());
928
929 for (llvm::GlobalValue &GV : M.global_values()) {
930 if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
931 continue;
932
933 if (GV.isDeclarationForLinker())
934 setLLVMVisibility(GV, V: GV.getDLLStorageClass() ==
935 llvm::GlobalValue::DLLImportStorageClass
936 ? ExternDeclDLLImportVisibility
937 : ExternDeclNoDLLStorageClassVisibility);
938 else
939 setLLVMVisibility(GV, V: GV.getDLLStorageClass() ==
940 llvm::GlobalValue::DLLExportStorageClass
941 ? DLLExportVisibility
942 : NoDLLStorageClassVisibility);
943
944 GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
945 }
946}
947
948static bool isStackProtectorOn(const LangOptions &LangOpts,
949 const llvm::Triple &Triple,
950 clang::LangOptions::StackProtectorMode Mode) {
951 if (Triple.isGPU())
952 return false;
953 return LangOpts.getStackProtector() == Mode;
954}
955
956std::optional<llvm::Attribute::AttrKind>
957CodeGenModule::StackProtectorAttribute(const Decl *D) const {
958 if (D && D->hasAttr<NoStackProtectorAttr>())
959 ; // Do nothing.
960 else if (D && D->hasAttr<StrictGuardStackCheckAttr>() &&
961 isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPOn))
962 return llvm::Attribute::StackProtectStrong;
963 else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPOn))
964 return llvm::Attribute::StackProtect;
965 else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPStrong))
966 return llvm::Attribute::StackProtectStrong;
967 else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPReq))
968 return llvm::Attribute::StackProtectReq;
969 return std::nullopt;
970}
971
972void CodeGenModule::Release() {
973 Module *Primary = getContext().getCurrentNamedModule();
974 if (CXX20ModuleInits && Primary && !Primary->isHeaderLikeModule())
975 EmitModuleInitializers(Primary);
976 EmitDeferred();
977 DeferredDecls.insert_range(R&: EmittedDeferredDecls);
978 EmittedDeferredDecls.clear();
979 EmitVTablesOpportunistically();
980 applyGlobalValReplacements();
981 applyReplacements();
982 emitMultiVersionFunctions();
983 emitPFPFieldsWithEvaluatedOffset();
984
985 if (Context.getLangOpts().IncrementalExtensions &&
986 GlobalTopLevelStmtBlockInFlight.first) {
987 const TopLevelStmtDecl *TLSD = GlobalTopLevelStmtBlockInFlight.second;
988 GlobalTopLevelStmtBlockInFlight.first->FinishFunction(EndLoc: TLSD->getEndLoc());
989 GlobalTopLevelStmtBlockInFlight = {nullptr, nullptr};
990 }
991
992 // Module implementations are initialized the same way as a regular TU that
993 // imports one or more modules.
994 if (CXX20ModuleInits && Primary && Primary->isInterfaceOrPartition())
995 EmitCXXModuleInitFunc(Primary);
996 else
997 EmitCXXGlobalInitFunc();
998 EmitCXXGlobalCleanUpFunc();
999 registerGlobalDtorsWithAtExit();
1000 EmitCXXThreadLocalInitFunc();
1001 if (ObjCRuntime)
1002 if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
1003 AddGlobalCtor(Ctor: ObjCInitFunction);
1004 if (Context.getLangOpts().CUDA && CUDARuntime) {
1005 if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule())
1006 AddGlobalCtor(Ctor: CudaCtorFunction);
1007 }
1008 if (OpenMPRuntime) {
1009 OpenMPRuntime->createOffloadEntriesAndInfoMetadata();
1010 OpenMPRuntime->clear();
1011 }
1012 if (PGOReader) {
1013 getModule().setProfileSummary(
1014 M: PGOReader->getSummary(/* UseCS */ false).getMD(Context&: VMContext),
1015 Kind: llvm::ProfileSummary::PSK_Instr);
1016 if (PGOStats.hasDiagnostics())
1017 PGOStats.reportDiagnostics(Diags&: getDiags(), MainFile: getCodeGenOpts().MainFileName);
1018 }
1019 llvm::stable_sort(Range&: GlobalCtors, C: [](const Structor &L, const Structor &R) {
1020 return L.LexOrder < R.LexOrder;
1021 });
1022 EmitCtorList(Fns&: GlobalCtors, GlobalName: "llvm.global_ctors");
1023 EmitCtorList(Fns&: GlobalDtors, GlobalName: "llvm.global_dtors");
1024 EmitGlobalAnnotations();
1025 EmitStaticExternCAliases();
1026 checkAliases();
1027 EmitDeferredUnusedCoverageMappings();
1028 CodeGenPGO(*this).setValueProfilingFlag(getModule());
1029 CodeGenPGO(*this).setProfileVersion(getModule());
1030 if (CoverageMapping)
1031 CoverageMapping->emit();
1032 if (CodeGenOpts.SanitizeCfiCrossDso) {
1033 CodeGenFunction(*this).EmitCfiCheckFail();
1034 CodeGenFunction(*this).EmitCfiCheckStub();
1035 }
1036 if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI))
1037 finalizeKCFITypes();
1038 emitAtAvailableLinkGuard();
1039 if (Context.getTargetInfo().getTriple().isWasm())
1040 EmitMainVoidAlias();
1041
1042 if (getTriple().isAMDGPU() ||
1043 (getTriple().isSPIRV() && getTriple().getVendor() == llvm::Triple::AMD)) {
1044 // Emit amdhsa_code_object_version module flag, which is code object version
1045 // times 100.
1046 if (getTarget().getTargetOpts().CodeObjectVersion !=
1047 llvm::CodeObjectVersionKind::COV_None) {
1048 getModule().addModuleFlag(Behavior: llvm::Module::Error,
1049 Key: "amdhsa_code_object_version",
1050 Val: getTarget().getTargetOpts().CodeObjectVersion);
1051 }
1052
1053 // Currently, "-mprintf-kind" option is only supported for HIP
1054 if (LangOpts.HIP) {
1055 auto *MDStr = llvm::MDString::get(
1056 Context&: getLLVMContext(), Str: (getTarget().getTargetOpts().AMDGPUPrintfKindVal ==
1057 TargetOptions::AMDGPUPrintfKind::Hostcall)
1058 ? "hostcall"
1059 : "buffered");
1060 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "amdgpu_printf_kind",
1061 Val: MDStr);
1062 }
1063 }
1064
1065 // Emit a global array containing all external kernels or device variables
1066 // used by host functions and mark it as used for CUDA/HIP. This is necessary
1067 // to get kernels or device variables in archives linked in even if these
1068 // kernels or device variables are only used in host functions.
1069 if (!Context.CUDAExternalDeviceDeclODRUsedByHost.empty()) {
1070 SmallVector<llvm::Constant *, 8> UsedArray;
1071 for (auto D : Context.CUDAExternalDeviceDeclODRUsedByHost) {
1072 GlobalDecl GD;
1073 if (auto *FD = dyn_cast<FunctionDecl>(Val: D))
1074 GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
1075 else
1076 GD = GlobalDecl(D);
1077 UsedArray.push_back(Elt: llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
1078 C: GetAddrOfGlobal(GD), Ty: Int8PtrTy));
1079 }
1080
1081 llvm::ArrayType *ATy = llvm::ArrayType::get(ElementType: Int8PtrTy, NumElements: UsedArray.size());
1082
1083 auto *GV = new llvm::GlobalVariable(
1084 getModule(), ATy, false, llvm::GlobalValue::InternalLinkage,
1085 llvm::ConstantArray::get(T: ATy, V: UsedArray), "__clang_gpu_used_external");
1086 addCompilerUsedGlobal(GV);
1087 }
1088 if (LangOpts.HIP) {
1089 // Emit a unique ID so that host and device binaries from the same
1090 // compilation unit can be associated.
1091 auto *GV = new llvm::GlobalVariable(
1092 getModule(), Int8Ty, false, llvm::GlobalValue::ExternalLinkage,
1093 llvm::Constant::getNullValue(Ty: Int8Ty),
1094 "__hip_cuid_" + getContext().getCUIDHash());
1095 getSanitizerMetadata()->disableSanitizerForGlobal(GV);
1096 addCompilerUsedGlobal(GV);
1097 }
1098 emitLLVMUsed();
1099 if (SanStats)
1100 SanStats->finish();
1101
1102 if (CodeGenOpts.Autolink &&
1103 (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
1104 EmitModuleLinkOptions();
1105 }
1106
1107 // On ELF we pass the dependent library specifiers directly to the linker
1108 // without manipulating them. This is in contrast to other platforms where
1109 // they are mapped to a specific linker option by the compiler. This
1110 // difference is a result of the greater variety of ELF linkers and the fact
1111 // that ELF linkers tend to handle libraries in a more complicated fashion
1112 // than on other platforms. This forces us to defer handling the dependent
1113 // libs to the linker.
1114 //
1115 // CUDA/HIP device and host libraries are different. Currently there is no
1116 // way to differentiate dependent libraries for host or device. Existing
1117 // usage of #pragma comment(lib, *) is intended for host libraries on
1118 // Windows. Therefore emit llvm.dependent-libraries only for host.
1119 if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
1120 auto *NMD = getModule().getOrInsertNamedMetadata(Name: "llvm.dependent-libraries");
1121 for (auto *MD : ELFDependentLibraries)
1122 NMD->addOperand(M: MD);
1123 }
1124
1125 if (CodeGenOpts.DwarfVersion) {
1126 getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "Dwarf Version",
1127 Val: CodeGenOpts.DwarfVersion);
1128 }
1129
1130 if (CodeGenOpts.Dwarf64)
1131 getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "DWARF64", Val: 1);
1132
1133 if (Context.getLangOpts().SemanticInterposition)
1134 // Require various optimization to respect semantic interposition.
1135 getModule().setSemanticInterposition(true);
1136
1137 if (CodeGenOpts.EmitCodeView) {
1138 // Indicate that we want CodeView in the metadata.
1139 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "CodeView", Val: 1);
1140 }
1141 if (CodeGenOpts.CodeViewGHash) {
1142 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "CodeViewGHash", Val: 1);
1143 }
1144 if (CodeGenOpts.ControlFlowGuard) {
1145 // Function ID tables and checks for Control Flow Guard.
1146 getModule().addModuleFlag(
1147 Behavior: llvm::Module::Warning, Key: "cfguard",
1148 Val: static_cast<unsigned>(llvm::ControlFlowGuardMode::Enabled));
1149 } else if (CodeGenOpts.ControlFlowGuardNoChecks) {
1150 // Function ID tables for Control Flow Guard.
1151 getModule().addModuleFlag(
1152 Behavior: llvm::Module::Warning, Key: "cfguard",
1153 Val: static_cast<unsigned>(llvm::ControlFlowGuardMode::TableOnly));
1154 }
1155 if (CodeGenOpts.EHContGuard) {
1156 // Function ID tables for EH Continuation Guard.
1157 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "ehcontguard", Val: 1);
1158 }
1159 if (Context.getLangOpts().Kernel) {
1160 // Note if we are compiling with /kernel.
1161 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "ms-kernel", Val: 1);
1162 }
1163 if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
1164 // We don't support LTO with 2 with different StrictVTablePointers
1165 // FIXME: we could support it by stripping all the information introduced
1166 // by StrictVTablePointers.
1167
1168 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "StrictVTablePointers",Val: 1);
1169
1170 llvm::Metadata *Ops[2] = {
1171 llvm::MDString::get(Context&: VMContext, Str: "StrictVTablePointers"),
1172 llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(
1173 Ty: llvm::Type::getInt32Ty(C&: VMContext), V: 1))};
1174
1175 getModule().addModuleFlag(Behavior: llvm::Module::Require,
1176 Key: "StrictVTablePointersRequirement",
1177 Val: llvm::MDNode::get(Context&: VMContext, MDs: Ops));
1178 }
1179 if (getModuleDebugInfo() || getTriple().isOSWindows())
1180 // We support a single version in the linked module. The LLVM
1181 // parser will drop debug info with a different version number
1182 // (and warn about it, too).
1183 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "Debug Info Version",
1184 Val: llvm::DEBUG_METADATA_VERSION);
1185
1186 // We need to record the widths of enums and wchar_t, so that we can generate
1187 // the correct build attributes in the ARM backend. wchar_size is also used by
1188 // TargetLibraryInfo.
1189 uint64_t WCharWidth =
1190 Context.getTypeSizeInChars(T: Context.getWideCharType()).getQuantity();
1191 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "wchar_size", Val: WCharWidth);
1192
1193 if (getTriple().isOSzOS()) {
1194 getModule().addModuleFlag(Behavior: llvm::Module::Warning,
1195 Key: "zos_product_major_version",
1196 Val: uint32_t(CLANG_VERSION_MAJOR));
1197 getModule().addModuleFlag(Behavior: llvm::Module::Warning,
1198 Key: "zos_product_minor_version",
1199 Val: uint32_t(CLANG_VERSION_MINOR));
1200 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "zos_product_patchlevel",
1201 Val: uint32_t(CLANG_VERSION_PATCHLEVEL));
1202 std::string ProductId = getClangVendor() + "clang";
1203 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_product_id",
1204 Val: llvm::MDString::get(Context&: VMContext, Str: ProductId));
1205
1206 // Record the language because we need it for the PPA2.
1207 StringRef lang_str = languageToString(
1208 L: LangStandard::getLangStandardForKind(K: LangOpts.LangStd).Language);
1209 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_cu_language",
1210 Val: llvm::MDString::get(Context&: VMContext, Str: lang_str));
1211
1212 time_t TT = PreprocessorOpts.SourceDateEpoch
1213 ? *PreprocessorOpts.SourceDateEpoch
1214 : std::time(timer: nullptr);
1215 getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "zos_translation_time",
1216 Val: static_cast<uint64_t>(TT));
1217
1218 // Multiple modes will be supported here.
1219 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_le_char_mode",
1220 Val: llvm::MDString::get(Context&: VMContext, Str: "ascii"));
1221 }
1222
1223 llvm::Triple T = Context.getTargetInfo().getTriple();
1224 if (T.isARM() || T.isThumb()) {
1225 // The minimum width of an enum in bytes
1226 uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
1227 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "min_enum_size", Val: EnumWidth);
1228 }
1229
1230 if (T.isRISCV()) {
1231 StringRef ABIStr = Target.getABI();
1232 llvm::LLVMContext &Ctx = TheModule.getContext();
1233 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "target-abi",
1234 Val: llvm::MDString::get(Context&: Ctx, Str: ABIStr));
1235
1236 // Add the canonical ISA string as metadata so the backend can set the ELF
1237 // attributes correctly. We use AppendUnique so LTO will keep all of the
1238 // unique ISA strings that were linked together.
1239 const std::vector<std::string> &Features =
1240 getTarget().getTargetOpts().Features;
1241 auto ParseResult =
1242 llvm::RISCVISAInfo::parseFeatures(XLen: T.isRISCV64() ? 64 : 32, Features);
1243 if (!errorToBool(Err: ParseResult.takeError()))
1244 getModule().addModuleFlag(
1245 Behavior: llvm::Module::AppendUnique, Key: "riscv-isa",
1246 Val: llvm::MDNode::get(
1247 Context&: Ctx, MDs: llvm::MDString::get(Context&: Ctx, Str: (*ParseResult)->toString())));
1248 }
1249
1250 if (CodeGenOpts.SanitizeCfiCrossDso) {
1251 // Indicate that we want cross-DSO control flow integrity checks.
1252 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "Cross-DSO CFI", Val: 1);
1253 }
1254
1255 if (CodeGenOpts.WholeProgramVTables) {
1256 // Indicate whether VFE was enabled for this module, so that the
1257 // vcall_visibility metadata added under whole program vtables is handled
1258 // appropriately in the optimizer.
1259 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "Virtual Function Elim",
1260 Val: CodeGenOpts.VirtualFunctionElimination);
1261 }
1262
1263 if (LangOpts.Sanitize.has(K: SanitizerKind::CFIICall)) {
1264 getModule().addModuleFlag(Behavior: llvm::Module::Override,
1265 Key: "CFI Canonical Jump Tables",
1266 Val: CodeGenOpts.SanitizeCfiCanonicalJumpTables);
1267 }
1268
1269 if (CodeGenOpts.SanitizeCfiICallNormalizeIntegers) {
1270 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "cfi-normalize-integers",
1271 Val: 1);
1272 }
1273
1274 if (!CodeGenOpts.UniqueSourceFileIdentifier.empty()) {
1275 getModule().addModuleFlag(
1276 Behavior: llvm::Module::Append, Key: "Unique Source File Identifier",
1277 Val: llvm::MDTuple::get(
1278 Context&: TheModule.getContext(),
1279 MDs: llvm::MDString::get(Context&: TheModule.getContext(),
1280 Str: CodeGenOpts.UniqueSourceFileIdentifier)));
1281 }
1282
1283 if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI)) {
1284 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi", Val: 1);
1285 // KCFI assumes patchable-function-prefix is the same for all indirectly
1286 // called functions. Store the expected offset for code generation.
1287 if (CodeGenOpts.PatchableFunctionEntryOffset)
1288 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi-offset",
1289 Val: CodeGenOpts.PatchableFunctionEntryOffset);
1290 if (CodeGenOpts.SanitizeKcfiArity)
1291 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi-arity", Val: 1);
1292 // Store the hash algorithm choice for use in LLVM passes
1293 getModule().addModuleFlag(
1294 Behavior: llvm::Module::Override, Key: "kcfi-hash",
1295 Val: llvm::MDString::get(
1296 Context&: getLLVMContext(),
1297 Str: llvm::stringifyKCFIHashAlgorithm(Algorithm: CodeGenOpts.SanitizeKcfiHash)));
1298 }
1299
1300 if (CodeGenOpts.CFProtectionReturn &&
1301 Target.checkCFProtectionReturnSupported(Diags&: getDiags())) {
1302 // Indicate that we want to instrument return control flow protection.
1303 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "cf-protection-return",
1304 Val: 1);
1305 }
1306
1307 if (CodeGenOpts.CFProtectionBranch &&
1308 Target.checkCFProtectionBranchSupported(Diags&: getDiags())) {
1309 // Indicate that we want to instrument branch control flow protection.
1310 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "cf-protection-branch",
1311 Val: 1);
1312
1313 auto Scheme = CodeGenOpts.getCFBranchLabelScheme();
1314 if (Target.checkCFBranchLabelSchemeSupported(Scheme, Diags&: getDiags())) {
1315 if (Scheme == CFBranchLabelSchemeKind::Default)
1316 Scheme = Target.getDefaultCFBranchLabelScheme();
1317 getModule().addModuleFlag(
1318 Behavior: llvm::Module::Error, Key: "cf-branch-label-scheme",
1319 Val: llvm::MDString::get(Context&: getLLVMContext(),
1320 Str: getCFBranchLabelSchemeFlagVal(Scheme)));
1321 }
1322 }
1323
1324 if (CodeGenOpts.FunctionReturnThunks)
1325 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "function_return_thunk_extern", Val: 1);
1326
1327 if (CodeGenOpts.IndirectBranchCSPrefix)
1328 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "indirect_branch_cs_prefix", Val: 1);
1329
1330 // Add module metadata for return address signing (ignoring
1331 // non-leaf/all) and stack tagging. These are actually turned on by function
1332 // attributes, but we use module metadata to emit build attributes. This is
1333 // needed for LTO, where the function attributes are inside bitcode
1334 // serialised into a global variable by the time build attributes are
1335 // emitted, so we can't access them. LTO objects could be compiled with
1336 // different flags therefore module flags are set to "Min" behavior to achieve
1337 // the same end result of the normal build where e.g BTI is off if any object
1338 // doesn't support it.
1339 if (Context.getTargetInfo().hasFeature(Feature: "ptrauth") &&
1340 LangOpts.getSignReturnAddressScope() !=
1341 LangOptions::SignReturnAddressScopeKind::None)
1342 getModule().addModuleFlag(Behavior: llvm::Module::Override,
1343 Key: "sign-return-address-buildattr", Val: 1);
1344 if (LangOpts.Sanitize.has(K: SanitizerKind::MemtagStack))
1345 getModule().addModuleFlag(Behavior: llvm::Module::Override,
1346 Key: "tag-stack-memory-buildattr", Val: 1);
1347
1348 if (T.isARM() || T.isThumb() || T.isAArch64()) {
1349 // Previously 1 is used and meant for the backed to derive the function
1350 // attribute form it. 2 now means function attributes already set for all
1351 // functions in this module, so no need to propagate those from the module
1352 // flag. Value is only used in case of LTO module merge because the backend
1353 // will see all required function attribute set already. Value is used
1354 // before modules got merged. Any posive value means the feature is active
1355 // and required binary markings need to be emit accordingly.
1356 if (LangOpts.BranchTargetEnforcement)
1357 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "branch-target-enforcement",
1358 Val: 2);
1359 if (LangOpts.BranchProtectionPAuthLR)
1360 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "branch-protection-pauth-lr",
1361 Val: 2);
1362 if (LangOpts.GuardedControlStack)
1363 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "guarded-control-stack", Val: 2);
1364 if (LangOpts.hasSignReturnAddress())
1365 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "sign-return-address", Val: 2);
1366 if (LangOpts.isSignReturnAddressScopeAll())
1367 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "sign-return-address-all",
1368 Val: 2);
1369 if (!LangOpts.isSignReturnAddressWithAKey())
1370 getModule().addModuleFlag(Behavior: llvm::Module::Min,
1371 Key: "sign-return-address-with-bkey", Val: 2);
1372
1373 if (LangOpts.PointerAuthELFGOT)
1374 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "ptrauth-elf-got", Val: 1);
1375
1376 if (getTriple().isOSLinux()) {
1377 if (LangOpts.PointerAuthCalls)
1378 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "ptrauth-sign-personality",
1379 Val: 1);
1380 assert(getTriple().isOSBinFormatELF());
1381 using namespace llvm::ELF;
1382 uint64_t PAuthABIVersion =
1383 (LangOpts.PointerAuthIntrinsics
1384 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INTRINSICS) |
1385 (LangOpts.PointerAuthCalls
1386 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_CALLS) |
1387 (LangOpts.PointerAuthReturns
1388 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_RETURNS) |
1389 (LangOpts.PointerAuthAuthTraps
1390 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_AUTHTRAPS) |
1391 (LangOpts.PointerAuthVTPtrAddressDiscrimination
1392 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRADDRDISCR) |
1393 (LangOpts.PointerAuthVTPtrTypeDiscrimination
1394 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRTYPEDISCR) |
1395 (LangOpts.PointerAuthInitFini
1396 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI) |
1397 (LangOpts.PointerAuthInitFiniAddressDiscrimination
1398 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINIADDRDISC) |
1399 (LangOpts.PointerAuthELFGOT
1400 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT) |
1401 (LangOpts.PointerAuthIndirectGotos
1402 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOTOS) |
1403 (LangOpts.PointerAuthTypeInfoVTPtrDiscrimination
1404 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_TYPEINFOVPTRDISCR) |
1405 (LangOpts.PointerAuthFunctionTypeDiscrimination
1406 << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_FPTRTYPEDISCR);
1407 static_assert(AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_FPTRTYPEDISCR ==
1408 AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_LAST,
1409 "Update when new enum items are defined");
1410 if (PAuthABIVersion != 0) {
1411 getModule().addModuleFlag(Behavior: llvm::Module::Error,
1412 Key: "aarch64-elf-pauthabi-platform",
1413 Val: AARCH64_PAUTH_PLATFORM_LLVM_LINUX);
1414 getModule().addModuleFlag(Behavior: llvm::Module::Error,
1415 Key: "aarch64-elf-pauthabi-version",
1416 Val: PAuthABIVersion);
1417 }
1418 }
1419 }
1420 if ((T.isARM() || T.isThumb()) && getTriple().isTargetAEABI() &&
1421 getTriple().isOSBinFormatELF()) {
1422 uint32_t TagVal = 0;
1423 llvm::Module::ModFlagBehavior DenormalTagBehavior = llvm::Module::Max;
1424 if (getCodeGenOpts().FPDenormalMode ==
1425 llvm::DenormalMode::getPositiveZero()) {
1426 TagVal = llvm::ARMBuildAttrs::PositiveZero;
1427 } else if (getCodeGenOpts().FPDenormalMode ==
1428 llvm::DenormalMode::getIEEE()) {
1429 TagVal = llvm::ARMBuildAttrs::IEEEDenormals;
1430 DenormalTagBehavior = llvm::Module::Override;
1431 } else if (getCodeGenOpts().FPDenormalMode ==
1432 llvm::DenormalMode::getPreserveSign()) {
1433 TagVal = llvm::ARMBuildAttrs::PreserveFPSign;
1434 }
1435 getModule().addModuleFlag(Behavior: DenormalTagBehavior, Key: "arm-eabi-fp-denormal",
1436 Val: TagVal);
1437
1438 if (getLangOpts().getDefaultExceptionMode() !=
1439 LangOptions::FPExceptionModeKind::FPE_Ignore)
1440 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "arm-eabi-fp-exceptions",
1441 Val: llvm::ARMBuildAttrs::Allowed);
1442
1443 if (getLangOpts().NoHonorNaNs && getLangOpts().NoHonorInfs)
1444 TagVal = llvm::ARMBuildAttrs::AllowIEEENormal;
1445 else
1446 TagVal = llvm::ARMBuildAttrs::AllowIEEE754;
1447 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "arm-eabi-fp-number-model",
1448 Val: TagVal);
1449 }
1450
1451 if (CodeGenOpts.StackClashProtector)
1452 getModule().addModuleFlag(
1453 Behavior: llvm::Module::Override, Key: "probe-stack",
1454 Val: llvm::MDString::get(Context&: TheModule.getContext(), Str: "inline-asm"));
1455
1456 if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096)
1457 getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "stack-probe-size",
1458 Val: CodeGenOpts.StackProbeSize);
1459
1460 if (!CodeGenOpts.MemoryProfileOutput.empty()) {
1461 llvm::LLVMContext &Ctx = TheModule.getContext();
1462 getModule().addModuleFlag(
1463 Behavior: llvm::Module::Error, Key: "MemProfProfileFilename",
1464 Val: llvm::MDString::get(Context&: Ctx, Str: CodeGenOpts.MemoryProfileOutput));
1465 }
1466
1467 if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
1468 // Indicate whether __nvvm_reflect should be configured to flush denormal
1469 // floating point values to 0. (This corresponds to its "__CUDA_FTZ"
1470 // property.)
1471 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "nvvm-reflect-ftz",
1472 Val: CodeGenOpts.FP32DenormalMode.Output !=
1473 llvm::DenormalMode::IEEE);
1474 }
1475
1476 if (LangOpts.EHAsynch)
1477 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "eh-asynch", Val: 1);
1478
1479 // Emit Import Call section.
1480 if (CodeGenOpts.ImportCallOptimization)
1481 getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "import-call-optimization",
1482 Val: 1);
1483
1484 // Enable unwind v2 (epilog).
1485 if (CodeGenOpts.getWinX64EHUnwindV2() != llvm::WinX64EHUnwindV2Mode::Disabled)
1486 getModule().addModuleFlag(
1487 Behavior: llvm::Module::Warning, Key: "winx64-eh-unwindv2",
1488 Val: static_cast<unsigned>(CodeGenOpts.getWinX64EHUnwindV2()));
1489
1490 // Indicate whether this Module was compiled with -fopenmp
1491 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
1492 getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "openmp", Val: LangOpts.OpenMP);
1493 if (getLangOpts().OpenMPIsTargetDevice)
1494 getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "openmp-device",
1495 Val: LangOpts.OpenMP);
1496
1497 // Emit OpenCL specific module metadata: OpenCL/SPIR version.
1498 if (LangOpts.OpenCL || (LangOpts.CUDAIsDevice && getTriple().isSPIRV())) {
1499 EmitOpenCLMetadata();
1500 // Emit SPIR version.
1501 if (getTriple().isSPIR()) {
1502 // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
1503 // opencl.spir.version named metadata.
1504 // C++ for OpenCL has a distinct mapping for version compatibility with
1505 // OpenCL.
1506 auto Version = LangOpts.getOpenCLCompatibleVersion();
1507 llvm::Metadata *SPIRVerElts[] = {
1508 llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(
1509 Ty: Int32Ty, V: Version / 100)),
1510 llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(
1511 Ty: Int32Ty, V: (Version / 100 > 1) ? 0 : 2))};
1512 llvm::NamedMDNode *SPIRVerMD =
1513 TheModule.getOrInsertNamedMetadata(Name: "opencl.spir.version");
1514 llvm::LLVMContext &Ctx = TheModule.getContext();
1515 SPIRVerMD->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: SPIRVerElts));
1516 }
1517 }
1518
1519 // HLSL related end of code gen work items.
1520 if (LangOpts.HLSL)
1521 getHLSLRuntime().finishCodeGen();
1522
1523 if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
1524 assert(PLevel < 3 && "Invalid PIC Level");
1525 getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
1526 if (Context.getLangOpts().PIE)
1527 getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
1528 }
1529
1530 if (getCodeGenOpts().CodeModel.size() > 0) {
1531 unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
1532 .Case(S: "tiny", Value: llvm::CodeModel::Tiny)
1533 .Case(S: "small", Value: llvm::CodeModel::Small)
1534 .Case(S: "kernel", Value: llvm::CodeModel::Kernel)
1535 .Case(S: "medium", Value: llvm::CodeModel::Medium)
1536 .Case(S: "large", Value: llvm::CodeModel::Large)
1537 .Default(Value: ~0u);
1538 if (CM != ~0u) {
1539 llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
1540 getModule().setCodeModel(codeModel);
1541
1542 if ((CM == llvm::CodeModel::Medium || CM == llvm::CodeModel::Large) &&
1543 Context.getTargetInfo().getTriple().getArch() ==
1544 llvm::Triple::x86_64) {
1545 getModule().setLargeDataThreshold(getCodeGenOpts().LargeDataThreshold);
1546 }
1547 }
1548 }
1549
1550 if (CodeGenOpts.NoPLT)
1551 getModule().setRtLibUseGOT();
1552 if (getTriple().isOSBinFormatELF() &&
1553 CodeGenOpts.DirectAccessExternalData !=
1554 getModule().getDirectAccessExternalData()) {
1555 getModule().setDirectAccessExternalData(
1556 CodeGenOpts.DirectAccessExternalData);
1557 }
1558 if (CodeGenOpts.UnwindTables)
1559 getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables));
1560
1561 switch (CodeGenOpts.getFramePointer()) {
1562 case CodeGenOptions::FramePointerKind::None:
1563 // 0 ("none") is the default.
1564 break;
1565 case CodeGenOptions::FramePointerKind::Reserved:
1566 getModule().setFramePointer(llvm::FramePointerKind::Reserved);
1567 break;
1568 case CodeGenOptions::FramePointerKind::NonLeafNoReserve:
1569 getModule().setFramePointer(llvm::FramePointerKind::NonLeafNoReserve);
1570 break;
1571 case CodeGenOptions::FramePointerKind::NonLeaf:
1572 getModule().setFramePointer(llvm::FramePointerKind::NonLeaf);
1573 break;
1574 case CodeGenOptions::FramePointerKind::All:
1575 getModule().setFramePointer(llvm::FramePointerKind::All);
1576 break;
1577 }
1578
1579 SimplifyPersonality();
1580
1581 if (getCodeGenOpts().EmitDeclMetadata)
1582 EmitDeclMetadata();
1583
1584 if (getCodeGenOpts().CoverageNotesFile.size() ||
1585 getCodeGenOpts().CoverageDataFile.size())
1586 EmitCoverageFile();
1587
1588 if (CGDebugInfo *DI = getModuleDebugInfo())
1589 DI->finalize();
1590
1591 if (getCodeGenOpts().EmitVersionIdentMetadata)
1592 EmitVersionIdentMetadata();
1593
1594 if (!getCodeGenOpts().RecordCommandLine.empty())
1595 EmitCommandLineMetadata();
1596
1597 if (!getCodeGenOpts().StackProtectorGuard.empty())
1598 getModule().setStackProtectorGuard(getCodeGenOpts().StackProtectorGuard);
1599 if (!getCodeGenOpts().StackProtectorGuardReg.empty())
1600 getModule().setStackProtectorGuardReg(
1601 getCodeGenOpts().StackProtectorGuardReg);
1602 if (!getCodeGenOpts().StackProtectorGuardSymbol.empty())
1603 getModule().setStackProtectorGuardSymbol(
1604 getCodeGenOpts().StackProtectorGuardSymbol);
1605 if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX)
1606 getModule().setStackProtectorGuardOffset(
1607 getCodeGenOpts().StackProtectorGuardOffset);
1608 if (getCodeGenOpts().StackAlignment)
1609 getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment);
1610 if (getCodeGenOpts().SkipRaxSetup)
1611 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "SkipRaxSetup", Val: 1);
1612 if (getLangOpts().RegCall4)
1613 getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "RegCallv4", Val: 1);
1614
1615 if (getContext().getTargetInfo().getMaxTLSAlign())
1616 getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "MaxTLSAlign",
1617 Val: getContext().getTargetInfo().getMaxTLSAlign());
1618
1619 getTargetCodeGenInfo().emitTargetGlobals(CGM&: *this);
1620
1621 getTargetCodeGenInfo().emitTargetMetadata(CGM&: *this, MangledDeclNames);
1622
1623 EmitBackendOptionsMetadata(CodeGenOpts: getCodeGenOpts());
1624
1625 // If there is device offloading code embed it in the host now.
1626 EmbedObject(M: &getModule(), CGOpts: CodeGenOpts, VFS&: *getFileSystem(), Diags&: getDiags());
1627
1628 // Set visibility from DLL storage class
1629 // We do this at the end of LLVM IR generation; after any operation
1630 // that might affect the DLL storage class or the visibility, and
1631 // before anything that might act on these.
1632 setVisibilityFromDLLStorageClass(LO: LangOpts, M&: getModule());
1633
1634 // Check the tail call symbols are truly undefined.
1635 if (!MustTailCallUndefinedGlobals.empty()) {
1636 if (getTriple().isPPC()) {
1637 for (auto &I : MustTailCallUndefinedGlobals) {
1638 if (!I.first->isDefined())
1639 getDiags().Report(Loc: I.second, DiagID: diag::err_ppc_impossible_musttail) << 2;
1640 else {
1641 StringRef MangledName = getMangledName(GD: GlobalDecl(I.first));
1642 llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName);
1643 if (!Entry || Entry->isWeakForLinker() ||
1644 Entry->isDeclarationForLinker())
1645 getDiags().Report(Loc: I.second, DiagID: diag::err_ppc_impossible_musttail) << 2;
1646 }
1647 }
1648 } else if (getTriple().isMIPS()) {
1649 for (auto &I : MustTailCallUndefinedGlobals) {
1650 const FunctionDecl *FD = I.first;
1651 StringRef MangledName = getMangledName(GD: GlobalDecl(FD));
1652 llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName);
1653
1654 if (!Entry)
1655 continue;
1656
1657 bool CalleeIsLocal;
1658 if (Entry->isDeclarationForLinker()) {
1659 // For declarations, only visibility can indicate locality.
1660 CalleeIsLocal =
1661 Entry->hasHiddenVisibility() || Entry->hasProtectedVisibility();
1662 } else {
1663 CalleeIsLocal = Entry->isDSOLocal();
1664 }
1665
1666 if (!CalleeIsLocal)
1667 getDiags().Report(Loc: I.second, DiagID: diag::err_mips_impossible_musttail) << 1;
1668 }
1669 }
1670 }
1671
1672 // Emit `!llvm.errno.tbaa`, a module-level metadata that specifies the TBAA
1673 // for an int access. This allows LLVM to reason about what memory can be
1674 // accessed by certain library calls that only touch errno.
1675 if (TBAA) {
1676 TBAAAccessInfo TBAAInfo = getTBAAAccessInfo(AccessType: Context.IntTy);
1677 if (llvm::MDNode *IntegerNode = getTBAAAccessTagInfo(Info: TBAAInfo)) {
1678 auto *ErrnoTBAAMD = TheModule.getOrInsertNamedMetadata(Name: ErrnoTBAAMDName);
1679 ErrnoTBAAMD->addOperand(M: IntegerNode);
1680 }
1681 }
1682}
1683
1684void CodeGenModule::EmitOpenCLMetadata() {
1685 // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
1686 // opencl.ocl.version named metadata node.
1687 // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL.
1688 auto CLVersion = LangOpts.getOpenCLCompatibleVersion();
1689
1690 auto EmitVersion = [this](StringRef MDName, int Version) {
1691 llvm::Metadata *OCLVerElts[] = {
1692 llvm::ConstantAsMetadata::get(
1693 C: llvm::ConstantInt::get(Ty: Int32Ty, V: Version / 100)),
1694 llvm::ConstantAsMetadata::get(
1695 C: llvm::ConstantInt::get(Ty: Int32Ty, V: (Version % 100) / 10))};
1696 llvm::NamedMDNode *OCLVerMD = TheModule.getOrInsertNamedMetadata(Name: MDName);
1697 llvm::LLVMContext &Ctx = TheModule.getContext();
1698 OCLVerMD->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: OCLVerElts));
1699 };
1700
1701 EmitVersion("opencl.ocl.version", CLVersion);
1702 if (LangOpts.OpenCLCPlusPlus) {
1703 // In addition to the OpenCL compatible version, emit the C++ version.
1704 EmitVersion("opencl.cxx.version", LangOpts.OpenCLCPlusPlusVersion);
1705 }
1706}
1707
/// Record backend-configuration options as module flags so the LLVM backend
/// pipeline can act on them: RISC-V small-data limit and the AllocToken
/// sanitizer configuration.
void CodeGenModule::EmitBackendOptionsMetadata(
    const CodeGenOptions &CodeGenOpts) {
  if (getTriple().isRISCV()) {
    // Min behavior: when linking modules, the smaller limit wins.
    getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "SmallDataLimit",
                              Val: CodeGenOpts.SmallDataLimit);
  }

  // Set AllocToken configuration for backend pipeline.
  if (LangOpts.AllocTokenMode) {
    StringRef S = llvm::getAllocTokenModeAsString(Mode: *LangOpts.AllocTokenMode);
    getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "alloc-token-mode",
                              Val: llvm::MDString::get(Context&: VMContext, Str: S));
  }
  // Upper bound on token values, stored as a 64-bit constant.
  if (LangOpts.AllocTokenMax)
    getModule().addModuleFlag(
        Behavior: llvm::Module::Error, Key: "alloc-token-max",
        Val: llvm::ConstantInt::get(Ty: llvm::Type::getInt64Ty(C&: VMContext),
                                 V: *LangOpts.AllocTokenMax));
  if (CodeGenOpts.SanitizeAllocTokenFastABI)
    getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "SkipRaxSetup", Val: 1);
  if (CodeGenOpts.SanitizeAllocTokenExtended)
    getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "alloc-token-extended", Val: 1);
}
1731
/// Called when a tag type definition is completed; refreshes the cached
/// LLVM translation of the type.
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
  // Make sure that this type is translated.
  getTypes().UpdateCompletedType(TD);
}
1736
/// Invalidate/refresh the cached LLVM type for a C++ class whose layout
/// may have changed.
void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  // Make sure that this type is translated.
  getTypes().RefreshTypeCacheForClass(RD);
}
1741
1742llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
1743 if (!TBAA)
1744 return nullptr;
1745 return TBAA->getTypeInfo(QTy);
1746}
1747
1748TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
1749 if (!TBAA)
1750 return TBAAAccessInfo();
1751 if (getLangOpts().CUDAIsDevice) {
1752 // As CUDA builtin surface/texture types are replaced, skip generating TBAA
1753 // access info.
1754 if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
1755 if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
1756 nullptr)
1757 return TBAAAccessInfo();
1758 } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
1759 if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
1760 nullptr)
1761 return TBAAAccessInfo();
1762 }
1763 }
1764 return TBAA->getAccessInfo(AccessType);
1765}
1766
1767TBAAAccessInfo
1768CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
1769 if (!TBAA)
1770 return TBAAAccessInfo();
1771 return TBAA->getVTablePtrAccessInfo(VTablePtrType);
1772}
1773
1774llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
1775 if (!TBAA)
1776 return nullptr;
1777 return TBAA->getTBAAStructInfo(QTy);
1778}
1779
1780llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) {
1781 if (!TBAA)
1782 return nullptr;
1783 return TBAA->getBaseTypeInfo(QTy);
1784}
1785
1786llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) {
1787 if (!TBAA)
1788 return nullptr;
1789 return TBAA->getAccessTagInfo(Info);
1790}
1791
1792TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
1793 TBAAAccessInfo TargetInfo) {
1794 if (!TBAA)
1795 return TBAAAccessInfo();
1796 return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo);
1797}
1798
1799TBAAAccessInfo
1800CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
1801 TBAAAccessInfo InfoB) {
1802 if (!TBAA)
1803 return TBAAAccessInfo();
1804 return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
1805}
1806
1807TBAAAccessInfo
1808CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
1809 TBAAAccessInfo SrcInfo) {
1810 if (!TBAA)
1811 return TBAAAccessInfo();
1812 return TBAA->mergeTBAAInfoForConditionalOperator(InfoA: DestInfo, InfoB: SrcInfo);
1813}
1814
1815void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
1816 TBAAAccessInfo TBAAInfo) {
1817 if (llvm::MDNode *Tag = getTBAAAccessTagInfo(Info: TBAAInfo))
1818 Inst->setMetadata(KindID: llvm::LLVMContext::MD_tbaa, Node: Tag);
1819}
1820
1821void CodeGenModule::DecorateInstructionWithInvariantGroup(
1822 llvm::Instruction *I, const CXXRecordDecl *RD) {
1823 I->setMetadata(KindID: llvm::LLVMContext::MD_invariant_group,
1824 Node: llvm::MDNode::get(Context&: getLLVMContext(), MDs: {}));
1825}
1826
1827void CodeGenModule::Error(SourceLocation loc, StringRef message) {
1828 unsigned diagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, FormatString: "%0");
1829 getDiags().Report(Loc: Context.getFullLoc(Loc: loc), DiagID: diagID) << message;
1830}
1831
1832/// ErrorUnsupported - Print out an error that codegen doesn't support the
1833/// specified stmt yet.
1834void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
1835 std::string Msg = Type;
1836 getDiags().Report(Loc: Context.getFullLoc(Loc: S->getBeginLoc()),
1837 DiagID: diag::err_codegen_unsupported)
1838 << Msg << S->getSourceRange();
1839}
1840
1841void CodeGenModule::ErrorUnsupported(const Stmt *S, llvm::StringRef Type) {
1842 getDiags().Report(Loc: Context.getFullLoc(Loc: S->getBeginLoc()),
1843 DiagID: diag::err_codegen_unsupported)
1844 << Type << S->getSourceRange();
1845}
1846
1847/// ErrorUnsupported - Print out an error that codegen doesn't support the
1848/// specified decl yet.
1849void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
1850 std::string Msg = Type;
1851 getDiags().Report(Loc: Context.getFullLoc(Loc: D->getLocation()),
1852 DiagID: diag::err_codegen_unsupported)
1853 << Msg;
1854}
1855
/// Run Fn, first ensuring there is sufficient stack space (the handler may
/// report a warning at Loc and/or switch to a new stack as needed).
void CodeGenModule::runWithSufficientStackSpace(SourceLocation Loc,
                                              llvm::function_ref<void()> Fn) {
  StackHandler.runWithSufficientStackSpace(Loc, Fn);
}
1860
/// Return the given size in CharUnits as a constant of the target's size
/// type.
llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
  return llvm::ConstantInt::get(Ty: SizeTy, V: size.getQuantity());
}
1864
/// Apply LLVM visibility to GV based on D's computed linkage/visibility,
/// with special cases (in priority order) for OpenMP declare-target
/// variables, HLSL non-exported symbols, and DLL storage classes.
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
                                        const NamedDecl *D) const {
  // Internal definitions always have default visibility.
  if (GV->hasLocalLinkage()) {
    GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
    return;
  }
  if (!D)
    return;

  // Set visibility for definitions, and for declarations if requested globally
  // or set explicitly.
  LinkageInfo LV = D->getLinkageAndVisibility();

  // OpenMP declare target variables must be visible to the host so they can
  // be registered. We require protected visibility unless the variable has
  // the DT_nohost modifier and does not need to be registered.
  if (Context.getLangOpts().OpenMP &&
      Context.getLangOpts().OpenMPIsTargetDevice && isa<VarDecl>(Val: D) &&
      D->hasAttr<OMPDeclareTargetDeclAttr>() &&
      D->getAttr<OMPDeclareTargetDeclAttr>()->getDevType() !=
          OMPDeclareTargetDeclAttr::DT_NoHost &&
      LV.getVisibility() == HiddenVisibility) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    return;
  }

  // In HLSL, anything not within an export declaration is hidden.
  if (Context.getLangOpts().HLSL && !D->isInExportDeclContext()) {
    GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
    return;
  }

  if (GV->hasDLLExportStorageClass() || GV->hasDLLImportStorageClass()) {
    // Reject incompatible dll storage and visibility annotations.
    if (!LV.isVisibilityExplicit())
      return;
    if (GV->hasDLLExportStorageClass()) {
      if (LV.getVisibility() == HiddenVisibility)
        getDiags().Report(Loc: D->getLocation(),
                          DiagID: diag::err_hidden_visibility_dllexport);
    } else if (LV.getVisibility() != DefaultVisibility) {
      getDiags().Report(Loc: D->getLocation(),
                        DiagID: diag::err_non_default_visibility_dllimport);
    }
    return;
  }

  // Otherwise honor the computed visibility for definitions, explicitly
  // annotated declarations, or when requested for all extern declarations.
  if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
      !GV->isDeclarationForLinker())
    GV->setVisibility(GetLLVMVisibility(V: LV.getVisibility()));
}
1916
/// Decide whether the backend may assume GV resolves within the current
/// linkage unit (i.e. may be marked dso_local). The answer depends on
/// linkage/visibility, the object format (COFF/ELF/Mach-O), and the
/// relocation model; the checks below are order-sensitive.
static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
                                 llvm::GlobalValue *GV) {
  if (GV->hasLocalLinkage())
    return true;

  // Non-default visibility (hidden/protected) pins the symbol locally,
  // except for extern_weak which may still resolve to null.
  if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
    return true;

  // DLLImport explicitly marks the GV as external.
  if (GV->hasDLLImportStorageClass())
    return false;

  const llvm::Triple &TT = CGM.getTriple();
  const auto &CGOpts = CGM.getCodeGenOpts();
  if (TT.isOSCygMing()) {
    // In MinGW, variables without DLLImport can still be automatically
    // imported from a DLL by the linker; don't mark variables that
    // potentially could come from another DLL as DSO local.

    // With EmulatedTLS, TLS variables can be autoimported from other DLLs
    // (and this actually happens in the public interface of libstdc++), so
    // such variables can't be marked as DSO local. (Native TLS variables
    // can't be dllimported at all, though.)
    if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(Val: GV) &&
        (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS) &&
        CGOpts.AutoImport)
      return false;
  }

  // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
  // remain unresolved in the link, they can be resolved to zero, which is
  // outside the current DSO.
  if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
    return false;

  // Every other GV is local on COFF.
  // Make an exception for windows OS in the triple: Some firmware builds use
  // *-win32-macho triples. This (accidentally?) produced windows relocations
  // without GOT tables in older clang versions; Keep this behaviour.
  // FIXME: even thread local variables?
  if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
    return true;

  // Only handle COFF and ELF for now.
  if (!TT.isOSBinFormatELF())
    return false;

  // If this is not an executable, don't assume anything is local.
  llvm::Reloc::Model RM = CGOpts.RelocationModel;
  const auto &LOpts = CGM.getLangOpts();
  if (RM != llvm::Reloc::Static && !LOpts.PIE) {
    // On ELF, if -fno-semantic-interposition is specified and the target
    // supports local aliases, there will be neither CC1
    // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
    // dso_local on the function if using a local alias is preferable (can avoid
    // PLT indirection).
    if (!(isa<llvm::Function>(Val: GV) && GV->canBenefitFromLocalAlias()))
      return false;
    return !(CGM.getLangOpts().SemanticInterposition ||
             CGM.getLangOpts().HalfNoSemanticInterposition);
  }

  // A definition cannot be preempted from an executable.
  if (!GV->isDeclarationForLinker())
    return true;

  // Most PIC code sequences that assume that a symbol is local cannot produce a
  // 0 if it turns out the symbol is undefined. While this is ABI and relocation
  // depended, it seems worth it to handle it here.
  if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
    return false;

  // PowerPC64 prefers TOC indirection to avoid copy relocations.
  if (TT.isPPC64())
    return false;

  if (CGOpts.DirectAccessExternalData) {
    // If -fdirect-access-external-data (default for -fno-pic), set dso_local
    // for non-thread-local variables. If the symbol is not defined in the
    // executable, a copy relocation will be needed at link time. dso_local is
    // excluded for thread-local variables because they generally don't support
    // copy relocations.
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Val: GV))
      if (!Var->isThreadLocal())
        return true;

    // -fno-pic sets dso_local on a function declaration to allow direct
    // accesses when taking its address (similar to a data symbol). If the
    // function is not defined in the executable, a canonical PLT entry will be
    // needed at link time. -fno-direct-access-external-data can avoid the
    // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as
    // it could just cause trouble without providing perceptible benefits.
    if (isa<llvm::Function>(Val: GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
      return true;
  }

  // If we can use copy relocations we can assume it is local.

  // Otherwise don't assume it is local.
  return false;
}
2018
/// Apply the dso_local determination made by shouldAssumeDSOLocal to GV.
void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
  GV->setDSOLocal(shouldAssumeDSOLocal(CGM: *this, GV));
}
2022
2023void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
2024 GlobalDecl GD) const {
2025 const auto *D = dyn_cast<NamedDecl>(Val: GD.getDecl());
2026 // C++ destructors have a few C++ ABI specific special cases.
2027 if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(Val: D)) {
2028 getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, DT: GD.getDtorType());
2029 return;
2030 }
2031 setDLLImportDLLExport(GV, D);
2032}
2033
2034void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
2035 const NamedDecl *D) const {
2036 if (D && D->isExternallyVisible()) {
2037 if (D->hasAttr<DLLImportAttr>())
2038 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
2039 else if ((D->hasAttr<DLLExportAttr>() ||
2040 shouldMapVisibilityToDLLExport(D)) &&
2041 !GV->isDeclarationForLinker())
2042 GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
2043 }
2044}
2045
/// Set DLL storage, visibility, dso_local, and partition for GV based on
/// the global declaration GD.
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
                                    GlobalDecl GD) const {
  setDLLImportDLLExport(GV, GD);
  setGVPropertiesAux(GV, D: dyn_cast<NamedDecl>(Val: GD.getDecl()));
}
2051
/// Overload taking a NamedDecl directly (no destructor special-casing via
/// GlobalDecl).
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
                                    const NamedDecl *D) const {
  setDLLImportDLLExport(GV, D);
  setGVPropertiesAux(GV, D);
}
2057
/// Shared tail of setGVProperties: visibility, dso_local, and the ELF
/// partition named by -fsymbol-partition.
void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
                                       const NamedDecl *D) const {
  setGlobalVisibility(GV, D);
  setDSOLocal(GV);
  GV->setPartition(CodeGenOpts.SymbolPartition);
}
2064
/// Map a tls_model attribute spelling to the corresponding LLVM TLS mode.
/// NOTE(review): there is deliberately no .Default case — presumably the
/// attribute's string is validated before reaching here; confirm in Sema.
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
  return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
      .Case(S: "global-dynamic", Value: llvm::GlobalVariable::GeneralDynamicTLSModel)
      .Case(S: "local-dynamic", Value: llvm::GlobalVariable::LocalDynamicTLSModel)
      .Case(S: "initial-exec", Value: llvm::GlobalVariable::InitialExecTLSModel)
      .Case(S: "local-exec", Value: llvm::GlobalVariable::LocalExecTLSModel);
}
2072
/// Translate the -ftls-model command-line default into the LLVM TLS mode
/// enumeration. The switch is exhaustive over CodeGenOptions' models.
llvm::GlobalVariable::ThreadLocalMode
CodeGenModule::GetDefaultLLVMTLSModel() const {
  switch (CodeGenOpts.getDefaultTLSModel()) {
  case CodeGenOptions::GeneralDynamicTLSModel:
    return llvm::GlobalVariable::GeneralDynamicTLSModel;
  case CodeGenOptions::LocalDynamicTLSModel:
    return llvm::GlobalVariable::LocalDynamicTLSModel;
  case CodeGenOptions::InitialExecTLSModel:
    return llvm::GlobalVariable::InitialExecTLSModel;
  case CodeGenOptions::LocalExecTLSModel:
    return llvm::GlobalVariable::LocalExecTLSModel;
  }
  llvm_unreachable("Invalid TLS model!");
}
2087
2088void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
2089 assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
2090
2091 llvm::GlobalValue::ThreadLocalMode TLM;
2092 TLM = GetDefaultLLVMTLSModel();
2093
2094 // Override the TLS model if it is explicitly specified.
2095 if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
2096 TLM = GetLLVMTLSModel(S: Attr->getModel());
2097 }
2098
2099 GV->setThreadLocalMode(TLM);
2100}
2101
2102static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
2103 StringRef Name) {
2104 const TargetInfo &Target = CGM.getTarget();
2105 return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
2106}
2107
2108static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
2109 const CPUSpecificAttr *Attr,
2110 unsigned CPUIndex,
2111 raw_ostream &Out) {
2112 // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
2113 // supported.
2114 if (Attr)
2115 Out << getCPUSpecificMangling(CGM, Name: Attr->getCPUName(Index: CPUIndex)->getName());
2116 else if (CGM.getTarget().supportsIFunc())
2117 Out << ".resolver";
2118}
2119
2120// Returns true if GD is a function decl with internal linkage and
2121// needs a unique suffix after the mangled name.
2122static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
2123 CodeGenModule &CGM) {
2124 const Decl *D = GD.getDecl();
2125 return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(Val: D) &&
2126 (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
2127}
2128
/// Compute the mangled name for ND (viewed through GD): the base mangling
/// (or special unmangled spellings for regcall/CUDA-stub/OpenCL-stub
/// functions), then the unique-internal-linkage module hash, any
/// multi-version suffix, and finally a HIP externalization postfix.
static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
                                      const NamedDecl *ND,
                                      bool OmitMultiVersionMangling = false) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  MangleContext &MC = CGM.getCXXABI().getMangleContext();
  if (!CGM.getModuleNameHash().empty())
    MC.needsUniqueInternalLinkageNames();
  bool ShouldMangle = MC.shouldMangleDeclName(D: ND);
  if (ShouldMangle)
    MC.mangleName(GD: GD.getWithDecl(D: ND), Out);
  else {
    // Not mangled: emit the plain identifier, with a handful of prefix
    // spellings for special calling conventions / kernel stubs.
    IdentifierInfo *II = ND->getIdentifier();
    assert(II && "Attempt to mangle unnamed decl.");
    const auto *FD = dyn_cast<FunctionDecl>(Val: ND);

    if (FD &&
        FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
      if (CGM.getLangOpts().RegCall4)
        Out << "__regcall4__" << II->getName();
      else
        Out << "__regcall3__" << II->getName();
    } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
               GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      Out << "__device_stub__" << II->getName();
    } else if (FD &&
               DeviceKernelAttr::isOpenCLSpelling(
                   A: FD->getAttr<DeviceKernelAttr>()) &&
               GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      Out << "__clang_ocl_kern_imp_" << II->getName();
    } else {
      Out << II->getName();
    }
  }

  // Check if the module name hash should be appended for internal linkage
  // symbols. This should come before multi-version target suffixes are
  // appended. This is to keep the name and module hash suffix of the
  // internal linkage function together. The unique suffix should only be
  // added when name mangling is done to make sure that the final name can
  // be properly demangled. For example, for C functions without prototypes,
  // name mangling is not done and the unique suffix should not be appended
  // then.
  if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) {
    assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames &&
           "Hash computed when not explicitly requested");
    Out << CGM.getModuleNameHash();
  }

  // Append a suffix identifying which version of a multi-versioned function
  // this is, unless the caller asked for the bare name.
  if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND))
    if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
      switch (FD->getMultiVersionKind()) {
      case MultiVersionKind::CPUDispatch:
      case MultiVersionKind::CPUSpecific:
        AppendCPUSpecificCPUDispatchMangling(CGM,
                                             Attr: FD->getAttr<CPUSpecificAttr>(),
                                             CPUIndex: GD.getMultiVersionIndex(), Out);
        break;
      case MultiVersionKind::Target: {
        auto *Attr = FD->getAttr<TargetAttr>();
        assert(Attr && "Expected TargetAttr to be present "
                       "for attribute mangling");
        const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
        Info.appendAttributeMangling(Attr, Out);
        break;
      }
      case MultiVersionKind::TargetVersion: {
        auto *Attr = FD->getAttr<TargetVersionAttr>();
        assert(Attr && "Expected TargetVersionAttr to be present "
                       "for attribute mangling");
        const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
        Info.appendAttributeMangling(Attr, Out);
        break;
      }
      case MultiVersionKind::TargetClones: {
        auto *Attr = FD->getAttr<TargetClonesAttr>();
        assert(Attr && "Expected TargetClonesAttr to be present "
                       "for attribute mangling");
        unsigned Index = GD.getMultiVersionIndex();
        const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
        Info.appendAttributeMangling(Attr, Index, Out);
        break;
      }
      case MultiVersionKind::None:
        llvm_unreachable("None multiversion type isn't valid here");
      }
    }

  // Make unique name for device side static file-scope variable for HIP.
  if (CGM.getContext().shouldExternalize(D: ND) &&
      CGM.getLangOpts().GPURelocatableDeviceCode &&
      CGM.getLangOpts().CUDAIsDevice)
    CGM.printPostfixForExternalizedDecl(OS&: Out, D: ND);

  return std::string(Out.str());
}
2225
/// When FD becomes a multi-version function, the version that was emitted
/// earlier under the plain (non-multiversion) name must be renamed to its
/// suffixed form; update the Manglings/MangledDeclNames tables and the
/// already-emitted global, and fix up CurName for the decl being created.
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
                                            const FunctionDecl *FD,
                                            StringRef &CurName) {
  if (!FD->isMultiVersion())
    return;

  // Get the name of what this would be without the 'target' attribute. This
  // allows us to lookup the version that was emitted when this wasn't a
  // multiversion function.
  std::string NonTargetName =
      getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);
  GlobalDecl OtherGD;
  if (lookupRepresentativeDecl(MangledName: NonTargetName, Result&: OtherGD)) {
    assert(OtherGD.getCanonicalDecl()
               .getDecl()
               ->getAsFunction()
               ->isMultiVersion() &&
           "Other GD should now be a multiversioned function");
    // OtherFD is the version of this function that was mangled BEFORE
    // becoming a MultiVersion function. It potentially needs to be updated.
    const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
                                      .getDecl()
                                      ->getAsFunction()
                                      ->getMostRecentDecl();
    std::string OtherName = getMangledNameImpl(CGM&: *this, GD: OtherGD, ND: OtherFD);
    // This is so that if the initial version was already the 'default'
    // version, we don't try to update it.
    if (OtherName != NonTargetName) {
      // Remove instead of erase, since others may have stored the StringRef
      // to this.
      const auto ExistingRecord = Manglings.find(Key: NonTargetName);
      if (ExistingRecord != std::end(cont&: Manglings))
        Manglings.remove(KeyValue: &(*ExistingRecord));
      auto Result = Manglings.insert(KV: std::make_pair(x&: OtherName, y&: OtherGD));
      StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] =
          Result.first->first();
      // If the current decl is the one being created, make sure we update
      // the name handed back to the caller.
      if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl())
        CurName = OtherNameRef;
      // Rename the already-emitted IR global to the suffixed name.
      if (llvm::GlobalValue *Entry = GetGlobalValue(Ref: NonTargetName))
        Entry->setName(OtherName);
    }
  }
}
2270
/// Return (and usually cache) the mangled name for GD. Constructor variants
/// are canonicalized for ABIs without them; CUDA/HIP externalizable device
/// variables bypass the cache because their name depends on the referencing
/// context.
StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
  GlobalDecl CanonicalGD = GD.getCanonicalDecl();

  // Some ABIs don't have constructor variants. Make sure that base and
  // complete constructors get mangled the same.
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: CanonicalGD.getDecl())) {
    if (!getTarget().getCXXABI().hasConstructorVariants()) {
      CXXCtorType OrigCtorType = GD.getCtorType();
      assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
      if (OrigCtorType == Ctor_Base)
        CanonicalGD = GlobalDecl(CD, Ctor_Complete);
    }
  }

  // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
  // static device variable depends on whether the variable is referenced by
  // a host or device host function. Therefore the mangled name cannot be
  // cached.
  if (!LangOpts.CUDAIsDevice || !getContext().mayExternalize(D: GD.getDecl())) {
    auto FoundName = MangledDeclNames.find(Key: CanonicalGD);
    if (FoundName != MangledDeclNames.end())
      return FoundName->second;
  }

  // Keep the first result in the case of a mangling collision.
  const auto *ND = cast<NamedDecl>(Val: GD.getDecl());
  std::string MangledName = getMangledNameImpl(CGM&: *this, GD, ND);

  // Ensure either we have different ABIs between host and device compilations,
  // says host compilation following MSVC ABI but device compilation follows
  // Itanium C++ ABI or, if they follow the same ABI, kernel names after
  // mangling should be the same after name stubbing. The later checking is
  // very important as the device kernel name being mangled in host-compilation
  // is used to resolve the device binaries to be executed. Inconsistent naming
  // result in undefined behavior. Even though we cannot check that naming
  // directly between host- and device-compilations, the host- and
  // device-mangling in host compilation could help catching certain ones.
  assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
         getContext().shouldExternalize(ND) || getLangOpts().CUDAIsDevice ||
         (getContext().getAuxTargetInfo() &&
          (getContext().getAuxTargetInfo()->getCXXABI() !=
           getContext().getTargetInfo().getCXXABI())) ||
         getCUDARuntime().getDeviceSideName(ND) ==
             getMangledNameImpl(
                 *this,
                 GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
                 ND));

  // This invariant should hold true in the future.
  // Prior work:
  // https://discourse.llvm.org/t/rfc-clang-diagnostic-for-demangling-failures/82835/8
  // https://github.com/llvm/llvm-project/issues/111345
  // assert(!((StringRef(MangledName).starts_with("_Z") ||
  //           StringRef(MangledName).starts_with("?")) &&
  //          !GD.getDecl()->hasAttr<AsmLabelAttr>() &&
  //          llvm::demangle(MangledName) == MangledName) &&
  //        "LLVM demangler must demangle clang-generated names");

  auto Result = Manglings.insert(KV: std::make_pair(x&: MangledName, y&: GD));
  return MangledDeclNames[CanonicalGD] = Result.first->first();
}
2332
2333StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
2334 const BlockDecl *BD) {
2335 MangleContext &MangleCtx = getCXXABI().getMangleContext();
2336 const Decl *D = GD.getDecl();
2337
2338 SmallString<256> Buffer;
2339 llvm::raw_svector_ostream Out(Buffer);
2340 if (!D)
2341 MangleCtx.mangleGlobalBlock(BD,
2342 ID: dyn_cast_or_null<VarDecl>(Val: initializedGlobalDecl.getDecl()), Out);
2343 else if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: D))
2344 MangleCtx.mangleCtorBlock(CD, CT: GD.getCtorType(), BD, Out);
2345 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: D))
2346 MangleCtx.mangleDtorBlock(CD: DD, DT: GD.getDtorType(), BD, Out);
2347 else
2348 MangleCtx.mangleBlock(DC: cast<DeclContext>(Val: D), BD, Out);
2349
2350 auto Result = Manglings.insert(KV: std::make_pair(x: Out.str(), y&: BD));
2351 return Result.first->first();
2352}
2353
2354const GlobalDecl CodeGenModule::getMangledNameDecl(StringRef Name) {
2355 auto it = MangledDeclNames.begin();
2356 while (it != MangledDeclNames.end()) {
2357 if (it->second == Name)
2358 return it->first;
2359 it++;
2360 }
2361 return GlobalDecl();
2362}
2363
2364llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
2365 return getModule().getNamedValue(Name);
2366}
2367
2368/// AddGlobalCtor - Add a function to the list that will be called before
2369/// main() runs.
2370void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
2371 unsigned LexOrder,
2372 llvm::Constant *AssociatedData) {
2373 // FIXME: Type coercion of void()* types.
2374 GlobalCtors.push_back(x: Structor(Priority, LexOrder, Ctor, AssociatedData));
2375}
2376
2377/// AddGlobalDtor - Add a function to the list that will be called
2378/// when the module is unloaded.
2379void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
2380 bool IsDtorAttrFunc) {
2381 if (CodeGenOpts.RegisterGlobalDtorsWithAtExit &&
2382 (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) {
2383 DtorsUsingAtExit[Priority].push_back(NewVal: Dtor);
2384 return;
2385 }
2386
2387 // FIXME: Type coercion of void()* types.
2388 GlobalDtors.push_back(x: Structor(Priority, ~0U, Dtor, nullptr));
2389}
2390
/// Emit the accumulated list of structors as one appending-linkage global
/// (e.g. llvm.global_ctors / llvm.global_dtors) named \p GlobalName, each
/// entry of the form { i32 priority, ptr fn, ptr associated-data-or-null }.
/// Consumes (clears) \p Fns.
void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
  if (Fns.empty()) return;

  // Signing scheme for init/fini function pointers; only set when pointer
  // authentication of these pointers is enabled.
  const PointerAuthSchema &InitFiniAuthSchema =
      getCodeGenOpts().PointerAuth.InitFiniPointers;

  // Ctor function type is ptr.
  llvm::PointerType *PtrTy = llvm::PointerType::get(
      C&: getLLVMContext(), AddressSpace: TheModule.getDataLayout().getProgramAddressSpace());

  // Get the type of a ctor entry, { i32, ptr, ptr }.
  llvm::StructType *CtorStructTy = llvm::StructType::get(elt1: Int32Ty, elts: PtrTy, elts: PtrTy);

  // Construct the constructor and destructor arrays.
  ConstantInitBuilder Builder(*this);
  auto Ctors = Builder.beginArray(eltTy: CtorStructTy);
  for (const auto &I : Fns) {
    auto Ctor = Ctors.beginStruct(ty: CtorStructTy);
    Ctor.addInt(intTy: Int32Ty, value: I.Priority);
    if (InitFiniAuthSchema) {
      // With address discrimination the pointer is signed against a fixed
      // marker address rather than the entry's actual storage location.
      llvm::Constant *StorageAddress =
          (InitFiniAuthSchema.isAddressDiscriminated()
               ? llvm::ConstantExpr::getIntToPtr(
                     C: llvm::ConstantInt::get(
                         Ty: IntPtrTy,
                         V: llvm::ConstantPtrAuth::AddrDiscriminator_CtorsDtors),
                     Ty: PtrTy)
               : nullptr);
      llvm::Constant *SignedCtorPtr = getConstantSignedPointer(
          Pointer: I.Initializer, Key: InitFiniAuthSchema.getKey(), StorageAddress,
          OtherDiscriminator: llvm::ConstantInt::get(
              Ty: SizeTy, V: InitFiniAuthSchema.getConstantDiscrimination()));
      Ctor.add(value: SignedCtorPtr);
    } else {
      Ctor.add(value: I.Initializer);
    }
    // Third field: associated data (for dtor association) or null.
    if (I.AssociatedData)
      Ctor.add(value: I.AssociatedData);
    else
      Ctor.addNullPointer(ptrTy: PtrTy);
    Ctor.finishAndAddTo(parent&: Ctors);
  }

  auto List = Ctors.finishAndCreateGlobal(args&: GlobalName, args: getPointerAlign(),
                                          /*constant*/ args: false,
                                          args: llvm::GlobalValue::AppendingLinkage);

  // The LTO linker doesn't seem to like it when we set an alignment
  // on appending variables. Take it off as a workaround.
  List->setAlignment(std::nullopt);

  Fns.clear();
}
2444
2445llvm::GlobalValue::LinkageTypes
2446CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
2447 const auto *D = cast<FunctionDecl>(Val: GD.getDecl());
2448
2449 GVALinkage Linkage = getContext().GetGVALinkageForFunction(FD: D);
2450
2451 if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(Val: D))
2452 return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, DT: GD.getDtorType());
2453
2454 return getLLVMLinkageForDeclarator(D, Linkage);
2455}
2456
2457llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
2458 llvm::MDString *MDS = dyn_cast<llvm::MDString>(Val: MD);
2459 if (!MDS) return nullptr;
2460
2461 return llvm::ConstantInt::get(Ty: Int64Ty, V: llvm::MD5Hash(Str: MDS->getString()));
2462}
2463
2464static QualType GeneralizeTransparentUnion(QualType Ty) {
2465 const RecordType *UT = Ty->getAsUnionType();
2466 if (!UT)
2467 return Ty;
2468 const RecordDecl *UD = UT->getDecl()->getDefinitionOrSelf();
2469 if (!UD->hasAttr<TransparentUnionAttr>())
2470 return Ty;
2471 if (!UD->fields().empty())
2472 return UD->fields().begin()->getType();
2473 return Ty;
2474}
2475
2476// If `GeneralizePointers` is true, generalizes types to a void pointer with the
2477// qualifiers of the originally pointed-to type, e.g. 'const char *' and 'char *
2478// const *' generalize to 'const void *' while 'char *' and 'const char **'
2479// generalize to 'void *'.
2480static QualType GeneralizeType(ASTContext &Ctx, QualType Ty,
2481 bool GeneralizePointers) {
2482 Ty = GeneralizeTransparentUnion(Ty);
2483
2484 if (!GeneralizePointers || !Ty->isPointerType())
2485 return Ty;
2486
2487 return Ctx.getPointerType(
2488 T: QualType(Ctx.VoidTy)
2489 .withCVRQualifiers(CVR: Ty->getPointeeType().getCVRQualifiers()));
2490}
2491
2492// Apply type generalization to a FunctionType's return and argument types
2493static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty,
2494 bool GeneralizePointers) {
2495 if (auto *FnType = Ty->getAs<FunctionProtoType>()) {
2496 SmallVector<QualType, 8> GeneralizedParams;
2497 for (auto &Param : FnType->param_types())
2498 GeneralizedParams.push_back(
2499 Elt: GeneralizeType(Ctx, Ty: Param, GeneralizePointers));
2500
2501 return Ctx.getFunctionType(
2502 ResultTy: GeneralizeType(Ctx, Ty: FnType->getReturnType(), GeneralizePointers),
2503 Args: GeneralizedParams, EPI: FnType->getExtProtoInfo());
2504 }
2505
2506 if (auto *FnType = Ty->getAs<FunctionNoProtoType>())
2507 return Ctx.getFunctionNoProtoType(
2508 ResultTy: GeneralizeType(Ctx, Ty: FnType->getReturnType(), GeneralizePointers));
2509
2510 llvm_unreachable("Encountered unknown FunctionType");
2511}
2512
2513llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T, StringRef Salt) {
2514 T = GeneralizeFunctionType(
2515 Ctx&: getContext(), Ty: T, GeneralizePointers: getCodeGenOpts().SanitizeCfiICallGeneralizePointers);
2516 if (auto *FnType = T->getAs<FunctionProtoType>())
2517 T = getContext().getFunctionType(
2518 ResultTy: FnType->getReturnType(), Args: FnType->getParamTypes(),
2519 EPI: FnType->getExtProtoInfo().withExceptionSpec(ESI: EST_None));
2520
2521 std::string OutName;
2522 llvm::raw_string_ostream Out(OutName);
2523 getCXXABI().getMangleContext().mangleCanonicalTypeName(
2524 T, Out, NormalizeIntegers: getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);
2525
2526 if (!Salt.empty())
2527 Out << "." << Salt;
2528
2529 if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
2530 Out << ".normalized";
2531 if (getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
2532 Out << ".generalized";
2533
2534 return llvm::ConstantInt::get(
2535 Ty: Int32Ty, V: llvm::getKCFITypeID(MangledTypeName: OutName, Algorithm: getCodeGenOpts().SanitizeKcfiHash));
2536}
2537
2538void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
2539 const CGFunctionInfo &Info,
2540 llvm::Function *F, bool IsThunk) {
2541 unsigned CallingConv;
2542 llvm::AttributeList PAL;
2543 ConstructAttributeList(Name: F->getName(), Info, CalleeInfo: GD, Attrs&: PAL, CallingConv,
2544 /*AttrOnCallSite=*/false, IsThunk);
2545 if (CallingConv == llvm::CallingConv::X86_VectorCall &&
2546 getTarget().getTriple().isWindowsArm64EC()) {
2547 SourceLocation Loc;
2548 if (const Decl *D = GD.getDecl())
2549 Loc = D->getLocation();
2550
2551 Error(loc: Loc, message: "__vectorcall calling convention is not currently supported");
2552 }
2553 F->setAttributes(PAL);
2554 F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2555}
2556
// Strip an OpenCL image access qualifier ("__read_only", "__write_only" or
// "__read_write") plus the space following it from a printed type name,
// e.g. "__read_only image2d_t" -> "image2d_t". At most one qualifier can
// appear on a type, so only the first match (checked in the order above,
// matching the original nested if/else chain) is removed. No-op when no
// qualifier is present.
static void removeImageAccessQualifier(std::string &TyName) {
  const std::string AccessQuals[] = {"__read_only", "__write_only",
                                     "__read_write"};
  for (const std::string &Qual : AccessQuals) {
    std::string::size_type Pos = TyName.find(Qual);
    if (Pos == std::string::npos)
      continue;
    // "+ 1" for the space after the access qualifier.
    TyName.erase(Pos, Qual.size() + 1);
    return;
  }
}
2576
// Returns the address space id that should be produced to the
// kernel_arg_addr_space metadata. This is always fixed to the ids
// as specified in the SPIR 2.0 specification in order to differentiate
// for example in clGetKernelArgInfo() implementation between the address
// spaces with targets without unique mapping to the OpenCL address spaces
// (basically all single AS CPUs).
//
// NOTE: these values are part of the metadata ABI consumed by OpenCL
// runtimes; do not renumber them.
static unsigned ArgInfoAddressSpace(LangAS AS) {
  switch (AS) {
  case LangAS::opencl_global:
    return 1;
  case LangAS::opencl_constant:
    return 2;
  case LangAS::opencl_local:
    return 3;
  case LangAS::opencl_generic:
    return 4; // Not in SPIR 2.0 specs.
  case LangAS::opencl_global_device:
    return 5;
  case LangAS::opencl_global_host:
    return 6;
  default:
    // Anything without a SPIR mapping (including target-specific address
    // spaces) is reported as private.
    return 0; // Assume private.
  }
}
2601
/// Attach per-argument kernel metadata (address space, access qualifier,
/// type names, type qualifiers, argument names) to \p Fn. FD/CGF are either
/// both provided (emitting for a real kernel) or both null (emitting empty
/// metadata lists).
void CodeGenModule::GenKernelArgMetadata(llvm::Function *Fn,
                                         const FunctionDecl *FD,
                                         CodeGenFunction *CGF) {
  assert(((FD && CGF) || (!FD && !CGF)) &&
         "Incorrect use - FD and CGF should either be both null or not!");
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key", N number of values which is
  // the same number of values as their are kernel arguments.

  const PrintingPolicy &Policy = Context.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  if (FD && CGF)
    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
      const ParmVarDecl *parm = FD->getParamDecl(i);
      // Get argument name.
      argNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: parm->getName()));

      // For non-OpenCL languages (e.g. HIP with -fhip-kernel-arg-name) only
      // the argument names are collected.
      if (!getLangOpts().OpenCL)
        continue;
      QualType ty = parm->getType();
      std::string typeQuals;

      // Get image and pipe access qualifier:
      if (ty->isImageType() || ty->isPipeType()) {
        const Decl *PDecl = parm;
        // The access qualifier may be attached to a typedef of the
        // image/pipe type rather than the parameter itself.
        if (const auto *TD = ty->getAs<TypedefType>())
          PDecl = TD->getDecl();
        const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
        if (A && A->isWriteOnly())
          accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "write_only"));
        else if (A && A->isReadWrite())
          accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "read_write"));
        else
          // read_only is the OpenCL default when no qualifier is spelled.
          accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "read_only"));
      } else
        accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "none"));

      // Render a type name the way the OpenCL runtime expects it, e.g.
      // "unsigned int" -> "uint" (only safe on canonical spellings).
      auto getTypeSpelling = [&](QualType Ty) {
        auto typeName = Ty.getUnqualifiedType().getAsString(Policy);

        if (Ty.isCanonical()) {
          StringRef typeNameRef = typeName;
          // Turn "unsigned type" to "utype"
          if (typeNameRef.consume_front(Prefix: "unsigned "))
            return std::string("u") + typeNameRef.str();
          if (typeNameRef.consume_front(Prefix: "signed "))
            return typeNameRef.str();
        }

        return typeName;
      };

      if (ty->isPointerType()) {
        QualType pointeeTy = ty->getPointeeType();

        // Get address qualifier.
        addressQuals.push_back(
            Elt: llvm::ConstantAsMetadata::get(C: CGF->Builder.getInt32(
                C: ArgInfoAddressSpace(AS: pointeeTy.getAddressSpace()))));

        // Get argument type name.
        std::string typeName = getTypeSpelling(pointeeTy) + "*";
        std::string baseTypeName =
            getTypeSpelling(pointeeTy.getCanonicalType()) + "*";
        argTypeNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeName));
        argBaseTypeNames.push_back(
            Elt: llvm::MDString::get(Context&: VMContext, Str: baseTypeName));

        // Get argument type qualifiers:
        if (ty.isRestrictQualified())
          typeQuals = "restrict";
        if (pointeeTy.isConstQualified() ||
            (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
          typeQuals += typeQuals.empty() ? "const" : " const";
        if (pointeeTy.isVolatileQualified())
          typeQuals += typeQuals.empty() ? "volatile" : " volatile";
      } else {
        uint32_t AddrSpc = 0;
        bool isPipe = ty->isPipeType();
        // Images and pipes are reported in the global address space.
        if (ty->isImageType() || isPipe)
          AddrSpc = ArgInfoAddressSpace(AS: LangAS::opencl_global);

        addressQuals.push_back(
            Elt: llvm::ConstantAsMetadata::get(C: CGF->Builder.getInt32(C: AddrSpc)));

        // Get argument type name.
        ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty;
        std::string typeName = getTypeSpelling(ty);
        std::string baseTypeName = getTypeSpelling(ty.getCanonicalType());

        // Remove access qualifiers on images
        // (as they are inseparable from type in clang implementation,
        // but OpenCL spec provides a special query to get access qualifier
        // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
        if (ty->isImageType()) {
          removeImageAccessQualifier(TyName&: typeName);
          removeImageAccessQualifier(TyName&: baseTypeName);
        }

        argTypeNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeName));
        argBaseTypeNames.push_back(
            Elt: llvm::MDString::get(Context&: VMContext, Str: baseTypeName));

        if (isPipe)
          typeQuals = "pipe";
      }
      argTypeQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeQuals));
    }

  // Attach the collected lists. Full OpenCL metadata is only emitted for
  // OpenCL; the name list is additionally emitted for HIP when requested.
  if (getLangOpts().OpenCL) {
    Fn->setMetadata(Kind: "kernel_arg_addr_space",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: addressQuals));
    Fn->setMetadata(Kind: "kernel_arg_access_qual",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: accessQuals));
    Fn->setMetadata(Kind: "kernel_arg_type",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: argTypeNames));
    Fn->setMetadata(Kind: "kernel_arg_base_type",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: argBaseTypeNames));
    Fn->setMetadata(Kind: "kernel_arg_type_qual",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: argTypeQuals));
  }
  if (getCodeGenOpts().EmitOpenCLArgMetadata ||
      getCodeGenOpts().HIPSaveKernelArgName)
    Fn->setMetadata(Kind: "kernel_arg_name",
                    Node: llvm::MDNode::get(Context&: VMContext, MDs: argNames));
}
2746
2747/// Determines whether the language options require us to model
2748/// unwind exceptions. We treat -fexceptions as mandating this
2749/// except under the fragile ObjC ABI with only ObjC exceptions
2750/// enabled. This means, for example, that C with -fexceptions
2751/// enables this.
2752static bool hasUnwindExceptions(const LangOptions &LangOpts) {
2753 // If exceptions are completely disabled, obviously this is false.
2754 if (!LangOpts.Exceptions) return false;
2755
2756 // If C++ exceptions are enabled, this is true.
2757 if (LangOpts.CXXExceptions) return true;
2758
2759 // If ObjC exceptions are enabled, this depends on the ABI.
2760 if (LangOpts.ObjCExceptions) {
2761 return LangOpts.ObjCRuntime.hasUnwindExceptions();
2762 }
2763
2764 return true;
2765}
2766
2767static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
2768 const CXXMethodDecl *MD) {
2769 // Check that the type metadata can ever actually be used by a call.
2770 if (!CGM.getCodeGenOpts().LTOUnit ||
2771 !CGM.HasHiddenLTOVisibility(RD: MD->getParent()))
2772 return false;
2773
2774 // Only functions whose address can be taken with a member function pointer
2775 // need this sort of type metadata.
2776 return MD->isImplicitObjectMemberFunction() && !MD->isVirtual() &&
2777 !isa<CXXConstructorDecl, CXXDestructorDecl>(Val: MD);
2778}
2779
2780SmallVector<const CXXRecordDecl *, 0>
2781CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
2782 llvm::SetVector<const CXXRecordDecl *> MostBases;
2783
2784 std::function<void (const CXXRecordDecl *)> CollectMostBases;
2785 CollectMostBases = [&](const CXXRecordDecl *RD) {
2786 if (RD->getNumBases() == 0)
2787 MostBases.insert(X: RD);
2788 for (const CXXBaseSpecifier &B : RD->bases())
2789 CollectMostBases(B.getType()->getAsCXXRecordDecl());
2790 };
2791 CollectMostBases(RD);
2792 return MostBases.takeVector();
2793}
2794
/// Add attributes that only apply to function *definitions* (inlining
/// policy, optnone, unwind tables, stack protection, alignment, CFI type
/// metadata). \p D may be null for compiler-synthesized functions. The
/// inlining-related branches below form a precedence chain; their order is
/// significant.
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
                                                           llvm::Function *F) {
  llvm::AttrBuilder B(F->getContext());

  if ((!D || !D->hasAttr<NoUwtableAttr>()) && CodeGenOpts.UnwindTables)
    B.addUWTableAttr(Kind: llvm::UWTableKind(CodeGenOpts.UnwindTables));

  if (CodeGenOpts.StackClashProtector)
    B.addAttribute(A: "probe-stack", V: "inline-asm");

  // 4096 is the backend default, so only emit the attribute when it differs.
  if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096)
    B.addAttribute(A: "stack-probe-size",
                   V: std::to_string(val: CodeGenOpts.StackProbeSize));

  if (!hasUnwindExceptions(LangOpts))
    B.addAttribute(Val: llvm::Attribute::NoUnwind);

  if (std::optional<llvm::Attribute::AttrKind> Attr =
          StackProtectorAttribute(D)) {
    B.addAttribute(Val: *Attr);
  }

  if (!D) {
    // Non-entry HLSL functions must always be inlined.
    if (getLangOpts().HLSL && !F->hasFnAttribute(Kind: llvm::Attribute::NoInline))
      B.addAttribute(Val: llvm::Attribute::AlwaysInline);
    // If we don't have a declaration to control inlining, the function isn't
    // explicitly marked as alwaysinline for semantic reasons, and inlining is
    // disabled, mark the function as noinline.
    else if (!F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline) &&
             CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
      B.addAttribute(Val: llvm::Attribute::NoInline);

    F->addFnAttrs(Attrs: B);
    return;
  }

  // Handle SME attributes that apply to function definitions,
  // rather than to function prototypes.
  if (D->hasAttr<ArmLocallyStreamingAttr>())
    B.addAttribute(A: "aarch64_pstate_sm_body");

  if (auto *Attr = D->getAttr<ArmNewAttr>()) {
    if (Attr->isNewZA())
      B.addAttribute(A: "aarch64_new_za");
    if (Attr->isNewZT0())
      B.addAttribute(A: "aarch64_new_zt0");
  }

  // Track whether we need to add the optnone LLVM attribute,
  // starting with the default for this optimization level.
  bool ShouldAddOptNone =
      !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0;
  // We can't add optnone in the following cases, it won't pass the verifier.
  ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>();
  ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>();

  // Non-entry HLSL functions must always be inlined.
  if (getLangOpts().HLSL && !F->hasFnAttribute(Kind: llvm::Attribute::NoInline) &&
      !D->hasAttr<NoInlineAttr>()) {
    B.addAttribute(Val: llvm::Attribute::AlwaysInline);
  } else if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) &&
             !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) {
    // Add optnone, but do so only if the function isn't always_inline.
    B.addAttribute(Val: llvm::Attribute::OptimizeNone);

    // OptimizeNone implies noinline; we should not be inlining such functions.
    B.addAttribute(Val: llvm::Attribute::NoInline);

    // We still need to handle naked functions even though optnone subsumes
    // much of their semantics.
    if (D->hasAttr<NakedAttr>())
      B.addAttribute(Val: llvm::Attribute::Naked);

    // OptimizeNone wins over OptimizeForSize and MinSize.
    F->removeFnAttr(Kind: llvm::Attribute::OptimizeForSize);
    F->removeFnAttr(Kind: llvm::Attribute::MinSize);
  } else if (D->hasAttr<NakedAttr>()) {
    // Naked implies noinline: we should not be inlining such functions.
    B.addAttribute(Val: llvm::Attribute::Naked);
    B.addAttribute(Val: llvm::Attribute::NoInline);
  } else if (D->hasAttr<NoDuplicateAttr>()) {
    B.addAttribute(Val: llvm::Attribute::NoDuplicate);
  } else if (D->hasAttr<NoInlineAttr>() &&
             !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) {
    // Add noinline if the function isn't always_inline.
    B.addAttribute(Val: llvm::Attribute::NoInline);
  } else if (D->hasAttr<AlwaysInlineAttr>() &&
             !F->hasFnAttribute(Kind: llvm::Attribute::NoInline)) {
    // (noinline wins over always_inline, and we can't specify both in IR)
    B.addAttribute(Val: llvm::Attribute::AlwaysInline);
  } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
    // If we're not inlining, then force everything that isn't always_inline to
    // carry an explicit noinline attribute.
    if (!F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline))
      B.addAttribute(Val: llvm::Attribute::NoInline);
  } else {
    // Otherwise, propagate the inline hint attribute and potentially use its
    // absence to mark things as noinline.
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
      // Search function and template pattern redeclarations for inline.
      auto CheckForInline = [](const FunctionDecl *FD) {
        auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
          return Redecl->isInlineSpecified();
        };
        if (any_of(Range: FD->redecls(), P: CheckRedeclForInline))
          return true;
        const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
        if (!Pattern)
          return false;
        return any_of(Range: Pattern->redecls(), P: CheckRedeclForInline);
      };
      if (CheckForInline(FD)) {
        B.addAttribute(Val: llvm::Attribute::InlineHint);
      } else if (CodeGenOpts.getInlining() ==
                     CodeGenOptions::OnlyHintInlining &&
                 !FD->isInlined() &&
                 !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) {
        B.addAttribute(Val: llvm::Attribute::NoInline);
      }
    }
  }

  // Add other optimization related attributes if we are optimizing this
  // function.
  if (!D->hasAttr<OptimizeNoneAttr>()) {
    if (D->hasAttr<ColdAttr>()) {
      if (!ShouldAddOptNone)
        B.addAttribute(Val: llvm::Attribute::OptimizeForSize);
      B.addAttribute(Val: llvm::Attribute::Cold);
    }
    if (D->hasAttr<HotAttr>())
      B.addAttribute(Val: llvm::Attribute::Hot);
    if (D->hasAttr<MinSizeAttr>())
      B.addAttribute(Val: llvm::Attribute::MinSize);
  }

  // Add `nooutline` if Outlining is disabled with a command-line flag or a
  // function attribute.
  if (CodeGenOpts.DisableOutlining || D->hasAttr<NoOutlineAttr>())
    B.addAttribute(Val: llvm::Attribute::NoOutline);

  F->addFnAttrs(Attrs: B);

  // An explicit alignment attribute on the decl wins.
  unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
  if (alignment)
    F->setAlignment(llvm::Align(alignment));

  // -falign-functions applies only when the decl carries no aligned attr.
  if (!D->hasAttr<AlignedAttr>())
    if (LangOpts.FunctionAlignment)
      F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment));

  // Some C++ ABIs require 2-byte alignment for member functions, in order to
  // reserve a bit for differentiating between virtual and non-virtual member
  // functions. If the current target's C++ ABI requires this and this is a
  // member function, set its alignment accordingly.
  if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
    if (isa<CXXMethodDecl>(Val: D) && F->getPointerAlignment(DL: getDataLayout()) < 2)
      F->setAlignment(std::max(a: llvm::Align(2), b: F->getAlign().valueOrOne()));
  }

  // In the cross-dso CFI mode with canonical jump tables, we want !type
  // attributes on definitions only.
  if (CodeGenOpts.SanitizeCfiCrossDso &&
      CodeGenOpts.SanitizeCfiCanonicalJumpTables) {
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
      // Skip available_externally functions. They won't be codegen'ed in the
      // current module anyway.
      if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally)
        createFunctionTypeMetadataForIcall(FD, F);
    }
  }

  if (CodeGenOpts.CallGraphSection) {
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D))
      createIndirectFunctionTypeMD(FD, F);
  }

  // Emit type metadata on member functions for member function pointer checks.
  // These are only ever necessary on definitions; we're guaranteed that the
  // definition will be present in the LTO unit as a result of LTO visibility.
  auto *MD = dyn_cast<CXXMethodDecl>(Val: D);
  if (MD && requiresMemberFunctionPointerTypeMetadata(CGM&: *this, MD)) {
    for (const CXXRecordDecl *Base : getMostBaseClasses(RD: MD->getParent())) {
      llvm::Metadata *Id =
          CreateMetadataIdentifierForType(T: Context.getMemberPointerType(
              T: MD->getType(), /*Qualifier=*/std::nullopt, Cls: Base));
      F->addTypeMetadata(Offset: 0, TypeID: Id);
    }
  }
}
2986
2987void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
2988 const Decl *D = GD.getDecl();
2989 if (isa_and_nonnull<NamedDecl>(Val: D))
2990 setGVProperties(GV, GD);
2991 else
2992 GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
2993
2994 if (D && D->hasAttr<UsedAttr>())
2995 addUsedOrCompilerUsedGlobal(GV);
2996
2997 if (const auto *VD = dyn_cast_if_present<VarDecl>(Val: D);
2998 VD &&
2999 ((CodeGenOpts.KeepPersistentStorageVariables &&
3000 (VD->getStorageDuration() == SD_Static ||
3001 VD->getStorageDuration() == SD_Thread)) ||
3002 (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static &&
3003 VD->getType().isConstQualified())))
3004 addUsedOrCompilerUsedGlobal(GV);
3005}
3006
3007/// Get the feature delta from the default feature map for the given target CPU.
3008static std::vector<std::string>
3009getFeatureDeltaFromDefault(const CodeGenModule &CGM, StringRef TargetCPU,
3010 llvm::StringMap<bool> &FeatureMap) {
3011 llvm::StringMap<bool> DefaultFeatureMap;
3012 CGM.getTarget().initFeatureMap(
3013 Features&: DefaultFeatureMap, Diags&: CGM.getContext().getDiagnostics(), CPU: TargetCPU, FeatureVec: {});
3014
3015 std::vector<std::string> Delta;
3016 for (const auto &[K, V] : FeatureMap) {
3017 auto DefaultIt = DefaultFeatureMap.find(Key: K);
3018 if (DefaultIt == DefaultFeatureMap.end() || DefaultIt->getValue() != V)
3019 Delta.push_back(x: (V ? "+" : "-") + K.str());
3020 }
3021
3022 return Delta;
3023}
3024
/// Compute and add "target-cpu", "tune-cpu", "target-features" and (AArch64)
/// "fmv-features" attributes for the function named by \p GD, honoring
/// target/target_version/cpu_specific/target_clones attributes when present.
/// Returns true if any attribute was added.
bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
                                                llvm::AttrBuilder &Attrs,
                                                bool SetTargetFeatures) {
  // Add target-cpu and target-features attributes to functions. If
  // we have a decl for the function and it has a target attribute then
  // parse that and add it to the feature set.
  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
  StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU;
  std::vector<std::string> Features;
  const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: GD.getDecl());
  // Use the most recent redeclaration so later-added attributes are seen.
  FD = FD ? FD->getMostRecentDecl() : FD;
  const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
  const auto *TV = FD ? FD->getAttr<TargetVersionAttr>() : nullptr;
  assert((!TD || !TV) && "both target_version and target specified");
  const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
  const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr;
  bool AddedAttr = false;
  if (TD || TV || SD || TC) {
    llvm::StringMap<bool> FeatureMap;
    getContext().getFunctionFeatureMap(FeatureMap, GD);

    // Now add the target-cpu and target-features to the function.
    // While we populated the feature map above, we still need to
    // get and parse the target attribute so we can get the cpu for
    // the function.
    if (TD) {
      ParsedTargetAttr ParsedAttr =
          Target.parseTargetAttr(Str: TD->getFeaturesStr());
      if (!ParsedAttr.CPU.empty() &&
          getTarget().isValidCPUName(Name: ParsedAttr.CPU)) {
        TargetCPU = ParsedAttr.CPU;
        TuneCPU = ""; // Clear the tune CPU.
      }
      if (!ParsedAttr.Tune.empty() &&
          getTarget().isValidCPUName(Name: ParsedAttr.Tune))
        TuneCPU = ParsedAttr.Tune;
    }

    if (SD) {
      // Apply the given CPU name as the 'tune-cpu' so that the optimizer can
      // favor this processor.
      TuneCPU = SD->getCPUName(Index: GD.getMultiVersionIndex())->getName();
    }

    // For AMDGPU, only emit delta features (features that differ from the
    // target CPU's defaults). Other targets might want to follow a similar
    // pattern.
    if (getTarget().getTriple().isAMDGPU()) {
      Features = getFeatureDeltaFromDefault(CGM: *this, TargetCPU, FeatureMap);
    } else {
      // Produce the canonical string for this set of features.
      for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
        Features.push_back(x: (Entry.getValue() ? "+" : "-") +
                           Entry.getKey().str());
    }
  } else {
    // Otherwise just add the existing target cpu and target features to the
    // function.
    if (SetTargetFeatures && getTarget().getTriple().isAMDGPU()) {
      llvm::StringMap<bool> FeatureMap;
      if (FD) {
        getContext().getFunctionFeatureMap(FeatureMap, GD);
      } else {
        getTarget().initFeatureMap(Features&: FeatureMap, Diags&: getContext().getDiagnostics(),
                                   CPU: TargetCPU,
                                   FeatureVec: getTarget().getTargetOpts().Features);
      }
      Features = getFeatureDeltaFromDefault(CGM: *this, TargetCPU, FeatureMap);
    } else {
      Features = getTarget().getTargetOpts().Features;
    }
  }

  if (!TargetCPU.empty()) {
    Attrs.addAttribute(A: "target-cpu", V: TargetCPU);
    AddedAttr = true;
  }
  if (!TuneCPU.empty()) {
    Attrs.addAttribute(A: "tune-cpu", V: TuneCPU);
    AddedAttr = true;
  }
  if (!Features.empty() && SetTargetFeatures) {
    // Read-only features are implied by the CPU and must not appear in the
    // attribute string.
    llvm::erase_if(C&: Features, P: [&](const std::string& F) {
      return getTarget().isReadOnlyFeature(Feature: F.substr(pos: 1));
    });
    llvm::sort(C&: Features);
    Attrs.addAttribute(A: "target-features", V: llvm::join(R&: Features, Separator: ","));
    AddedAttr = true;
  }
  // Add metadata for AArch64 Function Multi Versioning.
  if (getTarget().getTriple().isAArch64()) {
    llvm::SmallVector<StringRef, 8> Feats;
    bool IsDefault = false;
    if (TV) {
      IsDefault = TV->isDefaultVersion();
      TV->getFeatures(Out&: Feats);
    } else if (TC) {
      IsDefault = TC->isDefaultVersion(Index: GD.getMultiVersionIndex());
      TC->getFeatures(Out&: Feats, Index: GD.getMultiVersionIndex());
    }
    if (IsDefault) {
      // The default version carries an empty fmv-features attribute.
      Attrs.addAttribute(A: "fmv-features");
      AddedAttr = true;
    } else if (!Feats.empty()) {
      // Sort features and remove duplicates.
      std::set<StringRef> OrderedFeats(Feats.begin(), Feats.end());
      std::string FMVFeatures;
      for (StringRef F : OrderedFeats)
        FMVFeatures.append(str: "," + F.str());
      // substr(1) drops the leading comma.
      Attrs.addAttribute(A: "fmv-features", V: FMVFeatures.substr(pos: 1));
      AddedAttr = true;
    }
  }
  return AddedAttr;
}
3140
// Apply attributes that are only meaningful on a real global object (not an
// alias): pragma-driven section placement, retain handling, and per-function
// CPU/feature attributes. Runs after the common attributes are set.
void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
                                          llvm::GlobalObject *GO) {
  const Decl *D = GD.getDecl();
  SetCommonAttributes(GD, GV: GO);

  if (D) {
    if (auto *GV = dyn_cast<llvm::GlobalVariable>(Val: GO)) {
      // [[clang::retain]] pins the variable into the output.
      if (D->hasAttr<RetainAttr>())
        addUsedGlobal(GV);
      // #pragma clang section placements, one per data kind.
      if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
        GV->addAttribute(Kind: "bss-section", Val: SA->getName());
      if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
        GV->addAttribute(Kind: "data-section", Val: SA->getName());
      if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
        GV->addAttribute(Kind: "rodata-section", Val: SA->getName());
      if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>())
        GV->addAttribute(Kind: "relro-section", Val: SA->getName());
    }

    if (auto *F = dyn_cast<llvm::Function>(Val: GO)) {
      // [[clang::retain]] pins the function into the output.
      if (D->hasAttr<RetainAttr>())
        addUsedGlobal(GV: F);
      // #pragma clang section text applies only when no explicit section
      // attribute overrides it (the explicit attributes are handled below).
      if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
        if (!D->getAttr<SectionAttr>())
          F->setSection(SA->getName());

      llvm::AttrBuilder Attrs(F->getContext());
      if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
        // We know that GetCPUAndFeaturesAttributes will always have the
        // newest set, since it has the newest possible FunctionDecl, so the
        // new ones should replace the old.
        llvm::AttributeMask RemoveAttrs;
        RemoveAttrs.addAttribute(A: "target-cpu");
        RemoveAttrs.addAttribute(A: "target-features");
        RemoveAttrs.addAttribute(A: "fmv-features");
        RemoveAttrs.addAttribute(A: "tune-cpu");
        F->removeFnAttrs(Attrs: RemoveAttrs);
        F->addFnAttrs(Attrs);
      }
    }

    // Explicit section attributes win over the pragma placements above;
    // code_seg takes precedence over a plain section attribute.
    if (const auto *CSA = D->getAttr<CodeSegAttr>())
      GO->setSection(CSA->getName());
    else if (const auto *SA = D->getAttr<SectionAttr>())
      GO->setSection(SA->getName());
  }

  getTargetCodeGenInfo().setTargetAttributes(D, GV: GO, M&: *this);
}
3190
// Set attributes for a function known to be internal to this module: the
// usual declaration and definition attributes, then internal linkage. The
// linkage is forced before setNonAliasAttributes so later attribute logic
// sees the final linkage.
void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
                                                  llvm::Function *F,
                                                  const CGFunctionInfo &FI) {
  const Decl *D = GD.getDecl();
  SetLLVMFunctionAttributes(GD, Info: FI, F, /*IsThunk=*/false);
  SetLLVMFunctionAttributesForDefinition(D, F);

  F->setLinkage(llvm::Function::InternalLinkage);

  setNonAliasAttributes(GD, GO: F);
}
3202
3203static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
3204 // Set linkage and visibility in case we never see a definition.
3205 LinkageInfo LV = ND->getLinkageAndVisibility();
3206 // Don't set internal linkage on declarations.
3207 // "extern_weak" is overloaded in LLVM; we probably should have
3208 // separate linkage types for this.
3209 if (isExternallyVisible(L: LV.getLinkage()) &&
3210 (ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
3211 GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
3212}
3213
3214static bool hasExistingGeneralizedTypeMD(llvm::Function *F) {
3215 llvm::MDNode *MD = F->getMetadata(KindID: llvm::LLVMContext::MD_type);
3216 return MD && MD->hasGeneralizedMDString();
3217}
3218
// Attach generalized !type metadata to F identifying it as a potential
// indirect-call target (used by the call graph section).
void CodeGenModule::createIndirectFunctionTypeMD(const FunctionDecl *FD,
                                                 llvm::Function *F) {
  // Return if generalized type metadata is already attached.
  if (hasExistingGeneralizedTypeMD(F))
    return;

  // All functions which are not internal linkage could be indirect targets.
  // Address taken functions with internal linkage could be indirect targets.
  if (!F->hasLocalLinkage() ||
      F->getFunction().hasAddressTaken(nullptr, /*IgnoreCallbackUses=*/true,
                                       /*IgnoreAssumeLikeCalls=*/true,
                                       /*IgnoreLLVMUsed=*/IngoreLLVMUsed: false))
    F->addTypeMetadata(Offset: 0, TypeID: CreateMetadataIdentifierGeneralized(T: FD->getType()));
}
3233
// Attach !type metadata to F so CFI indirect-call checking (cfi-icall) can
// match call sites against callees by function type.
void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
                                                       llvm::Function *F) {
  // Only if we are checking indirect calls.
  if (!LangOpts.Sanitize.has(K: SanitizerKind::CFIICall))
    return;

  // Non-static class methods are handled via vtable or member function pointer
  // checks elsewhere.
  if (isa<CXXMethodDecl>(Val: FD) && !cast<CXXMethodDecl>(Val: FD)->isStatic())
    return;

  // Primary identifier: the function type with pointers left as-is.
  QualType FnType = GeneralizeFunctionType(Ctx&: getContext(), Ty: FD->getType(),
                                           /*GeneralizePointers=*/false);
  llvm::Metadata *MD = CreateMetadataIdentifierForType(T: FnType);
  F->addTypeMetadata(Offset: 0, TypeID: MD);
  // Add the generalized identifier if not added already.
  if (!hasExistingGeneralizedTypeMD(F)) {
    QualType GenPtrFnType = GeneralizeFunctionType(Ctx&: getContext(), Ty: FD->getType(),
                                                   /*GeneralizePointers=*/true);
    F->addTypeMetadata(Offset: 0, TypeID: CreateMetadataIdentifierGeneralized(T: GenPtrFnType));
  }

  // Emit a hash-based bit set entry for cross-DSO calls.
  if (CodeGenOpts.SanitizeCfiCrossDso)
    if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
      F->addTypeMetadata(Offset: 0, TypeID: llvm::ConstantAsMetadata::get(C: CrossDsoTypeId));
}
3261
// For -fcall-graph-section, attach !callee_type metadata to an indirect call
// site recording the generalized type identifier of the callee type QT.
void CodeGenModule::createCalleeTypeMetadataForIcall(const QualType &QT,
                                                     llvm::CallBase *CB) {
  // Only if needed for call graph section and only for indirect calls.
  if (!CodeGenOpts.CallGraphSection || !CB->isIndirectCall())
    return;

  llvm::Metadata *TypeIdMD = CreateMetadataIdentifierGeneralized(T: QT);
  // The node is a list of (offset 0, type-id) tuples, mirroring the shape of
  // !type metadata attached to functions.
  llvm::MDTuple *TypeTuple = llvm::MDTuple::get(
      Context&: getLLVMContext(), MDs: {llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(
                            Ty: llvm::Type::getInt64Ty(C&: getLLVMContext()), V: 0)),
                        TypeIdMD});
  llvm::MDTuple *MDN = llvm::MDNode::get(Context&: getLLVMContext(), MDs: {TypeTuple});
  CB->setMetadata(KindID: llvm::LLVMContext::MD_callee_type, Node: MDN);
}
3276
// Attach !kcfi_type metadata to F carrying the expected KCFI type-id constant
// for FD's function type, optionally salted by the prototype's CFI salt.
void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) {
  llvm::LLVMContext &Ctx = F->getContext();
  llvm::MDBuilder MDB(Ctx);
  llvm::StringRef Salt;

  // A CFI salt attached to the prototype perturbs the computed type id.
  if (const auto *FP = FD->getType()->getAs<FunctionProtoType>())
    if (const auto &Info = FP->getExtraAttributeInfo())
      Salt = Info.CFISalt;

  F->setMetadata(KindID: llvm::LLVMContext::MD_kcfi_type,
                 Node: llvm::MDNode::get(Context&: Ctx, MDs: MDB.createConstant(C: CreateKCFITypeId(
                          T: FD->getType(), Salt))));
}
3290
3291static bool allowKCFIIdentifier(StringRef Name) {
3292 // KCFI type identifier constants are only necessary for external assembly
3293 // functions, which means it's safe to skip unusual names. Subset of
3294 // MCAsmInfo::isAcceptableChar() and MCAsmInfoXCOFF::isAcceptableChar().
3295 return llvm::all_of(Range&: Name, P: [](const char &C) {
3296 return llvm::isAlnum(C) || C == '_' || C == '.';
3297 });
3298}
3299
// Post-process KCFI type metadata for the whole module: strip it from local
// functions whose address is never taken, and emit weak __kcfi_typeid_<name>
// symbols for address-taken declarations so external assembly functions can
// be annotated with the expected type id.
void CodeGenModule::finalizeKCFITypes() {
  llvm::Module &M = getModule();
  for (auto &F : M.functions()) {
    // Remove KCFI type metadata from non-address-taken local functions.
    bool AddressTaken = F.hasAddressTaken();
    if (!AddressTaken && F.hasLocalLinkage())
      F.eraseMetadata(KindID: llvm::LLVMContext::MD_kcfi_type);

    // Generate a constant with the expected KCFI type identifier for all
    // address-taken function declarations to support annotating indirectly
    // called assembly functions.
    if (!AddressTaken || !F.isDeclaration())
      continue;

    // Declarations without a !kcfi_type node have nothing to export.
    const llvm::ConstantInt *Type;
    if (const llvm::MDNode *MD = F.getMetadata(KindID: llvm::LLVMContext::MD_kcfi_type))
      Type = llvm::mdconst::extract<llvm::ConstantInt>(MD: MD->getOperand(I: 0));
    else
      continue;

    // Skip names that plain assembly could not reference.
    StringRef Name = F.getName();
    if (!allowKCFIIdentifier(Name))
      continue;

    // Define a weak absolute symbol holding the type id via module inline
    // asm; the comment carries the signed rendering of the same value.
    std::string Asm = (".weak __kcfi_typeid_" + Name + "\n.set __kcfi_typeid_" +
                       Name + ", " + Twine(Type->getZExtValue()) + " /* " +
                       Twine(Type->getSExtValue()) + " */\n")
                          .str();
    M.appendModuleInlineAsm(Asm);
  }
}
3331
// Set the attributes that belong on a function declaration: ABI attributes,
// linkage/visibility, sections, diagnostic markers, unnamed_addr, and the
// CFI/KCFI/callback metadata. Definition-only attributes are applied later.
// IsIncompleteFunction means the IR function type does not match the
// declaration, so ABI attribute computation is skipped.
void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
                                          bool IsIncompleteFunction,
                                          bool IsThunk) {

  if (F->getIntrinsicID() != llvm::Intrinsic::not_intrinsic) {
    // If this is an intrinsic function, the attributes will have been set
    // when the function was created.
    return;
  }

  const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());

  if (!IsIncompleteFunction)
    SetLLVMFunctionAttributes(GD, Info: getTypes().arrangeGlobalDeclaration(GD), F,
                              IsThunk);

  // Add the Returned attribute for "this", except for iOS 5 and earlier
  // where substantial code, including the libstdc++ dylib, was compiled with
  // GCC and does not actually return "this".
  if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
      !(getTriple().isiOS() && getTriple().isOSVersionLT(Major: 6))) {
    assert(!F->arg_empty() &&
           F->arg_begin()->getType()
             ->canLosslesslyBitCastTo(F->getReturnType()) &&
           "unexpected this return");
    F->addParamAttr(ArgNo: 0, Kind: llvm::Attribute::Returned);
  }

  // Only a few attributes are set on declarations; these may later be
  // overridden by a definition.

  setLinkageForGV(GV: F, ND: FD);
  setGVProperties(GV: F, D: FD);

  // Setup target-specific attributes.
  if (!IsIncompleteFunction && F->isDeclaration())
    getTargetCodeGenInfo().setTargetAttributes(D: FD, GV: F, M&: *this);

  // code_seg takes precedence over a plain section attribute.
  if (const auto *CSA = FD->getAttr<CodeSegAttr>())
    F->setSection(CSA->getName());
  else if (const auto *SA = FD->getAttr<SectionAttr>())
    F->setSection(SA->getName());

  // [[clang::error]] / [[clang::warning]]: diagnose calls to this function.
  if (const auto *EA = FD->getAttr<ErrorAttr>()) {
    if (EA->isError())
      F->addFnAttr(Kind: "dontcall-error", Val: EA->getUserDiagnostic());
    else if (EA->isWarning())
      F->addFnAttr(Kind: "dontcall-warn", Val: EA->getUserDiagnostic());
  }

  // If we plan on emitting this inline builtin, we can't treat it as a builtin.
  if (FD->isInlineBuiltinDeclaration()) {
    const FunctionDecl *FDBody;
    bool HasBody = FD->hasBody(Definition&: FDBody);
    (void)HasBody;
    assert(HasBody && "Inline builtin declarations should always have an "
                      "available body!");
    if (shouldEmitFunction(GD: FDBody))
      F->addFnAttr(Kind: llvm::Attribute::NoBuiltin);
  }

  if (FD->isReplaceableGlobalAllocationFunction()) {
    // A replaceable global allocation function does not act like a builtin by
    // default, only if it is invoked by a new-expression or delete-expression.
    F->addFnAttr(Kind: llvm::Attribute::NoBuiltin);
  }

  // Constructors, destructors and virtual methods get unnamed_addr: their
  // addresses are not significant, so identical bodies may be merged.
  if (isa<CXXConstructorDecl>(Val: FD) || isa<CXXDestructorDecl>(Val: FD))
    F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  else if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD))
    if (MD->isVirtual())
      F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Don't emit entries for function declarations in the cross-DSO mode. This
  // is handled with better precision by the receiving DSO. But if jump tables
  // are non-canonical then we need type metadata in order to produce the local
  // jump table.
  if (!CodeGenOpts.SanitizeCfiCrossDso ||
      !CodeGenOpts.SanitizeCfiCanonicalJumpTables)
    createFunctionTypeMetadataForIcall(FD, F);

  if (CodeGenOpts.CallGraphSection)
    createIndirectFunctionTypeMD(FD, F);

  if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI))
    setKCFIType(FD, F);

  if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
    getOpenMPRuntime().emitDeclareSimdFunction(FD, Fn: F);

  if (CodeGenOpts.InlineMaxStackSize != UINT_MAX)
    F->addFnAttr(Kind: "inline-max-stacksize", Val: llvm::utostr(X: CodeGenOpts.InlineMaxStackSize));

  if (const auto *CB = FD->getAttr<CallbackAttr>()) {
    // Annotate the callback behavior as metadata:
    //  - The callback callee (as argument number).
    //  - The callback payloads (as argument numbers).
    llvm::LLVMContext &Ctx = F->getContext();
    llvm::MDBuilder MDB(Ctx);

    // The payload indices are all but the first one in the encoding. The first
    // identifies the callback callee.
    int CalleeIdx = *CB->encoding_begin();
    ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
    F->addMetadata(KindID: llvm::LLVMContext::MD_callback,
                   MD&: *llvm::MDNode::get(Context&: Ctx, MDs: {MDB.createCallbackEncoding(
                                           CalleeArgNo: CalleeIdx, Arguments: PayloadIndices,
                                           /* VarArgsArePassed */ false)}));
  }
}
3442
// Record GV for the llvm.used list, preventing the linker from discarding it.
// Function declarations are permitted; other globals must be definitions.
void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
  assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
         "Only globals with definition can force usage.");
  LLVMUsed.emplace_back(args&: GV);
}
3448
// Record GV for the llvm.compiler.used list, preventing the compiler (but not
// the linker) from discarding it. Requires a definition.
void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
  assert(!GV->isDeclaration() &&
         "Only globals with definition can force usage.");
  LLVMCompilerUsed.emplace_back(args&: GV);
}
3454
3455void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) {
3456 assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
3457 "Only globals with definition can force usage.");
3458 if (getTriple().isOSBinFormatELF())
3459 LLVMCompilerUsed.emplace_back(args&: GV);
3460 else
3461 LLVMUsed.emplace_back(args&: GV);
3462}
3463
3464static void emitUsed(CodeGenModule &CGM, StringRef Name,
3465 std::vector<llvm::WeakTrackingVH> &List) {
3466 // Don't create llvm.used if there is no need.
3467 if (List.empty())
3468 return;
3469
3470 // Convert List to what ConstantArray needs.
3471 SmallVector<llvm::Constant*, 8> UsedArray;
3472 UsedArray.resize(N: List.size());
3473 for (unsigned i = 0, e = List.size(); i != e; ++i) {
3474 UsedArray[i] =
3475 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
3476 C: cast<llvm::Constant>(Val: &*List[i]), Ty: CGM.Int8PtrTy);
3477 }
3478
3479 if (UsedArray.empty())
3480 return;
3481 llvm::ArrayType *ATy = llvm::ArrayType::get(ElementType: CGM.Int8PtrTy, NumElements: UsedArray.size());
3482
3483 auto *GV = new llvm::GlobalVariable(
3484 CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
3485 llvm::ConstantArray::get(T: ATy, V: UsedArray), Name);
3486
3487 GV->setSection("llvm.metadata");
3488}
3489
// Emit both "used" arrays accumulated during codegen.
void CodeGenModule::emitLLVMUsed() {
  emitUsed(CGM&: *this, Name: "llvm.used", List&: LLVMUsed);
  emitUsed(CGM&: *this, Name: "llvm.compiler.used", List&: LLVMCompilerUsed);
}
3494
// Append a raw linker option string to the pending !llvm.linker.options
// metadata (emitted later by EmitModuleLinkOptions).
void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
  auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opts);
  LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: getLLVMContext(), MDs: MDOpts));
}
3499
// Record a linker "detect mismatch" option (e.g. MSVC /FAILIFMISMATCH) for
// the given name/value pair; a no-op if the target has no such spelling.
void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
  llvm::SmallString<32> Opt;
  getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
  if (Opt.empty())
    return;
  auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opt);
  LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: getLLVMContext(), MDs: MDOpts));
}
3508
3509void CodeGenModule::AddDependentLib(StringRef Lib) {
3510 auto &C = getLLVMContext();
3511 if (getTarget().getTriple().isOSBinFormatELF()) {
3512 ELFDependentLibraries.push_back(
3513 Elt: llvm::MDNode::get(Context&: C, MDs: llvm::MDString::get(Context&: C, Str: Lib)));
3514 return;
3515 }
3516
3517 llvm::SmallString<24> Opt;
3518 getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
3519 auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opt);
3520 LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: C, MDs: MDOpts));
3521}
3522
/// Add link options implied by the given module, including modules
/// it depends on, using a postorder walk.
static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
                                    SmallVectorImpl<llvm::MDNode *> &Metadata,
                                    llvm::SmallPtrSet<Module *, 16> &Visited) {
  // Import this module's parent.
  if (Mod->Parent && Visited.insert(Ptr: Mod->Parent).second) {
    addLinkOptionsPostorder(CGM, Mod: Mod->Parent, Metadata, Visited);
  }

  // Import this module's dependencies.
  for (Module *Import : llvm::reverse(C&: Mod->Imports)) {
    if (Visited.insert(Ptr: Import).second)
      addLinkOptionsPostorder(CGM, Mod: Import, Metadata, Visited);
  }

  // Add linker options to link against the libraries/frameworks
  // described by this module.
  llvm::LLVMContext &Context = CGM.getLLVMContext();
  bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();

  // For modules that use export_as for linking, use that module
  // name instead. Note this happens after the parent/import recursion
  // above, so dependencies are still visited.
  if (Mod->UseExportAsModuleLinkName)
    return;

  for (const Module::LinkLibrary &LL : llvm::reverse(C&: Mod->LinkLibraries)) {
    // Link against a framework. Frameworks are currently Darwin only, so we
    // don't to ask TargetCodeGenInfo for the spelling of the linker option.
    if (LL.IsFramework) {
      llvm::Metadata *Args[2] = {llvm::MDString::get(Context, Str: "-framework"),
                                 llvm::MDString::get(Context, Str: LL.Library)};

      Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: Args));
      continue;
    }

    // Link against a library. On ELF the pair ("lib", name) is recorded and
    // interpreted by the linker; elsewhere the target spells the option.
    if (IsELF) {
      llvm::Metadata *Args[2] = {
          llvm::MDString::get(Context, Str: "lib"),
          llvm::MDString::get(Context, Str: LL.Library),
      };
      Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: Args));
    } else {
      llvm::SmallString<24> Opt;
      CGM.getTargetCodeGenInfo().getDependentLibraryOption(Lib: LL.Library, Opt);
      auto *OptString = llvm::MDString::get(Context, Str: Opt);
      Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: OptString));
    }
  }
}
3575
// Emit the initializers of a C++20 named module unit, in source order:
// Global Module Fragment first, then the module itself, then the Private
// Module Fragment.
void CodeGenModule::EmitModuleInitializers(clang::Module *Primary) {
  assert(Primary->isNamedModuleUnit() &&
         "We should only emit module initializers for named modules.");

  // Emit the initializers in the order that sub-modules appear in the
  // source, first Global Module Fragments, if present.
  if (auto GMF = Primary->getGlobalModuleFragment()) {
    for (Decl *D : getContext().getModuleInitializers(M: GMF)) {
      // Skip import decls, the inits for those are called explicitly.
      if (isa<ImportDecl>(Val: D))
        continue;
      assert(isa<VarDecl>(D) && "GMF initializer decl is not a var?");
      EmitTopLevelDecl(D);
    }
  }
  // Second any associated with the module, itself.
  for (Decl *D : getContext().getModuleInitializers(M: Primary)) {
    // Skip import decls, the inits for those are called explicitly.
    if (isa<ImportDecl>(Val: D))
      continue;
    EmitTopLevelDecl(D);
  }
  // Third any associated with the Private Module Fragment, if present.
  if (auto PMF = Primary->getPrivateModuleFragment()) {
    for (Decl *D : getContext().getModuleInitializers(M: PMF)) {
      // Skip import decls, the inits for those are called explicitly.
      if (isa<ImportDecl>(Val: D))
        continue;
      assert(isa<VarDecl>(D) && "PMF initializer decl is not a var?");
      EmitTopLevelDecl(D);
    }
  }
}
3608
// Emit !llvm.linker.options derived from the imported (Clang) modules: walk
// imports down to leaf submodules, collect their link libraries in reverse
// topological order, and append to any options already gathered (e.g. from
// #pragma comment).
void CodeGenModule::EmitModuleLinkOptions() {
  // Collect the set of all of the modules we want to visit to emit link
  // options, which is essentially the imported modules and all of their
  // non-explicit child modules.
  llvm::SetVector<clang::Module *> LinkModules;
  llvm::SmallPtrSet<clang::Module *, 16> Visited;
  SmallVector<clang::Module *, 16> Stack;

  // Seed the stack with imported modules.
  for (Module *M : ImportedModules) {
    // Do not add any link flags when an implementation TU of a module imports
    // a header of that same module.
    if (M->getTopLevelModuleName() == getLangOpts().CurrentModule &&
        !getLangOpts().isCompilingModule())
      continue;
    if (Visited.insert(Ptr: M).second)
      Stack.push_back(Elt: M);
  }

  // Find all of the modules to import, making a little effort to prune
  // non-leaf modules.
  while (!Stack.empty()) {
    clang::Module *Mod = Stack.pop_back_val();

    bool AnyChildren = false;

    // Visit the submodules of this module.
    for (const auto &SM : Mod->submodules()) {
      // Skip explicit children; they need to be explicitly imported to be
      // linked against.
      if (SM->IsExplicit)
        continue;

      if (Visited.insert(Ptr: SM).second) {
        Stack.push_back(Elt: SM);
        AnyChildren = true;
      }
    }

    // We didn't find any children, so add this module to the list of
    // modules to link against.
    if (!AnyChildren) {
      LinkModules.insert(X: Mod);
    }
  }

  // Add link options for all of the imported modules in reverse topological
  // order. We don't do anything to try to order import link flags with respect
  // to linker options inserted by things like #pragma comment().
  SmallVector<llvm::MDNode *, 16> MetadataArgs;
  Visited.clear();
  for (Module *M : LinkModules)
    if (Visited.insert(Ptr: M).second)
      addLinkOptionsPostorder(CGM&: *this, Mod: M, Metadata&: MetadataArgs, Visited);
  // The postorder walk appends leaves first; reverse to restore topological
  // order before appending to the global list.
  std::reverse(first: MetadataArgs.begin(), last: MetadataArgs.end());
  LinkerOptionsMetadata.append(in_start: MetadataArgs.begin(), in_end: MetadataArgs.end());

  // Add the linker options metadata flag.
  if (!LinkerOptionsMetadata.empty()) {
    auto *NMD = getModule().getOrInsertNamedMetadata(Name: "llvm.linker.options");
    for (auto *MD : LinkerOptionsMetadata)
      NMD->addOperand(M: MD);
  }
}
3673
// Emit all deferred declarations (and vtables) whose emission has become
// necessary. Recurses whenever emitting one definition schedules more, so
// related definitions are emitted depth-first and close together.
void CodeGenModule::EmitDeferred() {
  // Emit deferred declare target declarations.
  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
    getOpenMPRuntime().emitDeferredTargetDecls();

  // Emit code for any potentially referenced deferred decls. Since a
  // previously unused static decl may become used during the generation of code
  // for a static function, iterate until no changes are made.

  if (!DeferredVTables.empty()) {
    EmitDeferredVTables();

    // Emitting a vtable doesn't directly cause more vtables to
    // become deferred, although it can cause functions to be
    // emitted that then need those vtables.
    assert(DeferredVTables.empty());
  }

  // Emit CUDA/HIP static device variables referenced by host code only.
  // Note we should not clear CUDADeviceVarODRUsedByHost since it is still
  // needed for further handling.
  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
    llvm::append_range(C&: DeferredDeclsToEmit,
                       R&: getContext().CUDADeviceVarODRUsedByHost);

  // Stop if we're out of both deferred vtables and deferred declarations.
  if (DeferredDeclsToEmit.empty())
    return;

  // Grab the list of decls to emit. If EmitGlobalDefinition schedules more
  // work, it will not interfere with this.
  std::vector<GlobalDecl> CurDeclsToEmit;
  CurDeclsToEmit.swap(x&: DeferredDeclsToEmit);

  for (GlobalDecl &D : CurDeclsToEmit) {
    // Functions declared with the sycl_kernel_entry_point attribute are
    // emitted normally during host compilation. During device compilation,
    // a SYCL kernel caller offload entry point function is generated and
    // emitted in place of each of these functions.
    if (const auto *FD = D.getDecl()->getAsFunction()) {
      if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>() &&
          FD->isDefined()) {
        // Functions with an invalid sycl_kernel_entry_point attribute are
        // ignored during device compilation.
        if (!FD->getAttr<SYCLKernelEntryPointAttr>()->isInvalidAttr()) {
          // Generate and emit the SYCL kernel caller function.
          EmitSYCLKernelCaller(KernelEntryPointFn: FD, Ctx&: getContext());
          // Recurse to emit any symbols directly or indirectly referenced
          // by the SYCL kernel caller function.
          EmitDeferred();
        }
        // Do not emit the sycl_kernel_entry_point attributed function.
        continue;
      }
    }

    // We should call GetAddrOfGlobal with IsForDefinition set to true in order
    // to get GlobalValue with exactly the type we need, not something that
    // might had been created for another decl with the same mangled name but
    // different type.
    llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
        Val: GetAddrOfGlobal(GD: D, IsForDefinition: ForDefinition));

    // In case of different address spaces, we may still get a cast, even with
    // IsForDefinition equal to true. Query mangled names table to get
    // GlobalValue.
    if (!GV)
      GV = GetGlobalValue(Name: getMangledName(GD: D));

    // Make sure GetGlobalValue returned non-null.
    assert(GV);

    // Check to see if we've already emitted this. This is necessary
    // for a couple of reasons: first, decls can end up in the
    // deferred-decls queue multiple times, and second, decls can end
    // up with definitions in unusual ways (e.g. by an extern inline
    // function acquiring a strong function redefinition). Just
    // ignore these cases.
    if (!GV->isDeclaration())
      continue;

    // If this is OpenMP, check if it is legal to emit this global normally.
    if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD: D))
      continue;

    // Otherwise, emit the definition and move on to the next one.
    EmitGlobalDefinition(D, GV);

    // If we found out that we need to emit more decls, do that recursively.
    // This has the advantage that the decls are emitted in a DFS and related
    // ones are close together, which is convenient for testing.
    if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
      EmitDeferred();
      assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
    }
  }
}
3771
// Emit external vtables as available_externally when safe, to expose their
// inline virtual functions to the optimizer.
void CodeGenModule::EmitVTablesOpportunistically() {
  // Try to emit external vtables as available_externally if they have emitted
  // all inlined virtual functions. It runs after EmitDeferred() and therefore
  // is not allowed to create new references to things that need to be emitted
  // lazily. Note that it also uses fact that we eagerly emitting RTTI.

  assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
         && "Only emit opportunistic vtables with optimizations");

  for (const CXXRecordDecl *RD : OpportunisticVTables) {
    assert(getVTables().isVTableExternal(RD) &&
           "This queue should only contain external vtables");
    if (getCXXABI().canSpeculativelyEmitVTable(RD))
      VTables.GenerateClassData(RD);
  }
  OpportunisticVTables.clear();
}
3789
// Flush deferred annotations for globals that were actually emitted, then
// emit the llvm.global.annotations appending array.
void CodeGenModule::EmitGlobalAnnotations() {
  for (const auto& [MangledName, VD] : DeferredAnnotations) {
    // Only annotate globals that were actually created; others were never
    // emitted.
    llvm::GlobalValue *GV = GetGlobalValue(Name: MangledName);
    if (GV)
      AddGlobalAnnotations(D: VD, GV);
  }
  DeferredAnnotations.clear();

  if (Annotations.empty())
    return;

  // Create a new global variable for the ConstantStruct in the Module.
  llvm::Constant *Array = llvm::ConstantArray::get(T: llvm::ArrayType::get(
      ElementType: Annotations[0]->getType(), NumElements: Annotations.size()), V: Annotations);
  auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
                                      llvm::GlobalValue::AppendingLinkage,
                                      Array, "llvm.global.annotations");
  gv->setSection(AnnotationSection);
}
3809
// Return a private global holding Str for use in annotation metadata,
// creating and caching it on first use.
llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
  llvm::Constant *&AStr = AnnotationStrings[Str];
  if (AStr)
    return AStr;

  // Not found yet, create a new global.
  llvm::Constant *s = llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: Str);
  auto *gv = new llvm::GlobalVariable(
      getModule(), s->getType(), true, llvm::GlobalValue::PrivateLinkage, s,
      ".str", nullptr, llvm::GlobalValue::NotThreadLocal,
      ConstGlobalsPtrTy->getAddressSpace());
  gv->setSection(AnnotationSection);
  gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  AStr = gv;
  return gv;
}
3826
3827llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
3828 SourceManager &SM = getContext().getSourceManager();
3829 PresumedLoc PLoc = SM.getPresumedLoc(Loc);
3830 if (PLoc.isValid())
3831 return EmitAnnotationString(Str: PLoc.getFilename());
3832 return EmitAnnotationString(Str: SM.getBufferName(Loc));
3833}
3834
3835llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
3836 SourceManager &SM = getContext().getSourceManager();
3837 PresumedLoc PLoc = SM.getPresumedLoc(Loc: L);
3838 unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
3839 SM.getExpansionLineNumber(Loc: L);
3840 return llvm::ConstantInt::get(Ty: Int32Ty, V: LineNo);
3841}
3842
// Emit the constant-expression arguments of an annotate attribute as a
// private global struct (cached by value hash), or a null pointer when the
// attribute has no arguments.
llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) {
  ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()};
  if (Exprs.empty())
    return llvm::ConstantPointerNull::get(T: ConstGlobalsPtrTy);

  // Key the cache on the hash of the evaluated argument values so identical
  // argument lists share one global.
  llvm::FoldingSetNodeID ID;
  for (Expr *E : Exprs) {
    ID.Add(x: cast<clang::ConstantExpr>(Val: E)->getAPValueResult());
  }
  llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()];
  if (Lookup)
    return Lookup;

  llvm::SmallVector<llvm::Constant *, 4> LLVMArgs;
  LLVMArgs.reserve(N: Exprs.size());
  ConstantEmitter ConstEmiter(*this);
  llvm::transform(Range&: Exprs, d_first: std::back_inserter(x&: LLVMArgs), F: [&](const Expr *E) {
    const auto *CE = cast<clang::ConstantExpr>(Val: E);
    return ConstEmiter.emitAbstract(loc: CE->getBeginLoc(), value: CE->getAPValueResult(),
                                    T: CE->getType());
  });
  auto *Struct = llvm::ConstantStruct::getAnon(V: LLVMArgs);
  auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true,
                                      llvm::GlobalValue::PrivateLinkage, Struct,
                                      ".args");
  GV->setSection(AnnotationSection);
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  Lookup = GV;
  return GV;
}
3874
// Build one llvm.global.annotations entry for GV: an anonymous struct of
// {annotated global, annotation string, file name, line number, args}.
llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
                                                const AnnotateAttr *AA,
                                                SourceLocation L) {
  // Get the globals for file name, annotation, and the line number.
  llvm::Constant *AnnoGV = EmitAnnotationString(Str: AA->getAnnotation()),
                 *UnitGV = EmitAnnotationUnit(Loc: L),
                 *LineNoCst = EmitAnnotationLineNo(L),
                 *Args = EmitAnnotationArgs(Attr: AA);

  // The entry must reference the global in the default globals address
  // space; insert an addrspacecast when GV lives elsewhere.
  llvm::Constant *GVInGlobalsAS = GV;
  if (GV->getAddressSpace() !=
      getDataLayout().getDefaultGlobalsAddressSpace()) {
    GVInGlobalsAS = llvm::ConstantExpr::getAddrSpaceCast(
        C: GV,
        Ty: llvm::PointerType::get(
            C&: GV->getContext(), AddressSpace: getDataLayout().getDefaultGlobalsAddressSpace()));
  }

  // Create the ConstantStruct for the global annotation.
  llvm::Constant *Fields[] = {
      GVInGlobalsAS, AnnoGV, UnitGV, LineNoCst, Args,
  };
  return llvm::ConstantStruct::getAnon(V: Fields);
}
3899
3900void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
3901 llvm::GlobalValue *GV) {
3902 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
3903 // Get the struct elements for these annotations.
3904 for (const auto *I : D->specific_attrs<AnnotateAttr>())
3905 Annotations.push_back(x: EmitAnnotateAttr(GV, AA: I, L: D->getLocation()));
3906}
3907
// Return true if function \p Fn is suppressed for sanitizer \p Kind by the
// no-sanitize (ignore) list, matching in order: function name, main-file
// name ("mainfile" prefix), then source location ("src" prefix).
bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
                                       SourceLocation Loc) const {
  const auto &NoSanitizeL = getContext().getNoSanitizeList();
  // NoSanitize by function name.
  if (NoSanitizeL.containsFunction(Mask: Kind, FunctionName: Fn->getName()))
    return true;
  // NoSanitize by location. Check "mainfile" prefix.
  auto &SM = Context.getSourceManager();
  // NOTE(review): the main file is assumed to always have a FileEntry here;
  // the optional is dereferenced unchecked.
  FileEntryRef MainFile = *SM.getFileEntryRefForID(FID: SM.getMainFileID());
  if (NoSanitizeL.containsMainFile(Mask: Kind, FileName: MainFile.getName()))
    return true;

  // Check "src" prefix.
  if (Loc.isValid())
    return NoSanitizeL.containsLocation(Mask: Kind, Loc);
  // If location is unknown, this may be a compiler-generated function. Assume
  // it's located in the main file.
  return NoSanitizeL.containsFile(Mask: Kind, FileName: MainFile.getName());
}
3927
// Return true if global variable \p GV is suppressed for sanitizer \p Kind
// by the no-sanitize list, matching by global name, main-file name, source
// location, or the (record) type of the global.
bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind,
                                       llvm::GlobalVariable *GV,
                                       SourceLocation Loc, QualType Ty,
                                       StringRef Category) const {
  const auto &NoSanitizeL = getContext().getNoSanitizeList();
  if (NoSanitizeL.containsGlobal(Mask: Kind, GlobalName: GV->getName(), Category))
    return true;
  auto &SM = Context.getSourceManager();
  if (NoSanitizeL.containsMainFile(
          Mask: Kind, FileName: SM.getFileEntryRefForID(FID: SM.getMainFileID())->getName(),
          Category))
    return true;
  if (NoSanitizeL.containsLocation(Mask: Kind, Loc, Category))
    return true;

  // Check global type.
  if (!Ty.isNull()) {
    // Drill down the array types: if global variable of a fixed type is
    // not sanitized, we also don't instrument arrays of them.
    while (auto AT = dyn_cast<ArrayType>(Val: Ty.getTypePtr()))
      Ty = AT->getElementType();
    Ty = Ty.getCanonicalType().getUnqualifiedType();
    // Only record types (classes, structs etc.) are ignored.
    if (Ty->isRecordType()) {
      std::string TypeStr = Ty.getAsString(Policy: getContext().getPrintingPolicy());
      if (NoSanitizeL.containsType(Mask: Kind, MangledTypeName: TypeStr, Category))
        return true;
    }
  }
  return false;
}
3959
3960bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
3961 StringRef Category) const {
3962 const auto &XRayFilter = getContext().getXRayFilter();
3963 using ImbueAttr = XRayFunctionFilter::ImbueAttribute;
3964 auto Attr = ImbueAttr::NONE;
3965 if (Loc.isValid())
3966 Attr = XRayFilter.shouldImbueLocation(Loc, Category);
3967 if (Attr == ImbueAttr::NONE)
3968 Attr = XRayFilter.shouldImbueFunction(FunctionName: Fn->getName());
3969 switch (Attr) {
3970 case ImbueAttr::NONE:
3971 return false;
3972 case ImbueAttr::ALWAYS:
3973 Fn->addFnAttr(Kind: "function-instrument", Val: "xray-always");
3974 break;
3975 case ImbueAttr::ALWAYS_ARG1:
3976 Fn->addFnAttr(Kind: "function-instrument", Val: "xray-always");
3977 Fn->addFnAttr(Kind: "xray-log-args", Val: "1");
3978 break;
3979 case ImbueAttr::NEVER:
3980 Fn->addFnAttr(Kind: "function-instrument", Val: "xray-never");
3981 break;
3982 }
3983 return true;
3984}
3985
// Query the profile list (-fprofile-list) for whether \p Fn may be profile
// instrumented. Checks, in order: function name, source location, and the
// main-file name; if none match, the list's default verdict applies.
ProfileList::ExclusionType
CodeGenModule::isFunctionBlockedByProfileList(llvm::Function *Fn,
                                              SourceLocation Loc) const {
  const auto &ProfileList = getContext().getProfileList();
  // If the profile list is empty, then instrument everything.
  if (ProfileList.isEmpty())
    return ProfileList::Allow;
  llvm::driver::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr();
  // First, check the function name.
  if (auto V = ProfileList.isFunctionExcluded(FunctionName: Fn->getName(), Kind))
    return *V;
  // Next, check the source location.
  if (Loc.isValid())
    if (auto V = ProfileList.isLocationExcluded(Loc, Kind))
      return *V;
  // If location is unknown, this may be a compiler-generated function. Assume
  // it's located in the main file.
  auto &SM = Context.getSourceManager();
  if (auto MainFile = SM.getFileEntryRefForID(FID: SM.getMainFileID()))
    if (auto V = ProfileList.isFileExcluded(FileName: MainFile->getName(), Kind))
      return *V;
  return ProfileList.getDefault(Kind);
}
4009
4010ProfileList::ExclusionType
4011CodeGenModule::isFunctionBlockedFromProfileInstr(llvm::Function *Fn,
4012 SourceLocation Loc) const {
4013 auto V = isFunctionBlockedByProfileList(Fn, Loc);
4014 if (V != ProfileList::Allow)
4015 return V;
4016
4017 auto NumGroups = getCodeGenOpts().ProfileTotalFunctionGroups;
4018 if (NumGroups > 1) {
4019 auto Group = llvm::crc32(Data: arrayRefFromStringRef(Input: Fn->getName())) % NumGroups;
4020 if (Group != getCodeGenOpts().ProfileSelectedFunctionGroup)
4021 return ProfileList::Skip;
4022 }
4023 return ProfileList::Allow;
4024}
4025
4026bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
4027 // Never defer when EmitAllDecls is specified.
4028 if (LangOpts.EmitAllDecls)
4029 return true;
4030
4031 const auto *VD = dyn_cast<VarDecl>(Val: Global);
4032 if (VD &&
4033 ((CodeGenOpts.KeepPersistentStorageVariables &&
4034 (VD->getStorageDuration() == SD_Static ||
4035 VD->getStorageDuration() == SD_Thread)) ||
4036 (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static &&
4037 VD->getType().isConstQualified())))
4038 return true;
4039
4040 return getContext().DeclMustBeEmitted(D: Global);
4041}
4042
// Return true if \p Global can safely be emitted at the point it is first
// seen; false means emission must be deferred because later declarations or
// offloading semantics could still change its linkage or relevance.
bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
  // In OpenMP 5.0 variables and function may be marked as
  // device_type(host/nohost) and we should not emit them eagerly unless we sure
  // that they must be emitted on the host/device. To be sure we need to have
  // seen a declare target with an explicit mentioning of the function, we know
  // we have if the level of the declare target attribute is -1. Note that we
  // check somewhere else if we should emit this at all.
  if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) {
    std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
        OMPDeclareTargetDeclAttr::getActiveAttr(VD: Global);
    if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1)
      return false;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(Val: Global)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      // Implicit template instantiations may change linkage if they are later
      // explicitly instantiated, so they should not be emitted eagerly.
      return false;
    // Defer until all versions have been semantically checked.
    if (FD->hasAttr<TargetVersionAttr>() && !FD->isMultiVersion())
      return false;
    // Defer emission of SYCL kernel entry point functions during device
    // compilation.
    if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>())
      return false;
  }
  if (const auto *VD = dyn_cast<VarDecl>(Val: Global)) {
    if (Context.getInlineVariableDefinitionKind(VD) ==
        ASTContext::InlineVariableDefinitionKind::WeakUnknown)
      // A definition of an inline constexpr static data member may change
      // linkage later if it's redeclared outside the class.
      return false;
    if (CXX20ModuleInits && VD->getOwningModule() &&
        !VD->getOwningModule()->isModuleMapModule()) {
      // For CXX20, module-owned initializers need to be deferred, since it is
      // not known at this point if they will be run for the current module or
      // as part of the initializer for an imported one.
      return false;
    }
  }
  // If OpenMP is enabled and threadprivates must be generated like TLS, delay
  // codegen for global variables, because they may be marked as threadprivate.
  if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
      getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Val: Global) &&
      !Global->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: false, ExcludeDtor: false) &&
      !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD: Global))
    return false;

  return true;
}
4094
// Return the address of the (uniqued) constant global that holds the GUID
// value of \p GD. The global is created on first request with linkonce_odr
// linkage so identical GUIDs merge across translation units.
ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
  StringRef Name = getMangledName(GD);

  // The UUID descriptor should be pointer aligned.
  CharUnits Alignment = CharUnits::fromQuantity(Quantity: PointerAlignInBytes);

  // Look for an existing global.
  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
    return ConstantAddress(GV, GV->getValueType(), Alignment);

  ConstantEmitter Emitter(*this);
  llvm::Constant *Init;

  APValue &V = GD->getAsAPValue();
  if (!V.isAbsent()) {
    // If possible, emit the APValue version of the initializer. In particular,
    // this gets the type of the constant right.
    Init = Emitter.emitForInitializer(
        value: GD->getAsAPValue(), destAddrSpace: GD->getType().getAddressSpace(), destType: GD->getType());
  } else {
    // As a fallback, directly construct the constant.
    // FIXME: This may get padding wrong under esoteric struct layout rules.
    // MSVC appears to create a complete type 'struct __s_GUID' that it
    // presumably uses to represent these constants.
    MSGuidDecl::Parts Parts = GD->getParts();
    llvm::Constant *Fields[4] = {
        llvm::ConstantInt::get(Ty: Int32Ty, V: Parts.Part1),
        llvm::ConstantInt::get(Ty: Int16Ty, V: Parts.Part2),
        llvm::ConstantInt::get(Ty: Int16Ty, V: Parts.Part3),
        llvm::ConstantDataArray::getRaw(
            Data: StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), NumElements: 8,
            ElementTy: Int8Ty)};
    Init = llvm::ConstantStruct::getAnon(V: Fields);
  }

  auto *GV = new llvm::GlobalVariable(
      getModule(), Init->getType(),
      /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
  if (supportsCOMDAT())
    GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName()));
  setDSOLocal(GV);

  // With an APValue initializer the emitted value type is authoritative;
  // otherwise report the address with the declared type's memory type.
  if (!V.isAbsent()) {
    Emitter.finalize(global: GV);
    return ConstantAddress(GV, GV->getValueType(), Alignment);
  }

  llvm::Type *Ty = getTypes().ConvertTypeForMem(T: GD->getType());
  return ConstantAddress(GV, Ty, Alignment);
}
4145
4146ConstantAddress CodeGenModule::GetAddrOfUnnamedGlobalConstantDecl(
4147 const UnnamedGlobalConstantDecl *GCD) {
4148 CharUnits Alignment = getContext().getTypeAlignInChars(T: GCD->getType());
4149
4150 llvm::GlobalVariable **Entry = nullptr;
4151 Entry = &UnnamedGlobalConstantDeclMap[GCD];
4152 if (*Entry)
4153 return ConstantAddress(*Entry, (*Entry)->getValueType(), Alignment);
4154
4155 ConstantEmitter Emitter(*this);
4156 llvm::Constant *Init;
4157
4158 const APValue &V = GCD->getValue();
4159
4160 assert(!V.isAbsent());
4161 Init = Emitter.emitForInitializer(value: V, destAddrSpace: GCD->getType().getAddressSpace(),
4162 destType: GCD->getType());
4163
4164 auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
4165 /*isConstant=*/true,
4166 llvm::GlobalValue::PrivateLinkage, Init,
4167 ".constant");
4168 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4169 GV->setAlignment(Alignment.getAsAlign());
4170
4171 Emitter.finalize(global: GV);
4172
4173 *Entry = GV;
4174 return ConstantAddress(GV, GV->getValueType(), Alignment);
4175}
4176
// Return the address of the global backing a template parameter object
// (a class-type non-type template argument). Externally visible objects get
// linkonce_odr linkage (and a comdat where supported) so identical objects
// merge across translation units.
ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
    const TemplateParamObjectDecl *TPO) {
  StringRef Name = getMangledName(GD: TPO);
  CharUnits Alignment = getNaturalTypeAlignment(T: TPO->getType());

  // Reuse an existing global with this mangled name, if any.
  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
    return ConstantAddress(GV, GV->getValueType(), Alignment);

  ConstantEmitter Emitter(*this);
  llvm::Constant *Init = Emitter.emitForInitializer(
      value: TPO->getValue(), destAddrSpace: TPO->getType().getAddressSpace(), destType: TPO->getType());

  if (!Init) {
    ErrorUnsupported(D: TPO, Type: "template parameter object");
    return ConstantAddress::invalid();
  }

  llvm::GlobalValue::LinkageTypes Linkage =
      isExternallyVisible(L: TPO->getLinkageAndVisibility().getLinkage())
          ? llvm::GlobalValue::LinkOnceODRLinkage
          : llvm::GlobalValue::InternalLinkage;
  auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
                                      /*isConstant=*/true, Linkage, Init, Name);
  setGVProperties(GV, D: TPO);
  if (supportsCOMDAT() && Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName()));
  Emitter.finalize(global: GV);

  return ConstantAddress(GV, GV->getValueType(), Alignment);
}
4207
// Return the address of the aliasee of a weakref declaration, creating an
// extern_weak forward reference to it if it does not exist yet. The created
// value is remembered in WeakRefReferences so a later real definition can
// fix up the linkage.
ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
  const AliasAttr *AA = VD->getAttr<AliasAttr>();
  assert(AA && "No alias?");

  CharUnits Alignment = getContext().getDeclAlign(D: VD);
  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: VD->getType());

  // See if there is already something with the target's name in the module.
  llvm::GlobalValue *Entry = GetGlobalValue(Name: AA->getAliasee());
  if (Entry)
    return ConstantAddress(Entry, DeclTy, Alignment);

  // Create a forward declaration of the right kind (function vs. variable).
  llvm::Constant *Aliasee;
  if (isa<llvm::FunctionType>(Val: DeclTy))
    Aliasee = GetOrCreateLLVMFunction(MangledName: AA->getAliasee(), Ty: DeclTy,
                                      D: GlobalDecl(cast<FunctionDecl>(Val: VD)),
                                      /*ForVTable=*/false);
  else
    Aliasee = GetOrCreateLLVMGlobal(MangledName: AA->getAliasee(), Ty: DeclTy, AddrSpace: LangAS::Default,
                                    D: nullptr);

  // Weakrefs resolve to null when the target is never defined, hence
  // extern_weak linkage on the forward reference.
  auto *F = cast<llvm::GlobalValue>(Val: Aliasee);
  F->setLinkage(llvm::Function::ExternalWeakLinkage);
  WeakRefReferences.insert(Ptr: F);

  return ConstantAddress(Aliasee, DeclTy, Alignment);
}
4235
4236template <typename AttrT> static bool hasImplicitAttr(const ValueDecl *D) {
4237 if (!D)
4238 return false;
4239 if (auto *A = D->getAttr<AttrT>())
4240 return A->isImplicit();
4241 return D->isImplicit();
4242}
4243
// Decide whether an alias definition should be skipped under offloading
// (OpenMP target device or CUDA/HIP): an alias is only emitted where its
// aliasee is also emitted, so mismatched host/device sides are dropped.
static bool shouldSkipAliasEmission(const CodeGenModule &CGM,
                                    const ValueDecl *Global) {
  const LangOptions &LangOpts = CGM.getLangOpts();
  // Without offloading semantics, never skip.
  if (!LangOpts.OpenMPIsTargetDevice && !LangOpts.CUDA)
    return false;

  const auto *AA = Global->getAttr<AliasAttr>();
  GlobalDecl AliaseeGD;

  // Check if the aliasee exists, if the aliasee is not found, skip the alias
  // emission. This is executed for both the host and device.
  if (!CGM.lookupRepresentativeDecl(MangledName: AA->getAliasee(), Result&: AliaseeGD))
    return true;

  const auto *AliaseeDecl = dyn_cast<ValueDecl>(Val: AliaseeGD.getDecl());
  // OpenMP device: only keep the alias if the aliasee is declare target.
  if (LangOpts.OpenMPIsTargetDevice)
    return !AliaseeDecl ||
           !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD: AliaseeDecl);

  // CUDA / HIP
  const bool HasDeviceAttr = Global->hasAttr<CUDADeviceAttr>();
  const bool AliaseeHasDeviceAttr =
      AliaseeDecl && AliaseeDecl->hasAttr<CUDADeviceAttr>();

  // Device compilation: both the alias and the aliasee must be device-side.
  if (LangOpts.CUDAIsDevice)
    return !HasDeviceAttr || !AliaseeHasDeviceAttr;

  // CUDA / HIP Host
  // we know that the aliasee exists from above, so we know to emit
  return false;
}
4275
4276bool CodeGenModule::shouldEmitCUDAGlobalVar(const VarDecl *Global) const {
4277 assert(LangOpts.CUDA && "Should not be called by non-CUDA languages");
4278 // We need to emit host-side 'shadows' for all global
4279 // device-side variables because the CUDA runtime needs their
4280 // size and host-side address in order to provide access to
4281 // their device-side incarnations.
4282 return !LangOpts.CUDAIsDevice || Global->hasAttr<CUDADeviceAttr>() ||
4283 Global->hasAttr<CUDAConstantAttr>() ||
4284 Global->hasAttr<CUDASharedAttr>() ||
4285 Global->getType()->isCUDADeviceBuiltinSurfaceType() ||
4286 Global->getType()->isCUDADeviceBuiltinTextureType();
4287}
4288
// Central dispatch for emitting a global (function or variable): handles
// aliases, ifuncs, CUDA/OpenMP filtering, and decides between eager emission
// and deferral to first use.
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
  const auto *Global = cast<ValueDecl>(Val: GD.getDecl());

  // Weak references don't produce any output by themselves.
  if (Global->hasAttr<WeakRefAttr>())
    return;

  // If this is an alias definition (which otherwise looks like a declaration)
  // emit it now.
  if (Global->hasAttr<AliasAttr>()) {
    if (shouldSkipAliasEmission(CGM: *this, Global))
      return;
    return EmitAliasDefinition(GD);
  }

  // IFunc like an alias whose value is resolved at runtime by calling resolver.
  if (Global->hasAttr<IFuncAttr>())
    return emitIFuncDefinition(GD);

  // If this is a cpu_dispatch multiversion function, emit the resolver.
  if (Global->hasAttr<CPUDispatchAttr>())
    return emitCPUDispatchDefinition(GD);

  // If this is CUDA, be selective about which declarations we emit.
  // Non-constexpr non-lambda implicit host device functions are not emitted
  // unless they are used on device side.
  if (LangOpts.CUDA) {
    assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) &&
           "Expected Variable or Function");
    if (const auto *VD = dyn_cast<VarDecl>(Val: Global)) {
      if (!shouldEmitCUDAGlobalVar(Global: VD))
        return;
    } else if (LangOpts.CUDAIsDevice) {
      const auto *FD = dyn_cast<FunctionDecl>(Val: Global);
      if ((!Global->hasAttr<CUDADeviceAttr>() ||
           (LangOpts.OffloadImplicitHostDeviceTemplates &&
            hasImplicitAttr<CUDAHostAttr>(D: FD) &&
            hasImplicitAttr<CUDADeviceAttr>(D: FD) && !FD->isConstexpr() &&
            !isLambdaCallOperator(DC: FD) &&
            !getContext().CUDAImplicitHostDeviceFunUsedByDevice.count(V: FD))) &&
          !Global->hasAttr<CUDAGlobalAttr>() &&
          !(LangOpts.HIPStdPar && isa<FunctionDecl>(Val: Global) &&
            !Global->hasAttr<CUDAHostAttr>()))
        return;
      // Device-only functions are the only things we skip.
    } else if (!Global->hasAttr<CUDAHostAttr>() &&
               Global->hasAttr<CUDADeviceAttr>())
      return;
  }

  if (LangOpts.OpenMP) {
    // If this is OpenMP, check if it is legal to emit this global normally.
    if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
      return;
    if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Val: Global)) {
      if (MustBeEmitted(Global))
        EmitOMPDeclareReduction(D: DRD);
      return;
    }
    if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Val: Global)) {
      if (MustBeEmitted(Global))
        EmitOMPDeclareMapper(D: DMD);
      return;
    }
  }

  // Ignore declarations, they will be emitted on their first use.
  if (const auto *FD = dyn_cast<FunctionDecl>(Val: Global)) {
    // OpenCL kernels additionally need a host-callable stub emitted.
    if (DeviceKernelAttr::isOpenCLSpelling(A: FD->getAttr<DeviceKernelAttr>()) &&
        FD->doesThisDeclarationHaveABody())
      addDeferredDeclToEmit(GD: GlobalDecl(FD, KernelReferenceKind::Stub));

    // Update deferred annotations with the latest declaration if the function
    // was already used or defined.
    if (FD->hasAttr<AnnotateAttr>()) {
      StringRef MangledName = getMangledName(GD);
      if (GetGlobalValue(Name: MangledName))
        DeferredAnnotations[MangledName] = FD;
    }

    // Forward declarations are emitted lazily on first use.
    if (!FD->doesThisDeclarationHaveABody()) {
      if (!FD->doesDeclarationForceExternallyVisibleDefinition() &&
          (!FD->isMultiVersion() || !getTarget().getTriple().isAArch64()))
        return;

      StringRef MangledName = getMangledName(GD);

      // Compute the function info and LLVM type.
      const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
      llvm::Type *Ty = getTypes().GetFunctionType(Info: FI);

      GetOrCreateLLVMFunction(MangledName, Ty, D: GD, /*ForVTable=*/false,
                              /*DontDefer=*/false);
      return;
    }
  } else {
    const auto *VD = cast<VarDecl>(Val: Global);
    assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
    if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
        !Context.isMSStaticDataMemberInlineDefinition(VD)) {
      if (LangOpts.OpenMP) {
        // Emit declaration of the must-be-emitted declare target variable.
        if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
                OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {

          // If this variable has external storage and doesn't require special
          // link handling we defer to its canonical definition.
          if (VD->hasExternalStorage() &&
              Res != OMPDeclareTargetDeclAttr::MT_Link)
            return;

          bool UnifiedMemoryEnabled =
              getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
          if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
               *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
              !UnifiedMemoryEnabled) {
            (void)GetAddrOfGlobalVar(D: VD);
          } else {
            assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
                    ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
                      *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
                     UnifiedMemoryEnabled)) &&
                   "Link clause or to clause with unified memory expected.");
            (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
          }

          return;
        }
      }
      // If this declaration may have caused an inline variable definition to
      // change linkage, make sure that it's emitted.
      if (Context.getInlineVariableDefinitionKind(VD) ==
          ASTContext::InlineVariableDefinitionKind::Strong)
        GetAddrOfGlobalVar(D: VD);
      return;
    }
  }

  // Defer code generation to first use when possible, e.g. if this is an inline
  // function. If the global must always be emitted, do it eagerly if possible
  // to benefit from cache locality.
  if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
    // Emit the definition if it can't be deferred.
    EmitGlobalDefinition(D: GD);
    addEmittedDeferredDecl(GD);
    return;
  }

  // If we're deferring emission of a C++ variable with an
  // initializer, remember the order in which it appeared in the file.
  if (getLangOpts().CPlusPlus && isa<VarDecl>(Val: Global) &&
      cast<VarDecl>(Val: Global)->hasInit()) {
    DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
    CXXGlobalInits.push_back(x: nullptr);
  }

  StringRef MangledName = getMangledName(GD);
  if (GetGlobalValue(Name: MangledName) != nullptr) {
    // The value has already been used and should therefore be emitted.
    addDeferredDeclToEmit(GD);
  } else if (MustBeEmitted(Global)) {
    // The value must be emitted, but cannot be emitted eagerly.
    assert(!MayBeEmittedEagerly(Global));
    addDeferredDeclToEmit(GD);
  } else {
    // Otherwise, remember that we saw a deferred decl with this name. The
    // first use of the mangled name will cause it to move into
    // DeferredDeclsToEmit.
    DeferredDecls[MangledName] = GD;
  }
}
4461
4462// Check if T is a class type with a destructor that's not dllimport.
4463static bool HasNonDllImportDtor(QualType T) {
4464 if (const auto *RT =
4465 T->getBaseElementTypeUnsafe()->getAsCanonical<RecordType>())
4466 if (auto *RD = dyn_cast<CXXRecordDecl>(Val: RT->getDecl())) {
4467 RD = RD->getDefinitionOrSelf();
4468 if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
4469 return true;
4470 }
4471
4472 return false;
4473}
4474
namespace {
  // Statement visitor that reports whether a function body (apparently)
  // calls a function with the given IR-level name, either through an asm
  // label or through a "__builtin_"-prefixed library builtin. Used to detect
  // trivially-recursive available_externally bodies.
  struct FunctionIsDirectlyRecursive
      : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> {
    const StringRef Name; // IR-level name of the function being checked.
    const Builtin::Context &BI;
    FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C)
        : Name(N), BI(C) {}

    // A call matches if the callee's asm label equals Name, or the callee is
    // a library builtin whose un-prefixed builtin name equals Name.
    bool VisitCallExpr(const CallExpr *E) {
      const FunctionDecl *FD = E->getDirectCallee();
      if (!FD)
        return false;
      AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
      if (Attr && Name == Attr->getLabel())
        return true;
      unsigned BuiltinID = FD->getBuiltinID();
      if (!BuiltinID || !BI.isLibFunction(ID: BuiltinID))
        return false;
      std::string BuiltinNameStr = BI.getName(ID: BuiltinID);
      StringRef BuiltinName = BuiltinNameStr;
      return BuiltinName.consume_front(Prefix: "__builtin_") && Name == BuiltinName;
    }

    // Recurse into children; true as soon as any child matches.
    bool VisitStmt(const Stmt *S) {
      for (const Stmt *Child : S->children())
        if (Child && this->Visit(S: Child))
          return true;
      return false;
    }
  };

  // Make sure we're not referencing non-imported vars or functions.
  // Traverses the body of a dllimport function to decide whether inlining it
  // would be safe; traversal stops early once SafeToInline becomes false
  // (each Visit* returns SafeToInline as the "continue" flag).
  struct DLLImportFunctionVisitor
      : public RecursiveASTVisitor<DLLImportFunctionVisitor> {
    bool SafeToInline = true;

    bool shouldVisitImplicitCode() const { return true; }

    bool VisitVarDecl(VarDecl *VD) {
      if (VD->getTLSKind()) {
        // A thread-local variable cannot be imported.
        SafeToInline = false;
        return SafeToInline;
      }

      // A variable definition might imply a destructor call.
      if (VD->isThisDeclarationADefinition())
        SafeToInline = !HasNonDllImportDtor(T: VD->getType());

      return SafeToInline;
    }

    bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
      if (const auto *D = E->getTemporary()->getDestructor())
        SafeToInline = D->hasAttr<DLLImportAttr>();
      return SafeToInline;
    }

    bool VisitDeclRefExpr(DeclRefExpr *E) {
      ValueDecl *VD = E->getDecl();
      if (isa<FunctionDecl>(Val: VD))
        SafeToInline = VD->hasAttr<DLLImportAttr>();
      else if (VarDecl *V = dyn_cast<VarDecl>(Val: VD))
        // Locals are always fine; globals must themselves be imported.
        SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
      return SafeToInline;
    }

    bool VisitCXXConstructExpr(CXXConstructExpr *E) {
      SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>();
      return SafeToInline;
    }

    bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
      CXXMethodDecl *M = E->getMethodDecl();
      if (!M) {
        // Call through a pointer to member function. This is safe to inline.
        SafeToInline = true;
      } else {
        SafeToInline = M->hasAttr<DLLImportAttr>();
      }
      return SafeToInline;
    }

    bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
      SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
      return SafeToInline;
    }

    bool VisitCXXNewExpr(CXXNewExpr *E) {
      SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>();
      return SafeToInline;
    }
  };
}
4569
4570// isTriviallyRecursive - Check if this function calls another
4571// decl that, because of the asm attribute or the other decl being a builtin,
4572// ends up pointing to itself.
4573bool
4574CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
4575 StringRef Name;
4576 if (getCXXABI().getMangleContext().shouldMangleDeclName(D: FD)) {
4577 // asm labels are a special kind of mangling we have to support.
4578 AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
4579 if (!Attr)
4580 return false;
4581 Name = Attr->getLabel();
4582 } else {
4583 Name = FD->getName();
4584 }
4585
4586 FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
4587 const Stmt *Body = FD->getBody();
4588 return Body ? Walker.Visit(S: Body) : false;
4589}
4590
// Decide whether a function definition should actually be emitted. Returns
// false only for available_externally bodies that are not worth (or not
// safe) emitting: at -O0, across C++20 named-module boundaries, noinline,
// unsafe dllimport bodies, or trivially-recursive impostors.
bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
  if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
    return true;

  const auto *F = cast<FunctionDecl>(Val: GD.getDecl());
  // Inline builtins declaration must be emitted. They often are fortified
  // functions.
  if (F->isInlineBuiltinDeclaration())
    return true;

  // At -O0 the body would never be inlined anyway, so skip it.
  if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
    return false;

  // We don't import function bodies from other named module units since that
  // behavior may break ABI compatibility of the current unit.
  if (const Module *M = F->getOwningModule();
      M && M->getTopLevelModule()->isNamedModule() &&
      getContext().getCurrentNamedModule() != M->getTopLevelModule()) {
    // There are practices to mark template member function as always-inline
    // and mark the template as extern explicit instantiation but not give
    // the definition for member function. So we have to emit the function
    // from explicitly instantiation with always-inline.
    //
    // See https://github.com/llvm/llvm-project/issues/86893 for details.
    //
    // TODO: Maybe it is better to give it a warning if we call a non-inline
    // function from other module units which is marked as always-inline.
    if (!F->isTemplateInstantiation() || !F->hasAttr<AlwaysInlineAttr>()) {
      return false;
    }
  }

  if (F->hasAttr<NoInlineAttr>())
    return false;

  if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) {
    // Check whether it would be safe to inline this dllimport function.
    DLLImportFunctionVisitor Visitor;
    Visitor.TraverseFunctionDecl(D: const_cast<FunctionDecl*>(F));
    if (!Visitor.SafeToInline)
      return false;

    if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(Val: F)) {
      // Implicit destructor invocations aren't captured in the AST, so the
      // check above can't see them. Check for them manually here.
      for (const Decl *Member : Dtor->getParent()->decls())
        if (isa<FieldDecl>(Val: Member))
          if (HasNonDllImportDtor(T: cast<FieldDecl>(Val: Member)->getType()))
            return false;
      for (const CXXBaseSpecifier &B : Dtor->getParent()->bases())
        if (HasNonDllImportDtor(T: B.getType()))
          return false;
    }
  }

  // PR9614. Avoid cases where the source code is lying to us. An available
  // externally function should have an equivalent function somewhere else,
  // but a function that calls itself through asm label/`__builtin_` trickery is
  // clearly not equivalent to the real implementation.
  // This happens in glibc's btowc and in some configure checks.
  return !isTriviallyRecursive(FD: F);
}
4653
4654bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
4655 return CodeGenOpts.OptimizationLevel > 0;
4656}
4657
// Emit every version of a multiversioned function (cpu_specific,
// target_clones, or plain target/target_version), plus its ifunc resolver
// where one is required.
void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
                                                       llvm::GlobalValue *GV) {
  const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());

  if (FD->isCPUSpecificMultiVersion()) {
    // One definition per CPU listed in the cpu_specific attribute.
    auto *Spec = FD->getAttr<CPUSpecificAttr>();
    for (unsigned I = 0; I < Spec->cpus_size(); ++I)
      EmitGlobalFunctionDefinition(GD: GD.getWithMultiVersionIndex(Index: I), GV: nullptr);
  } else if (auto *TC = FD->getAttr<TargetClonesAttr>()) {
    // One definition per distinct feature string (duplicates share a body).
    for (unsigned I = 0; I < TC->featuresStrs_size(); ++I)
      if (TC->isFirstOfVersion(Index: I))
        EmitGlobalFunctionDefinition(GD: GD.getWithMultiVersionIndex(Index: I), GV: nullptr);
  } else
    EmitGlobalFunctionDefinition(GD, GV);

  // Ensure that the resolver function is also emitted.
  if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) {
    // On AArch64 defer the resolver emission until the entire TU is processed.
    if (getTarget().getTriple().isAArch64())
      AddDeferredMultiVersionResolverToEmit(GD);
    else
      GetOrCreateMultiVersionResolver(GD);
  }
}
4682
/// Emit the IR definition for a global declaration (function or variable),
/// dispatching to the structor/multiversion/plain-function paths as needed.
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
  const auto *D = cast<ValueDecl>(Val: GD.getDecl());

  // Make crash dumps name the declaration that was being emitted.
  PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
                                 Context.getSourceManager(),
                                 "Generating code for declaration");

  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // At -O0, don't generate IR for functions with available_externally
    // linkage.
    if (!shouldEmitFunction(GD))
      return;

    // Record a time-trace entry labeled with the (qualified) function name.
    llvm::TimeTraceScope TimeScope("CodeGen Function", [&]() {
      std::string Name;
      llvm::raw_string_ostream OS(Name);
      FD->getNameForDiagnostic(OS, Policy: getContext().getPrintingPolicy(),
                               /*Qualified=*/true);
      return Name;
    });

    if (const auto *Method = dyn_cast<CXXMethodDecl>(Val: D)) {
      // Make sure to emit the definition(s) before we emit the thunks.
      // This is necessary for the generation of certain thunks.
      if (isa<CXXConstructorDecl>(Val: Method) || isa<CXXDestructorDecl>(Val: Method))
        ABI->emitCXXStructor(GD);
      else if (FD->isMultiVersion())
        EmitMultiVersionFunctionDefinition(GD, GV);
      else
        EmitGlobalFunctionDefinition(GD, GV);

      if (Method->isVirtual())
        getVTables().EmitThunks(GD);

      return;
    }

    // Free function: only the multiversion split applies.
    if (FD->isMultiVersion())
      return EmitMultiVersionFunctionDefinition(GD, GV);
    return EmitGlobalFunctionDefinition(GD, GV);
  }

  if (const auto *VD = dyn_cast<VarDecl>(Val: D))
    // A variable with no definition here is emitted as a tentative definition.
    return EmitGlobalVarDefinition(D: VD, IsTentative: !VD->hasDefinition());

  llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
}
4730
// Forward declaration; the definition appears later in this file. Used by
// GetOrCreateLLVMFunction to rewrite calls made through a prototype-less
// declaration so they call the real (typed) function directly.
static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
                                                      llvm::Function *NewFn);
4733
4734static llvm::APInt
4735getFMVPriority(const TargetInfo &TI,
4736 const CodeGenFunction::FMVResolverOption &RO) {
4737 llvm::SmallVector<StringRef, 8> Features{RO.Features};
4738 if (RO.Architecture)
4739 Features.push_back(Elt: *RO.Architecture);
4740 return TI.getFMVPriority(Features);
4741}
4742
4743// Multiversion functions should be at most 'WeakODRLinkage' so that a different
4744// TU can forward declare the function without causing problems. Particularly
4745// in the cases of CPUDispatch, this causes issues. This also makes sure we
4746// work with internal linkage functions, so that the same function name can be
4747// used with internal linkage in multiple TUs.
4748static llvm::GlobalValue::LinkageTypes
4749getMultiversionLinkage(CodeGenModule &CGM, GlobalDecl GD) {
4750 const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl());
4751 if (FD->getFormalLinkage() == Linkage::Internal)
4752 return llvm::GlobalValue::InternalLinkage;
4753 return llvm::GlobalValue::WeakODRLinkage;
4754}
4755
/// Emit the version bodies and resolvers for all queued multiversioned
/// functions. The queue is swapped out first because emitting a version can
/// enqueue further entries; those are handled by the recursive call at the
/// end.
void CodeGenModule::emitMultiVersionFunctions() {
  std::vector<GlobalDecl> MVFuncsToEmit;
  MultiVersionFuncs.swap(x&: MVFuncsToEmit);
  for (GlobalDecl GD : MVFuncsToEmit) {
    const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());
    assert(FD && "Expected a FunctionDecl");

    // Return the llvm::Function for version MVIdx of Decl, emitting its
    // definition — or creating just a declaration when it is not defined in
    // this TU — on first use.
    auto createFunction = [&](const FunctionDecl *Decl, unsigned MVIdx = 0) {
      GlobalDecl CurGD{Decl->isDefined() ? Decl->getDefinition() : Decl, MVIdx};
      StringRef MangledName = getMangledName(GD: CurGD);
      llvm::Constant *Func = GetGlobalValue(Name: MangledName);
      if (!Func) {
        if (Decl->isDefined()) {
          EmitGlobalFunctionDefinition(GD: CurGD, GV: nullptr);
          Func = GetGlobalValue(Name: MangledName);
        } else {
          const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD: CurGD);
          llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI);
          Func = GetAddrOfFunction(GD: CurGD, Ty, /*ForVTable=*/false,
                                   /*DontDefer=*/false, IsForDefinition: ForDefinition);
        }
        assert(Func && "This should have just been created");
      }
      return cast<llvm::Function>(Val: Func);
    };

    // For AArch64, a resolver is only emitted if a function marked with
    // target_version("default")) or target_clones("default") is defined
    // in this TU. For other architectures it is always emitted.
    bool ShouldEmitResolver = !getTarget().getTriple().isAArch64();
    SmallVector<CodeGenFunction::FMVResolverOption, 10> Options;
    // Maps each emitted version function back to its decl, for diagnostics.
    llvm::DenseMap<llvm::Function *, const FunctionDecl *> DeclMap;

    // Collect one resolver option (function + feature set) per version.
    getContext().forEachMultiversionedFunctionVersion(
        FD, Pred: [&](const FunctionDecl *CurFD) {
          llvm::SmallVector<StringRef, 8> Feats;
          bool IsDefined = CurFD->getDefinition() != nullptr;

          if (const auto *TA = CurFD->getAttr<TargetAttr>()) {
            // Target-attribute multiversioning is only handled for X86 here.
            assert(getTarget().getTriple().isX86() && "Unsupported target");
            TA->getX86AddedFeatures(Out&: Feats);
            llvm::Function *Func = createFunction(CurFD);
            DeclMap.insert(KV: {Func, CurFD});
            Options.emplace_back(Args&: Func, Args&: Feats, Args: TA->getX86Architecture());
          } else if (const auto *TVA = CurFD->getAttr<TargetVersionAttr>()) {
            if (TVA->isDefaultVersion() && IsDefined)
              ShouldEmitResolver = true;
            llvm::Function *Func = createFunction(CurFD);
            DeclMap.insert(KV: {Func, CurFD});
            // Feature separator differs per target: '+' on AArch64, ','
            // elsewhere.
            char Delim = getTarget().getTriple().isAArch64() ? '+' : ',';
            TVA->getFeatures(Out&: Feats, Delim);
            Options.emplace_back(Args&: Func, Args&: Feats);
          } else if (const auto *TC = CurFD->getAttr<TargetClonesAttr>()) {
            for (unsigned I = 0; I < TC->featuresStrs_size(); ++I) {
              // Repeated version strings share one body; only the first
              // occurrence of each version gets a function.
              if (!TC->isFirstOfVersion(Index: I))
                continue;
              if (TC->isDefaultVersion(Index: I) && IsDefined)
                ShouldEmitResolver = true;
              llvm::Function *Func = createFunction(CurFD, I);
              DeclMap.insert(KV: {Func, CurFD});
              Feats.clear();
              if (getTarget().getTriple().isX86()) {
                TC->getX86Feature(Out&: Feats, Index: I);
                Options.emplace_back(Args&: Func, Args&: Feats, Args: TC->getX86Architecture(Index: I));
              } else {
                char Delim = getTarget().getTriple().isAArch64() ? '+' : ',';
                TC->getFeatures(Out&: Feats, Index: I, Delim);
                Options.emplace_back(Args&: Func, Args&: Feats);
              }
            }
          } else
            llvm_unreachable("unexpected MultiVersionKind");
        });

    if (!ShouldEmitResolver)
      continue;

    llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD);
    if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: ResolverConstant)) {
      ResolverConstant = IFunc->getResolver();
      if (FD->isTargetClonesMultiVersion() &&
          !getTarget().getTriple().isAArch64()) {
        std::string MangledName = getMangledNameImpl(
            CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);
        if (!GetGlobalValue(Name: MangledName + ".ifunc")) {
          const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
          llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI);
          // In prior versions of Clang, the mangling for ifuncs incorrectly
          // included an .ifunc suffix. This alias is generated for backward
          // compatibility. It is deprecated, and may be removed in the future.
          auto *Alias = llvm::GlobalAlias::create(
              Ty: DeclTy, AddressSpace: 0, Linkage: getMultiversionLinkage(CGM&: *this, GD),
              Name: MangledName + ".ifunc", Aliasee: IFunc, Parent: &getModule());
          SetCommonAttributes(GD: FD, GV: Alias);
        }
      }
    }
    llvm::Function *ResolverFunc = cast<llvm::Function>(Val: ResolverConstant);

    // Sort options by decreasing FMV priority so the resolver tests the most
    // specific version first.
    const TargetInfo &TI = getTarget();
    llvm::stable_sort(
        Range&: Options, C: [&TI](const CodeGenFunction::FMVResolverOption &LHS,
                         const CodeGenFunction::FMVResolverOption &RHS) {
          return getFMVPriority(TI, RO: LHS).ugt(RHS: getFMVPriority(TI, RO: RHS));
        });

    // Diagnose unreachable function versions.
    if (getTarget().getTriple().isAArch64()) {
      for (auto I = Options.begin() + 1, E = Options.end(); I != E; ++I) {
        llvm::APInt RHS = llvm::AArch64::getCpuSupportsMask(Features: I->Features);
        // A version is unreachable when some higher-priority version's
        // feature mask is a subset of its own: wherever this version's
        // requirements hold, the earlier one is selected instead.
        if (std::any_of(first: Options.begin(), last: I, pred: [RHS](auto RO) {
              llvm::APInt LHS = llvm::AArch64::getCpuSupportsMask(Features: RO.Features);
              return LHS.isSubsetOf(RHS);
            })) {
          Diags.Report(Loc: DeclMap[I->Function]->getLocation(),
                       DiagID: diag::warn_unreachable_version)
              << I->Function->getName();
          assert(I->Function->user_empty() && "unexpected users");
          I->Function->eraseFromParent();
          I->Function = nullptr;
        }
      }
    }
    CodeGenFunction CGF(*this);
    CGF.EmitMultiVersionResolver(Resolver: ResolverFunc, Options);

    setMultiVersionResolverAttributes(Resolver: ResolverFunc, GD);
    if (!ResolverFunc->hasLocalLinkage() && supportsCOMDAT())
      ResolverFunc->setComdat(
          getModule().getOrInsertComdat(Name: ResolverFunc->getName()));
  }

  // Ensure that any additions to the deferred decls list caused by emitting a
  // variant are emitted. This can happen when the variant itself is inline and
  // calls a function without linkage.
  if (!MVFuncsToEmit.empty())
    EmitDeferred();

  // Ensure that any additions to the multiversion funcs list from either the
  // deferred decls or the multiversion functions themselves are emitted.
  if (!MultiVersionFuncs.empty())
    emitMultiVersionFunctions();
}
4899
// Symbols with this prefix are used as deactivation symbols for PFP fields.
// See clang/docs/StructureProtection.rst for more information.
// The full symbol name is this prefix followed by the field's PFP name
// (see getPFPDeactivationSymbol / emitPFPFieldsWithEvaluatedOffset below).
static const char PFPDeactivationSymbolPrefix[] = "__pfp_ds_";
4903
4904llvm::GlobalValue *
4905CodeGenModule::getPFPDeactivationSymbol(const FieldDecl *FD) {
4906 std::string DSName = PFPDeactivationSymbolPrefix + getPFPFieldName(FD);
4907 llvm::GlobalValue *DS = TheModule.getNamedValue(Name: DSName);
4908 if (!DS) {
4909 DS = new llvm::GlobalVariable(TheModule, Int8Ty, false,
4910 llvm::GlobalVariable::ExternalWeakLinkage,
4911 nullptr, DSName);
4912 DS->setVisibility(llvm::GlobalValue::HiddenVisibility);
4913 }
4914 return DS;
4915}
4916
4917void CodeGenModule::emitPFPFieldsWithEvaluatedOffset() {
4918 llvm::Constant *Nop = llvm::ConstantExpr::getIntToPtr(
4919 C: llvm::ConstantInt::get(Ty: Int64Ty, V: 0xd503201f), Ty: VoidPtrTy);
4920 for (auto *FD : getContext().PFPFieldsWithEvaluatedOffset) {
4921 std::string DSName = PFPDeactivationSymbolPrefix + getPFPFieldName(FD);
4922 llvm::GlobalValue *OldDS = TheModule.getNamedValue(Name: DSName);
4923 llvm::GlobalValue *DS = llvm::GlobalAlias::create(
4924 Ty: Int8Ty, AddressSpace: 0, Linkage: llvm::GlobalValue::ExternalLinkage, Name: DSName, Aliasee: Nop, Parent: &TheModule);
4925 DS->setVisibility(llvm::GlobalValue::HiddenVisibility);
4926 if (OldDS) {
4927 DS->takeName(V: OldDS);
4928 OldDS->replaceAllUsesWith(V: DS);
4929 OldDS->eraseFromParent();
4930 }
4931 }
4932}
4933
/// Replace the declaration \p Old with \p New: \p New inherits the old
/// global's name, every use of \p Old is rewritten to \p New, and \p Old is
/// deleted. \p Old must really be only a declaration.
static void replaceDeclarationWith(llvm::GlobalValue *Old,
                                   llvm::Constant *New) {
  assert(cast<llvm::Function>(Old)->isDeclaration() && "Not a declaration");
  New->takeName(V: Old);
  Old->replaceAllUsesWith(V: New);
  Old->eraseFromParent();
}
4941
/// Emit the dispatch resolver for a cpu_dispatch multiversioned function,
/// creating (or looking up) each cpu_specific version it dispatches to.
void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
  const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());
  assert(FD && "Not a FunctionDecl?");
  assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?");
  const auto *DD = FD->getAttr<CPUDispatchAttr>();
  assert(DD && "Not a cpu_dispatch Function?");

  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
  llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI);

  StringRef ResolverName = getMangledName(GD);
  UpdateMultiVersionNames(GD, FD, CurName&: ResolverName);

  // With ifunc support the resolver returns a pointer to the chosen version;
  // without it the resolver has the function's own type and is called
  // directly (so it is associated with GD).
  llvm::Type *ResolverType;
  GlobalDecl ResolverGD;
  if (getTarget().supportsIFunc()) {
    ResolverType = llvm::FunctionType::get(
        Result: llvm::PointerType::get(C&: getLLVMContext(),
                              AddressSpace: getTypes().getTargetAddressSpace(T: FD->getType())),
        isVarArg: false);
  }
  else {
    ResolverType = DeclTy;
    ResolverGD = GD;
  }

  auto *ResolverFunc = cast<llvm::Function>(Val: GetOrCreateLLVMFunction(
      MangledName: ResolverName, Ty: ResolverType, D: ResolverGD, /*ForVTable=*/false));

  if (supportsCOMDAT())
    ResolverFunc->setComdat(
        getModule().getOrInsertComdat(Name: ResolverFunc->getName()));

  SmallVector<CodeGenFunction::FMVResolverOption, 10> Options;
  const TargetInfo &Target = getTarget();
  unsigned Index = 0;
  for (const IdentifierInfo *II : DD->cpus()) {
    // Get the name of the target function so we can look it up/create it.
    std::string MangledName = getMangledNameImpl(CGM&: *this, GD, ND: FD, OmitMultiVersionMangling: true) +
                              getCPUSpecificMangling(CGM: *this, Name: II->getName());

    llvm::Constant *Func = GetGlobalValue(Name: MangledName);

    if (!Func) {
      // Prefer emitting a known definition; otherwise create a declaration
      // (for the dispatch index if nothing else is registered).
      GlobalDecl ExistingDecl = Manglings.lookup(Key: MangledName);
      if (ExistingDecl.getDecl() &&
          ExistingDecl.getDecl()->getAsFunction()->isDefined()) {
        EmitGlobalFunctionDefinition(GD: ExistingDecl, GV: nullptr);
        Func = GetGlobalValue(Name: MangledName);
      } else {
        if (!ExistingDecl.getDecl())
          ExistingDecl = GD.getWithMultiVersionIndex(Index);

        Func = GetOrCreateLLVMFunction(
            MangledName, Ty: DeclTy, D: ExistingDecl,
            /*ForVTable=*/false, /*DontDefer=*/true,
            /*IsThunk=*/false, ExtraAttrs: llvm::AttributeList(), IsForDefinition: ForDefinition);
      }
    }

    // Expand the CPU name into its feature list, stripping the leading
    // marker character from each feature and dropping any feature the target
    // does not accept for cpu_supports.
    llvm::SmallVector<StringRef, 32> Features;
    Target.getCPUSpecificCPUDispatchFeatures(Name: II->getName(), Features);
    llvm::transform(Range&: Features, d_first: Features.begin(),
                    F: [](StringRef Str) { return Str.substr(Start: 1); });
    llvm::erase_if(C&: Features, P: [&Target](StringRef Feat) {
      return !Target.validateCpuSupports(Name: Feat);
    });
    Options.emplace_back(Args: cast<llvm::Function>(Val: Func), Args&: Features);
    ++Index;
  }

  // Sort by decreasing cpu_supports mask so the most capable CPU is checked
  // first by the resolver.
  llvm::stable_sort(Range&: Options, C: [](const CodeGenFunction::FMVResolverOption &LHS,
                               const CodeGenFunction::FMVResolverOption &RHS) {
    return llvm::X86::getCpuSupportsMask(FeatureStrs: LHS.Features) >
           llvm::X86::getCpuSupportsMask(FeatureStrs: RHS.Features);
  });

  // If the list contains multiple 'default' versions, such as when it contains
  // 'pentium' and 'generic', don't emit the call to the generic one (since we
  // always run on at least a 'pentium'). We do this by deleting the 'least
  // advanced' (read, lowest mangling letter).
  while (Options.size() > 1 && llvm::all_of(Range: llvm::X86::getCpuSupportsMask(
                                   FeatureStrs: (Options.end() - 2)->Features),
                                 P: [](auto X) { return X == 0; })) {
    StringRef LHSName = (Options.end() - 2)->Function->getName();
    StringRef RHSName = (Options.end() - 1)->Function->getName();
    if (LHSName.compare(RHS: RHSName) < 0)
      Options.erase(CI: Options.end() - 2);
    else
      Options.erase(CI: Options.end() - 1);
  }

  CodeGenFunction CGF(*this);
  CGF.EmitMultiVersionResolver(Resolver: ResolverFunc, Options);
  setMultiVersionResolverAttributes(Resolver: ResolverFunc, GD);

  if (getTarget().supportsIFunc()) {
    llvm::GlobalValue::LinkageTypes Linkage = getMultiversionLinkage(CGM&: *this, GD);
    auto *IFunc = cast<llvm::GlobalValue>(Val: GetOrCreateMultiVersionResolver(GD));
    unsigned AS = IFunc->getType()->getPointerAddressSpace();

    // Fix up function declarations that were created for cpu_specific before
    // cpu_dispatch was known
    if (!isa<llvm::GlobalIFunc>(Val: IFunc)) {
      auto *GI = llvm::GlobalIFunc::create(Ty: DeclTy, AddressSpace: AS, Linkage, Name: "",
                                           Resolver: ResolverFunc, Parent: &getModule());
      replaceDeclarationWith(Old: IFunc, New: GI);
      IFunc = GI;
    }

    // Also provide the unsuffixed mangled name as an alias of the ifunc, so
    // ordinary references to the function resolve through it.
    std::string AliasName = getMangledNameImpl(
        CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);
    llvm::Constant *AliasFunc = GetGlobalValue(Name: AliasName);
    if (!AliasFunc) {
      auto *GA = llvm::GlobalAlias::create(Ty: DeclTy, AddressSpace: AS, Linkage, Name: AliasName,
                                           Aliasee: IFunc, Parent: &getModule());
      SetCommonAttributes(GD, GV: GA);
    }
  }
}
5062
5063/// Adds a declaration to the list of multi version functions if not present.
5064void CodeGenModule::AddDeferredMultiVersionResolverToEmit(GlobalDecl GD) {
5065 const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());
5066 assert(FD && "Not a FunctionDecl?");
5067
5068 if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) {
5069 std::string MangledName =
5070 getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);
5071 if (!DeferredResolversToEmit.insert(key: MangledName).second)
5072 return;
5073 }
5074 MultiVersionFuncs.push_back(x: GD);
5075}
5076
/// If a dispatcher for the specified mangled name is not in the module, create
/// and return it. The dispatcher is either an llvm Function with the specified
/// type, or a global ifunc.
llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
  const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());
  assert(FD && "Not a FunctionDecl?");

  std::string MangledName =
      getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);

  // Holds the name of the resolver, in ifunc mode this is the ifunc (which has
  // a separate resolver).
  std::string ResolverName = MangledName;
  if (getTarget().supportsIFunc()) {
    // Which multiversion kinds carry the ".ifunc" suffix is a historical ABI
    // choice; target_clones/target_version use the plain name.
    switch (FD->getMultiVersionKind()) {
    case MultiVersionKind::None:
      llvm_unreachable("unexpected MultiVersionKind::None for resolver");
    case MultiVersionKind::Target:
    case MultiVersionKind::CPUSpecific:
    case MultiVersionKind::CPUDispatch:
      ResolverName += ".ifunc";
      break;
    case MultiVersionKind::TargetClones:
    case MultiVersionKind::TargetVersion:
      break;
    }
  } else if (FD->isTargetMultiVersion()) {
    ResolverName += ".resolver";
  }

  // cpu_specific never gets an ifunc here (see below); everything else does
  // when the target supports ifuncs.
  bool ShouldReturnIFunc =
      getTarget().supportsIFunc() && !FD->isCPUSpecificMultiVersion();

  // If the resolver has already been created, just return it. This lookup may
  // yield a function declaration instead of a resolver on AArch64. That is
  // because we didn't know whether a resolver will be generated when we first
  // encountered a use of the symbol named after this resolver. Therefore,
  // targets which support ifuncs should not return here unless we actually
  // found an ifunc.
  llvm::GlobalValue *ResolverGV = GetGlobalValue(Name: ResolverName);
  if (ResolverGV && (isa<llvm::GlobalIFunc>(Val: ResolverGV) || !ShouldReturnIFunc))
    return ResolverGV;

  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
  llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI);

  // The resolver needs to be created. For target and target_clones, defer
  // creation until the end of the TU.
  if (FD->isTargetMultiVersion() || FD->isTargetClonesMultiVersion())
    AddDeferredMultiVersionResolverToEmit(GD);

  // For cpu_specific, don't create an ifunc yet because we don't know if the
  // cpu_dispatch will be emitted in this translation unit.
  if (ShouldReturnIFunc) {
    unsigned AS = getTypes().getTargetAddressSpace(T: FD->getType());
    // The inner resolver function returns a pointer to the selected version.
    llvm::Type *ResolverType = llvm::FunctionType::get(
        Result: llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: AS), isVarArg: false);
    llvm::Constant *Resolver = GetOrCreateLLVMFunction(
        MangledName: MangledName + ".resolver", Ty: ResolverType, D: GlobalDecl{},
        /*ForVTable=*/false);
    llvm::GlobalIFunc *GIF =
        llvm::GlobalIFunc::create(Ty: DeclTy, AddressSpace: AS, Linkage: getMultiversionLinkage(CGM&: *this, GD),
                                  Name: "", Resolver, Parent: &getModule());
    GIF->setName(ResolverName);
    SetCommonAttributes(GD: FD, GV: GIF);
    // Replace any plain declaration previously created under this name (see
    // the lookup comment above) with the ifunc.
    if (ResolverGV)
      replaceDeclarationWith(Old: ResolverGV, New: GIF);
    return GIF;
  }

  llvm::Constant *Resolver = GetOrCreateLLVMFunction(
      MangledName: ResolverName, Ty: DeclTy, D: GlobalDecl{}, /*ForVTable=*/false);
  assert(isa<llvm::GlobalValue>(Resolver) && !ResolverGV &&
         "Resolver should be created for the first time");
  SetCommonAttributes(GD: FD, GV: cast<llvm::GlobalValue>(Val: Resolver));
  return Resolver;
}
5154
5155void CodeGenModule::setMultiVersionResolverAttributes(llvm::Function *Resolver,
5156 GlobalDecl GD) {
5157 const NamedDecl *D = dyn_cast_or_null<NamedDecl>(Val: GD.getDecl());
5158 Resolver->setLinkage(getMultiversionLinkage(CGM&: *this, GD));
5159
5160 // Function body has to be emitted before calling setGlobalVisibility
5161 // for Resolver to be considered as definition.
5162 setGlobalVisibility(GV: Resolver, D);
5163
5164 setDSOLocal(Resolver);
5165
5166 // The resolver must be exempt from sanitizer instrumentation, as it can run
5167 // before the sanitizer is initialized.
5168 // (https://github.com/llvm/llvm-project/issues/163369)
5169 Resolver->addFnAttr(Kind: llvm::Attribute::DisableSanitizerInstrumentation);
5170
5171 // Set the default target-specific attributes, such as PAC and BTI ones on
5172 // AArch64. Not passing Decl to prevent setting unrelated attributes,
5173 // as Resolver can be shared by multiple declarations.
5174 // FIXME Some targets may require a non-null D to set some attributes
5175 // (such as "stackrealign" on X86, even when it is requested via
5176 // "-mstackrealign" command line option).
5177 getTargetCodeGenInfo().setTargetAttributes(/*D=*/nullptr, GV: Resolver, M&: *this);
5178}
5179
5180bool CodeGenModule::shouldDropDLLAttribute(const Decl *D,
5181 const llvm::GlobalValue *GV) const {
5182 auto SC = GV->getDLLStorageClass();
5183 if (SC == llvm::GlobalValue::DefaultStorageClass)
5184 return false;
5185 const Decl *MRD = D->getMostRecentDecl();
5186 return (((SC == llvm::GlobalValue::DLLImportStorageClass &&
5187 !MRD->hasAttr<DLLImportAttr>()) ||
5188 (SC == llvm::GlobalValue::DLLExportStorageClass &&
5189 !MRD->hasAttr<DLLExportAttr>())) &&
5190 !shouldMapVisibilityToDLLExport(D: cast<NamedDecl>(Val: MRD)));
5191}
5192
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
/// module, create and return an llvm Function with the specified type. If there
/// is something in the module with the specified name, return it potentially
/// bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that correspond to this. This is used
/// to set the attributes on the function when it is first created.
llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
    StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable,
    bool DontDefer, bool IsThunk, llvm::AttributeList ExtraAttrs,
    ForDefinition_t IsForDefinition) {
  const Decl *D = GD.getDecl();

  std::string NameWithoutMultiVersionMangling;
  if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(Val: D)) {
    // For the device mark the function as one that should be emitted.
    if (getLangOpts().OpenMPIsTargetDevice && OpenMPRuntime &&
        !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
        !DontDefer && !IsForDefinition) {
      if (const FunctionDecl *FDDef = FD->getDefinition()) {
        // Build a GlobalDecl for the definition, preserving the ctor/dtor
        // variant of the one we were asked about.
        GlobalDecl GDDef;
        if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: FDDef))
          GDDef = GlobalDecl(CD, GD.getCtorType());
        else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: FDDef))
          GDDef = GlobalDecl(DD, GD.getDtorType());
        else
          GDDef = GlobalDecl(FDDef);
        EmitGlobal(GD: GDDef);
      }
    }

    // Any attempts to use a MultiVersion function should result in retrieving
    // the iFunc instead. Name Mangling will handle the rest of the changes.
    if (FD->isMultiVersion()) {
      UpdateMultiVersionNames(GD, FD, CurName&: MangledName);
      if (!IsForDefinition) {
        // On AArch64 we do not immediatelly emit an ifunc resolver when a
        // function is used. Instead we defer the emission until we see a
        // default definition. In the meantime we just reference the symbol
        // without FMV mangling (it may or may not be replaced later).
        if (getTarget().getTriple().isAArch64()) {
          AddDeferredMultiVersionResolverToEmit(GD);
          NameWithoutMultiVersionMangling = getMangledNameImpl(
              CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true);
        } else
          return GetOrCreateMultiVersionResolver(GD);
      }
    }
  }

  if (!NameWithoutMultiVersionMangling.empty())
    MangledName = NameWithoutMultiVersionMangling;

  // Lookup the entry, lazily creating it if necessary.
  llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName);
  if (Entry) {
    // A prior weakref reference no longer applies once the function is
    // referenced normally; restore external linkage unless it is truly weak.
    if (WeakRefReferences.erase(Ptr: Entry)) {
      const FunctionDecl *FD = cast_or_null<FunctionDecl>(Val: D);
      if (FD && !FD->hasAttr<WeakAttr>())
        Entry->setLinkage(llvm::Function::ExternalLinkage);
    }

    // Handle dropped DLL attributes.
    if (D && shouldDropDLLAttribute(D, GV: Entry)) {
      Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
      setDSOLocal(Entry);
    }

    // If there are two attempts to define the same mangled name, issue an
    // error.
    if (IsForDefinition && !Entry->isDeclaration()) {
      GlobalDecl OtherGD;
      // Check that GD is not yet in DiagnosedConflictingDefinitions is required
      // to make sure that we issue an error only once.
      if (lookupRepresentativeDecl(MangledName, Result&: OtherGD) &&
          (GD.getCanonicalDecl().getDecl() !=
           OtherGD.getCanonicalDecl().getDecl()) &&
          DiagnosedConflictingDefinitions.insert(V: GD).second) {
        getDiags().Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name)
            << MangledName;
        getDiags().Report(Loc: OtherGD.getDecl()->getLocation(),
                          DiagID: diag::note_previous_definition);
      }
    }

    // Exact type match: the existing global can be returned directly.
    if ((isa<llvm::Function>(Val: Entry) || isa<llvm::GlobalAlias>(Val: Entry)) &&
        (Entry->getValueType() == Ty)) {
      return Entry;
    }

    // Make sure the result is of the correct type.
    // (If function is requested for a definition, we always need to create a new
    // function, not just return a bitcast.)
    if (!IsForDefinition)
      return Entry;
  }

  // This function doesn't have a complete type (for example, the return
  // type is an incomplete struct). Use a fake type instead, and make
  // sure not to try to set attributes.
  bool IsIncompleteFunction = false;

  llvm::FunctionType *FTy;
  if (isa<llvm::FunctionType>(Val: Ty)) {
    FTy = cast<llvm::FunctionType>(Val: Ty);
  } else {
    FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false);
    IsIncompleteFunction = true;
  }

  // When an Entry of the wrong type exists, the new function starts unnamed
  // and takes over the name below.
  llvm::Function *F =
      llvm::Function::Create(Ty: FTy, Linkage: llvm::Function::ExternalLinkage,
                             N: Entry ? StringRef() : MangledName, M: &getModule());

  // Store the declaration associated with this function so it is potentially
  // updated by further declarations or definitions and emitted at the end.
  if (D && D->hasAttr<AnnotateAttr>())
    DeferredAnnotations[MangledName] = cast<ValueDecl>(Val: D);

  // If we already created a function with the same mangled name (but different
  // type) before, take its name and add it to the list of functions to be
  // replaced with F at the end of CodeGen.
  //
  // This happens if there is a prototype for a function (e.g. "int f()") and
  // then a definition of a different type (e.g. "int f(int x)").
  if (Entry) {
    F->takeName(V: Entry);

    // This might be an implementation of a function without a prototype, in
    // which case, try to do special replacement of calls which match the new
    // prototype. The really key thing here is that we also potentially drop
    // arguments from the call site so as to make a direct call, which makes the
    // inliner happier and suppresses a number of optimizer warnings (!) about
    // dropping arguments.
    if (!Entry->use_empty()) {
      ReplaceUsesOfNonProtoTypeWithRealFunction(Old: Entry, NewFn: F);
      Entry->removeDeadConstantUsers();
    }

    addGlobalValReplacement(GV: Entry, C: F);
  }

  assert(F->getName() == MangledName && "name was uniqued!");
  if (D)
    SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
  if (ExtraAttrs.hasFnAttrs()) {
    llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs());
    F->addFnAttrs(Attrs: B);
  }

  if (!DontDefer) {
    // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
    // each other bottoming out with the base dtor. Therefore we emit non-base
    // dtors on usage, even if there is no dtor definition in the TU.
    if (isa_and_nonnull<CXXDestructorDecl>(Val: D) &&
        getCXXABI().useThunkForDtorVariant(Dtor: cast<CXXDestructorDecl>(Val: D),
                                           DT: GD.getDtorType()))
      addDeferredDeclToEmit(GD);

    // This is the first use or definition of a mangled name. If there is a
    // deferred decl with this name, remember that we need to emit it at the end
    // of the file.
    auto DDI = DeferredDecls.find(Val: MangledName);
    if (DDI != DeferredDecls.end()) {
      // Move the potentially referenced deferred decl to the
      // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
      // don't need it anymore).
      addDeferredDeclToEmit(GD: DDI->second);
      DeferredDecls.erase(I: DDI);

      // Otherwise, there are cases we have to worry about where we're
      // using a declaration for which we must emit a definition but where
      // we might not find a top-level definition:
      //   - member functions defined inline in their classes
      //   - friend functions defined inline in some class
      //   - special member functions with implicit definitions
      // If we ever change our AST traversal to walk into class methods,
      // this will be unnecessary.
      //
      // We also don't emit a definition for a function if it's going to be an
      // entry in a vtable, unless it's already marked as used.
    } else if (getLangOpts().CPlusPlus && D) {
      // Look for a declaration that's lexically in a record.
      for (const auto *FD = cast<FunctionDecl>(Val: D)->getMostRecentDecl(); FD;
           FD = FD->getPreviousDecl()) {
        if (isa<CXXRecordDecl>(Val: FD->getLexicalDeclContext())) {
          if (FD->doesThisDeclarationHaveABody()) {
            addDeferredDeclToEmit(GD: GD.getWithDecl(D: FD));
            break;
          }
        }
      }
    }
  }

  // Make sure the result is of the requested type.
  if (!IsIncompleteFunction) {
    assert(F->getFunctionType() == Ty);
    return F;
  }

  return F;
}
5396
/// GetAddrOfFunction - Return the address of the given function. If Ty is
/// non-null, then this function will use the specified type if it has to
/// create it (this occurs when we see a definition of the function).
llvm::Constant *
CodeGenModule::GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty, bool ForVTable,
                                 bool DontDefer,
                                 ForDefinition_t IsForDefinition) {
  // If there was no specific requested type, just convert it now.
  if (!Ty) {
    const auto *FD = cast<FunctionDecl>(Val: GD.getDecl());
    Ty = getTypes().ConvertType(T: FD->getType());
    // Stub references to an OpenCL-style kernel use the arranged (call-site)
    // function type instead of the converted declared type.
    if (DeviceKernelAttr::isOpenCLSpelling(A: FD->getAttr<DeviceKernelAttr>()) &&
        GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
      Ty = getTypes().GetFunctionType(Info: FI);
    }
  }

  // Devirtualized destructor calls may come through here instead of via
  // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead
  // of the complete destructor when necessary.
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: GD.getDecl())) {
    if (getTarget().getCXXABI().isMicrosoft() &&
        GD.getDtorType() == Dtor_Complete &&
        DD->getParent()->getNumVBases() == 0)
      GD = GlobalDecl(DD, Dtor_Base);
  }

  StringRef MangledName = getMangledName(GD);
  auto *F = GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
                                    /*IsThunk=*/false, ExtraAttrs: llvm::AttributeList(),
                                    IsForDefinition);
  // Returns kernel handle for HIP kernel stub function.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice &&
      cast<FunctionDecl>(Val: GD.getDecl())->hasAttr<CUDAGlobalAttr>()) {
    auto *Handle = getCUDARuntime().getKernelHandle(
        Stub: cast<llvm::Function>(Val: F->stripPointerCasts()), GD);
    if (IsForDefinition)
      return F;
    return Handle;
  }
  return F;
}
5440
5441llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) {
5442 llvm::GlobalValue *F =
5443 cast<llvm::GlobalValue>(Val: GetAddrOfFunction(GD: Decl)->stripPointerCasts());
5444
5445 return llvm::NoCFIValue::get(GV: F);
5446}
5447
/// Find the AST declaration (if any) corresponding to the runtime function
/// \p Name. First performs an ordinary lookup at translation-unit scope; in
/// C++ mode it additionally searches the __cxxabiv1 and std namespaces
/// (looking through linkage-spec wrappers), after demangling the well-known
/// premangled std::terminate spellings. Returns null when nothing is found.
static const FunctionDecl *
GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
  TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
  DeclContext *DC = TranslationUnitDecl::castToDeclContext(D: TUDecl);

  // Ordinary TU-scope lookup covers C declarations and extern "C" functions.
  IdentifierInfo &CII = C.Idents.get(Name);
  for (const auto *Result : DC->lookup(Name: &CII))
    if (const auto *FD = dyn_cast<FunctionDecl>(Val: Result))
      return FD;

  if (!C.getLangOpts().CPlusPlus)
    return nullptr;

  // Demangle the premangled name from getTerminateFn()
  IdentifierInfo &CXXII =
      (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ")
          ? C.Idents.get(Name: "terminate")
          : C.Idents.get(Name);

  for (const auto &N : {"__cxxabiv1", "std"}) {
    IdentifierInfo &NS = C.Idents.get(Name: N);
    for (const auto *Result : DC->lookup(Name: &NS)) {
      const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Val: Result);
      // The namespace may be wrapped in a linkage spec; look through it to
      // find the actual NamespaceDecl.
      if (auto *LSD = dyn_cast<LinkageSpecDecl>(Val: Result))
        for (const auto *Result : LSD->lookup(Name: &NS))
          if ((ND = dyn_cast<NamespaceDecl>(Val: Result)))
            break;

      if (ND)
        for (const auto *Result : ND->lookup(Name: &CXXII))
          if (const auto *FD = dyn_cast<FunctionDecl>(Val: Result))
            return FD;
    }
  }

  return nullptr;
}
5485
5486static void setWindowsItaniumDLLImport(CodeGenModule &CGM, bool Local,
5487 llvm::Function *F, StringRef Name) {
5488 // In Windows Itanium environments, try to mark runtime functions
5489 // dllimport. For Mingw and MSVC, don't. We don't really know if the user
5490 // will link their standard library statically or dynamically. Marking
5491 // functions imported when they are not imported can cause linker errors
5492 // and warnings.
5493 if (!Local && CGM.getTriple().isWindowsItaniumEnvironment() &&
5494 !CGM.getCodeGenOpts().LTOVisibilityPublicStd) {
5495 const FunctionDecl *FD = GetRuntimeFunctionDecl(C&: CGM.getContext(), Name);
5496 if (!FD || FD->hasAttr<DLLImportAttr>()) {
5497 F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
5498 F->setLinkage(llvm::GlobalValue::ExternalLinkage);
5499 }
5500 }
5501}
5502
/// Create (or find) a runtime function from an AST-level signature. The type
/// is arranged through CGFunctionInfo, so parameter/return ABI attributes are
/// set precisely — unlike the llvm::FunctionType overload below.
///
/// \param Local true if the function is known to be defined in this image.
/// \param AssumeConvergent add the convergent attribute (for targets where
///        runtime calls must not move across control dependencies).
llvm::FunctionCallee CodeGenModule::CreateRuntimeFunction(
    QualType ReturnTy, ArrayRef<QualType> ArgTys, StringRef Name,
    llvm::AttributeList ExtraAttrs, bool Local, bool AssumeConvergent) {
  if (AssumeConvergent) {
    ExtraAttrs =
        ExtraAttrs.addFnAttribute(C&: VMContext, Kind: llvm::Attribute::Convergent);
  }

  QualType FTy = Context.getFunctionType(ResultTy: ReturnTy, Args: ArgTys,
                                         EPI: FunctionProtoType::ExtProtoInfo());
  const CGFunctionInfo &Info = getTypes().arrangeFreeFunctionType(
      Ty: Context.getCanonicalType(T: FTy).castAs<FunctionProtoType>());
  auto *ConvTy = getTypes().GetFunctionType(Info);
  llvm::Constant *C = GetOrCreateLLVMFunction(
      MangledName: Name, Ty: ConvTy, GD: GlobalDecl(), /*ForVTable=*/false,
      /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);

  if (auto *F = dyn_cast<llvm::Function>(Val: C)) {
    // Only configure the function the first time it is created; an already
    // present (possibly user-defined) function is left untouched.
    if (F->empty()) {
      SetLLVMFunctionAttributes(GD: GlobalDecl(), Info, F, /*IsThunk*/ false);
      // FIXME: Set calling-conv properly in ExtProtoInfo
      F->setCallingConv(getRuntimeCC());
      setWindowsItaniumDLLImport(CGM&: *this, Local, F, Name);
      setDSOLocal(F);
    }
  }
  return {ConvTy, C};
}
5531
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
///
/// \param Local true if the function is known to be defined in this image
///        (suppresses dllimport marking on Windows Itanium).
/// \param AssumeConvergent add the convergent attribute to the function.
llvm::FunctionCallee
CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
                                     llvm::AttributeList ExtraAttrs, bool Local,
                                     bool AssumeConvergent) {
  if (AssumeConvergent) {
    ExtraAttrs =
        ExtraAttrs.addFnAttribute(C&: VMContext, Kind: llvm::Attribute::Convergent);
  }

  llvm::Constant *C =
      GetOrCreateLLVMFunction(MangledName: Name, Ty: FTy, GD: GlobalDecl(), /*ForVTable=*/false,
                              /*DontDefer=*/false, /*IsThunk=*/false,
                              ExtraAttrs);

  if (auto *F = dyn_cast<llvm::Function>(Val: C)) {
    // Only configure the function on first creation; a pre-existing
    // (possibly user-defined) function is left untouched.
    if (F->empty()) {
      F->setCallingConv(getRuntimeCC());
      setWindowsItaniumDLLImport(CGM&: *this, Local, F, Name);
      setDSOLocal(F);
      // FIXME: We should use CodeGenModule::SetLLVMFunctionAttributes() instead
      // of trying to approximate the attributes using the LLVM function
      // signature. The other overload of CreateRuntimeFunction does this; it
      // should be used for new code.
      markRegisterParameterAttributes(F);
    }
  }

  return {FTy, C};
}
5563
/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
/// create and return an llvm GlobalVariable with the specified type and address
/// space. If there is something in the module with the specified name, return
/// it potentially bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that correspond to this. This is used
/// to set the attributes on the global when it is first created.
///
/// If IsForDefinition is true, it is guaranteed that an actual global with
/// type Ty will be returned, not conversion of a variable with the same
/// mangled name but some other type.
llvm::Constant *
CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
                                     LangAS AddrSpace, const VarDecl *D,
                                     ForDefinition_t IsForDefinition) {
  // Lookup the entry, lazily creating it if necessary.
  llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS: AddrSpace);
  if (Entry) {
    // A prior weakref reference is superseded by a real non-weak declaration.
    if (WeakRefReferences.erase(Ptr: Entry)) {
      if (D && !D->hasAttr<WeakAttr>())
        Entry->setLinkage(llvm::Function::ExternalLinkage);
    }

    // Handle dropped DLL attributes.
    if (D && shouldDropDLLAttribute(D, GV: Entry))
      Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);

    if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
      getOpenMPRuntime().registerTargetGlobalVariable(VD: D, Addr: Entry);

    // The existing global matches in both type and address space; reuse it.
    if (Entry->getValueType() == Ty && Entry->getAddressSpace() == TargetAS)
      return Entry;

    // If there are two attempts to define the same mangled name, issue an
    // error.
    if (IsForDefinition && !Entry->isDeclaration()) {
      GlobalDecl OtherGD;
      const VarDecl *OtherD;

      // Check that D is not yet in DiagnosedConflictingDefinitions is required
      // to make sure that we issue an error only once.
      if (D && lookupRepresentativeDecl(MangledName, Result&: OtherGD) &&
          (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) &&
          (OtherD = dyn_cast<VarDecl>(Val: OtherGD.getDecl())) &&
          OtherD->hasInit() &&
          DiagnosedConflictingDefinitions.insert(V: D).second) {
        getDiags().Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name)
            << MangledName;
        getDiags().Report(Loc: OtherGD.getDecl()->getLocation(),
                          DiagID: diag::note_previous_definition);
      }
    }

    // Make sure the result is of the correct type.
    if (Entry->getType()->getAddressSpace() != TargetAS)
      return llvm::ConstantExpr::getAddrSpaceCast(
          C: Entry, Ty: llvm::PointerType::get(C&: Ty->getContext(), AddressSpace: TargetAS));

    // (If global is requested for a definition, we always need to create a new
    // global, not just return a bitcast.)
    if (!IsForDefinition)
      return Entry;
  }

  auto DAddrSpace = GetGlobalVarAddressSpace(D);

  // Create a fresh declaration with external linkage; the caller (or a later
  // definition) adjusts linkage/initializer as needed.
  auto *GV = new llvm::GlobalVariable(
      getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr,
      MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal,
      getContext().getTargetAddressSpace(AS: DAddrSpace));

  // If we already created a global with the same mangled name (but different
  // type) before, take its name and remove it from its parent.
  if (Entry) {
    GV->takeName(V: Entry);

    if (!Entry->use_empty()) {
      Entry->replaceAllUsesWith(V: GV);
    }

    Entry->eraseFromParent();
  }

  // This is the first use or definition of a mangled name. If there is a
  // deferred decl with this name, remember that we need to emit it at the end
  // of the file.
  auto DDI = DeferredDecls.find(Val: MangledName);
  if (DDI != DeferredDecls.end()) {
    // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
    // list, and remove it from DeferredDecls (since we don't need it anymore).
    addDeferredDeclToEmit(GD: DDI->second);
    DeferredDecls.erase(I: DDI);
  }

  // Handle things which are present even on external declarations.
  if (D) {
    if (LangOpts.OpenMP && !LangOpts.OpenMPSimd)
      getOpenMPRuntime().registerTargetGlobalVariable(VD: D, Addr: GV);

    // FIXME: This code is overly simple and should be merged with other global
    // handling.
    GV->setConstant(D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: false, ExcludeDtor: false));

    GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());

    setLinkageForGV(GV, ND: D);

    if (D->getTLSKind()) {
      // Dynamically-initialized TLS variables need a CXX thread-local init.
      if (D->getTLSKind() == VarDecl::TLS_Dynamic)
        CXXThreadLocals.push_back(x: D);
      setTLSMode(GV, D: *D);
    }

    setGVProperties(GV, D);

    // If required by the ABI, treat declarations of static data members with
    // inline initializers as definitions.
    if (getContext().isMSStaticDataMemberInlineDefinition(VD: D)) {
      EmitGlobalVarDefinition(D);
    }

    // Emit section information for extern variables.
    if (D->hasExternalStorage()) {
      if (const SectionAttr *SA = D->getAttr<SectionAttr>())
        GV->setSection(SA->getName());
    }

    // Handle XCore specific ABI requirements.
    if (getTriple().getArch() == llvm::Triple::xcore &&
        D->getLanguageLinkage() == CLanguageLinkage &&
        D->getType().isConstant(Ctx: Context) &&
        isExternallyVisible(L: D->getLinkageAndVisibility().getLinkage()))
      GV->setSection(".cp.rodata");

    // Handle code model attribute
    if (const auto *CMA = D->getAttr<CodeModelAttr>())
      GV->setCodeModel(CMA->getModel());

    // Check if we a have a const declaration with an initializer, we may be
    // able to emit it as available_externally to expose it's value to the
    // optimizer.
    if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() &&
        D->getType().isConstQualified() && !GV->hasInitializer() &&
        !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) {
      const auto *Record =
          Context.getBaseElementType(QT: D->getType())->getAsCXXRecordDecl();
      // Mutable fields make the value observable-to-change, so don't expose
      // the initializer in that case.
      bool HasMutableFields = Record && Record->hasMutableFields();
      if (!HasMutableFields) {
        const VarDecl *InitDecl;
        const Expr *InitExpr = D->getAnyInitializer(D&: InitDecl);
        if (InitExpr) {
          ConstantEmitter emitter(*this);
          llvm::Constant *Init = emitter.tryEmitForInitializer(D: *InitDecl);
          if (Init) {
            auto *InitType = Init->getType();
            if (GV->getValueType() != InitType) {
              // The type of the initializer does not match the definition.
              // This happens when an initializer has a different type from
              // the type of the global (because of padding at the end of a
              // structure for instance).
              GV->setName(StringRef());
              // Make a new global with the correct type, this is now guaranteed
              // to work.
              auto *NewGV = cast<llvm::GlobalVariable>(
                  Val: GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition)
                          ->stripPointerCasts());

              // Erase the old global, since it is no longer used.
              GV->eraseFromParent();
              GV = NewGV;
            } else {
              GV->setInitializer(Init);
              GV->setConstant(true);
              GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
            }
            emitter.finalize(global: GV);
          }
        }
      }
    }
  }

  if (D &&
      D->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly) {
    getTargetCodeGenInfo().setTargetAttributes(D, GV, M&: *this);
    // External HIP managed variables needed to be recorded for transformation
    // in both device and host compilations.
    if (getLangOpts().CUDA && D && D->hasAttr<HIPManagedAttr>() &&
        D->hasExternalStorage())
      getCUDARuntime().handleVarRegistration(VD: D, Var&: *GV);
  }

  if (D)
    SanitizerMD->reportGlobal(GV, D: *D);

  // If the global was created in a different address space than the language
  // expects (e.g. a target-preferred AS), hand back an addrspacecast so
  // callers see a pointer in the expected target address space.
  LangAS ExpectedAS =
      D ? D->getType().getAddressSpace()
        : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default);
  assert(getContext().getTargetAddressSpace(ExpectedAS) == TargetAS);
  if (DAddrSpace != ExpectedAS)
    return performAddrSpaceCast(
        Src: GV, DestTy: llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: TargetAS));

  return GV;
}
5770
5771llvm::Constant *
5772CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
5773 const Decl *D = GD.getDecl();
5774
5775 if (isa<CXXConstructorDecl>(Val: D) || isa<CXXDestructorDecl>(Val: D))
5776 return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
5777 /*DontDefer=*/false, IsForDefinition);
5778
5779 if (isa<CXXMethodDecl>(Val: D)) {
5780 auto FInfo =
5781 &getTypes().arrangeCXXMethodDeclaration(MD: cast<CXXMethodDecl>(Val: D));
5782 auto Ty = getTypes().GetFunctionType(Info: *FInfo);
5783 return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
5784 IsForDefinition);
5785 }
5786
5787 if (isa<FunctionDecl>(Val: D)) {
5788 const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
5789 llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI);
5790 return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
5791 IsForDefinition);
5792 }
5793
5794 return GetAddrOfGlobalVar(D: cast<VarDecl>(Val: D), /*Ty=*/nullptr, IsForDefinition);
5795}
5796
/// Get or create the constant C++ runtime variable named \p Name (e.g. a
/// vtable or RTTI object) with the given type, linkage, and alignment. If a
/// same-named global with a different type exists (only possible through an
/// extern "C" declaration), all its uses are redirected to the new global and
/// the old one is erased. Weak definitions go into a COMDAT when supported.
llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
    StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage,
    llvm::Align Alignment) {
  llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
  llvm::GlobalVariable *OldGV = nullptr;

  if (GV) {
    // Check if the variable has the right type.
    if (GV->getValueType() == Ty)
      return GV;

    // Because C++ name mangling, the only way we can end up with an already
    // existing global with the same name is if it has been declared extern "C".
    assert(GV->isDeclaration() && "Declaration has wrong type!");
    OldGV = GV;
  }

  // Create a new variable.
  GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
                                Linkage, nullptr, Name);

  if (OldGV) {
    // Replace occurrences of the old variable if needed.
    GV->takeName(V: OldGV);

    if (!OldGV->use_empty()) {
      OldGV->replaceAllUsesWith(V: GV);
    }

    OldGV->eraseFromParent();
  }

  // Weak runtime variables are deduplicated across TUs via COMDAT, except
  // available_externally definitions which must not own a COMDAT.
  if (supportsCOMDAT() && GV->isWeakForLinker() &&
      !GV->hasAvailableExternallyLinkage())
    GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName()));

  GV->setAlignment(Alignment);

  return GV;
}
5837
5838/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
5839/// given global variable. If Ty is non-null and if the global doesn't exist,
5840/// then it will be created with the specified type instead of whatever the
5841/// normal requested type would be. If IsForDefinition is true, it is guaranteed
5842/// that an actual global with type Ty will be returned, not conversion of a
5843/// variable with the same mangled name but some other type.
5844llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
5845 llvm::Type *Ty,
5846 ForDefinition_t IsForDefinition) {
5847 assert(D->hasGlobalStorage() && "Not a global variable");
5848 QualType ASTTy = D->getType();
5849 if (!Ty)
5850 Ty = getTypes().ConvertTypeForMem(T: ASTTy);
5851
5852 StringRef MangledName = getMangledName(GD: D);
5853 return GetOrCreateLLVMGlobal(MangledName, Ty, AddrSpace: ASTTy.getAddressSpace(), D,
5854 IsForDefinition);
5855}
5856
5857/// CreateRuntimeVariable - Create a new runtime global variable with the
5858/// specified type and name.
5859llvm::Constant *
5860CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
5861 StringRef Name) {
5862 LangAS AddrSpace = getContext().getLangOpts().OpenCL ? LangAS::opencl_global
5863 : LangAS::Default;
5864 auto *Ret = GetOrCreateLLVMGlobal(MangledName: Name, Ty, AddrSpace, D: nullptr);
5865 setDSOLocal(cast<llvm::GlobalValue>(Val: Ret->stripPointerCasts()));
5866 return Ret;
5867}
5868
5869void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
5870 assert(!D->getInit() && "Cannot emit definite definitions here!");
5871
5872 StringRef MangledName = getMangledName(GD: D);
5873 llvm::GlobalValue *GV = GetGlobalValue(Name: MangledName);
5874
5875 // We already have a definition, not declaration, with the same mangled name.
5876 // Emitting of declaration is not required (and actually overwrites emitted
5877 // definition).
5878 if (GV && !GV->isDeclaration())
5879 return;
5880
5881 // If we have not seen a reference to this variable yet, place it into the
5882 // deferred declarations table to be emitted if needed later.
5883 if (!MustBeEmitted(Global: D) && !GV) {
5884 DeferredDecls[MangledName] = D;
5885 return;
5886 }
5887
5888 // The tentative definition is the only definition.
5889 EmitGlobalVarDefinition(D);
5890}
5891
5892// Return a GlobalDecl. Use the base variants for destructors and constructors.
5893static GlobalDecl getBaseVariantGlobalDecl(const NamedDecl *D) {
5894 if (auto const *CD = dyn_cast<const CXXConstructorDecl>(Val: D))
5895 return GlobalDecl(CD, CXXCtorType::Ctor_Base);
5896 else if (auto const *DD = dyn_cast<const CXXDestructorDecl>(Val: D))
5897 return GlobalDecl(DD, CXXDtorType::Dtor_Base);
5898 return GlobalDecl(D);
5899}
5900
/// Emit debug info for an external (non-defining) declaration, when debug
/// info generation is enabled at a sufficient level. Variables get an
/// external-variable debug entry; functions get a declaration entry unless a
/// subprogram is already attached.
void CodeGenModule::EmitExternalDeclaration(const DeclaratorDecl *D) {
  CGDebugInfo *DI = getModuleDebugInfo();
  // Nothing to do without debug info, or with line-tables-only debug info.
  if (!DI || !getCodeGenOpts().hasReducedDebugInfo())
    return;

  GlobalDecl GD = getBaseVariantGlobalDecl(D);
  if (!GD)
    return;

  llvm::Constant *Addr = GetAddrOfGlobal(GD)->stripPointerCasts();
  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    DI->EmitExternalVariable(
        GV: cast<llvm::GlobalVariable>(Val: Addr->stripPointerCasts()), Decl: VD);
  } else if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    llvm::Function *Fn = cast<llvm::Function>(Val: Addr);
    if (!Fn->getSubprogram())
      DI->EmitFunctionDecl(GD, Loc: FD->getLocation(), FnType: FD->getType(), Fn);
  }
}
5920
5921CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
5922 return Context.toCharUnitsFromBits(
5923 BitSize: getDataLayout().getTypeStoreSizeInBits(Ty));
5924}
5925
/// Compute the language address space in which the global variable \p D
/// (possibly null, for compiler-created globals) should be emitted, based on
/// the active language mode and any attributes on the declaration.
LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
  if (LangOpts.OpenCL) {
    // In OpenCL the AST type already carries an explicit address space.
    LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
    assert(AS == LangAS::opencl_global ||
           AS == LangAS::opencl_global_device ||
           AS == LangAS::opencl_global_host ||
           AS == LangAS::opencl_constant ||
           AS == LangAS::opencl_local ||
           AS >= LangAS::FirstTargetAddressSpace);
    return AS;
  }

  if (LangOpts.SYCLIsDevice &&
      (!D || D->getType().getAddressSpace() == LangAS::Default))
    return LangAS::sycl_global;

  if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
    if (D) {
      if (D->hasAttr<CUDAConstantAttr>())
        return LangAS::cuda_constant;
      if (D->hasAttr<CUDASharedAttr>())
        return LangAS::cuda_shared;
      if (D->hasAttr<CUDADeviceAttr>())
        return LangAS::cuda_device;
      // Unattributed const-qualified globals go to constant memory.
      if (D->getType().isConstQualified())
        return LangAS::cuda_constant;
    }
    return LangAS::cuda_device;
  }

  if (LangOpts.OpenMP) {
    LangAS AS;
    // An OpenMP 'allocate' clause may pin the variable to a specific space.
    if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(VD: D, AS))
      return AS;
  }
  // Otherwise defer to the target's default policy for globals.
  return getTargetCodeGenInfo().getGlobalVarAddressSpace(CGM&: *this, D);
}
5963
/// Return the language address space in which compiler-created constants
/// (e.g. string literals) should be emitted for the current language and
/// target combination.
LangAS CodeGenModule::GetGlobalConstantAddressSpace() const {
  // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
  if (LangOpts.OpenCL)
    return LangAS::opencl_constant;
  if (LangOpts.SYCLIsDevice)
    return LangAS::sycl_global;
  if (LangOpts.HIP && LangOpts.CUDAIsDevice && getTriple().isSPIRV())
    // For HIPSPV map literals to cuda_device (maps to CrossWorkGroup in SPIR-V)
    // instead of default AS (maps to Generic in SPIR-V). Otherwise, we end up
    // with OpVariable instructions with Generic storage class which is not
    // allowed (SPIR-V V1.6 s3.42.8). Also, mapping literals to SPIR-V
    // UniformConstant storage class is not viable as pointers to it may not be
    // casted to Generic pointers which are used to model HIP's "flat" pointers.
    return LangAS::cuda_device;
  // Targets may request a specific constant address space (e.g. amdgcn).
  if (auto AS = getTarget().getConstantAddressSpace())
    return *AS;
  return LangAS::Default;
}
5982
5983// In address space agnostic languages, string literals are in default address
5984// space in AST. However, certain targets (e.g. amdgcn) request them to be
5985// emitted in constant address space in LLVM IR. To be consistent with other
5986// parts of AST, string literal global variables in constant address space
5987// need to be casted to default address space before being put into address
5988// map and referenced by other part of CodeGen.
5989// In OpenCL, string literals are in constant address space in AST, therefore
5990// they should not be casted to default address space.
5991static llvm::Constant *
5992castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
5993 llvm::GlobalVariable *GV) {
5994 llvm::Constant *Cast = GV;
5995 if (!CGM.getLangOpts().OpenCL) {
5996 auto AS = CGM.GetGlobalConstantAddressSpace();
5997 if (AS != LangAS::Default)
5998 Cast = CGM.performAddrSpaceCast(
5999 Src: GV, DestTy: llvm::PointerType::get(
6000 C&: CGM.getLLVMContext(),
6001 AddressSpace: CGM.getContext().getTargetAddressSpace(AS: LangAS::Default)));
6002 }
6003 return Cast;
6004}
6005
/// If \p D is an internal-linkage, 'used'-attributed entity declared in an
/// extern "C" context, record \p GV in StaticExternCValues so it can later
/// receive the unmangled C name (inline assembly relies on that name). When
/// several internal-linkage entities in extern "C" regions share a name, the
/// map entry is nulled and none of them gets the name.
template<typename SomeDecl>
void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
                                               llvm::GlobalValue *GV) {
  // Only relevant in C++, where internal-linkage names are mangled.
  if (!getLangOpts().CPlusPlus)
    return;

  // Must have 'used' attribute, or else inline assembly can't rely on
  // the name existing.
  if (!D->template hasAttr<UsedAttr>())
    return;

  // Must have internal linkage and an ordinary name.
  if (!D->getIdentifier() || D->getFormalLinkage() != Linkage::Internal)
    return;

  // Must be in an extern "C" context. Entities declared directly within
  // a record are not extern "C" even if the record is in such a context.
  const SomeDecl *First = D->getFirstDecl();
  if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
    return;

  // OK, this is an internal linkage entity inside an extern "C" linkage
  // specification. Make a note of that so we can give it the "expected"
  // mangled name if nothing else is using that name.
  std::pair<StaticExternCMap::iterator, bool> R =
      StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));

  // If we have multiple internal linkage entities with the same name
  // in extern "C" regions, none of them gets that name.
  if (!R.second)
    R.first->second = nullptr;
}
6038
6039static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
6040 if (!CGM.supportsCOMDAT())
6041 return false;
6042
6043 if (D.hasAttr<SelectAnyAttr>())
6044 return true;
6045
6046 GVALinkage Linkage;
6047 if (auto *VD = dyn_cast<VarDecl>(Val: &D))
6048 Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
6049 else
6050 Linkage = CGM.getContext().GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: &D));
6051
6052 switch (Linkage) {
6053 case GVA_Internal:
6054 case GVA_AvailableExternally:
6055 case GVA_StrongExternal:
6056 return false;
6057 case GVA_DiscardableODR:
6058 case GVA_StrongODR:
6059 return true;
6060 }
6061 llvm_unreachable("No such linkage");
6062}
6063
/// Whether the target object format supports COMDAT groups at all.
bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}
6067
/// Place \p GO into a COMDAT group keyed on its own name, if \p D's linkage
/// calls for COMDAT placement (see shouldBeInCOMDAT).
void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
                                          llvm::GlobalObject &GO) {
  if (!shouldBeInCOMDAT(CGM&: *this, D))
    return;
  GO.setComdat(TheModule.getOrInsertComdat(Name: GO.getName()));
}
6074
/// Convenience accessor for the target's ABI lowering information.
const ABIInfo &CodeGenModule::getABIInfo() {
  return getTargetCodeGenInfo().getABIInfo();
}
6078
6079/// Pass IsTentative as true if you want to create a tentative definition.
6080void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
6081 bool IsTentative) {
6082 // OpenCL global variables of sampler type are translated to function calls,
6083 // therefore no need to be translated.
6084 QualType ASTTy = D->getType();
6085 if (getLangOpts().OpenCL && ASTTy->isSamplerT())
6086 return;
6087
6088 // HLSL default buffer constants will be emitted during HLSLBufferDecl codegen
6089 if (getLangOpts().HLSL &&
6090 D->getType().getAddressSpace() == LangAS::hlsl_constant)
6091 return;
6092
6093 // If this is OpenMP device, check if it is legal to emit this global
6094 // normally.
6095 if (LangOpts.OpenMPIsTargetDevice && OpenMPRuntime &&
6096 OpenMPRuntime->emitTargetGlobalVariable(GD: D))
6097 return;
6098
6099 llvm::TrackingVH<llvm::Constant> Init;
6100 bool NeedsGlobalCtor = false;
6101 // Whether the definition of the variable is available externally.
6102 // If yes, we shouldn't emit the GloablCtor and GlobalDtor for the variable
6103 // since this is the job for its original source.
6104 bool IsDefinitionAvailableExternally =
6105 getContext().GetGVALinkageForVariable(VD: D) == GVA_AvailableExternally;
6106 bool NeedsGlobalDtor =
6107 !IsDefinitionAvailableExternally &&
6108 D->needsDestruction(Ctx: getContext()) == QualType::DK_cxx_destructor;
6109
6110 // It is helpless to emit the definition for an available_externally variable
6111 // which can't be marked as const.
6112 // We don't need to check if it needs global ctor or dtor. See the above
6113 // comment for ideas.
6114 if (IsDefinitionAvailableExternally &&
6115 (!D->hasConstantInitialization() ||
6116 // TODO: Update this when we have interface to check constexpr
6117 // destructor.
6118 D->needsDestruction(Ctx: getContext()) ||
6119 !D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: true)))
6120 return;
6121
6122 const VarDecl *InitDecl;
6123 const Expr *InitExpr = D->getAnyInitializer(D&: InitDecl);
6124
6125 std::optional<ConstantEmitter> emitter;
6126
6127 // CUDA E.2.4.1 "__shared__ variables cannot have an initialization
6128 // as part of their declaration." Sema has already checked for
6129 // error cases, so we just need to set Init to UndefValue.
6130 bool IsCUDASharedVar =
6131 getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
6132 // Shadows of initialized device-side global variables are also left
6133 // undefined.
6134 // Managed Variables should be initialized on both host side and device side.
6135 bool IsCUDAShadowVar =
6136 !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
6137 (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
6138 D->hasAttr<CUDASharedAttr>());
6139 bool IsCUDADeviceShadowVar =
6140 getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
6141 (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
6142 D->getType()->isCUDADeviceBuiltinTextureType());
6143 if (getLangOpts().CUDA &&
6144 (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar)) {
6145 Init = llvm::UndefValue::get(T: getTypes().ConvertTypeForMem(T: ASTTy));
6146 } else if (getLangOpts().HLSL &&
6147 (D->getType()->isHLSLResourceRecord() ||
6148 D->getType()->isHLSLResourceRecordArray())) {
6149 Init = llvm::PoisonValue::get(T: getTypes().ConvertType(T: ASTTy));
6150 NeedsGlobalCtor = D->getType()->isHLSLResourceRecord() ||
6151 D->getStorageClass() == SC_Static;
6152 } else if (D->hasAttr<LoaderUninitializedAttr>()) {
6153 Init = llvm::UndefValue::get(T: getTypes().ConvertTypeForMem(T: ASTTy));
6154 } else if (!InitExpr) {
6155 // This is a tentative definition; tentative definitions are
6156 // implicitly initialized with { 0 }.
6157 //
6158 // Note that tentative definitions are only emitted at the end of
6159 // a translation unit, so they should never have incomplete
6160 // type. In addition, EmitTentativeDefinition makes sure that we
6161 // never attempt to emit a tentative definition if a real one
6162 // exists. A use may still exists, however, so we still may need
6163 // to do a RAUW.
6164 assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
6165 Init = EmitNullConstant(T: D->getType());
6166 } else {
6167 initializedGlobalDecl = GlobalDecl(D);
6168 emitter.emplace(args&: *this);
6169 llvm::Constant *Initializer = emitter->tryEmitForInitializer(D: *InitDecl);
6170 if (!Initializer) {
6171 QualType T = InitExpr->getType();
6172 if (D->getType()->isReferenceType())
6173 T = D->getType();
6174
6175 if (getLangOpts().CPlusPlus) {
6176 Init = EmitNullConstant(T);
6177 if (!IsDefinitionAvailableExternally)
6178 NeedsGlobalCtor = true;
6179 if (InitDecl->hasFlexibleArrayInit(Ctx: getContext())) {
6180 ErrorUnsupported(D, Type: "flexible array initializer");
6181 // We cannot create ctor for flexible array initializer
6182 NeedsGlobalCtor = false;
6183 }
6184 } else {
6185 ErrorUnsupported(D, Type: "static initializer");
6186 Init = llvm::PoisonValue::get(T: getTypes().ConvertType(T));
6187 }
6188 } else {
6189 Init = Initializer;
6190 // We don't need an initializer, so remove the entry for the delayed
6191 // initializer position (just in case this entry was delayed) if we
6192 // also don't need to register a destructor.
6193 if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
6194 DelayedCXXInitPosition.erase(Val: D);
6195
6196#ifndef NDEBUG
6197 CharUnits VarSize = getContext().getTypeSizeInChars(ASTTy) +
6198 InitDecl->getFlexibleArrayInitChars(getContext());
6199 CharUnits CstSize = CharUnits::fromQuantity(
6200 getDataLayout().getTypeAllocSize(Init->getType()));
6201 assert(VarSize == CstSize && "Emitted constant has unexpected size");
6202#endif
6203 }
6204 }
6205
6206 llvm::Type* InitType = Init->getType();
6207 llvm::Constant *Entry =
6208 GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition: ForDefinition_t(!IsTentative));
6209
6210 // Strip off pointer casts if we got them.
6211 Entry = Entry->stripPointerCasts();
6212
6213 // Entry is now either a Function or GlobalVariable.
6214 auto *GV = dyn_cast<llvm::GlobalVariable>(Val: Entry);
6215
6216 // We have a definition after a declaration with the wrong type.
6217 // We must make a new GlobalVariable* and update everything that used OldGV
6218 // (a declaration or tentative definition) with the new GlobalVariable*
6219 // (which will be a definition).
6220 //
6221 // This happens if there is a prototype for a global (e.g.
6222 // "extern int x[];") and then a definition of a different type (e.g.
6223 // "int x[10];"). This also happens when an initializer has a different type
6224 // from the type of the global (this happens with unions).
6225 if (!GV || GV->getValueType() != InitType ||
6226 GV->getType()->getAddressSpace() !=
6227 getContext().getTargetAddressSpace(AS: GetGlobalVarAddressSpace(D))) {
6228
6229 // Move the old entry aside so that we'll create a new one.
6230 Entry->setName(StringRef());
6231
6232 // Make a new global with the correct type, this is now guaranteed to work.
6233 GV = cast<llvm::GlobalVariable>(
6234 Val: GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition: ForDefinition_t(!IsTentative))
6235 ->stripPointerCasts());
6236
6237 // Replace all uses of the old global with the new global
6238 llvm::Constant *NewPtrForOldDecl =
6239 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(C: GV,
6240 Ty: Entry->getType());
6241 Entry->replaceAllUsesWith(V: NewPtrForOldDecl);
6242
6243 // Erase the old global, since it is no longer used.
6244 cast<llvm::GlobalValue>(Val: Entry)->eraseFromParent();
6245 }
6246
6247 MaybeHandleStaticInExternC(D, GV);
6248
6249 if (D->hasAttr<AnnotateAttr>())
6250 AddGlobalAnnotations(D, GV);
6251
6252 // Set the llvm linkage type as appropriate.
6253 llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD: D);
6254
6255 // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
6256 // the device. [...]"
6257 // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
6258 // __device__, declares a variable that: [...]
6259 // Is accessible from all the threads within the grid and from the host
6260 // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
6261 // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
6262 if (LangOpts.CUDA) {
6263 if (LangOpts.CUDAIsDevice) {
6264 if (Linkage != llvm::GlobalValue::InternalLinkage && !D->isConstexpr() &&
6265 !D->getType().isConstQualified() &&
6266 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
6267 D->getType()->isCUDADeviceBuiltinSurfaceType() ||
6268 D->getType()->isCUDADeviceBuiltinTextureType()))
6269 GV->setExternallyInitialized(true);
6270 } else {
6271 getCUDARuntime().internalizeDeviceSideVar(D, Linkage);
6272 }
6273 getCUDARuntime().handleVarRegistration(VD: D, Var&: *GV);
6274 }
6275
6276 if (LangOpts.HLSL &&
6277 hlsl::isInitializedByPipeline(AS: GetGlobalVarAddressSpace(D))) {
6278 // HLSL Input variables are considered to be set by the driver/pipeline, but
6279 // only visible to a single thread/wave. Push constants are also externally
6280 // initialized, but constant, hence cross-wave visibility is not relevant.
6281 GV->setExternallyInitialized(true);
6282 } else {
6283 GV->setInitializer(Init);
6284 }
6285
6286 if (LangOpts.HLSL)
6287 getHLSLRuntime().handleGlobalVarDefinition(VD: D, Var: GV);
6288
6289 if (emitter)
6290 emitter->finalize(global: GV);
6291
6292 // If it is safe to mark the global 'constant', do so now.
6293 GV->setConstant((D->hasAttr<CUDAConstantAttr>() && LangOpts.CUDAIsDevice) ||
6294 (!NeedsGlobalCtor && !NeedsGlobalDtor &&
6295 D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: true)));
6296
6297 // If it is in a read-only section, mark it 'constant'.
6298 if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
6299 const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
6300 if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
6301 GV->setConstant(true);
6302 }
6303
6304 CharUnits AlignVal = getContext().getDeclAlign(D);
6305 // Check for alignment specifed in an 'omp allocate' directive.
6306 if (std::optional<CharUnits> AlignValFromAllocate =
6307 getOMPAllocateAlignment(VD: D))
6308 AlignVal = *AlignValFromAllocate;
6309 GV->setAlignment(AlignVal.getAsAlign());
6310
6311 // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
6312 // function is only defined alongside the variable, not also alongside
6313 // callers. Normally, all accesses to a thread_local go through the
6314 // thread-wrapper in order to ensure initialization has occurred, underlying
6315 // variable will never be used other than the thread-wrapper, so it can be
6316 // converted to internal linkage.
6317 //
6318 // However, if the variable has the 'constinit' attribute, it _can_ be
6319 // referenced directly, without calling the thread-wrapper, so the linkage
6320 // must not be changed.
6321 //
6322 // Additionally, if the variable isn't plain external linkage, e.g. if it's
6323 // weak or linkonce, the de-duplication semantics are important to preserve,
6324 // so we don't change the linkage.
6325 if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
6326 Linkage == llvm::GlobalValue::ExternalLinkage &&
6327 Context.getTargetInfo().getTriple().isOSDarwin() &&
6328 !D->hasAttr<ConstInitAttr>())
6329 Linkage = llvm::GlobalValue::InternalLinkage;
6330
6331 // HLSL variables in the input or push-constant address space maps are like
6332 // memory-mapped variables. Even if they are 'static', they are externally
6333 // initialized and read/write by the hardware/driver/pipeline.
6334 if (LangOpts.HLSL &&
6335 hlsl::isInitializedByPipeline(AS: GetGlobalVarAddressSpace(D)))
6336 Linkage = llvm::GlobalValue::ExternalLinkage;
6337
6338 GV->setLinkage(Linkage);
6339 if (D->hasAttr<DLLImportAttr>())
6340 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
6341 else if (D->hasAttr<DLLExportAttr>())
6342 GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
6343 else
6344 GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
6345
6346 if (Linkage == llvm::GlobalVariable::CommonLinkage) {
6347 // common vars aren't constant even if declared const.
6348 GV->setConstant(false);
6349 // Tentative definition of global variables may be initialized with
6350 // non-zero null pointers. In this case they should have weak linkage
6351 // since common linkage must have zero initializer and must not have
6352 // explicit section therefore cannot have non-zero initial value.
6353 if (!GV->getInitializer()->isNullValue())
6354 GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
6355 }
6356
6357 setNonAliasAttributes(GD: D, GO: GV);
6358
6359 if (D->getTLSKind() && !GV->isThreadLocal()) {
6360 if (D->getTLSKind() == VarDecl::TLS_Dynamic)
6361 CXXThreadLocals.push_back(x: D);
6362 setTLSMode(GV, D: *D);
6363 }
6364
6365 maybeSetTrivialComdat(D: *D, GO&: *GV);
6366
6367 // Emit the initializer function if necessary.
6368 if (NeedsGlobalCtor || NeedsGlobalDtor)
6369 EmitCXXGlobalVarDeclInitFunc(D, Addr: GV, PerformInit: NeedsGlobalCtor);
6370
6371 SanitizerMD->reportGlobal(GV, D: *D, IsDynInit: NeedsGlobalCtor);
6372
6373 // Emit global variable debug information.
6374 if (CGDebugInfo *DI = getModuleDebugInfo())
6375 if (getCodeGenOpts().hasReducedDebugInfo())
6376 DI->EmitGlobalVariable(GV, Decl: D);
6377}
6378
6379static bool isVarDeclStrongDefinition(const ASTContext &Context,
6380 CodeGenModule &CGM, const VarDecl *D,
6381 bool NoCommon) {
6382 // Don't give variables common linkage if -fno-common was specified unless it
6383 // was overridden by a NoCommon attribute.
6384 if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
6385 return true;
6386
6387 // C11 6.9.2/2:
6388 // A declaration of an identifier for an object that has file scope without
6389 // an initializer, and without a storage-class specifier or with the
6390 // storage-class specifier static, constitutes a tentative definition.
6391 if (D->getInit() || D->hasExternalStorage())
6392 return true;
6393
6394 // A variable cannot be both common and exist in a section.
6395 if (D->hasAttr<SectionAttr>())
6396 return true;
6397
6398 // A variable cannot be both common and exist in a section.
6399 // We don't try to determine which is the right section in the front-end.
6400 // If no specialized section name is applicable, it will resort to default.
6401 if (D->hasAttr<PragmaClangBSSSectionAttr>() ||
6402 D->hasAttr<PragmaClangDataSectionAttr>() ||
6403 D->hasAttr<PragmaClangRelroSectionAttr>() ||
6404 D->hasAttr<PragmaClangRodataSectionAttr>())
6405 return true;
6406
6407 // Thread local vars aren't considered common linkage.
6408 if (D->getTLSKind())
6409 return true;
6410
6411 // Tentative definitions marked with WeakImportAttr are true definitions.
6412 if (D->hasAttr<WeakImportAttr>())
6413 return true;
6414
6415 // A variable cannot be both common and exist in a comdat.
6416 if (shouldBeInCOMDAT(CGM, D: *D))
6417 return true;
6418
6419 // Declarations with a required alignment do not have common linkage in MSVC
6420 // mode.
6421 if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
6422 if (D->hasAttr<AlignedAttr>())
6423 return true;
6424 QualType VarType = D->getType();
6425 if (Context.isAlignmentRequired(T: VarType))
6426 return true;
6427
6428 if (const auto *RD = VarType->getAsRecordDecl()) {
6429 for (const FieldDecl *FD : RD->fields()) {
6430 if (FD->isBitField())
6431 continue;
6432 if (FD->hasAttr<AlignedAttr>())
6433 return true;
6434 if (Context.isAlignmentRequired(T: FD->getType()))
6435 return true;
6436 }
6437 }
6438 }
6439
6440 // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
6441 // common symbols, so symbols with greater alignment requirements cannot be
6442 // common.
6443 // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
6444 // alignments for common symbols via the aligncomm directive, so this
6445 // restriction only applies to MSVC environments.
6446 if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
6447 Context.getTypeAlignIfKnown(T: D->getType()) >
6448 Context.toBits(CharSize: CharUnits::fromQuantity(Quantity: 32)))
6449 return true;
6450
6451 return false;
6452}
6453
/// Map the GVALinkage computed for \p D onto a concrete LLVM linkage type,
/// taking attributes (weak, selectany), language mode, and target quirks
/// (AppleKext, CUDA device compilation) into account. The checks are ordered
/// from most to least specific; earlier ones take precedence.
llvm::GlobalValue::LinkageTypes
CodeGenModule::getLLVMLinkageForDeclarator(const DeclaratorDecl *D,
                                           GVALinkage Linkage) {
  // Internal stays internal regardless of any attribute below.
  if (Linkage == GVA_Internal)
    return llvm::Function::InternalLinkage;

  // __attribute__((weak)) forces a weak definition.
  if (D->hasAttr<WeakAttr>())
    return llvm::GlobalVariable::WeakAnyLinkage;

  // A multiversioned function that would otherwise be available_externally
  // still needs an emittable, deduplicable local copy, hence linkonce.
  if (const auto *FD = D->getAsFunction())
    if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
      return llvm::GlobalVariable::LinkOnceAnyLinkage;

  // We are guaranteed to have a strong definition somewhere else,
  // so we can use available_externally linkage.
  if (Linkage == GVA_AvailableExternally)
    return llvm::GlobalValue::AvailableExternallyLinkage;

  // Note that Apple's kernel linker doesn't support symbol
  // coalescing, so we need to avoid linkonce and weak linkages there.
  // Normally, this means we just map to internal, but for explicit
  // instantiations we'll map to external.

  // In C++, the compiler has to emit a definition in every translation unit
  // that references the function. We should use linkonce_odr because
  // a) if all references in this translation unit are optimized away, we
  // don't need to codegen it. b) if the function persists, it needs to be
  // merged with other definitions. c) C++ has the ODR, so we know the
  // definition is dependable.
  if (Linkage == GVA_DiscardableODR)
    return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage
                                            : llvm::Function::InternalLinkage;

  // An explicit instantiation of a template has weak linkage, since
  // explicit instantiations can occur in multiple translation units
  // and must all be equivalent. However, we are not allowed to
  // throw away these explicit instantiations.
  //
  // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU,
  // so say that CUDA templates are either external (for kernels) or internal.
  // This lets llvm perform aggressive inter-procedural optimizations. For
  // -fgpu-rdc case, device function calls across multiple TU's are allowed,
  // therefore we need to follow the normal linkage paradigm.
  if (Linkage == GVA_StrongODR) {
    if (getLangOpts().AppleKext)
      return llvm::Function::ExternalLinkage;
    if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
        !getLangOpts().GPURelocatableDeviceCode)
      return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage
                                          : llvm::Function::InternalLinkage;
    return llvm::Function::WeakODRLinkage;
  }

  // C++ doesn't have tentative definitions and thus cannot have common
  // linkage.
  if (!getLangOpts().CPlusPlus && isa<VarDecl>(Val: D) &&
      !isVarDeclStrongDefinition(Context, CGM&: *this, D: cast<VarDecl>(Val: D),
                                 NoCommon: CodeGenOpts.NoCommon))
    return llvm::GlobalVariable::CommonLinkage;

  // selectany symbols are externally visible, so use weak instead of
  // linkonce. MSVC optimizes away references to const selectany globals, so
  // all definitions should be the same and ODR linkage should be used.
  // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
  if (D->hasAttr<SelectAnyAttr>())
    return llvm::GlobalVariable::WeakODRLinkage;

  // Otherwise, we have strong external linkage.
  assert(Linkage == GVA_StrongExternal);
  return llvm::GlobalVariable::ExternalLinkage;
}
6525
6526llvm::GlobalValue::LinkageTypes
6527CodeGenModule::getLLVMLinkageVarDefinition(const VarDecl *VD) {
6528 GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
6529 return getLLVMLinkageForDeclarator(D: VD, Linkage);
6530}
6531
/// Replace the uses of a function that was declared with a non-proto type.
/// We want to silently drop extra arguments from call sites.
///
/// Walks the use list of \p old, rewrites every direct call/invoke of it to
/// call \p newFn instead (truncating surplus arguments), and recurses through
/// bitcast constant expressions. Call sites whose return or argument types
/// don't match \p newFn exactly are left untouched.
static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
                                          llvm::Function *newFn) {
  // Fast path.
  if (old->use_empty())
    return;

  llvm::Type *newRetTy = newFn->getReturnType();
  SmallVector<llvm::Value *, 4> newArgs;

  // Erasure is deferred until after the loop: we must not delete call sites
  // while iterating over old's use list.
  SmallVector<llvm::CallBase *> callSitesToBeRemovedFromParent;

  for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
       ui != ue; ui++) {
    llvm::User *user = ui->getUser();

    // Recognize and replace uses of bitcasts. Most calls to
    // unprototyped functions will use bitcasts.
    if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(Val: user)) {
      if (bitcast->getOpcode() == llvm::Instruction::BitCast)
        replaceUsesOfNonProtoConstant(old: bitcast, newFn);
      continue;
    }

    // Recognize calls to the function. Only rewrite uses where old is the
    // callee, not where it is passed as an argument.
    llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(Val: user);
    if (!callSite)
      continue;
    if (!callSite->isCallee(U: &*ui))
      continue;

    // If the return types don't match exactly, then we can't
    // transform this call unless it's dead.
    if (callSite->getType() != newRetTy && !callSite->use_empty())
      continue;

    // Get the call site's attribute list.
    SmallVector<llvm::AttributeSet, 8> newArgAttrs;
    llvm::AttributeList oldAttrs = callSite->getAttributes();

    // If the function was passed too few arguments, don't transform.
    unsigned newNumArgs = newFn->arg_size();
    if (callSite->arg_size() < newNumArgs)
      continue;

    // If extra arguments were passed, we silently drop them.
    // If any of the types mismatch, we don't transform.
    unsigned argNo = 0;
    bool dontTransform = false;
    for (llvm::Argument &A : newFn->args()) {
      if (callSite->getArgOperand(i: argNo)->getType() != A.getType()) {
        dontTransform = true;
        break;
      }

      // Add any parameter attributes.
      newArgAttrs.push_back(Elt: oldAttrs.getParamAttrs(ArgNo: argNo));
      argNo++;
    }
    if (dontTransform)
      continue;

    // Okay, we can transform this. Create the new call instruction and copy
    // over the required information. Only the first argNo arguments are kept,
    // which drops any extras the old call site passed.
    newArgs.append(in_start: callSite->arg_begin(), in_end: callSite->arg_begin() + argNo);

    // Copy over any operand bundles.
    SmallVector<llvm::OperandBundleDef, 1> newBundles;
    callSite->getOperandBundlesAsDefs(Defs&: newBundles);

    // Build a CallInst or InvokeInst matching the old site, inserted in the
    // same position.
    llvm::CallBase *newCall;
    if (isa<llvm::CallInst>(Val: callSite)) {
      newCall = llvm::CallInst::Create(Func: newFn, Args: newArgs, Bundles: newBundles, NameStr: "",
                                       InsertBefore: callSite->getIterator());
    } else {
      auto *oldInvoke = cast<llvm::InvokeInst>(Val: callSite);
      newCall = llvm::InvokeInst::Create(
          Func: newFn, IfNormal: oldInvoke->getNormalDest(), IfException: oldInvoke->getUnwindDest(),
          Args: newArgs, Bundles: newBundles, NameStr: "", InsertBefore: callSite->getIterator());
    }
    newArgs.clear(); // for the next iteration

    if (!newCall->getType()->isVoidTy())
      newCall->takeName(V: callSite);
    // Preserve function/return attributes, but use the per-argument
    // attributes collected for the retained arguments only.
    newCall->setAttributes(
        llvm::AttributeList::get(C&: newFn->getContext(), FnAttrs: oldAttrs.getFnAttrs(),
                                 RetAttrs: oldAttrs.getRetAttrs(), ArgAttrs: newArgAttrs));
    newCall->setCallingConv(callSite->getCallingConv());

    // Finally, remove the old call, replacing any uses with the new one.
    if (!callSite->use_empty())
      callSite->replaceAllUsesWith(V: newCall);

    // Copy debug location attached to CI.
    if (callSite->getDebugLoc())
      newCall->setDebugLoc(callSite->getDebugLoc());

    callSitesToBeRemovedFromParent.push_back(Elt: callSite);
  }

  // Now that iteration over the use list is finished, it is safe to erase
  // the rewritten call sites.
  for (auto *callSite : callSitesToBeRemovedFromParent) {
    callSite->eraseFromParent();
  }
}
6637
6638/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
6639/// implement a function with no prototype, e.g. "int foo() {}". If there are
6640/// existing call uses of the old function in the module, this adjusts them to
6641/// call the new function directly.
6642///
6643/// This is not just a cleanup: the always_inline pass requires direct calls to
6644/// functions to be able to inline them. If there is a bitcast in the way, it
6645/// won't inline them. Instcombine normally deletes these calls, but it isn't
6646/// run at -O0.
6647static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
6648 llvm::Function *NewFn) {
6649 // If we're redefining a global as a function, don't transform it.
6650 if (!isa<llvm::Function>(Val: Old)) return;
6651
6652 replaceUsesOfNonProtoConstant(old: Old, newFn: NewFn);
6653}
6654
6655void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
6656 auto DK = VD->isThisDeclarationADefinition();
6657 if ((DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>()) ||
6658 (LangOpts.CUDA && !shouldEmitCUDAGlobalVar(Global: VD)))
6659 return;
6660
6661 TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
6662 // If we have a definition, this might be a deferred decl. If the
6663 // instantiation is explicit, make sure we emit it at the end.
6664 if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
6665 GetAddrOfGlobalVar(D: VD);
6666
6667 EmitTopLevelDecl(D: VD);
6668}
6669
/// Emit the definition of the function declared by \p GD into the module,
/// reusing \p GV when its type already matches, then apply linkage,
/// attributes, and ctor/dtor registration.
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
                                                llvm::GlobalValue *GV) {
  const auto *D = cast<FunctionDecl>(Val: GD.getDecl());

  // Compute the function info and LLVM type.
  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
  llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI);

  // Get or create the prototype for the function. If the existing global's
  // value type doesn't match, this creates (or finds) one that does.
  if (!GV || (GV->getValueType() != Ty))
    GV = cast<llvm::GlobalValue>(Val: GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
                                                  /*DontDefer=*/true,
                                                  IsForDefinition: ForDefinition));

  // Already emitted.
  if (!GV->isDeclaration())
    return;

  // We need to set linkage and visibility on the function before
  // generating code for it because various parts of IR generation
  // want to propagate this information down (e.g. to local static
  // declarations).
  auto *Fn = cast<llvm::Function>(Val: GV);
  setFunctionLinkage(GD, F: Fn);

  // FIXME: this is redundant with part of setFunctionDefinitionAttributes
  setGVProperties(GV: Fn, GD);

  MaybeHandleStaticInExternC(D, GV: Fn);

  maybeSetTrivialComdat(D: *D, GO&: *Fn);

  // Generate the actual body of the function.
  CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo: FI);

  setNonAliasAttributes(GD, GO: Fn);

  // At -O0 (unless disabled) functions get optnone, which would conflict
  // with the always_inline added below.
  bool ShouldAddOptNone = !CodeGenOpts.DisableO0ImplyOptNone &&
                          (CodeGenOpts.OptimizationLevel == 0) &&
                          !D->hasAttr<MinSizeAttr>();

  // For OpenCL-spelled kernels, mark the host-side stub always_inline unless
  // the declaration or existing attributes forbid inlining.
  if (DeviceKernelAttr::isOpenCLSpelling(A: D->getAttr<DeviceKernelAttr>())) {
    if (GD.getKernelReferenceKind() == KernelReferenceKind::Stub &&
        !D->hasAttr<NoInlineAttr>() &&
        !Fn->hasFnAttribute(Kind: llvm::Attribute::NoInline) &&
        !D->hasAttr<OptimizeNoneAttr>() &&
        !Fn->hasFnAttribute(Kind: llvm::Attribute::OptimizeNone) &&
        !ShouldAddOptNone) {
      Fn->addFnAttr(Kind: llvm::Attribute::AlwaysInline);
    }
  }

  SetLLVMFunctionAttributesForDefinition(D, F: Fn);

  // Evaluate the constructor/destructor priority expression if one was
  // given; otherwise fall back to the attribute's default priority.
  auto GetPriority = [this](const auto *Attr) -> int {
    Expr *E = Attr->getPriority();
    if (E) {
      return E->EvaluateKnownConstInt(Ctx: this->getContext()).getExtValue();
    }
    return Attr->DefaultPriority;
  };

  // Register with the global ctor/dtor lists and OpenMP declare-target
  // machinery as required by the declaration's attributes.
  if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
    AddGlobalCtor(Ctor: Fn, Priority: GetPriority(CA));
  if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
    AddGlobalDtor(Dtor: Fn, Priority: GetPriority(DA), IsDtorAttrFunc: true);
  if (getLangOpts().OpenMP && D->hasAttr<OMPDeclareTargetDeclAttr>())
    getOpenMPRuntime().emitDeclareTargetFunction(FD: D, GV);
}
6738
/// Emit an llvm::GlobalAlias for a declaration carrying an AliasAttr,
/// diagnosing self-referential (cyclic) aliases and replacing any prior
/// plain declaration of the same mangled name with the alias.
void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
  const auto *D = cast<ValueDecl>(Val: GD.getDecl());
  const AliasAttr *AA = D->getAttr<AliasAttr>();
  assert(AA && "Not an alias?");

  StringRef MangledName = getMangledName(GD);

  // An alias whose target is its own mangled name is trivially cyclic.
  if (AA->getAliasee() == MangledName) {
    Diags.Report(Loc: AA->getLocation(), DiagID: diag::err_cyclic_alias) << 0;
    return;
  }

  // If there is a definition in the module, then it wins over the alias.
  // This is dubious, but allow it to be safe. Just ignore the alias.
  llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  Aliases.push_back(x: GD);

  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: D->getType());

  // Create a reference to the named value. This ensures that it is emitted
  // if a deferred decl.
  llvm::Constant *Aliasee;
  llvm::GlobalValue::LinkageTypes LT;
  if (isa<llvm::FunctionType>(Val: DeclTy)) {
    Aliasee = GetOrCreateLLVMFunction(MangledName: AA->getAliasee(), Ty: DeclTy, GD,
                                      /*ForVTable=*/false);
    LT = getFunctionLinkage(GD);
  } else {
    Aliasee = GetOrCreateLLVMGlobal(MangledName: AA->getAliasee(), Ty: DeclTy, AddrSpace: LangAS::Default,
                                    /*D=*/nullptr);
    if (const auto *VD = dyn_cast<VarDecl>(Val: GD.getDecl()))
      LT = getLLVMLinkageVarDefinition(VD);
    else
      LT = getFunctionLinkage(GD);
  }

  // Create the new alias itself, but don't set a name yet.
  unsigned AS = Aliasee->getType()->getPointerAddressSpace();
  auto *GA =
      llvm::GlobalAlias::create(Ty: DeclTy, AddressSpace: AS, Linkage: LT, Name: "", Aliasee, Parent: &getModule());

  if (Entry) {
    // If the aliasee resolved to the prior declaration itself, the alias
    // would be cyclic once the declaration is replaced below.
    if (GA->getAliasee() == Entry) {
      Diags.Report(Loc: AA->getLocation(), DiagID: diag::err_cyclic_alias) << 0;
      return;
    }

    assert(Entry->isDeclaration());

    // If there is a declaration in the module, then we had an extern followed
    // by the alias, as in:
    // extern int test6();
    // ...
    // int test6() __attribute__((alias("test7")));
    //
    // Remove it and replace uses of it with the alias.
    GA->takeName(V: Entry);

    Entry->replaceAllUsesWith(V: GA);
    Entry->eraseFromParent();
  } else {
    GA->setName(MangledName);
  }

  // Set attributes which are particular to an alias; this is a
  // specialization of the attributes which may be set on a global
  // variable/function.
  if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
      D->isWeakImported()) {
    GA->setLinkage(llvm::Function::WeakAnyLinkage);
  }

  // Propagate the TLS model from the aliased variable declaration.
  if (const auto *VD = dyn_cast<VarDecl>(Val: D))
    if (VD->getTLSKind())
      setTLSMode(GV: GA, D: *VD);

  SetCommonAttributes(GD, GV: GA);

  // Emit global alias debug information.
  if (isa<VarDecl>(Val: D))
    if (CGDebugInfo *DI = getModuleDebugInfo())
      DI->EmitGlobalAlias(GV: cast<llvm::GlobalValue>(Val: GA->getAliasee()->stripPointerCasts()), Decl: GD);
}
6825
/// Emit an llvm::GlobalIFunc for a declaration carrying an IFuncAttr,
/// diagnosing cyclic resolvers and duplicate definitions, and replacing any
/// prior plain declaration of the same mangled name with the ifunc.
void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
  const auto *D = cast<ValueDecl>(Val: GD.getDecl());
  const IFuncAttr *IFA = D->getAttr<IFuncAttr>();
  assert(IFA && "Not an ifunc?");

  StringRef MangledName = getMangledName(GD);

  // An ifunc naming itself as its own resolver is trivially cyclic.
  if (IFA->getResolver() == MangledName) {
    Diags.Report(Loc: IFA->getLocation(), DiagID: diag::err_cyclic_alias) << 1;
    return;
  }

  // Report an error if some definition overrides ifunc.
  llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName);
  if (Entry && !Entry->isDeclaration()) {
    GlobalDecl OtherGD;
    if (lookupRepresentativeDecl(MangledName, Result&: OtherGD) &&
        DiagnosedConflictingDefinitions.insert(V: GD).second) {
      Diags.Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name)
          << MangledName;
      Diags.Report(Loc: OtherGD.getDecl()->getLocation(),
                   DiagID: diag::note_previous_definition);
    }
    return;
  }

  Aliases.push_back(x: GD);

  // The resolver might not be visited yet. Specify a dummy non-function type
  // to indicate IsIncompleteFunction. Either the type is ignored (if the
  // resolver was emitted) or the whole function will be replaced (if the
  // resolver has not been emitted).
  llvm::Constant *Resolver =
      GetOrCreateLLVMFunction(MangledName: IFA->getResolver(), Ty: VoidTy, GD: {},
                              /*ForVTable=*/false);
  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: D->getType());
  unsigned AS = getTypes().getTargetAddressSpace(T: D->getType());
  llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create(
      Ty: DeclTy, AddressSpace: AS, Linkage: llvm::Function::ExternalLinkage, Name: "", Resolver, Parent: &getModule());
  if (Entry) {
    // If the resolver resolved to the prior declaration itself, the ifunc
    // would become cyclic once that declaration is replaced below.
    if (GIF->getResolver() == Entry) {
      Diags.Report(Loc: IFA->getLocation(), DiagID: diag::err_cyclic_alias) << 1;
      return;
    }
    assert(Entry->isDeclaration());

    // If there is a declaration in the module, then we had an extern followed
    // by the ifunc, as in:
    // extern int test();
    // ...
    // int test() __attribute__((ifunc("resolver")));
    //
    // Remove it and replace uses of it with the ifunc.
    GIF->takeName(V: Entry);

    Entry->replaceAllUsesWith(V: GIF);
    Entry->eraseFromParent();
  } else
    GIF->setName(MangledName);
  SetCommonAttributes(GD, GV: GIF);
}
6887
6888llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
6889 ArrayRef<llvm::Type*> Tys) {
6890 return llvm::Intrinsic::getOrInsertDeclaration(M: &getModule(),
6891 id: (llvm::Intrinsic::ID)IID, Tys);
6892}
6893
/// Return (creating if needed) the CFConstantStringMap entry for \p Literal.
/// ASCII-only literals are keyed by their raw bytes; literals containing
/// non-ASCII or NUL characters are converted to UTF-16 and keyed by the
/// resulting code units (including the trailing NUL).
///
/// \param IsUTF16 [out] set to true only when the UTF-16 path is taken.
/// \param StringLength [out] length in code units (bytes for ASCII, 16-bit
///        units for UTF-16), excluding the terminator.
/// NOTE(review): TargetIsLSB is accepted but never read in this function.
static llvm::StringMapEntry<llvm::GlobalVariable *> &
GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
                         const StringLiteral *Literal, bool TargetIsLSB,
                         bool &IsUTF16, unsigned &StringLength) {
  StringRef String = Literal->getString();
  unsigned NumBytes = String.size();

  // Check for simple case: pure-ASCII literals can use the raw bytes as key.
  if (!Literal->containsNonAsciiOrNull()) {
    StringLength = NumBytes;
    return *Map.insert(KV: std::make_pair(x&: String, y: nullptr)).first;
  }

  // Otherwise, convert the UTF8 literals into a string of shorts.
  IsUTF16 = true;

  SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
  const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
  llvm::UTF16 *ToPtr = &ToBuf[0];

  (void)llvm::ConvertUTF8toUTF16(sourceStart: &FromPtr, sourceEnd: FromPtr + NumBytes, targetStart: &ToPtr,
                                 targetEnd: ToPtr + NumBytes, flags: llvm::strictConversion);

  // ConvertUTF8toUTF16 returns the length in ToPtr.
  StringLength = ToPtr - &ToBuf[0];

  // Add an explicit null.
  *ToPtr = 0;
  // Key the map on the UTF-16 code units viewed as bytes; StringMap copies
  // the key, so the stack buffer going out of scope is fine.
  return *Map.insert(KV: std::make_pair(
      x: StringRef(reinterpret_cast<const char *>(ToBuf.data()),
                 (StringLength + 1) * 2),
      y: nullptr)).first;
}
6927
6928ConstantAddress
6929CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
6930 unsigned StringLength = 0;
6931 bool isUTF16 = false;
6932 llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
6933 GetConstantCFStringEntry(Map&: CFConstantStringMap, Literal,
6934 TargetIsLSB: getDataLayout().isLittleEndian(), IsUTF16&: isUTF16,
6935 StringLength);
6936
6937 if (auto *C = Entry.second)
6938 return ConstantAddress(
6939 C, C->getValueType(), CharUnits::fromQuantity(Quantity: C->getAlignment()));
6940
6941 const ASTContext &Context = getContext();
6942 const llvm::Triple &Triple = getTriple();
6943
6944 const auto CFRuntime = getLangOpts().CFRuntime;
6945 const bool IsSwiftABI =
6946 static_cast<unsigned>(CFRuntime) >=
6947 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift);
6948 const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1;
6949
6950 // If we don't already have it, get __CFConstantStringClassReference.
6951 if (!CFConstantStringClassRef) {
6952 const char *CFConstantStringClassName = "__CFConstantStringClassReference";
6953 llvm::Type *Ty = getTypes().ConvertType(T: getContext().IntTy);
6954 Ty = llvm::ArrayType::get(ElementType: Ty, NumElements: 0);
6955
6956 switch (CFRuntime) {
6957 default: break;
6958 case LangOptions::CoreFoundationABI::Swift: [[fallthrough]];
6959 case LangOptions::CoreFoundationABI::Swift5_0:
6960 CFConstantStringClassName =
6961 Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
6962 : "$s10Foundation19_NSCFConstantStringCN";
6963 Ty = IntPtrTy;
6964 break;
6965 case LangOptions::CoreFoundationABI::Swift4_2:
6966 CFConstantStringClassName =
6967 Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN"
6968 : "$S10Foundation19_NSCFConstantStringCN";
6969 Ty = IntPtrTy;
6970 break;
6971 case LangOptions::CoreFoundationABI::Swift4_1:
6972 CFConstantStringClassName =
6973 Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN"
6974 : "__T010Foundation19_NSCFConstantStringCN";
6975 Ty = IntPtrTy;
6976 break;
6977 }
6978
6979 llvm::Constant *C = CreateRuntimeVariable(Ty, Name: CFConstantStringClassName);
6980
6981 if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) {
6982 llvm::GlobalValue *GV = nullptr;
6983
6984 if ((GV = dyn_cast<llvm::GlobalValue>(Val: C))) {
6985 IdentifierInfo &II = Context.Idents.get(Name: GV->getName());
6986 TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl();
6987 DeclContext *DC = TranslationUnitDecl::castToDeclContext(D: TUDecl);
6988
6989 const VarDecl *VD = nullptr;
6990 for (const auto *Result : DC->lookup(Name: &II))
6991 if ((VD = dyn_cast<VarDecl>(Val: Result)))
6992 break;
6993
6994 if (Triple.isOSBinFormatELF()) {
6995 if (!VD)
6996 GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
6997 } else {
6998 GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
6999 if (!VD || !VD->hasAttr<DLLExportAttr>())
7000 GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
7001 else
7002 GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
7003 }
7004
7005 setDSOLocal(GV);
7006 }
7007 }
7008
7009 // Decay array -> ptr
7010 CFConstantStringClassRef =
7011 IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty) : C;
7012 }
7013
7014 QualType CFTy = Context.getCFConstantStringType();
7015
7016 auto *STy = cast<llvm::StructType>(Val: getTypes().ConvertType(T: CFTy));
7017
7018 ConstantInitBuilder Builder(*this);
7019 auto Fields = Builder.beginStruct(structTy: STy);
7020
7021 // Class pointer.
7022 Fields.addSignedPointer(Pointer: cast<llvm::Constant>(Val&: CFConstantStringClassRef),
7023 Schema: getCodeGenOpts().PointerAuth.ObjCIsaPointers,
7024 CalleeDecl: GlobalDecl(), CalleeType: QualType());
7025
7026 // Flags.
7027 if (IsSwiftABI) {
7028 Fields.addInt(intTy: IntPtrTy, value: IsSwift4_1 ? 0x05 : 0x01);
7029 Fields.addInt(intTy: Int64Ty, value: isUTF16 ? 0x07d0 : 0x07c8);
7030 } else {
7031 Fields.addInt(intTy: IntTy, value: isUTF16 ? 0x07d0 : 0x07C8);
7032 }
7033
7034 // String pointer.
7035 llvm::Constant *C = nullptr;
7036 if (isUTF16) {
7037 auto Arr = llvm::ArrayRef(
7038 reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
7039 Entry.first().size() / 2);
7040 C = llvm::ConstantDataArray::get(Context&: VMContext, Elts: Arr);
7041 } else {
7042 C = llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Entry.first());
7043 }
7044
7045 // Note: -fwritable-strings doesn't make the backing store strings of
7046 // CFStrings writable.
7047 auto *GV =
7048 new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
7049 llvm::GlobalValue::PrivateLinkage, C, ".str");
7050 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
7051 // Don't enforce the target's minimum global alignment, since the only use
7052 // of the string is via this class initializer.
7053 CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(T: Context.ShortTy)
7054 : Context.getTypeAlignInChars(T: Context.CharTy);
7055 GV->setAlignment(Align.getAsAlign());
7056
7057 // FIXME: We set the section explicitly to avoid a bug in ld64 224.1.
7058 // Without it LLVM can merge the string with a non unnamed_addr one during
7059 // LTO. Doing that changes the section it ends in, which surprises ld64.
7060 if (Triple.isOSBinFormatMachO())
7061 GV->setSection(isUTF16 ? "__TEXT,__ustring"
7062 : "__TEXT,__cstring,cstring_literals");
7063 // Make sure the literal ends up in .rodata to allow for safe ICF and for
7064 // the static linker to adjust permissions to read-only later on.
7065 else if (Triple.isOSBinFormatELF())
7066 GV->setSection(".rodata");
7067
7068 // String.
7069 Fields.add(value: GV);
7070
7071 // String length.
7072 llvm::IntegerType *LengthTy =
7073 llvm::IntegerType::get(C&: getModule().getContext(),
7074 NumBits: Context.getTargetInfo().getLongWidth());
7075 if (IsSwiftABI) {
7076 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
7077 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
7078 LengthTy = Int32Ty;
7079 else
7080 LengthTy = IntPtrTy;
7081 }
7082 Fields.addInt(intTy: LengthTy, value: StringLength);
7083
7084 // Swift ABI requires 8-byte alignment to ensure that the _Atomic(uint64_t) is
7085 // properly aligned on 32-bit platforms.
7086 CharUnits Alignment =
7087 IsSwiftABI ? Context.toCharUnitsFromBits(BitSize: 64) : getPointerAlign();
7088
7089 // The struct.
7090 GV = Fields.finishAndCreateGlobal(args: "_unnamed_cfstring_", args&: Alignment,
7091 /*isConstant=*/args: false,
7092 args: llvm::GlobalVariable::PrivateLinkage);
7093 GV->addAttribute(Kind: "objc_arc_inert");
7094 switch (Triple.getObjectFormat()) {
7095 case llvm::Triple::UnknownObjectFormat:
7096 llvm_unreachable("unknown file format");
7097 case llvm::Triple::DXContainer:
7098 case llvm::Triple::GOFF:
7099 case llvm::Triple::SPIRV:
7100 case llvm::Triple::XCOFF:
7101 llvm_unreachable("unimplemented");
7102 case llvm::Triple::COFF:
7103 case llvm::Triple::ELF:
7104 case llvm::Triple::Wasm:
7105 GV->setSection("cfstring");
7106 break;
7107 case llvm::Triple::MachO:
7108 GV->setSection("__DATA,__cfstring");
7109 break;
7110 }
7111 Entry.second = GV;
7112
7113 return ConstantAddress(GV, GV->getValueType(), Alignment);
7114}
7115
7116bool CodeGenModule::getExpressionLocationsEnabled() const {
7117 return !CodeGenOpts.EmitCodeView || CodeGenOpts.DebugColumnInfo;
7118}
7119
/// Lazily build (and cache in ObjCFastEnumerationStateType) the implicit
/// record type used by Objective-C fast enumeration. The record has four
/// fields: an unsigned long, a pointer to id, a pointer to unsigned long,
/// and an array of five unsigned longs.
QualType CodeGenModule::getObjCFastEnumerationStateType() {
  if (ObjCFastEnumerationStateType.isNull()) {
    RecordDecl *D = Context.buildImplicitRecord(Name: "__objcFastEnumerationState");
    D->startDefinition();

    // The types of the four fields, in declaration order.
    QualType FieldTypes[] = {
        Context.UnsignedLongTy, Context.getPointerType(T: Context.getObjCIdType()),
        Context.getPointerType(T: Context.UnsignedLongTy),
        Context.getConstantArrayType(EltTy: Context.UnsignedLongTy, ArySize: llvm::APInt(32, 5),
                                     SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0)};

    // Create one unnamed public field per entry of FieldTypes (4 entries).
    for (size_t i = 0; i < 4; ++i) {
      FieldDecl *Field = FieldDecl::Create(C: Context,
                                           DC: D,
                                           StartLoc: SourceLocation(),
                                           IdLoc: SourceLocation(), Id: nullptr,
                                           T: FieldTypes[i], /*TInfo=*/nullptr,
                                           /*BitWidth=*/BW: nullptr,
                                           /*Mutable=*/false,
                                           InitStyle: ICIS_NoInit);
      Field->setAccess(AS_public);
      D->addDecl(D: Field);
    }

    D->completeDefinition();
    ObjCFastEnumerationStateType = Context.getCanonicalTagType(TD: D);
  }

  return ObjCFastEnumerationStateType;
}
7150
/// Produce the constant array of character data for the string literal \p E,
/// sized to the literal's constant array type (zero-filling or truncating via
/// resize as needed). Handles 1-byte, 2-byte, and 4-byte code units.
llvm::Constant *
CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
  assert(!E->getType()->isPointerType() && "Strings are always arrays");

  // Don't emit it as the address of the string, emit the string data itself
  // as an inline array.
  if (E->getCharByteWidth() == 1) {
    SmallString<64> Str(E->getString());

    // Resize the string to the right size, which is indicated by its type.
    const ConstantArrayType *CAT = Context.getAsConstantArrayType(T: E->getType());
    assert(CAT && "String literal not of constant array type!");
    Str.resize(N: CAT->getZExtSize());
    // AddNull=false: the resize above already accounts for any terminator.
    return llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Str, AddNull: false);
  }

  auto *AType = cast<llvm::ArrayType>(Val: getTypes().ConvertType(T: E->getType()));
  llvm::Type *ElemTy = AType->getElementType();
  unsigned NumElements = AType->getNumElements();

  // Wide strings have either 2-byte or 4-byte elements.
  if (ElemTy->getPrimitiveSizeInBits() == 16) {
    SmallVector<uint16_t, 32> Elements;
    Elements.reserve(N: NumElements);

    for(unsigned i = 0, e = E->getLength(); i != e; ++i)
      Elements.push_back(Elt: E->getCodeUnit(i));
    // Zero-fill up to the array type's element count.
    Elements.resize(N: NumElements);
    return llvm::ConstantDataArray::get(Context&: VMContext, Elts&: Elements);
  }

  assert(ElemTy->getPrimitiveSizeInBits() == 32);
  SmallVector<uint32_t, 32> Elements;
  Elements.reserve(N: NumElements);

  for(unsigned i = 0, e = E->getLength(); i != e; ++i)
    Elements.push_back(Elt: E->getCodeUnit(i));
  // Zero-fill up to the array type's element count.
  Elements.resize(N: NumElements);
  return llvm::ConstantDataArray::get(Context&: VMContext, Elts&: Elements);
}
7191
/// Create a module-level global in the target's preferred constant address
/// space holding the string data \p C, with linkage \p LT and the given name
/// and alignment. The global is marked unnamed_addr; it is constant unless
/// -fwritable-strings is in effect.
static llvm::GlobalVariable *
GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
                      CodeGenModule &CGM, StringRef GlobalName,
                      CharUnits Alignment) {
  unsigned AddrSpace = CGM.getContext().getTargetAddressSpace(
      AS: CGM.GetGlobalConstantAddressSpace());

  llvm::Module &M = CGM.getModule();
  // Create a global variable for this string
  auto *GV = new llvm::GlobalVariable(
      M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
      nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
  GV->setAlignment(Alignment.getAsAlign());
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  if (GV->isWeakForLinker()) {
    // Weak string literals are only expected on COFF (see assert); give the
    // global its own comdat keyed on its name.
    assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
    GV->setComdat(M.getOrInsertComdat(Name: GV->getName()));
  }
  CGM.setDSOLocal(GV);

  return GV;
}
7214
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
/// constant array for the given string literal. Identical literals share one
/// global unless -fwritable-strings is in effect.
ConstantAddress
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
                                                  StringRef Name) {
  CharUnits Alignment =
      getContext().getAlignOfGlobalVarInChars(T: S->getType(), /*VD=*/nullptr);

  llvm::Constant *C = GetConstantArrayFromStringLiteral(E: S);
  // Unless strings are writable, check the dedup cache first; on a hit, reuse
  // the existing global, raising its alignment if this use needs more.
  llvm::GlobalVariable **Entry = nullptr;
  if (!LangOpts.WritableStrings) {
    Entry = &ConstantStringMap[C];
    if (auto GV = *Entry) {
      if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
        GV->setAlignment(Alignment.getAsAlign());
      return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV),
                             GV->getValueType(), Alignment);
    }
  }

  SmallString<256> MangledNameBuffer;
  StringRef GlobalVariableName;
  llvm::GlobalValue::LinkageTypes LT;

  // Mangle the string literal if that's how the ABI merges duplicate strings.
  // Don't do it if they are writable, since we don't want writes in one TU to
  // affect strings in another.
  if (getCXXABI().getMangleContext().shouldMangleStringLiteral(SL: S) &&
      !LangOpts.WritableStrings) {
    llvm::raw_svector_ostream Out(MangledNameBuffer);
    getCXXABI().getMangleContext().mangleStringLiteral(SL: S, Out);
    // Mangled names are merged across TUs via linkonce_odr.
    LT = llvm::GlobalValue::LinkOnceODRLinkage;
    GlobalVariableName = MangledNameBuffer;
  } else {
    LT = llvm::GlobalValue::PrivateLinkage;
    GlobalVariableName = Name;
  }

  auto GV = GenerateStringLiteral(C, LT, CGM&: *this, GlobalName: GlobalVariableName, Alignment);

  CGDebugInfo *DI = getModuleDebugInfo();
  if (DI && getCodeGenOpts().hasReducedDebugInfo())
    DI->AddStringLiteralDebugInfo(GV, S);

  // Record the new global in the dedup cache (Entry is null when writable).
  if (Entry)
    *Entry = GV;

  SanitizerMD->reportGlobal(GV, Loc: S->getStrTokenLoc(TokNum: 0), Name: "<string literal>");

  return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV),
                         GV->getValueType(), Alignment);
}
7267
7268/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
7269/// array for the given ObjCEncodeExpr node.
7270ConstantAddress
7271CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
7272 std::string Str;
7273 getContext().getObjCEncodingForType(T: E->getEncodedType(), S&: Str);
7274
7275 return GetAddrOfConstantCString(Str);
7276}
7277
/// GetAddrOfConstantCString - Returns a pointer to a character array containing
/// the literal and a terminating '\0' character.
/// The result has pointer to array type.
ConstantAddress CodeGenModule::GetAddrOfConstantCString(const std::string &Str,
                                                        StringRef GlobalName) {
  // Include the trailing NUL in the data we emit.
  StringRef StrWithNull(Str.c_str(), Str.size() + 1);
  CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(
      T: getContext().CharTy, /*VD=*/nullptr);

  // AddNull=false: StrWithNull already carries the terminator.
  llvm::Constant *C =
      llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: StrWithNull, AddNull: false);

  // Don't share any string literals if strings aren't constant.
  llvm::GlobalVariable **Entry = nullptr;
  if (!LangOpts.WritableStrings) {
    Entry = &ConstantStringMap[C];
    if (auto GV = *Entry) {
      // Cache hit: reuse the existing global, raising its alignment if needed.
      if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
        GV->setAlignment(Alignment.getAsAlign());
      return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV),
                             GV->getValueType(), Alignment);
    }
  }

  // Create a global variable for this.
  auto GV = GenerateStringLiteral(C, LT: llvm::GlobalValue::PrivateLinkage, CGM&: *this,
                                  GlobalName, Alignment);
  if (Entry)
    *Entry = GV;

  return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV),
                         GV->getValueType(), Alignment);
}
7311
/// Return the address of the global variable backing the lifetime-extended
/// temporary \p E (which must have static or thread storage duration).
/// \param Init the initializing expression; may denote a subobject of the
///        materialized temporary rather than the whole temporary.
///
/// Re-entrant calls (emission of the initializer referring back to the same
/// temporary) receive a placeholder global, which the outermost call replaces
/// with the real definition before returning.
ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
    const MaterializeTemporaryExpr *E, const Expr *Init) {
  assert((E->getStorageDuration() == SD_Static ||
          E->getStorageDuration() == SD_Thread) && "not a global temporary");
  const auto *VD = cast<VarDecl>(Val: E->getExtendingDecl());

  // If we're not materializing a subobject of the temporary, keep the
  // cv-qualifiers from the type of the MaterializeTemporaryExpr.
  QualType MaterializedType = Init->getType();
  if (Init == E->getSubExpr())
    MaterializedType = E->getType();

  CharUnits Align = getContext().getTypeAlignInChars(T: MaterializedType);

  auto InsertResult = MaterializedGlobalTemporaryMap.insert(KV: {E, nullptr});
  if (!InsertResult.second) {
    // We've seen this before: either we already created it or we're in the
    // process of doing so.
    if (!InsertResult.first->second) {
      // We recursively re-entered this function, probably during emission of
      // the initializer. Create a placeholder. We'll clean this up in the
      // outer call, at the end of this function.
      llvm::Type *Type = getTypes().ConvertTypeForMem(T: MaterializedType);
      InsertResult.first->second = new llvm::GlobalVariable(
          getModule(), Type, false, llvm::GlobalVariable::InternalLinkage,
          nullptr);
    }
    return ConstantAddress(InsertResult.first->second,
                           llvm::cast<llvm::GlobalVariable>(
                               Val: InsertResult.first->second->stripPointerCasts())
                               ->getValueType(),
                           Align);
  }

  // FIXME: If an externally-visible declaration extends multiple temporaries,
  // we need to give each temporary the same name in every translation unit (and
  // we also need to make the temporaries externally-visible).
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getCXXABI().getMangleContext().mangleReferenceTemporary(
      D: VD, ManglingNumber: E->getManglingNumber(), Out);

  APValue *Value = nullptr;
  if (E->getStorageDuration() == SD_Static && VD->evaluateValue()) {
    // If the initializer of the extending declaration is a constant
    // initializer, we should have a cached constant initializer for this
    // temporary. Note that this might have a different value from the value
    // computed by evaluating the initializer if the surrounding constant
    // expression modifies the temporary.
    Value = E->getOrCreateValue(MayCreate: false);
  }

  // Try evaluating it now, it might have a constant initializer.
  Expr::EvalResult EvalResult;
  if (!Value && Init->EvaluateAsRValue(Result&: EvalResult, Ctx: getContext()) &&
      !EvalResult.hasSideEffects())
    Value = &EvalResult.Val;

  LangAS AddrSpace = GetGlobalVarAddressSpace(D: VD);

  std::optional<ConstantEmitter> emitter;
  llvm::Constant *InitialValue = nullptr;
  bool Constant = false;
  llvm::Type *Type;
  if (Value) {
    // The temporary has a constant initializer, use it.
    emitter.emplace(args&: *this);
    InitialValue = emitter->emitForInitializer(value: *Value, destAddrSpace: AddrSpace,
                                               destType: MaterializedType);
    Constant =
        MaterializedType.isConstantStorage(Ctx: getContext(), /*ExcludeCtor*/ Value,
                                           /*ExcludeDtor*/ false);
    Type = InitialValue->getType();
  } else {
    // No initializer, the initialization will be provided when we
    // initialize the declaration which performed lifetime extension.
    Type = getTypes().ConvertTypeForMem(T: MaterializedType);
  }

  // Create a global variable for this lifetime-extended temporary.
  llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD);
  if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
    const VarDecl *InitVD;
    if (VD->isStaticDataMember() && VD->getAnyInitializer(D&: InitVD) &&
        isa<CXXRecordDecl>(Val: InitVD->getLexicalDeclContext())) {
      // Temporaries defined inside a class get linkonce_odr linkage because the
      // class can be defined in multiple translation units.
      Linkage = llvm::GlobalVariable::LinkOnceODRLinkage;
    } else {
      // There is no need for this temporary to have external linkage if the
      // VarDecl has external linkage.
      Linkage = llvm::GlobalVariable::InternalLinkage;
    }
  }
  auto TargetAS = getContext().getTargetAddressSpace(AS: AddrSpace);
  auto *GV = new llvm::GlobalVariable(
      getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(),
      /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  if (emitter) emitter->finalize(global: GV);
  // Don't assign dllimport or dllexport to local linkage globals.
  if (!llvm::GlobalValue::isLocalLinkage(Linkage)) {
    setGVProperties(GV, D: VD);
    if (GV->getDLLStorageClass() == llvm::GlobalVariable::DLLExportStorageClass)
      // The reference temporary should never be dllexport.
      GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
  }
  GV->setAlignment(Align.getAsAlign());
  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName()));
  if (VD->getTLSKind())
    setTLSMode(GV, D: *VD);
  // If the variable lives in a non-default address space, hand callers a
  // pointer cast to the default address space.
  llvm::Constant *CV = GV;
  if (AddrSpace != LangAS::Default)
    CV = performAddrSpaceCast(
        Src: GV, DestTy: llvm::PointerType::get(
                C&: getLLVMContext(),
                AddressSpace: getContext().getTargetAddressSpace(AS: LangAS::Default)));

  // Update the map with the new temporary. If we created a placeholder above,
  // replace it with the new global now.
  llvm::Constant *&Entry = MaterializedGlobalTemporaryMap[E];
  if (Entry) {
    Entry->replaceAllUsesWith(V: CV);
    llvm::cast<llvm::GlobalVariable>(Val: Entry)->eraseFromParent();
  }
  Entry = CV;

  return ConstantAddress(CV, Type, Align);
}
7441
/// EmitObjCPropertyImplementations - Emit information for synthesized
/// properties for an implementation. For each \@synthesize'd property,
/// generate the getter (and, for writable properties, the setter) bodies
/// that were not explicitly written in this implementation.
void CodeGenModule::EmitObjCPropertyImplementations(const
                                                    ObjCImplementationDecl *D) {
  for (const auto *PID : D->property_impls()) {
    // Dynamic is just for type-checking.
    if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
      ObjCPropertyDecl *PD = PID->getPropertyDecl();

      // Determine which methods need to be implemented, some may have
      // been overridden. Note that ::isPropertyAccessor is not the method
      // we want, that just indicates if the decl came from a
      // property. What we want to know is if the method is defined in
      // this implementation.
      auto *Getter = PID->getGetterMethodDecl();
      if (!Getter || Getter->isSynthesizedAccessorStub())
        CodeGenFunction(*this).GenerateObjCGetter(
            IMP: const_cast<ObjCImplementationDecl *>(D), PID);
      // Read-only properties get no setter.
      auto *Setter = PID->getSetterMethodDecl();
      if (!PD->isReadOnly() && (!Setter || Setter->isSynthesizedAccessorStub()))
        CodeGenFunction(*this).GenerateObjCSetter(
            IMP: const_cast<ObjCImplementationDecl *>(D), PID);
    }
  }
}
7467
7468static bool needsDestructMethod(ObjCImplementationDecl *impl) {
7469 const ObjCInterfaceDecl *iface = impl->getClassInterface();
7470 for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
7471 ivar; ivar = ivar->getNextIvar())
7472 if (ivar->getType().isDestructedType())
7473 return true;
7474
7475 return false;
7476}
7477
7478static bool AllTrivialInitializers(CodeGenModule &CGM,
7479 ObjCImplementationDecl *D) {
7480 CodeGenFunction CGF(CGM);
7481 for (ObjCImplementationDecl::init_iterator B = D->init_begin(),
7482 E = D->init_end(); B != E; ++B) {
7483 CXXCtorInitializer *CtorInitExp = *B;
7484 Expr *Init = CtorInitExp->getInit();
7485 if (!CGF.isTrivialInitializer(Init))
7486 return false;
7487 }
7488 return true;
7489}
7490
/// EmitObjCIvarInitializations - Emit information for ivar initialization
/// for an implementation. Synthesizes and emits the implicit .cxx_destruct
/// and .cxx_construct instance methods as required by the ivars.
void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
  // We might need a .cxx_destruct even if we don't have any ivar initializers.
  if (needsDestructMethod(impl: D)) {
    // Synthesize a nullary, void-returning '.cxx_destruct' instance method,
    // attach it to the implementation, and emit its body.
    const IdentifierInfo *II = &getContext().Idents.get(Name: ".cxx_destruct");
    Selector cxxSelector = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II);
    ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(
        C&: getContext(), beginLoc: D->getLocation(), endLoc: D->getLocation(), SelInfo: cxxSelector,
        T: getContext().VoidTy, ReturnTInfo: nullptr, contextDecl: D,
        /*isInstance=*/true, /*isVariadic=*/false,
        /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
        /*isImplicitlyDeclared=*/true,
        /*isDefined=*/false, impControl: ObjCImplementationControl::Required);
    D->addInstanceMethod(method: DTORMethod);
    CodeGenFunction(*this).GenerateObjCCtorDtorMethod(IMP: D, MD: DTORMethod, ctor: false);
    D->setHasDestructors(true);
  }

  // If the implementation doesn't have any ivar initializers, we don't need
  // a .cxx_construct.
  if (D->getNumIvarInitializers() == 0 ||
      AllTrivialInitializers(CGM&: *this, D))
    return;

  // Synthesize the '.cxx_construct' instance method and emit its body.
  const IdentifierInfo *II = &getContext().Idents.get(Name: ".cxx_construct");
  Selector cxxSelector = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II);
  // The constructor returns 'self'.
  ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(
      C&: getContext(), beginLoc: D->getLocation(), endLoc: D->getLocation(), SelInfo: cxxSelector,
      T: getContext().getObjCIdType(), ReturnTInfo: nullptr, contextDecl: D, /*isInstance=*/true,
      /*isVariadic=*/false,
      /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
      /*isImplicitlyDeclared=*/true,
      /*isDefined=*/false, impControl: ObjCImplementationControl::Required);
  D->addInstanceMethod(method: CTORMethod);
  CodeGenFunction(*this).GenerateObjCCtorDtorMethod(IMP: D, MD: CTORMethod, ctor: true);
  D->setHasNonZeroConstructors(true);
}
7530
7531// EmitLinkageSpec - Emit all declarations in a linkage spec.
7532void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
7533 if (LSD->getLanguage() != LinkageSpecLanguageIDs::C &&
7534 LSD->getLanguage() != LinkageSpecLanguageIDs::CXX) {
7535 ErrorUnsupported(D: LSD, Type: "linkage spec");
7536 return;
7537 }
7538
7539 EmitDeclContext(DC: LSD);
7540}
7541
/// Emit a top-level statement declaration. Consecutive top-level statements
/// are squashed into one synthetic 'void __stmts__N(void)' function, which is
/// appended to CXXGlobalInits so it runs with the other global initializers.
void CodeGenModule::EmitTopLevelStmt(const TopLevelStmtDecl *D) {
  // Device code should not be at top level.
  if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
    return;

  // The in-flight function collecting top-level statements, if any.
  std::unique_ptr<CodeGenFunction> &CurCGF =
      GlobalTopLevelStmtBlockInFlight.first;

  // We emitted a top-level stmt but after it there is initialization.
  // Stop squashing the top-level stmts into a single function.
  if (CurCGF && CXXGlobalInits.back() != CurCGF->CurFn) {
    CurCGF->FinishFunction(EndLoc: D->getEndLoc());
    CurCGF = nullptr;
  }

  if (!CurCGF) {
    // void __stmts__N(void)
    // FIXME: Ask the ABI name mangler to pick a name.
    std::string Name = "__stmts__" + llvm::utostr(X: CXXGlobalInits.size());
    FunctionArgList Args;
    QualType RetTy = getContext().VoidTy;
    const CGFunctionInfo &FnInfo =
        getTypes().arrangeBuiltinFunctionDeclaration(resultType: RetTy, args: Args);
    llvm::FunctionType *FnTy = getTypes().GetFunctionType(Info: FnInfo);
    llvm::Function *Fn = llvm::Function::Create(
        Ty: FnTy, Linkage: llvm::GlobalValue::InternalLinkage, N: Name, M: &getModule());

    CurCGF.reset(p: new CodeGenFunction(*this));
    GlobalTopLevelStmtBlockInFlight.second = D;
    CurCGF->StartFunction(GD: GlobalDecl(), RetTy, Fn, FnInfo, Args,
                          Loc: D->getBeginLoc(), StartLoc: D->getBeginLoc());
    CXXGlobalInits.push_back(x: Fn);
  }

  CurCGF->EmitStmt(S: D->getStmt());
}
7578
7579void CodeGenModule::EmitDeclContext(const DeclContext *DC) {
7580 for (auto *I : DC->decls()) {
7581 // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
7582 // are themselves considered "top-level", so EmitTopLevelDecl on an
7583 // ObjCImplDecl does not recursively visit them. We need to do that in
7584 // case they're nested inside another construct (LinkageSpecDecl /
7585 // ExportDecl) that does stop them from being considered "top-level".
7586 if (auto *OID = dyn_cast<ObjCImplDecl>(Val: I)) {
7587 for (auto *M : OID->methods())
7588 EmitTopLevelDecl(D: M);
7589 }
7590
7591 EmitTopLevelDecl(D: I);
7592 }
7593}
7594
7595/// EmitTopLevelDecl - Emit code for a single top level declaration.
7596void CodeGenModule::EmitTopLevelDecl(Decl *D) {
7597 // Ignore dependent declarations.
7598 if (D->isTemplated())
7599 return;
7600
7601 // Consteval function shouldn't be emitted.
7602 if (auto *FD = dyn_cast<FunctionDecl>(Val: D); FD && FD->isImmediateFunction())
7603 return;
7604
7605 switch (D->getKind()) {
7606 case Decl::CXXConversion:
7607 case Decl::CXXMethod:
7608 case Decl::Function:
7609 EmitGlobal(GD: cast<FunctionDecl>(Val: D));
7610 // Always provide some coverage mapping
7611 // even for the functions that aren't emitted.
7612 AddDeferredUnusedCoverageMapping(D);
7613 break;
7614
7615 case Decl::CXXDeductionGuide:
7616 // Function-like, but does not result in code emission.
7617 break;
7618
7619 case Decl::Var:
7620 case Decl::Decomposition:
7621 case Decl::VarTemplateSpecialization:
7622 EmitGlobal(GD: cast<VarDecl>(Val: D));
7623 if (auto *DD = dyn_cast<DecompositionDecl>(Val: D))
7624 for (auto *B : DD->flat_bindings())
7625 if (auto *HD = B->getHoldingVar())
7626 EmitGlobal(GD: HD);
7627
7628 break;
7629
7630 // Indirect fields from global anonymous structs and unions can be
7631 // ignored; only the actual variable requires IR gen support.
7632 case Decl::IndirectField:
7633 break;
7634
7635 // C++ Decls
7636 case Decl::Namespace:
7637 EmitDeclContext(DC: cast<NamespaceDecl>(Val: D));
7638 break;
7639 case Decl::ClassTemplateSpecialization: {
7640 const auto *Spec = cast<ClassTemplateSpecializationDecl>(Val: D);
7641 if (CGDebugInfo *DI = getModuleDebugInfo())
7642 if (Spec->getSpecializationKind() ==
7643 TSK_ExplicitInstantiationDefinition &&
7644 Spec->hasDefinition())
7645 DI->completeTemplateDefinition(SD: *Spec);
7646 } [[fallthrough]];
7647 case Decl::CXXRecord: {
7648 CXXRecordDecl *CRD = cast<CXXRecordDecl>(Val: D);
7649 if (CGDebugInfo *DI = getModuleDebugInfo()) {
7650 if (CRD->hasDefinition())
7651 DI->EmitAndRetainType(
7652 Ty: getContext().getCanonicalTagType(TD: cast<RecordDecl>(Val: D)));
7653 if (auto *ES = D->getASTContext().getExternalSource())
7654 if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
7655 DI->completeUnusedClass(D: *CRD);
7656 }
7657 // Emit any static data members, they may be definitions.
7658 for (auto *I : CRD->decls())
7659 if (isa<VarDecl>(Val: I) || isa<CXXRecordDecl>(Val: I) || isa<EnumDecl>(Val: I))
7660 EmitTopLevelDecl(D: I);
7661 break;
7662 }
7663 // No code generation needed.
7664 case Decl::UsingShadow:
7665 case Decl::ClassTemplate:
7666 case Decl::VarTemplate:
7667 case Decl::Concept:
7668 case Decl::VarTemplatePartialSpecialization:
7669 case Decl::FunctionTemplate:
7670 case Decl::TypeAliasTemplate:
7671 case Decl::Block:
7672 case Decl::Empty:
7673 case Decl::Binding:
7674 break;
7675 case Decl::Using: // using X; [C++]
7676 if (CGDebugInfo *DI = getModuleDebugInfo())
7677 DI->EmitUsingDecl(UD: cast<UsingDecl>(Val&: *D));
7678 break;
7679 case Decl::UsingEnum: // using enum X; [C++]
7680 if (CGDebugInfo *DI = getModuleDebugInfo())
7681 DI->EmitUsingEnumDecl(UD: cast<UsingEnumDecl>(Val&: *D));
7682 break;
7683 case Decl::NamespaceAlias:
7684 if (CGDebugInfo *DI = getModuleDebugInfo())
7685 DI->EmitNamespaceAlias(NA: cast<NamespaceAliasDecl>(Val&: *D));
7686 break;
7687 case Decl::UsingDirective: // using namespace X; [C++]
7688 if (CGDebugInfo *DI = getModuleDebugInfo())
7689 DI->EmitUsingDirective(UD: cast<UsingDirectiveDecl>(Val&: *D));
7690 break;
7691 case Decl::CXXConstructor:
7692 getCXXABI().EmitCXXConstructors(D: cast<CXXConstructorDecl>(Val: D));
7693 break;
7694 case Decl::CXXDestructor:
7695 getCXXABI().EmitCXXDestructors(D: cast<CXXDestructorDecl>(Val: D));
7696 break;
7697
7698 case Decl::StaticAssert:
7699 // Nothing to do.
7700 break;
7701
7702 // Objective-C Decls
7703
7704 // Forward declarations, no (immediate) code generation.
7705 case Decl::ObjCInterface:
7706 case Decl::ObjCCategory:
7707 break;
7708
7709 case Decl::ObjCProtocol: {
7710 auto *Proto = cast<ObjCProtocolDecl>(Val: D);
7711 if (Proto->isThisDeclarationADefinition())
7712 ObjCRuntime->GenerateProtocol(OPD: Proto);
7713 break;
7714 }
7715
7716 case Decl::ObjCCategoryImpl:
7717 // Categories have properties but don't support synthesize so we
7718 // can ignore them here.
7719 ObjCRuntime->GenerateCategory(OCD: cast<ObjCCategoryImplDecl>(Val: D));
7720 break;
7721
7722 case Decl::ObjCImplementation: {
7723 auto *OMD = cast<ObjCImplementationDecl>(Val: D);
7724 EmitObjCPropertyImplementations(D: OMD);
7725 EmitObjCIvarInitializations(D: OMD);
7726 ObjCRuntime->GenerateClass(OID: OMD);
7727 // Emit global variable debug information.
7728 if (CGDebugInfo *DI = getModuleDebugInfo())
7729 if (getCodeGenOpts().hasReducedDebugInfo())
7730 DI->getOrCreateInterfaceType(Ty: getContext().getObjCInterfaceType(
7731 Decl: OMD->getClassInterface()), Loc: OMD->getLocation());
7732 break;
7733 }
7734 case Decl::ObjCMethod: {
7735 auto *OMD = cast<ObjCMethodDecl>(Val: D);
7736 // If this is not a prototype, emit the body.
7737 if (OMD->getBody())
7738 CodeGenFunction(*this).GenerateObjCMethod(OMD);
7739 break;
7740 }
7741 case Decl::ObjCCompatibleAlias:
7742 ObjCRuntime->RegisterAlias(OAD: cast<ObjCCompatibleAliasDecl>(Val: D));
7743 break;
7744
7745 case Decl::PragmaComment: {
7746 const auto *PCD = cast<PragmaCommentDecl>(Val: D);
7747 switch (PCD->getCommentKind()) {
7748 case PCK_Unknown:
7749 llvm_unreachable("unexpected pragma comment kind");
7750 case PCK_Linker:
7751 AppendLinkerOptions(Opts: PCD->getArg());
7752 break;
7753 case PCK_Lib:
7754 AddDependentLib(Lib: PCD->getArg());
7755 break;
7756 case PCK_Compiler:
7757 case PCK_ExeStr:
7758 case PCK_User:
7759 break; // We ignore all of these.
7760 }
7761 break;
7762 }
7763
7764 case Decl::PragmaDetectMismatch: {
7765 const auto *PDMD = cast<PragmaDetectMismatchDecl>(Val: D);
7766 AddDetectMismatch(Name: PDMD->getName(), Value: PDMD->getValue());
7767 break;
7768 }
7769
7770 case Decl::LinkageSpec:
7771 EmitLinkageSpec(LSD: cast<LinkageSpecDecl>(Val: D));
7772 break;
7773
7774 case Decl::FileScopeAsm: {
7775 // File-scope asm is ignored during device-side CUDA compilation.
7776 if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
7777 break;
7778 // File-scope asm is ignored during device-side OpenMP compilation.
7779 if (LangOpts.OpenMPIsTargetDevice)
7780 break;
7781 // File-scope asm is ignored during device-side SYCL compilation.
7782 if (LangOpts.SYCLIsDevice)
7783 break;
7784 auto *AD = cast<FileScopeAsmDecl>(Val: D);
7785 getModule().appendModuleInlineAsm(Asm: AD->getAsmString());
7786 break;
7787 }
7788
7789 case Decl::TopLevelStmt:
7790 EmitTopLevelStmt(D: cast<TopLevelStmtDecl>(Val: D));
7791 break;
7792
7793 case Decl::Import: {
7794 auto *Import = cast<ImportDecl>(Val: D);
7795
7796 // If we've already imported this module, we're done.
7797 if (!ImportedModules.insert(X: Import->getImportedModule()))
7798 break;
7799
7800 // Emit debug information for direct imports.
7801 if (!Import->getImportedOwningModule()) {
7802 if (CGDebugInfo *DI = getModuleDebugInfo())
7803 DI->EmitImportDecl(ID: *Import);
7804 }
7805
7806 // For C++ standard modules we are done - we will call the module
7807 // initializer for imported modules, and that will likewise call those for
7808 // any imports it has.
7809 if (CXX20ModuleInits && Import->getImportedModule() &&
7810 Import->getImportedModule()->isNamedModule())
7811 break;
7812
7813 // For clang C++ module map modules the initializers for sub-modules are
7814 // emitted here.
7815
7816 // Find all of the submodules and emit the module initializers.
7817 llvm::SmallPtrSet<clang::Module *, 16> Visited;
7818 SmallVector<clang::Module *, 16> Stack;
7819 Visited.insert(Ptr: Import->getImportedModule());
7820 Stack.push_back(Elt: Import->getImportedModule());
7821
7822 while (!Stack.empty()) {
7823 clang::Module *Mod = Stack.pop_back_val();
7824 if (!EmittedModuleInitializers.insert(Ptr: Mod).second)
7825 continue;
7826
7827 for (auto *D : Context.getModuleInitializers(M: Mod))
7828 EmitTopLevelDecl(D);
7829
7830 // Visit the submodules of this module.
7831 for (auto *Submodule : Mod->submodules()) {
7832 // Skip explicit children; they need to be explicitly imported to emit
7833 // the initializers.
7834 if (Submodule->IsExplicit)
7835 continue;
7836
7837 if (Visited.insert(Ptr: Submodule).second)
7838 Stack.push_back(Elt: Submodule);
7839 }
7840 }
7841 break;
7842 }
7843
7844 case Decl::Export:
7845 EmitDeclContext(DC: cast<ExportDecl>(Val: D));
7846 break;
7847
7848 case Decl::OMPThreadPrivate:
7849 EmitOMPThreadPrivateDecl(D: cast<OMPThreadPrivateDecl>(Val: D));
7850 break;
7851
7852 case Decl::OMPAllocate:
7853 EmitOMPAllocateDecl(D: cast<OMPAllocateDecl>(Val: D));
7854 break;
7855
7856 case Decl::OMPDeclareReduction:
7857 EmitOMPDeclareReduction(D: cast<OMPDeclareReductionDecl>(Val: D));
7858 break;
7859
7860 case Decl::OMPDeclareMapper:
7861 EmitOMPDeclareMapper(D: cast<OMPDeclareMapperDecl>(Val: D));
7862 break;
7863
7864 case Decl::OMPRequires:
7865 EmitOMPRequiresDecl(D: cast<OMPRequiresDecl>(Val: D));
7866 break;
7867
7868 case Decl::Typedef:
7869 case Decl::TypeAlias: // using foo = bar; [C++11]
7870 if (CGDebugInfo *DI = getModuleDebugInfo())
7871 DI->EmitAndRetainType(Ty: getContext().getTypedefType(
7872 Keyword: ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
7873 Decl: cast<TypedefNameDecl>(Val: D)));
7874 break;
7875
7876 case Decl::Record:
7877 if (CGDebugInfo *DI = getModuleDebugInfo())
7878 if (cast<RecordDecl>(Val: D)->getDefinition())
7879 DI->EmitAndRetainType(
7880 Ty: getContext().getCanonicalTagType(TD: cast<RecordDecl>(Val: D)));
7881 break;
7882
7883 case Decl::Enum:
7884 if (CGDebugInfo *DI = getModuleDebugInfo())
7885 if (cast<EnumDecl>(Val: D)->getDefinition())
7886 DI->EmitAndRetainType(
7887 Ty: getContext().getCanonicalTagType(TD: cast<EnumDecl>(Val: D)));
7888 break;
7889
7890 case Decl::HLSLRootSignature:
7891 getHLSLRuntime().addRootSignature(D: cast<HLSLRootSignatureDecl>(Val: D));
7892 break;
7893 case Decl::HLSLBuffer:
7894 getHLSLRuntime().addBuffer(D: cast<HLSLBufferDecl>(Val: D));
7895 break;
7896
7897 case Decl::OpenACCDeclare:
7898 EmitOpenACCDeclare(D: cast<OpenACCDeclareDecl>(Val: D));
7899 break;
7900 case Decl::OpenACCRoutine:
7901 EmitOpenACCRoutine(D: cast<OpenACCRoutineDecl>(Val: D));
7902 break;
7903
7904 default:
7905 // Make sure we handled everything we should, every other kind is a
7906 // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
7907 // function. Need to recode Decl::Kind to do that easily.
7908 assert(isa<TypeDecl>(D) && "Unsupported decl kind");
7909 break;
7910 }
7911}
7912
/// Record a function-like decl whose body may never be emitted, so that an
/// empty coverage-mapping region can be produced for it later by
/// EmitDeferredUnusedCoverageMappings. No-op unless -fcoverage-mapping is on.
void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
  // Do we need to generate coverage mapping?
  if (!CodeGenOpts.CoverageMapping)
    return;
  switch (D->getKind()) {
  case Decl::CXXConversion:
  case Decl::CXXMethod:
  case Decl::Function:
  case Decl::ObjCMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor: {
    // NOTE(review): ObjCMethodDecl does not derive from FunctionDecl, so this
    // cast looks unsafe for the Decl::ObjCMethod case — confirm ObjC methods
    // cannot actually reach here, or handle them separately.
    if (!cast<FunctionDecl>(Val: D)->doesThisDeclarationHaveABody())
      break;
    SourceManager &SM = getContext().getSourceManager();
    // Under limited coverage, only decls spelled in the main file are mapped.
    if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(SpellingLoc: D->getBeginLoc()))
      break;
    // Skip system headers unless system-header coverage was requested.
    if (!llvm::coverage::SystemHeadersCoverage &&
        SM.isInSystemHeader(Loc: D->getBeginLoc()))
      break;
    // 'true' marks the entry as pending; ClearUnusedCoverageMapping flips it
    // to 'false' once a real body is emitted for the decl.
    DeferredEmptyCoverageMappingDecls.try_emplace(Key: D, Args: true);
    break;
  }
  default:
    break;
  };
}
7939
/// Mark a decl as no longer needing an empty coverage mapping, because a real
/// body for it has been (or will be) emitted. No-op unless -fcoverage-mapping
/// is on.
void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
  // Do we need to generate coverage mapping?
  if (!CodeGenOpts.CoverageMapping)
    return;
  if (const auto *Fn = dyn_cast<FunctionDecl>(Val: D)) {
    // Emitting an instantiation covers its template pattern as well; clear
    // the pattern so no spurious empty mapping is emitted for it.
    if (Fn->isTemplateInstantiation())
      ClearUnusedCoverageMapping(D: Fn->getTemplateInstantiationPattern());
  }
  // 'false' means: seen, but no empty mapping should be emitted.
  DeferredEmptyCoverageMappingDecls.insert_or_assign(Key: D, Val: false);
}
7950
/// Emit empty coverage-mapping regions for every decl registered via
/// AddDeferredUnusedCoverageMapping that was never cleared, i.e. functions
/// with bodies that were never code-generated but should still appear as
/// unexecuted in coverage reports.
void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
  // We call takeVector() here to avoid use-after-free.
  // FIXME: DeferredEmptyCoverageMappingDecls is getting mutated because
  // we deserialize function bodies to emit coverage info for them, and that
  // deserializes more declarations. How should we handle that case?
  for (const auto &Entry : DeferredEmptyCoverageMappingDecls.takeVector()) {
    // Entries flipped to 'false' by ClearUnusedCoverageMapping got a real
    // body emitted; skip them.
    if (!Entry.second)
      continue;
    const Decl *D = Entry.first;
    switch (D->getKind()) {
    case Decl::CXXConversion:
    case Decl::CXXMethod:
    case Decl::Function:
    case Decl::ObjCMethod: {
      // NOTE(review): ObjCMethodDecl does not derive from FunctionDecl, so
      // the cast below looks unsafe for the Decl::ObjCMethod case — confirm
      // ObjC methods cannot actually reach this point.
      CodeGenPGO PGO(*this);
      GlobalDecl GD(cast<FunctionDecl>(Val: D));
      PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD),
                                  Linkage: getFunctionLinkage(GD));
      break;
    }
    case Decl::CXXConstructor: {
      // Constructors have per-variant symbols; the base variant carries the
      // empty mapping.
      CodeGenPGO PGO(*this);
      GlobalDecl GD(cast<CXXConstructorDecl>(Val: D), Ctor_Base);
      PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD),
                                  Linkage: getFunctionLinkage(GD));
      break;
    }
    case Decl::CXXDestructor: {
      // Likewise, use the base destructor variant.
      CodeGenPGO PGO(*this);
      GlobalDecl GD(cast<CXXDestructorDecl>(Val: D), Dtor_Base);
      PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD),
                                  Linkage: getFunctionLinkage(GD));
      break;
    }
    default:
      break;
    };
  }
}
7990
7991void CodeGenModule::EmitMainVoidAlias() {
7992 // In order to transition away from "__original_main" gracefully, emit an
7993 // alias for "main" in the no-argument case so that libc can detect when
7994 // new-style no-argument main is in used.
7995 if (llvm::Function *F = getModule().getFunction(Name: "main")) {
7996 if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() &&
7997 F->getReturnType()->isIntegerTy(Bitwidth: Context.getTargetInfo().getIntWidth())) {
7998 auto *GA = llvm::GlobalAlias::create(Name: "__main_void", Aliasee: F);
7999 GA->setVisibility(llvm::GlobalValue::HiddenVisibility);
8000 }
8001 }
8002}
8003
8004/// Turns the given pointer into a constant.
8005static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
8006 const void *Ptr) {
8007 uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
8008 llvm::Type *i64 = llvm::Type::getInt64Ty(C&: Context);
8009 return llvm::ConstantInt::get(Ty: i64, V: PtrInt);
8010}
8011
/// Append one operand to the 'clang.global.decl.ptrs' named metadata, mapping
/// the given global value to the host address of the Decl it was generated
/// from. \p GlobalMetadata is a lazily-created cache of the named metadata
/// node, created here on first use.
static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
                                   llvm::NamedMDNode *&GlobalMetadata,
                                   GlobalDecl D,
                                   llvm::GlobalValue *Addr) {
  if (!GlobalMetadata)
    GlobalMetadata =
      CGM.getModule().getOrInsertNamedMetadata(Name: "clang.global.decl.ptrs");

  // TODO: should we report variant information for ctors/dtors?
  // Each operand is a pair: (global value, i64 host address of the Decl).
  llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(C: Addr),
                           llvm::ConstantAsMetadata::get(C: GetPointerConstant(
                               Context&: CGM.getLLVMContext(), Ptr: D.getDecl()))};
  GlobalMetadata->addOperand(M: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: Ops));
}
8026
/// Try to redirect every ifunc that currently uses \p Elem as its resolver
/// (directly or through a bitcast) to use \p CppFunc instead, erasing \p Elem
/// when successful. Returns true on success; returns false — leaving
/// everything untouched — if any user of \p Elem is something other than an
/// ifunc/bitcast, or if \p Elem and \p CppFunc are the same value.
bool CodeGenModule::CheckAndReplaceExternCIFuncs(llvm::GlobalValue *Elem,
                                                 llvm::GlobalValue *CppFunc) {
  // Store the list of ifuncs we need to replace uses in.
  llvm::SmallVector<llvm::GlobalIFunc *> IFuncs;
  // List of ConstantExprs that we should be able to delete when we're done
  // here.
  llvm::SmallVector<llvm::ConstantExpr *> CEs;

  // It isn't valid to replace the extern-C ifuncs if all we find is itself!
  if (Elem == CppFunc)
    return false;

  // First make sure that all users of this are ifuncs (or ifuncs via a
  // bitcast), and collect the list of ifuncs and CEs so we can work on them
  // later.
  for (llvm::User *User : Elem->users()) {
    // Users can either be a bitcast ConstExpr that is used by the ifuncs, OR an
    // ifunc directly. In any other case, just give up, as we don't know what we
    // could break by changing those.
    if (auto *ConstExpr = dyn_cast<llvm::ConstantExpr>(Val: User)) {
      if (ConstExpr->getOpcode() != llvm::Instruction::BitCast)
        return false;

      for (llvm::User *CEUser : ConstExpr->users()) {
        if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: CEUser)) {
          IFuncs.push_back(Elt: IFunc);
        } else {
          return false;
        }
      }
      CEs.push_back(Elt: ConstExpr);
    } else if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: User)) {
      IFuncs.push_back(Elt: IFunc);
    } else {
      // This user is one we don't know how to handle, so fail redirection. This
      // will result in an ifunc retaining a resolver name that will ultimately
      // fail to be resolved to a defined function.
      return false;
    }
  }

  // Now we know this is a valid case where we can do this alias replacement, we
  // need to remove all of the references to Elem (and the bitcasts!) so we can
  // delete it.
  for (llvm::GlobalIFunc *IFunc : IFuncs)
    IFunc->setResolver(nullptr);
  for (llvm::ConstantExpr *ConstExpr : CEs)
    ConstExpr->destroyConstant();

  // We should now be out of uses for the 'old' version of this function, so we
  // can erase it as well.
  Elem->eraseFromParent();

  for (llvm::GlobalIFunc *IFunc : IFuncs) {
    // The type of the resolver is always just a function-type that returns the
    // type of the IFunc, so create that here. If the type of the actual
    // resolver doesn't match, it just gets bitcast to the right thing.
    auto *ResolverTy =
        llvm::FunctionType::get(Result: IFunc->getType(), /*isVarArg*/ false);
    llvm::Constant *Resolver = GetOrCreateLLVMFunction(
        MangledName: CppFunc->getName(), Ty: ResolverTy, GD: {}, /*ForVTable*/ false);
    IFunc->setResolver(Resolver);
  }
  return true;
}
8092
8093/// For each function which is declared within an extern "C" region and marked
8094/// as 'used', but has internal linkage, create an alias from the unmangled
8095/// name to the mangled name if possible. People expect to be able to refer
8096/// to such functions with an unmangled name from inline assembly within the
8097/// same translation unit.
8098void CodeGenModule::EmitStaticExternCAliases() {
8099 if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases())
8100 return;
8101 for (auto &I : StaticExternCValues) {
8102 const IdentifierInfo *Name = I.first;
8103 llvm::GlobalValue *Val = I.second;
8104
8105 // If Val is null, that implies there were multiple declarations that each
8106 // had a claim to the unmangled name. In this case, generation of the alias
8107 // is suppressed. See CodeGenModule::MaybeHandleStaticInExternC.
8108 if (!Val)
8109 break;
8110
8111 llvm::GlobalValue *ExistingElem =
8112 getModule().getNamedValue(Name: Name->getName());
8113
8114 // If there is either not something already by this name, or we were able to
8115 // replace all uses from IFuncs, create the alias.
8116 if (!ExistingElem || CheckAndReplaceExternCIFuncs(Elem: ExistingElem, CppFunc: Val))
8117 addCompilerUsedGlobal(GV: llvm::GlobalAlias::create(Name: Name->getName(), Aliasee: Val));
8118 }
8119}
8120
8121bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName,
8122 GlobalDecl &Result) const {
8123 auto Res = Manglings.find(Key: MangledName);
8124 if (Res == Manglings.end())
8125 return false;
8126 Result = Res->getValue();
8127 return true;
8128}
8129
8130/// Emits metadata nodes associating all the global values in the
8131/// current module with the Decls they came from. This is useful for
8132/// projects using IR gen as a subroutine.
8133///
8134/// Since there's currently no way to associate an MDNode directly
8135/// with an llvm::GlobalValue, we create a global named metadata
8136/// with the name 'clang.global.decl.ptrs'.
8137void CodeGenModule::EmitDeclMetadata() {
8138 llvm::NamedMDNode *GlobalMetadata = nullptr;
8139
8140 for (auto &I : MangledDeclNames) {
8141 llvm::GlobalValue *Addr = getModule().getNamedValue(Name: I.second);
8142 // Some mangled names don't necessarily have an associated GlobalValue
8143 // in this module, e.g. if we mangled it for DebugInfo.
8144 if (Addr)
8145 EmitGlobalDeclMetadata(CGM&: *this, GlobalMetadata, D: I.first, Addr);
8146 }
8147}
8148
/// Emits metadata nodes for all the local variables in the current
/// function.
void CodeGenFunction::EmitDeclMetadata() {
  if (LocalDeclMap.empty()) return;

  llvm::LLVMContext &Context = getLLVMContext();

  // Find the unique metadata ID for this name.
  unsigned DeclPtrKind = Context.getMDKindID(Name: "clang.decl.ptr");

  // Lazily created by EmitGlobalDeclMetadata if any global-backed local is
  // encountered below.
  llvm::NamedMDNode *GlobalMetadata = nullptr;

  for (auto &I : LocalDeclMap) {
    const Decl *D = I.first;
    llvm::Value *Addr = I.second.emitRawPointer(CGF&: *this);
    if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Val: Addr)) {
      // Stack locals: attach !clang.decl.ptr to the alloca, holding the host
      // address of the Decl as an i64 constant.
      llvm::Value *DAddr = GetPointerConstant(Context&: getLLVMContext(), Ptr: D);
      Alloca->setMetadata(
          KindID: DeclPtrKind, Node: llvm::MDNode::get(
                            Context, MDs: llvm::ValueAsMetadata::getConstant(C: DAddr)));
    } else if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: Addr)) {
      // Locals backed by a global value (e.g. statics) are recorded in the
      // module-level 'clang.global.decl.ptrs' metadata instead.
      GlobalDecl GD = GlobalDecl(cast<VarDecl>(Val: D));
      EmitGlobalDeclMetadata(CGM, GlobalMetadata, D: GD, Addr: GV);
    }
  }
}
8175
8176void CodeGenModule::EmitVersionIdentMetadata() {
8177 llvm::NamedMDNode *IdentMetadata =
8178 TheModule.getOrInsertNamedMetadata(Name: "llvm.ident");
8179 std::string Version = getClangFullVersion();
8180 llvm::LLVMContext &Ctx = TheModule.getContext();
8181
8182 llvm::Metadata *IdentNode[] = {llvm::MDString::get(Context&: Ctx, Str: Version)};
8183 IdentMetadata->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: IdentNode));
8184}
8185
8186void CodeGenModule::EmitCommandLineMetadata() {
8187 llvm::NamedMDNode *CommandLineMetadata =
8188 TheModule.getOrInsertNamedMetadata(Name: "llvm.commandline");
8189 std::string CommandLine = getCodeGenOpts().RecordCommandLine;
8190 llvm::LLVMContext &Ctx = TheModule.getContext();
8191
8192 llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Context&: Ctx, Str: CommandLine)};
8193 CommandLineMetadata->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: CommandLineNode));
8194}
8195
8196void CodeGenModule::EmitCoverageFile() {
8197 llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata(Name: "llvm.dbg.cu");
8198 if (!CUNode)
8199 return;
8200
8201 llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata(Name: "llvm.gcov");
8202 llvm::LLVMContext &Ctx = TheModule.getContext();
8203 auto *CoverageDataFile =
8204 llvm::MDString::get(Context&: Ctx, Str: getCodeGenOpts().CoverageDataFile);
8205 auto *CoverageNotesFile =
8206 llvm::MDString::get(Context&: Ctx, Str: getCodeGenOpts().CoverageNotesFile);
8207 for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
8208 llvm::MDNode *CU = CUNode->getOperand(i);
8209 llvm::Metadata *Elts[] = {CoverageNotesFile, CoverageDataFile, CU};
8210 GCov->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: Elts));
8211 }
8212}
8213
/// Get the address of the RTTI descriptor for \p Ty. \p ForEH indicates the
/// descriptor is needed for exception handling, which may require one even
/// when RTTI is otherwise disabled.
llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
                                                       bool ForEH) {
  // Return a bogus pointer if RTTI is disabled, unless it's for EH.
  // FIXME: should we even be calling this method if RTTI is disabled
  // and it's not for EH?
  if (!shouldEmitRTTI(ForEH))
    return llvm::Constant::getNullValue(Ty: GlobalsInt8PtrTy);

  // GNU-family ObjC runtimes provide their own EH type descriptors for
  // ObjC object pointer types.
  if (ForEH && Ty->isObjCObjectPointerType() &&
      LangOpts.ObjCRuntime.isGNUFamily())
    return ObjCRuntime->GetEHType(T: Ty);

  // Otherwise the C++ ABI supplies the descriptor.
  return getCXXABI().getAddrOfRTTIDescriptor(Ty);
}
8228
/// Emit the variables listed in a '#pragma omp threadprivate' directive,
/// registering each with the OpenMP runtime and queueing any dynamic
/// initialization it requires.
void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
  // Do not emit threadprivates in simd-only mode.
  if (LangOpts.OpenMP && LangOpts.OpenMPSimd)
    return;
  for (auto RefExpr : D->varlist()) {
    auto *VD = cast<VarDecl>(Val: cast<DeclRefExpr>(Val: RefExpr)->getDecl());
    // Dynamic initialization is only needed when the initializer is not a
    // compile-time constant.
    bool PerformInit =
        VD->getAnyInitializer() &&
        !VD->getAnyInitializer()->isConstantInitializer(Ctx&: getContext(),
                                                        /*ForRef=*/false);

    Address Addr(GetAddrOfGlobalVar(D: VD),
                 getTypes().ConvertTypeForMem(T: VD->getType()),
                 getContext().getDeclAlign(D: VD));
    // If the runtime produced an initializer function, run it alongside the
    // other global initializers.
    if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
            VD, VDAddr: Addr, Loc: RefExpr->getBeginLoc(), PerformInit))
      CXXGlobalInits.push_back(x: InitFunction);
  }
}
8248
/// Create (or return the cached) metadata identifier for type \p T, as used
/// by CFI/type-metadata checks. \p Suffix distinguishes the flavor (e.g.
/// ".virtual", ".generalized") and \p Map is the per-flavor cache.
llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
                                            StringRef Suffix) {
  // Exception specifications are not part of a function type's CFI identity;
  // strip them before mangling.
  if (auto *FnType = T->getAs<FunctionProtoType>())
    T = getContext().getFunctionType(
        ResultTy: FnType->getReturnType(), Args: FnType->getParamTypes(),
        EPI: FnType->getExtProtoInfo().withExceptionSpec(ESI: EST_None));

  llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
  if (InternalId)
    return InternalId;

  if (isExternallyVisible(L: T->getLinkage())) {
    // Externally visible types get a stable mangled-string identifier so
    // they match across translation units.
    std::string OutName;
    llvm::raw_string_ostream Out(OutName);
    getCXXABI().getMangleContext().mangleCanonicalTypeName(
        T, Out, NormalizeIntegers: getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);

    if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
      Out << ".normalized";

    Out << Suffix;

    InternalId = llvm::MDString::get(Context&: getLLVMContext(), Str: Out.str());
  } else {
    // Internal-linkage types cannot clash across TUs; a distinct anonymous
    // node suffices and avoids mangling.
    InternalId = llvm::MDNode::getDistinct(Context&: getLLVMContext(),
                                           MDs: llvm::ArrayRef<llvm::Metadata *>());
  }

  return InternalId;
}
8280
8281llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForFnType(QualType T) {
8282 assert(isa<FunctionType>(T));
8283 T = GeneralizeFunctionType(
8284 Ctx&: getContext(), Ty: T, GeneralizePointers: getCodeGenOpts().SanitizeCfiICallGeneralizePointers);
8285 if (getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
8286 return CreateMetadataIdentifierGeneralized(T);
8287 return CreateMetadataIdentifierForType(T);
8288}
8289
/// Metadata identifier for \p T with no suffix: the exact-type flavor.
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
  return CreateMetadataIdentifierImpl(T, Map&: MetadataIdMap, Suffix: "");
}
8293
/// Metadata identifier for \p T with the ".virtual" suffix, used for virtual
/// member-function pointer checks.
llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) {
  return CreateMetadataIdentifierImpl(T, Map&: VirtualMetadataIdMap, Suffix: ".virtual");
}
8298
/// Metadata identifier for \p T with the ".generalized" suffix, used when
/// CFI pointer generalization is enabled.
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) {
  return CreateMetadataIdentifierImpl(T, Map&: GeneralizedMetadataIdMap,
                                      Suffix: ".generalized");
}
8303
8304/// Returns whether this module needs the "all-vtables" type identifier.
8305bool CodeGenModule::NeedAllVtablesTypeId() const {
8306 // Returns true if at least one of vtable-based CFI checkers is enabled and
8307 // is not in the trapping mode.
8308 return ((LangOpts.Sanitize.has(K: SanitizerKind::CFIVCall) &&
8309 !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFIVCall)) ||
8310 (LangOpts.Sanitize.has(K: SanitizerKind::CFINVCall) &&
8311 !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFINVCall)) ||
8312 (LangOpts.Sanitize.has(K: SanitizerKind::CFIDerivedCast) &&
8313 !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFIDerivedCast)) ||
8314 (LangOpts.Sanitize.has(K: SanitizerKind::CFIUnrelatedCast) &&
8315 !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFIUnrelatedCast)));
8316}
8317
/// Attach CFI type metadata for class \p RD to one of its vtables at the
/// given address-point \p Offset.
void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
                                          CharUnits Offset,
                                          const CXXRecordDecl *RD) {
  CanQualType T = getContext().getCanonicalTagType(TD: RD);
  llvm::Metadata *MD = CreateMetadataIdentifierForType(T);
  VTable->addTypeMetadata(Offset: Offset.getQuantity(), TypeID: MD);

  // Cross-DSO CFI additionally needs a numeric type id usable across
  // shared-object boundaries.
  if (CodeGenOpts.SanitizeCfiCrossDso)
    if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
      VTable->addTypeMetadata(Offset: Offset.getQuantity(),
                              TypeID: llvm::ConstantAsMetadata::get(C: CrossDsoTypeId));

  // Non-trapping vtable CFI checks also match against the "all-vtables" id.
  if (NeedAllVtablesTypeId()) {
    llvm::Metadata *MD = llvm::MDString::get(Context&: getLLVMContext(), Str: "all-vtables");
    VTable->addTypeMetadata(Offset: Offset.getQuantity(), TypeID: MD);
  }
}
8335
8336llvm::SanitizerStatReport &CodeGenModule::getSanStats() {
8337 if (!SanStats)
8338 SanStats = std::make_unique<llvm::SanitizerStatReport>(args: &getModule());
8339
8340 return *SanStats;
8341}
8342
/// Convert an OpenCL sampler initializer expression (an integer constant)
/// into a sampler value by calling the runtime's
/// __translate_sampler_initializer function.
llvm::Value *
CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
                                                  CodeGenFunction &CGF) {
  // The initializer must fold to a constant; emit it abstractly.
  llvm::Constant *C = ConstantEmitter(CGF).emitAbstract(E, T: E->getType());
  auto *SamplerT = getOpenCLRuntime().getSamplerType(T: E->getType().getTypePtr());
  auto *FTy = llvm::FunctionType::get(Result: SamplerT, Params: {C->getType()}, isVarArg: false);
  auto *Call = CGF.EmitRuntimeCall(
      callee: CreateRuntimeFunction(FTy, Name: "__translate_sampler_initializer"), args: {C});
  return Call;
}
8353
/// Alignment of the memory a value of pointer type \p T points to, computed
/// from the pointee type.
CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
    QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T: T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}
8359
/// Compute the "natural" alignment of a value of type \p T, optionally
/// reporting where the alignment came from (\p BaseInfo) and the TBAA access
/// info (\p TBAAInfo). When \p forPointeeType is true, the type describes a
/// pointee, which affects how C++ class types are aligned.
CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo,
                                                 bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = getTBAAAccessInfo(AccessType: T);

  // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
  // that doesn't return the information we need to compute BaseInfo.

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(BitSize: Align);
    }
  }

  // Remember whether T itself was an array before stripping down to the
  // element type; array pointees never use class-pointer alignment below.
  bool AlignForArray = T->isArrayType();

  // Analyze the base element type, so we don't get confused by incomplete
  // array types.
  T = getContext().getBaseElementType(QT: T);

  if (T->isIncompleteType()) {
    // We could try to replicate the logic from
    // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
    // type is incomplete, so it's impossible to test. We could try to reuse
    // getTypeAlignIfKnown, but that doesn't return the information we need
    // to set BaseInfo. So just ignore the possibility that the alignment is
    // greater than one.
    if (BaseInfo)
      *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
    return CharUnits::One();
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  const CXXRecordDecl *RD;
  if (T.getQualifiers().hasUnaligned()) {
    // __unaligned (MS extension) forces byte alignment.
    Alignment = CharUnits::One();
  } else if (forPointeeType && !AlignForArray &&
             (RD = T->getAsCXXRecordDecl())) {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    Alignment = getClassPointerAlignment(CD: RD);
  } else {
    Alignment = getContext().getTypeAlignInChars(T);
  }

  // Cap to the global maximum type alignment unless the alignment
  // was somehow explicit on the type.
  if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
    if (Alignment.getQuantity() > MaxAlign &&
        !getContext().isAlignmentRequired(T))
      Alignment = CharUnits::fromQuantity(Quantity: MaxAlign);
  }
  return Alignment;
}
8425
8426bool CodeGenModule::stopAutoInit() {
8427 unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter;
8428 if (StopAfter) {
8429 // This number is positive only when -ftrivial-auto-var-init-stop-after=* is
8430 // used
8431 if (NumAutoVarInit >= StopAfter) {
8432 return true;
8433 }
8434 if (!NumAutoVarInit) {
8435 getDiags().Report(DiagID: diag::warn_trivial_auto_var_limit)
8436 << StopAfter
8437 << (getContext().getLangOpts().getTrivialAutoVarInit() ==
8438 LangOptions::TrivialAutoVarInitKind::Zero
8439 ? "zero"
8440 : "pattern");
8441 }
8442 ++NumAutoVarInit;
8443 }
8444 return false;
8445}
8446
/// Print a unique postfix for an externalized (device-side static/internal)
/// CUDA/HIP declaration, derived from the CUID when present, otherwise from
/// the file's unique ID plus a hash of the user-defined macros.
void CodeGenModule::printPostfixForExternalizedDecl(llvm::raw_ostream &OS,
                                                    const Decl *D) const {
  // ptxas does not allow '.' in symbol names. On the other hand, HIP prefers
  // postfix beginning with '.' since the symbol name can be demangled.
  if (LangOpts.HIP)
    OS << (isa<VarDecl>(Val: D) ? ".static." : ".intern.");
  else
    OS << (isa<VarDecl>(Val: D) ? "__static__" : "__intern__");

  // If the CUID is not specified we try to generate a unique postfix.
  if (getLangOpts().CUID.empty()) {
    SourceManager &SM = getContext().getSourceManager();
    PresumedLoc PLoc = SM.getPresumedLoc(Loc: D->getLocation());
    assert(PLoc.isValid() && "Source location is expected to be valid.");

    // Get the hash of the user defined macros.
    llvm::MD5 Hash;
    llvm::MD5::MD5Result Result;
    for (const auto &Arg : PreprocessorOpts.Macros)
      Hash.update(Str: Arg.first);
    Hash.final(Result);

    // Get the UniqueID for the file containing the decl.
    llvm::sys::fs::UniqueID ID;
    auto Status = FS->status(Path: PLoc.getFilename());
    if (!Status) {
      // The presumed filename may come from a #line directive and not exist;
      // retry with line directives ignored.
      PLoc = SM.getPresumedLoc(Loc: D->getLocation(), /*UseLineDirectives=*/false);
      assert(PLoc.isValid() && "Source location is expected to be valid.");
      Status = FS->status(Path: PLoc.getFilename());
    }
    if (!Status) {
      SM.getDiagnostics().Report(DiagID: diag::err_cannot_open_file)
          << PLoc.getFilename() << Status.getError().message();
    } else {
      ID = Status->getUniqueID();
    }
    // Postfix layout: <file-id><device-id>_<macro-hash>.
    OS << llvm::format(Fmt: "%x", Vals: ID.getFile()) << llvm::format(Fmt: "%x", Vals: ID.getDevice())
       << "_" << llvm::utohexstr(X: Result.low(), /*LowerCase=*/true, /*Width=*/8);
  } else {
    OS << getContext().getCUIDHash();
  }
}
8489
/// Transfer the lazy-emission bookkeeping of this (finished) module builder
/// to \p NewBuilder, so that decls deferred here can still be emitted by the
/// next module (used when IR generation is split across multiple
/// CodeGenModules).
void CodeGenModule::moveLazyEmissionStates(CodeGenModule *NewBuilder) {
  assert(DeferredDeclsToEmit.empty() &&
         "Should have emitted all decls deferred to emit.");
  assert(NewBuilder->DeferredDecls.empty() &&
         "Newly created module should not have deferred decls");
  NewBuilder->DeferredDecls = std::move(DeferredDecls);
  assert(EmittedDeferredDecls.empty() &&
         "Still have (unmerged) EmittedDeferredDecls deferred decls");

  assert(NewBuilder->DeferredVTables.empty() &&
         "Newly created module should not have deferred vtables");
  NewBuilder->DeferredVTables = std::move(DeferredVTables);

  assert(NewBuilder->MangledDeclNames.empty() &&
         "Newly created module should not have mangled decl names");
  assert(NewBuilder->Manglings.empty() &&
         "Newly created module should not have manglings");
  NewBuilder->Manglings = std::move(Manglings);

  NewBuilder->WeakRefReferences = std::move(WeakRefReferences);

  // Keep using the same mangling context so names stay consistent across the
  // module boundary.
  NewBuilder->ABI->MangleCtx = std::move(ABI->MangleCtx);
}
8513
8514std::string CodeGenModule::getPFPFieldName(const FieldDecl *FD) {
8515 std::string OutName;
8516 llvm::raw_string_ostream Out(OutName);
8517 getCXXABI().getMangleContext().mangleCanonicalTypeName(
8518 T: getContext().getCanonicalTagType(TD: FD->getParent()), Out, NormalizeIntegers: false);
8519 Out << "." << FD->getName();
8520 return OutName;
8521}
8522