1 | //===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This coordinates the per-module state used while generating code. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "CodeGenModule.h" |
14 | #include "ABIInfo.h" |
15 | #include "CGBlocks.h" |
16 | #include "CGCUDARuntime.h" |
17 | #include "CGCXXABI.h" |
18 | #include "CGCall.h" |
19 | #include "CGDebugInfo.h" |
20 | #include "CGHLSLRuntime.h" |
21 | #include "CGObjCRuntime.h" |
22 | #include "CGOpenCLRuntime.h" |
23 | #include "CGOpenMPRuntime.h" |
24 | #include "CGOpenMPRuntimeGPU.h" |
25 | #include "CodeGenFunction.h" |
26 | #include "CodeGenPGO.h" |
27 | #include "ConstantEmitter.h" |
28 | #include "CoverageMappingGen.h" |
29 | #include "TargetInfo.h" |
30 | #include "clang/AST/ASTContext.h" |
31 | #include "clang/AST/ASTLambda.h" |
32 | #include "clang/AST/CharUnits.h" |
33 | #include "clang/AST/Decl.h" |
34 | #include "clang/AST/DeclCXX.h" |
35 | #include "clang/AST/DeclObjC.h" |
36 | #include "clang/AST/DeclTemplate.h" |
37 | #include "clang/AST/Mangle.h" |
38 | #include "clang/AST/RecursiveASTVisitor.h" |
39 | #include "clang/AST/StmtVisitor.h" |
40 | #include "clang/Basic/Builtins.h" |
41 | #include "clang/Basic/CharInfo.h" |
42 | #include "clang/Basic/CodeGenOptions.h" |
43 | #include "clang/Basic/Diagnostic.h" |
44 | #include "clang/Basic/FileManager.h" |
45 | #include "clang/Basic/Module.h" |
46 | #include "clang/Basic/SourceManager.h" |
47 | #include "clang/Basic/TargetInfo.h" |
48 | #include "clang/Basic/Version.h" |
49 | #include "clang/CodeGen/BackendUtil.h" |
50 | #include "clang/CodeGen/ConstantInitBuilder.h" |
51 | #include "clang/Frontend/FrontendDiagnostic.h" |
52 | #include "llvm/ADT/STLExtras.h" |
53 | #include "llvm/ADT/StringExtras.h" |
54 | #include "llvm/ADT/StringSwitch.h" |
55 | #include "llvm/Analysis/TargetLibraryInfo.h" |
56 | #include "llvm/BinaryFormat/ELF.h" |
57 | #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" |
58 | #include "llvm/IR/AttributeMask.h" |
59 | #include "llvm/IR/CallingConv.h" |
60 | #include "llvm/IR/DataLayout.h" |
61 | #include "llvm/IR/Intrinsics.h" |
62 | #include "llvm/IR/LLVMContext.h" |
63 | #include "llvm/IR/Module.h" |
64 | #include "llvm/IR/ProfileSummary.h" |
65 | #include "llvm/ProfileData/InstrProfReader.h" |
66 | #include "llvm/ProfileData/SampleProf.h" |
67 | #include "llvm/Support/CRC.h" |
68 | #include "llvm/Support/CodeGen.h" |
69 | #include "llvm/Support/CommandLine.h" |
70 | #include "llvm/Support/ConvertUTF.h" |
71 | #include "llvm/Support/ErrorHandling.h" |
72 | #include "llvm/Support/TimeProfiler.h" |
73 | #include "llvm/Support/xxhash.h" |
74 | #include "llvm/TargetParser/RISCVISAInfo.h" |
75 | #include "llvm/TargetParser/Triple.h" |
76 | #include "llvm/TargetParser/X86TargetParser.h" |
77 | #include "llvm/Transforms/Utils/BuildLibCalls.h" |
78 | #include <optional> |
79 | |
80 | using namespace clang; |
81 | using namespace CodeGen; |
82 | |
83 | static llvm::cl::opt<bool> LimitedCoverage( |
84 | "limited-coverage-experimental" , llvm::cl::Hidden, |
85 | llvm::cl::desc("Emit limited coverage mapping information (experimental)" )); |
86 | |
87 | static const char AnnotationSection[] = "llvm.metadata" ; |
88 | |
89 | static CGCXXABI *createCXXABI(CodeGenModule &CGM) { |
90 | switch (CGM.getContext().getCXXABIKind()) { |
91 | case TargetCXXABI::AppleARM64: |
92 | case TargetCXXABI::Fuchsia: |
93 | case TargetCXXABI::GenericAArch64: |
94 | case TargetCXXABI::GenericARM: |
95 | case TargetCXXABI::iOS: |
96 | case TargetCXXABI::WatchOS: |
97 | case TargetCXXABI::GenericMIPS: |
98 | case TargetCXXABI::GenericItanium: |
99 | case TargetCXXABI::WebAssembly: |
100 | case TargetCXXABI::XL: |
101 | return CreateItaniumCXXABI(CGM); |
102 | case TargetCXXABI::Microsoft: |
103 | return CreateMicrosoftCXXABI(CGM); |
104 | } |
105 | |
106 | llvm_unreachable("invalid C++ ABI kind" ); |
107 | } |
108 | |
109 | static std::unique_ptr<TargetCodeGenInfo> |
110 | createTargetCodeGenInfo(CodeGenModule &CGM) { |
111 | const TargetInfo &Target = CGM.getTarget(); |
112 | const llvm::Triple &Triple = Target.getTriple(); |
113 | const CodeGenOptions &CodeGenOpts = CGM.getCodeGenOpts(); |
114 | |
115 | switch (Triple.getArch()) { |
116 | default: |
117 | return createDefaultTargetCodeGenInfo(CGM); |
118 | |
119 | case llvm::Triple::le32: |
120 | return createPNaClTargetCodeGenInfo(CGM); |
121 | case llvm::Triple::m68k: |
122 | return createM68kTargetCodeGenInfo(CGM); |
123 | case llvm::Triple::mips: |
124 | case llvm::Triple::mipsel: |
125 | if (Triple.getOS() == llvm::Triple::NaCl) |
126 | return createPNaClTargetCodeGenInfo(CGM); |
127 | return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true); |
128 | |
129 | case llvm::Triple::mips64: |
130 | case llvm::Triple::mips64el: |
131 | return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/false); |
132 | |
133 | case llvm::Triple::avr: { |
134 | // For passing parameters, R8~R25 are used on avr, and R18~R25 are used |
135 | // on avrtiny. For passing return value, R18~R25 are used on avr, and |
136 | // R22~R25 are used on avrtiny. |
137 | unsigned NPR = Target.getABI() == "avrtiny" ? 6 : 18; |
138 | unsigned NRR = Target.getABI() == "avrtiny" ? 4 : 8; |
139 | return createAVRTargetCodeGenInfo(CGM, NPR, NRR); |
140 | } |
141 | |
142 | case llvm::Triple::aarch64: |
143 | case llvm::Triple::aarch64_32: |
144 | case llvm::Triple::aarch64_be: { |
145 | AArch64ABIKind Kind = AArch64ABIKind::AAPCS; |
146 | if (Target.getABI() == "darwinpcs" ) |
147 | Kind = AArch64ABIKind::DarwinPCS; |
148 | else if (Triple.isOSWindows()) |
149 | return createWindowsAArch64TargetCodeGenInfo(CGM, K: AArch64ABIKind::Win64); |
150 | else if (Target.getABI() == "aapcs-soft" ) |
151 | Kind = AArch64ABIKind::AAPCSSoft; |
152 | else if (Target.getABI() == "pauthtest" ) |
153 | Kind = AArch64ABIKind::PAuthTest; |
154 | |
155 | return createAArch64TargetCodeGenInfo(CGM, Kind); |
156 | } |
157 | |
158 | case llvm::Triple::wasm32: |
159 | case llvm::Triple::wasm64: { |
160 | WebAssemblyABIKind Kind = WebAssemblyABIKind::MVP; |
161 | if (Target.getABI() == "experimental-mv" ) |
162 | Kind = WebAssemblyABIKind::ExperimentalMV; |
163 | return createWebAssemblyTargetCodeGenInfo(CGM, K: Kind); |
164 | } |
165 | |
166 | case llvm::Triple::arm: |
167 | case llvm::Triple::armeb: |
168 | case llvm::Triple::thumb: |
169 | case llvm::Triple::thumbeb: { |
170 | if (Triple.getOS() == llvm::Triple::Win32) |
171 | return createWindowsARMTargetCodeGenInfo(CGM, K: ARMABIKind::AAPCS_VFP); |
172 | |
173 | ARMABIKind Kind = ARMABIKind::AAPCS; |
174 | StringRef ABIStr = Target.getABI(); |
175 | if (ABIStr == "apcs-gnu" ) |
176 | Kind = ARMABIKind::APCS; |
177 | else if (ABIStr == "aapcs16" ) |
178 | Kind = ARMABIKind::AAPCS16_VFP; |
179 | else if (CodeGenOpts.FloatABI == "hard" || |
180 | (CodeGenOpts.FloatABI != "soft" && |
181 | (Triple.getEnvironment() == llvm::Triple::GNUEABIHF || |
182 | Triple.getEnvironment() == llvm::Triple::MuslEABIHF || |
183 | Triple.getEnvironment() == llvm::Triple::EABIHF))) |
184 | Kind = ARMABIKind::AAPCS_VFP; |
185 | |
186 | return createARMTargetCodeGenInfo(CGM, Kind); |
187 | } |
188 | |
189 | case llvm::Triple::ppc: { |
190 | if (Triple.isOSAIX()) |
191 | return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/false); |
192 | |
193 | bool IsSoftFloat = |
194 | CodeGenOpts.FloatABI == "soft" || Target.hasFeature(Feature: "spe" ); |
195 | return createPPC32TargetCodeGenInfo(CGM, SoftFloatABI: IsSoftFloat); |
196 | } |
197 | case llvm::Triple::ppcle: { |
198 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft" ; |
199 | return createPPC32TargetCodeGenInfo(CGM, SoftFloatABI: IsSoftFloat); |
200 | } |
201 | case llvm::Triple::ppc64: |
202 | if (Triple.isOSAIX()) |
203 | return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/true); |
204 | |
205 | if (Triple.isOSBinFormatELF()) { |
206 | PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv1; |
207 | if (Target.getABI() == "elfv2" ) |
208 | Kind = PPC64_SVR4_ABIKind::ELFv2; |
209 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft" ; |
210 | |
211 | return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, SoftFloatABI: IsSoftFloat); |
212 | } |
213 | return createPPC64TargetCodeGenInfo(CGM); |
214 | case llvm::Triple::ppc64le: { |
215 | assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!" ); |
216 | PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv2; |
217 | if (Target.getABI() == "elfv1" ) |
218 | Kind = PPC64_SVR4_ABIKind::ELFv1; |
219 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft" ; |
220 | |
221 | return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, SoftFloatABI: IsSoftFloat); |
222 | } |
223 | |
224 | case llvm::Triple::nvptx: |
225 | case llvm::Triple::nvptx64: |
226 | return createNVPTXTargetCodeGenInfo(CGM); |
227 | |
228 | case llvm::Triple::msp430: |
229 | return createMSP430TargetCodeGenInfo(CGM); |
230 | |
231 | case llvm::Triple::riscv32: |
232 | case llvm::Triple::riscv64: { |
233 | StringRef ABIStr = Target.getABI(); |
234 | unsigned XLen = Target.getPointerWidth(AddrSpace: LangAS::Default); |
235 | unsigned ABIFLen = 0; |
236 | if (ABIStr.ends_with(Suffix: "f" )) |
237 | ABIFLen = 32; |
238 | else if (ABIStr.ends_with(Suffix: "d" )) |
239 | ABIFLen = 64; |
240 | bool EABI = ABIStr.ends_with(Suffix: "e" ); |
241 | return createRISCVTargetCodeGenInfo(CGM, XLen, FLen: ABIFLen, EABI); |
242 | } |
243 | |
244 | case llvm::Triple::systemz: { |
245 | bool SoftFloat = CodeGenOpts.FloatABI == "soft" ; |
246 | bool HasVector = !SoftFloat && Target.getABI() == "vector" ; |
247 | return createSystemZTargetCodeGenInfo(CGM, HasVector, SoftFloatABI: SoftFloat); |
248 | } |
249 | |
250 | case llvm::Triple::tce: |
251 | case llvm::Triple::tcele: |
252 | return createTCETargetCodeGenInfo(CGM); |
253 | |
254 | case llvm::Triple::x86: { |
255 | bool IsDarwinVectorABI = Triple.isOSDarwin(); |
256 | bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); |
257 | |
258 | if (Triple.getOS() == llvm::Triple::Win32) { |
259 | return createWinX86_32TargetCodeGenInfo( |
260 | CGM, DarwinVectorABI: IsDarwinVectorABI, Win32StructABI: IsWin32FloatStructABI, |
261 | NumRegisterParameters: CodeGenOpts.NumRegisterParameters); |
262 | } |
263 | return createX86_32TargetCodeGenInfo( |
264 | CGM, DarwinVectorABI: IsDarwinVectorABI, Win32StructABI: IsWin32FloatStructABI, |
265 | NumRegisterParameters: CodeGenOpts.NumRegisterParameters, SoftFloatABI: CodeGenOpts.FloatABI == "soft" ); |
266 | } |
267 | |
268 | case llvm::Triple::x86_64: { |
269 | StringRef ABI = Target.getABI(); |
270 | X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 |
271 | : ABI == "avx" ? X86AVXABILevel::AVX |
272 | : X86AVXABILevel::None); |
273 | |
274 | switch (Triple.getOS()) { |
275 | case llvm::Triple::Win32: |
276 | return createWinX86_64TargetCodeGenInfo(CGM, AVXLevel); |
277 | default: |
278 | return createX86_64TargetCodeGenInfo(CGM, AVXLevel); |
279 | } |
280 | } |
281 | case llvm::Triple::hexagon: |
282 | return createHexagonTargetCodeGenInfo(CGM); |
283 | case llvm::Triple::lanai: |
284 | return createLanaiTargetCodeGenInfo(CGM); |
285 | case llvm::Triple::r600: |
286 | return createAMDGPUTargetCodeGenInfo(CGM); |
287 | case llvm::Triple::amdgcn: |
288 | return createAMDGPUTargetCodeGenInfo(CGM); |
289 | case llvm::Triple::sparc: |
290 | return createSparcV8TargetCodeGenInfo(CGM); |
291 | case llvm::Triple::sparcv9: |
292 | return createSparcV9TargetCodeGenInfo(CGM); |
293 | case llvm::Triple::xcore: |
294 | return createXCoreTargetCodeGenInfo(CGM); |
295 | case llvm::Triple::arc: |
296 | return createARCTargetCodeGenInfo(CGM); |
297 | case llvm::Triple::spir: |
298 | case llvm::Triple::spir64: |
299 | return createCommonSPIRTargetCodeGenInfo(CGM); |
300 | case llvm::Triple::spirv32: |
301 | case llvm::Triple::spirv64: |
302 | return createSPIRVTargetCodeGenInfo(CGM); |
303 | case llvm::Triple::ve: |
304 | return createVETargetCodeGenInfo(CGM); |
305 | case llvm::Triple::csky: { |
306 | bool IsSoftFloat = !Target.hasFeature(Feature: "hard-float-abi" ); |
307 | bool hasFP64 = |
308 | Target.hasFeature(Feature: "fpuv2_df" ) || Target.hasFeature(Feature: "fpuv3_df" ); |
309 | return createCSKYTargetCodeGenInfo(CGM, FLen: IsSoftFloat ? 0 |
310 | : hasFP64 ? 64 |
311 | : 32); |
312 | } |
313 | case llvm::Triple::bpfeb: |
314 | case llvm::Triple::bpfel: |
315 | return createBPFTargetCodeGenInfo(CGM); |
316 | case llvm::Triple::loongarch32: |
317 | case llvm::Triple::loongarch64: { |
318 | StringRef ABIStr = Target.getABI(); |
319 | unsigned ABIFRLen = 0; |
320 | if (ABIStr.ends_with(Suffix: "f" )) |
321 | ABIFRLen = 32; |
322 | else if (ABIStr.ends_with(Suffix: "d" )) |
323 | ABIFRLen = 64; |
324 | return createLoongArchTargetCodeGenInfo( |
325 | CGM, GRLen: Target.getPointerWidth(AddrSpace: LangAS::Default), FLen: ABIFRLen); |
326 | } |
327 | } |
328 | } |
329 | |
330 | const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { |
331 | if (!TheTargetCodeGenInfo) |
332 | TheTargetCodeGenInfo = createTargetCodeGenInfo(CGM&: *this); |
333 | return *TheTargetCodeGenInfo; |
334 | } |
335 | |
336 | CodeGenModule::(ASTContext &C, |
337 | IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS, |
338 | const HeaderSearchOptions &HSO, |
339 | const PreprocessorOptions &PPO, |
340 | const CodeGenOptions &CGO, llvm::Module &M, |
341 | DiagnosticsEngine &diags, |
342 | CoverageSourceInfo *CoverageInfo) |
343 | : Context(C), LangOpts(C.getLangOpts()), FS(FS), HeaderSearchOpts(HSO), |
344 | PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags), |
345 | Target(C.getTargetInfo()), ABI(createCXXABI(CGM&: *this)), |
346 | VMContext(M.getContext()), Types(*this), VTables(*this), |
347 | SanitizerMD(new SanitizerMetadata(*this)) { |
348 | |
349 | // Initialize the type cache. |
350 | llvm::LLVMContext &LLVMContext = M.getContext(); |
351 | VoidTy = llvm::Type::getVoidTy(C&: LLVMContext); |
352 | Int8Ty = llvm::Type::getInt8Ty(C&: LLVMContext); |
353 | Int16Ty = llvm::Type::getInt16Ty(C&: LLVMContext); |
354 | Int32Ty = llvm::Type::getInt32Ty(C&: LLVMContext); |
355 | Int64Ty = llvm::Type::getInt64Ty(C&: LLVMContext); |
356 | HalfTy = llvm::Type::getHalfTy(C&: LLVMContext); |
357 | BFloatTy = llvm::Type::getBFloatTy(C&: LLVMContext); |
358 | FloatTy = llvm::Type::getFloatTy(C&: LLVMContext); |
359 | DoubleTy = llvm::Type::getDoubleTy(C&: LLVMContext); |
360 | PointerWidthInBits = C.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default); |
361 | PointerAlignInBytes = |
362 | C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getPointerAlign(AddrSpace: LangAS::Default)) |
363 | .getQuantity(); |
364 | SizeSizeInBytes = |
365 | C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getMaxPointerWidth()).getQuantity(); |
366 | IntAlignInBytes = |
367 | C.toCharUnitsFromBits(BitSize: C.getTargetInfo().getIntAlign()).getQuantity(); |
368 | CharTy = |
369 | llvm::IntegerType::get(C&: LLVMContext, NumBits: C.getTargetInfo().getCharWidth()); |
370 | IntTy = llvm::IntegerType::get(C&: LLVMContext, NumBits: C.getTargetInfo().getIntWidth()); |
371 | IntPtrTy = llvm::IntegerType::get(C&: LLVMContext, |
372 | NumBits: C.getTargetInfo().getMaxPointerWidth()); |
373 | Int8PtrTy = llvm::PointerType::get(C&: LLVMContext, |
374 | AddressSpace: C.getTargetAddressSpace(AS: LangAS::Default)); |
375 | const llvm::DataLayout &DL = M.getDataLayout(); |
376 | AllocaInt8PtrTy = |
377 | llvm::PointerType::get(C&: LLVMContext, AddressSpace: DL.getAllocaAddrSpace()); |
378 | GlobalsInt8PtrTy = |
379 | llvm::PointerType::get(C&: LLVMContext, AddressSpace: DL.getDefaultGlobalsAddressSpace()); |
380 | ConstGlobalsPtrTy = llvm::PointerType::get( |
381 | C&: LLVMContext, AddressSpace: C.getTargetAddressSpace(AS: GetGlobalConstantAddressSpace())); |
382 | ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace(); |
383 | |
384 | // Build C++20 Module initializers. |
385 | // TODO: Add Microsoft here once we know the mangling required for the |
386 | // initializers. |
387 | CXX20ModuleInits = |
388 | LangOpts.CPlusPlusModules && getCXXABI().getMangleContext().getKind() == |
389 | ItaniumMangleContext::MK_Itanium; |
390 | |
391 | RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC(); |
392 | |
393 | if (LangOpts.ObjC) |
394 | createObjCRuntime(); |
395 | if (LangOpts.OpenCL) |
396 | createOpenCLRuntime(); |
397 | if (LangOpts.OpenMP) |
398 | createOpenMPRuntime(); |
399 | if (LangOpts.CUDA) |
400 | createCUDARuntime(); |
401 | if (LangOpts.HLSL) |
402 | createHLSLRuntime(); |
403 | |
404 | // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0. |
405 | if (LangOpts.Sanitize.has(K: SanitizerKind::Thread) || |
406 | (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)) |
407 | TBAA.reset(p: new CodeGenTBAA(Context, getTypes(), TheModule, CodeGenOpts, |
408 | getLangOpts(), getCXXABI().getMangleContext())); |
409 | |
410 | // If debug info or coverage generation is enabled, create the CGDebugInfo |
411 | // object. |
412 | if (CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo || |
413 | CodeGenOpts.CoverageNotesFile.size() || |
414 | CodeGenOpts.CoverageDataFile.size()) |
415 | DebugInfo.reset(p: new CGDebugInfo(*this)); |
416 | |
417 | Block.GlobalUniqueCount = 0; |
418 | |
419 | if (C.getLangOpts().ObjC) |
420 | ObjCData.reset(p: new ObjCEntrypoints()); |
421 | |
422 | if (CodeGenOpts.hasProfileClangUse()) { |
423 | auto ReaderOrErr = llvm::IndexedInstrProfReader::create( |
424 | Path: CodeGenOpts.ProfileInstrumentUsePath, FS&: *FS, |
425 | RemappingPath: CodeGenOpts.ProfileRemappingFile); |
426 | // We're checking for profile read errors in CompilerInvocation, so if |
427 | // there was an error it should've already been caught. If it hasn't been |
428 | // somehow, trip an assertion. |
429 | assert(ReaderOrErr); |
430 | PGOReader = std::move(ReaderOrErr.get()); |
431 | } |
432 | |
433 | // If coverage mapping generation is enabled, create the |
434 | // CoverageMappingModuleGen object. |
435 | if (CodeGenOpts.CoverageMapping) |
436 | CoverageMapping.reset(p: new CoverageMappingModuleGen(*this, *CoverageInfo)); |
437 | |
438 | // Generate the module name hash here if needed. |
439 | if (CodeGenOpts.UniqueInternalLinkageNames && |
440 | !getModule().getSourceFileName().empty()) { |
441 | std::string Path = getModule().getSourceFileName(); |
442 | // Check if a path substitution is needed from the MacroPrefixMap. |
443 | for (const auto &Entry : LangOpts.MacroPrefixMap) |
444 | if (Path.rfind(str: Entry.first, pos: 0) != std::string::npos) { |
445 | Path = Entry.second + Path.substr(pos: Entry.first.size()); |
446 | break; |
447 | } |
448 | ModuleNameHash = llvm::getUniqueInternalLinkagePostfix(FName: Path); |
449 | } |
450 | |
451 | // Record mregparm value now so it is visible through all of codegen. |
452 | if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86) |
453 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "NumRegisterParameters" , |
454 | Val: CodeGenOpts.NumRegisterParameters); |
455 | } |
456 | |
457 | CodeGenModule::~CodeGenModule() {} |
458 | |
459 | void CodeGenModule::createObjCRuntime() { |
460 | // This is just isGNUFamily(), but we want to force implementors of |
461 | // new ABIs to decide how best to do this. |
462 | switch (LangOpts.ObjCRuntime.getKind()) { |
463 | case ObjCRuntime::GNUstep: |
464 | case ObjCRuntime::GCC: |
465 | case ObjCRuntime::ObjFW: |
466 | ObjCRuntime.reset(p: CreateGNUObjCRuntime(CGM&: *this)); |
467 | return; |
468 | |
469 | case ObjCRuntime::FragileMacOSX: |
470 | case ObjCRuntime::MacOSX: |
471 | case ObjCRuntime::iOS: |
472 | case ObjCRuntime::WatchOS: |
473 | ObjCRuntime.reset(p: CreateMacObjCRuntime(CGM&: *this)); |
474 | return; |
475 | } |
476 | llvm_unreachable("bad runtime kind" ); |
477 | } |
478 | |
479 | void CodeGenModule::createOpenCLRuntime() { |
480 | OpenCLRuntime.reset(p: new CGOpenCLRuntime(*this)); |
481 | } |
482 | |
483 | void CodeGenModule::createOpenMPRuntime() { |
484 | // Select a specialized code generation class based on the target, if any. |
485 | // If it does not exist use the default implementation. |
486 | switch (getTriple().getArch()) { |
487 | case llvm::Triple::nvptx: |
488 | case llvm::Triple::nvptx64: |
489 | case llvm::Triple::amdgcn: |
490 | assert(getLangOpts().OpenMPIsTargetDevice && |
491 | "OpenMP AMDGPU/NVPTX is only prepared to deal with device code." ); |
492 | OpenMPRuntime.reset(p: new CGOpenMPRuntimeGPU(*this)); |
493 | break; |
494 | default: |
495 | if (LangOpts.OpenMPSimd) |
496 | OpenMPRuntime.reset(p: new CGOpenMPSIMDRuntime(*this)); |
497 | else |
498 | OpenMPRuntime.reset(p: new CGOpenMPRuntime(*this)); |
499 | break; |
500 | } |
501 | } |
502 | |
503 | void CodeGenModule::createCUDARuntime() { |
504 | CUDARuntime.reset(p: CreateNVCUDARuntime(CGM&: *this)); |
505 | } |
506 | |
507 | void CodeGenModule::createHLSLRuntime() { |
508 | HLSLRuntime.reset(p: new CGHLSLRuntime(*this)); |
509 | } |
510 | |
511 | void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) { |
512 | Replacements[Name] = C; |
513 | } |
514 | |
515 | void CodeGenModule::applyReplacements() { |
516 | for (auto &I : Replacements) { |
517 | StringRef MangledName = I.first; |
518 | llvm::Constant *Replacement = I.second; |
519 | llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName); |
520 | if (!Entry) |
521 | continue; |
522 | auto *OldF = cast<llvm::Function>(Val: Entry); |
523 | auto *NewF = dyn_cast<llvm::Function>(Val: Replacement); |
524 | if (!NewF) { |
525 | if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Val: Replacement)) { |
526 | NewF = dyn_cast<llvm::Function>(Val: Alias->getAliasee()); |
527 | } else { |
528 | auto *CE = cast<llvm::ConstantExpr>(Val: Replacement); |
529 | assert(CE->getOpcode() == llvm::Instruction::BitCast || |
530 | CE->getOpcode() == llvm::Instruction::GetElementPtr); |
531 | NewF = dyn_cast<llvm::Function>(Val: CE->getOperand(i_nocapture: 0)); |
532 | } |
533 | } |
534 | |
535 | // Replace old with new, but keep the old order. |
536 | OldF->replaceAllUsesWith(V: Replacement); |
537 | if (NewF) { |
538 | NewF->removeFromParent(); |
539 | OldF->getParent()->getFunctionList().insertAfter(where: OldF->getIterator(), |
540 | New: NewF); |
541 | } |
542 | OldF->eraseFromParent(); |
543 | } |
544 | } |
545 | |
546 | void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) { |
547 | GlobalValReplacements.push_back(Elt: std::make_pair(x&: GV, y&: C)); |
548 | } |
549 | |
550 | void CodeGenModule::applyGlobalValReplacements() { |
551 | for (auto &I : GlobalValReplacements) { |
552 | llvm::GlobalValue *GV = I.first; |
553 | llvm::Constant *C = I.second; |
554 | |
555 | GV->replaceAllUsesWith(V: C); |
556 | GV->eraseFromParent(); |
557 | } |
558 | } |
559 | |
560 | // This is only used in aliases that we created and we know they have a |
561 | // linear structure. |
562 | static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) { |
563 | const llvm::Constant *C; |
564 | if (auto *GA = dyn_cast<llvm::GlobalAlias>(Val: GV)) |
565 | C = GA->getAliasee(); |
566 | else if (auto *GI = dyn_cast<llvm::GlobalIFunc>(Val: GV)) |
567 | C = GI->getResolver(); |
568 | else |
569 | return GV; |
570 | |
571 | const auto *AliaseeGV = dyn_cast<llvm::GlobalValue>(Val: C->stripPointerCasts()); |
572 | if (!AliaseeGV) |
573 | return nullptr; |
574 | |
575 | const llvm::GlobalValue *FinalGV = AliaseeGV->getAliaseeObject(); |
576 | if (FinalGV == GV) |
577 | return nullptr; |
578 | |
579 | return FinalGV; |
580 | } |
581 | |
582 | static bool checkAliasedGlobal( |
583 | const ASTContext &Context, DiagnosticsEngine &Diags, SourceLocation Location, |
584 | bool IsIFunc, const llvm::GlobalValue *Alias, const llvm::GlobalValue *&GV, |
585 | const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames, |
586 | SourceRange AliasRange) { |
587 | GV = getAliasedGlobal(GV: Alias); |
588 | if (!GV) { |
589 | Diags.Report(Loc: Location, DiagID: diag::err_cyclic_alias) << IsIFunc; |
590 | return false; |
591 | } |
592 | |
593 | if (GV->hasCommonLinkage()) { |
594 | const llvm::Triple &Triple = Context.getTargetInfo().getTriple(); |
595 | if (Triple.getObjectFormat() == llvm::Triple::XCOFF) { |
596 | Diags.Report(Loc: Location, DiagID: diag::err_alias_to_common); |
597 | return false; |
598 | } |
599 | } |
600 | |
601 | if (GV->isDeclaration()) { |
602 | Diags.Report(Loc: Location, DiagID: diag::err_alias_to_undefined) << IsIFunc << IsIFunc; |
603 | Diags.Report(Loc: Location, DiagID: diag::note_alias_requires_mangled_name) |
604 | << IsIFunc << IsIFunc; |
605 | // Provide a note if the given function is not found and exists as a |
606 | // mangled name. |
607 | for (const auto &[Decl, Name] : MangledDeclNames) { |
608 | if (const auto *ND = dyn_cast<NamedDecl>(Val: Decl.getDecl())) { |
609 | if (ND->getName() == GV->getName()) { |
610 | Diags.Report(Loc: Location, DiagID: diag::note_alias_mangled_name_alternative) |
611 | << Name |
612 | << FixItHint::CreateReplacement( |
613 | RemoveRange: AliasRange, |
614 | Code: (Twine(IsIFunc ? "ifunc" : "alias" ) + "(\"" + Name + "\")" ) |
615 | .str()); |
616 | } |
617 | } |
618 | } |
619 | return false; |
620 | } |
621 | |
622 | if (IsIFunc) { |
623 | // Check resolver function type. |
624 | const auto *F = dyn_cast<llvm::Function>(Val: GV); |
625 | if (!F) { |
626 | Diags.Report(Loc: Location, DiagID: diag::err_alias_to_undefined) |
627 | << IsIFunc << IsIFunc; |
628 | return false; |
629 | } |
630 | |
631 | llvm::FunctionType *FTy = F->getFunctionType(); |
632 | if (!FTy->getReturnType()->isPointerTy()) { |
633 | Diags.Report(Loc: Location, DiagID: diag::err_ifunc_resolver_return); |
634 | return false; |
635 | } |
636 | } |
637 | |
638 | return true; |
639 | } |
640 | |
641 | // Emit a warning if toc-data attribute is requested for global variables that |
642 | // have aliases and remove the toc-data attribute. |
643 | static void checkAliasForTocData(llvm::GlobalVariable *GVar, |
644 | const CodeGenOptions &CodeGenOpts, |
645 | DiagnosticsEngine &Diags, |
646 | SourceLocation Location) { |
647 | if (GVar->hasAttribute(Kind: "toc-data" )) { |
648 | auto GVId = GVar->getName(); |
649 | // Is this a global variable specified by the user as local? |
650 | if ((llvm::binary_search(Range: CodeGenOpts.TocDataVarsUserSpecified, Value&: GVId))) { |
651 | Diags.Report(Loc: Location, DiagID: diag::warn_toc_unsupported_type) |
652 | << GVId << "the variable has an alias" ; |
653 | } |
654 | llvm::AttributeSet CurrAttributes = GVar->getAttributes(); |
655 | llvm::AttributeSet NewAttributes = |
656 | CurrAttributes.removeAttribute(C&: GVar->getContext(), Kind: "toc-data" ); |
657 | GVar->setAttributes(NewAttributes); |
658 | } |
659 | } |
660 | |
661 | void CodeGenModule::checkAliases() { |
662 | // Check if the constructed aliases are well formed. It is really unfortunate |
663 | // that we have to do this in CodeGen, but we only construct mangled names |
664 | // and aliases during codegen. |
665 | bool Error = false; |
666 | DiagnosticsEngine &Diags = getDiags(); |
667 | for (const GlobalDecl &GD : Aliases) { |
668 | const auto *D = cast<ValueDecl>(Val: GD.getDecl()); |
669 | SourceLocation Location; |
670 | SourceRange Range; |
671 | bool IsIFunc = D->hasAttr<IFuncAttr>(); |
672 | if (const Attr *A = D->getDefiningAttr()) { |
673 | Location = A->getLocation(); |
674 | Range = A->getRange(); |
675 | } else |
676 | llvm_unreachable("Not an alias or ifunc?" ); |
677 | |
678 | StringRef MangledName = getMangledName(GD); |
679 | llvm::GlobalValue *Alias = GetGlobalValue(Ref: MangledName); |
680 | const llvm::GlobalValue *GV = nullptr; |
681 | if (!checkAliasedGlobal(Context: getContext(), Diags, Location, IsIFunc, Alias, GV, |
682 | MangledDeclNames, AliasRange: Range)) { |
683 | Error = true; |
684 | continue; |
685 | } |
686 | |
687 | if (getContext().getTargetInfo().getTriple().isOSAIX()) |
688 | if (const llvm::GlobalVariable *GVar = |
689 | dyn_cast<const llvm::GlobalVariable>(Val: GV)) |
690 | checkAliasForTocData(GVar: const_cast<llvm::GlobalVariable *>(GVar), |
691 | CodeGenOpts: getCodeGenOpts(), Diags, Location); |
692 | |
693 | llvm::Constant *Aliasee = |
694 | IsIFunc ? cast<llvm::GlobalIFunc>(Val: Alias)->getResolver() |
695 | : cast<llvm::GlobalAlias>(Val: Alias)->getAliasee(); |
696 | |
697 | llvm::GlobalValue *AliaseeGV; |
698 | if (auto CE = dyn_cast<llvm::ConstantExpr>(Val: Aliasee)) |
699 | AliaseeGV = cast<llvm::GlobalValue>(Val: CE->getOperand(i_nocapture: 0)); |
700 | else |
701 | AliaseeGV = cast<llvm::GlobalValue>(Val: Aliasee); |
702 | |
703 | if (const SectionAttr *SA = D->getAttr<SectionAttr>()) { |
704 | StringRef AliasSection = SA->getName(); |
705 | if (AliasSection != AliaseeGV->getSection()) |
706 | Diags.Report(Loc: SA->getLocation(), DiagID: diag::warn_alias_with_section) |
707 | << AliasSection << IsIFunc << IsIFunc; |
708 | } |
709 | |
710 | // We have to handle alias to weak aliases in here. LLVM itself disallows |
711 | // this since the object semantics would not match the IL one. For |
712 | // compatibility with gcc we implement it by just pointing the alias |
713 | // to its aliasee's aliasee. We also warn, since the user is probably |
714 | // expecting the link to be weak. |
715 | if (auto *GA = dyn_cast<llvm::GlobalAlias>(Val: AliaseeGV)) { |
716 | if (GA->isInterposable()) { |
717 | Diags.Report(Loc: Location, DiagID: diag::warn_alias_to_weak_alias) |
718 | << GV->getName() << GA->getName() << IsIFunc; |
719 | Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( |
720 | C: GA->getAliasee(), Ty: Alias->getType()); |
721 | |
722 | if (IsIFunc) |
723 | cast<llvm::GlobalIFunc>(Val: Alias)->setResolver(Aliasee); |
724 | else |
725 | cast<llvm::GlobalAlias>(Val: Alias)->setAliasee(Aliasee); |
726 | } |
727 | } |
728 | // ifunc resolvers are usually implemented to run before sanitizer |
729 | // initialization. Disable instrumentation to prevent the ordering issue. |
730 | if (IsIFunc) |
731 | cast<llvm::Function>(Val: Aliasee)->addFnAttr( |
732 | Kind: llvm::Attribute::DisableSanitizerInstrumentation); |
733 | } |
734 | if (!Error) |
735 | return; |
736 | |
737 | for (const GlobalDecl &GD : Aliases) { |
738 | StringRef MangledName = getMangledName(GD); |
739 | llvm::GlobalValue *Alias = GetGlobalValue(Ref: MangledName); |
740 | Alias->replaceAllUsesWith(V: llvm::UndefValue::get(T: Alias->getType())); |
741 | Alias->eraseFromParent(); |
742 | } |
743 | } |
744 | |
745 | void CodeGenModule::clear() { |
746 | DeferredDeclsToEmit.clear(); |
747 | EmittedDeferredDecls.clear(); |
748 | DeferredAnnotations.clear(); |
749 | if (OpenMPRuntime) |
750 | OpenMPRuntime->clear(); |
751 | } |
752 | |
753 | void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags, |
754 | StringRef MainFile) { |
755 | if (!hasDiagnostics()) |
756 | return; |
757 | if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) { |
758 | if (MainFile.empty()) |
759 | MainFile = "<stdin>" ; |
760 | Diags.Report(DiagID: diag::warn_profile_data_unprofiled) << MainFile; |
761 | } else { |
762 | if (Mismatched > 0) |
763 | Diags.Report(DiagID: diag::warn_profile_data_out_of_date) << Visited << Mismatched; |
764 | |
765 | if (Missing > 0) |
766 | Diags.Report(DiagID: diag::warn_profile_data_missing) << Visited << Missing; |
767 | } |
768 | } |
769 | |
770 | static std::optional<llvm::GlobalValue::VisibilityTypes> |
771 | getLLVMVisibility(clang::LangOptions::VisibilityFromDLLStorageClassKinds K) { |
772 | // Map to LLVM visibility. |
773 | switch (K) { |
774 | case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Keep: |
775 | return std::nullopt; |
776 | case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Default: |
777 | return llvm::GlobalValue::DefaultVisibility; |
778 | case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Hidden: |
779 | return llvm::GlobalValue::HiddenVisibility; |
780 | case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Protected: |
781 | return llvm::GlobalValue::ProtectedVisibility; |
782 | } |
783 | llvm_unreachable("unknown option value!" ); |
784 | } |
785 | |
786 | void setLLVMVisibility(llvm::GlobalValue &GV, |
787 | std::optional<llvm::GlobalValue::VisibilityTypes> V) { |
788 | if (!V) |
789 | return; |
790 | |
791 | // Reset DSO locality before setting the visibility. This removes |
792 | // any effects that visibility options and annotations may have |
793 | // had on the DSO locality. Setting the visibility will implicitly set |
794 | // appropriate globals to DSO Local; however, this will be pessimistic |
795 | // w.r.t. to the normal compiler IRGen. |
796 | GV.setDSOLocal(false); |
797 | GV.setVisibility(*V); |
798 | } |
799 | |
800 | static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO, |
801 | llvm::Module &M) { |
802 | if (!LO.VisibilityFromDLLStorageClass) |
803 | return; |
804 | |
805 | std::optional<llvm::GlobalValue::VisibilityTypes> DLLExportVisibility = |
806 | getLLVMVisibility(K: LO.getDLLExportVisibility()); |
807 | |
808 | std::optional<llvm::GlobalValue::VisibilityTypes> |
809 | NoDLLStorageClassVisibility = |
810 | getLLVMVisibility(K: LO.getNoDLLStorageClassVisibility()); |
811 | |
812 | std::optional<llvm::GlobalValue::VisibilityTypes> |
813 | ExternDeclDLLImportVisibility = |
814 | getLLVMVisibility(K: LO.getExternDeclDLLImportVisibility()); |
815 | |
816 | std::optional<llvm::GlobalValue::VisibilityTypes> |
817 | ExternDeclNoDLLStorageClassVisibility = |
818 | getLLVMVisibility(K: LO.getExternDeclNoDLLStorageClassVisibility()); |
819 | |
820 | for (llvm::GlobalValue &GV : M.global_values()) { |
821 | if (GV.hasAppendingLinkage() || GV.hasLocalLinkage()) |
822 | continue; |
823 | |
824 | if (GV.isDeclarationForLinker()) |
825 | setLLVMVisibility(GV, V: GV.getDLLStorageClass() == |
826 | llvm::GlobalValue::DLLImportStorageClass |
827 | ? ExternDeclDLLImportVisibility |
828 | : ExternDeclNoDLLStorageClassVisibility); |
829 | else |
830 | setLLVMVisibility(GV, V: GV.getDLLStorageClass() == |
831 | llvm::GlobalValue::DLLExportStorageClass |
832 | ? DLLExportVisibility |
833 | : NoDLLStorageClassVisibility); |
834 | |
835 | GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
836 | } |
837 | } |
838 | |
839 | static bool isStackProtectorOn(const LangOptions &LangOpts, |
840 | const llvm::Triple &Triple, |
841 | clang::LangOptions::StackProtectorMode Mode) { |
842 | if (Triple.isAMDGPU() || Triple.isNVPTX()) |
843 | return false; |
844 | return LangOpts.getStackProtector() == Mode; |
845 | } |
846 | |
847 | void CodeGenModule::Release() { |
848 | Module *Primary = getContext().getCurrentNamedModule(); |
849 | if (CXX20ModuleInits && Primary && !Primary->isHeaderLikeModule()) |
850 | EmitModuleInitializers(Primary); |
851 | EmitDeferred(); |
852 | DeferredDecls.insert(I: EmittedDeferredDecls.begin(), |
853 | E: EmittedDeferredDecls.end()); |
854 | EmittedDeferredDecls.clear(); |
855 | EmitVTablesOpportunistically(); |
856 | applyGlobalValReplacements(); |
857 | applyReplacements(); |
858 | emitMultiVersionFunctions(); |
859 | |
860 | if (Context.getLangOpts().IncrementalExtensions && |
861 | GlobalTopLevelStmtBlockInFlight.first) { |
862 | const TopLevelStmtDecl *TLSD = GlobalTopLevelStmtBlockInFlight.second; |
863 | GlobalTopLevelStmtBlockInFlight.first->FinishFunction(EndLoc: TLSD->getEndLoc()); |
864 | GlobalTopLevelStmtBlockInFlight = {nullptr, nullptr}; |
865 | } |
866 | |
867 | // Module implementations are initialized the same way as a regular TU that |
868 | // imports one or more modules. |
869 | if (CXX20ModuleInits && Primary && Primary->isInterfaceOrPartition()) |
870 | EmitCXXModuleInitFunc(Primary); |
871 | else |
872 | EmitCXXGlobalInitFunc(); |
873 | EmitCXXGlobalCleanUpFunc(); |
874 | registerGlobalDtorsWithAtExit(); |
875 | EmitCXXThreadLocalInitFunc(); |
876 | if (ObjCRuntime) |
877 | if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction()) |
878 | AddGlobalCtor(Ctor: ObjCInitFunction); |
879 | if (Context.getLangOpts().CUDA && CUDARuntime) { |
880 | if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule()) |
881 | AddGlobalCtor(Ctor: CudaCtorFunction); |
882 | } |
883 | if (OpenMPRuntime) { |
884 | OpenMPRuntime->createOffloadEntriesAndInfoMetadata(); |
885 | OpenMPRuntime->clear(); |
886 | } |
887 | if (PGOReader) { |
888 | getModule().setProfileSummary( |
889 | M: PGOReader->getSummary(/* UseCS */ false).getMD(Context&: VMContext), |
890 | Kind: llvm::ProfileSummary::PSK_Instr); |
891 | if (PGOStats.hasDiagnostics()) |
892 | PGOStats.reportDiagnostics(Diags&: getDiags(), MainFile: getCodeGenOpts().MainFileName); |
893 | } |
894 | llvm::stable_sort(Range&: GlobalCtors, C: [](const Structor &L, const Structor &R) { |
895 | return L.LexOrder < R.LexOrder; |
896 | }); |
897 | EmitCtorList(Fns&: GlobalCtors, GlobalName: "llvm.global_ctors" ); |
898 | EmitCtorList(Fns&: GlobalDtors, GlobalName: "llvm.global_dtors" ); |
899 | EmitGlobalAnnotations(); |
900 | EmitStaticExternCAliases(); |
901 | checkAliases(); |
902 | EmitDeferredUnusedCoverageMappings(); |
903 | CodeGenPGO(*this).setValueProfilingFlag(getModule()); |
904 | CodeGenPGO(*this).setProfileVersion(getModule()); |
905 | if (CoverageMapping) |
906 | CoverageMapping->emit(); |
907 | if (CodeGenOpts.SanitizeCfiCrossDso) { |
908 | CodeGenFunction(*this).EmitCfiCheckFail(); |
909 | CodeGenFunction(*this).EmitCfiCheckStub(); |
910 | } |
911 | if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI)) |
912 | finalizeKCFITypes(); |
913 | emitAtAvailableLinkGuard(); |
914 | if (Context.getTargetInfo().getTriple().isWasm()) |
915 | EmitMainVoidAlias(); |
916 | |
917 | if (getTriple().isAMDGPU() || |
918 | (getTriple().isSPIRV() && getTriple().getVendor() == llvm::Triple::AMD)) { |
919 | // Emit amdhsa_code_object_version module flag, which is code object version |
920 | // times 100. |
921 | if (getTarget().getTargetOpts().CodeObjectVersion != |
922 | llvm::CodeObjectVersionKind::COV_None) { |
923 | getModule().addModuleFlag(Behavior: llvm::Module::Error, |
924 | Key: "amdhsa_code_object_version" , |
925 | Val: getTarget().getTargetOpts().CodeObjectVersion); |
926 | } |
927 | |
928 | // Currently, "-mprintf-kind" option is only supported for HIP |
929 | if (LangOpts.HIP) { |
930 | auto *MDStr = llvm::MDString::get( |
931 | Context&: getLLVMContext(), Str: (getTarget().getTargetOpts().AMDGPUPrintfKindVal == |
932 | TargetOptions::AMDGPUPrintfKind::Hostcall) |
933 | ? "hostcall" |
934 | : "buffered" ); |
935 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "amdgpu_printf_kind" , |
936 | Val: MDStr); |
937 | } |
938 | } |
939 | |
940 | // Emit a global array containing all external kernels or device variables |
941 | // used by host functions and mark it as used for CUDA/HIP. This is necessary |
942 | // to get kernels or device variables in archives linked in even if these |
943 | // kernels or device variables are only used in host functions. |
944 | if (!Context.CUDAExternalDeviceDeclODRUsedByHost.empty()) { |
945 | SmallVector<llvm::Constant *, 8> UsedArray; |
946 | for (auto D : Context.CUDAExternalDeviceDeclODRUsedByHost) { |
947 | GlobalDecl GD; |
948 | if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) |
949 | GD = GlobalDecl(FD, KernelReferenceKind::Kernel); |
950 | else |
951 | GD = GlobalDecl(D); |
952 | UsedArray.push_back(Elt: llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( |
953 | C: GetAddrOfGlobal(GD), Ty: Int8PtrTy)); |
954 | } |
955 | |
956 | llvm::ArrayType *ATy = llvm::ArrayType::get(ElementType: Int8PtrTy, NumElements: UsedArray.size()); |
957 | |
958 | auto *GV = new llvm::GlobalVariable( |
959 | getModule(), ATy, false, llvm::GlobalValue::InternalLinkage, |
960 | llvm::ConstantArray::get(T: ATy, V: UsedArray), "__clang_gpu_used_external" ); |
961 | addCompilerUsedGlobal(GV); |
962 | } |
963 | if (LangOpts.HIP && !getLangOpts().OffloadingNewDriver) { |
964 | // Emit a unique ID so that host and device binaries from the same |
965 | // compilation unit can be associated. |
966 | auto *GV = new llvm::GlobalVariable( |
967 | getModule(), Int8Ty, false, llvm::GlobalValue::ExternalLinkage, |
968 | llvm::Constant::getNullValue(Ty: Int8Ty), |
969 | "__hip_cuid_" + getContext().getCUIDHash()); |
970 | addCompilerUsedGlobal(GV); |
971 | } |
972 | emitLLVMUsed(); |
973 | if (SanStats) |
974 | SanStats->finish(); |
975 | |
976 | if (CodeGenOpts.Autolink && |
977 | (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) { |
978 | EmitModuleLinkOptions(); |
979 | } |
980 | |
981 | // On ELF we pass the dependent library specifiers directly to the linker |
982 | // without manipulating them. This is in contrast to other platforms where |
983 | // they are mapped to a specific linker option by the compiler. This |
984 | // difference is a result of the greater variety of ELF linkers and the fact |
985 | // that ELF linkers tend to handle libraries in a more complicated fashion |
986 | // than on other platforms. This forces us to defer handling the dependent |
987 | // libs to the linker. |
988 | // |
989 | // CUDA/HIP device and host libraries are different. Currently there is no |
990 | // way to differentiate dependent libraries for host or device. Existing |
991 | // usage of #pragma comment(lib, *) is intended for host libraries on |
992 | // Windows. Therefore emit llvm.dependent-libraries only for host. |
993 | if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) { |
994 | auto *NMD = getModule().getOrInsertNamedMetadata(Name: "llvm.dependent-libraries" ); |
995 | for (auto *MD : ELFDependentLibraries) |
996 | NMD->addOperand(M: MD); |
997 | } |
998 | |
999 | if (CodeGenOpts.DwarfVersion) { |
1000 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "Dwarf Version" , |
1001 | Val: CodeGenOpts.DwarfVersion); |
1002 | } |
1003 | |
1004 | if (CodeGenOpts.Dwarf64) |
1005 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "DWARF64" , Val: 1); |
1006 | |
1007 | if (Context.getLangOpts().SemanticInterposition) |
1008 | // Require various optimization to respect semantic interposition. |
1009 | getModule().setSemanticInterposition(true); |
1010 | |
1011 | if (CodeGenOpts.EmitCodeView) { |
1012 | // Indicate that we want CodeView in the metadata. |
1013 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "CodeView" , Val: 1); |
1014 | } |
1015 | if (CodeGenOpts.CodeViewGHash) { |
1016 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "CodeViewGHash" , Val: 1); |
1017 | } |
1018 | if (CodeGenOpts.ControlFlowGuard) { |
1019 | // Function ID tables and checks for Control Flow Guard (cfguard=2). |
1020 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "cfguard" , Val: 2); |
1021 | } else if (CodeGenOpts.ControlFlowGuardNoChecks) { |
1022 | // Function ID tables for Control Flow Guard (cfguard=1). |
1023 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "cfguard" , Val: 1); |
1024 | } |
1025 | if (CodeGenOpts.EHContGuard) { |
1026 | // Function ID tables for EH Continuation Guard. |
1027 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "ehcontguard" , Val: 1); |
1028 | } |
1029 | if (Context.getLangOpts().Kernel) { |
1030 | // Note if we are compiling with /kernel. |
1031 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "ms-kernel" , Val: 1); |
1032 | } |
1033 | if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) { |
1034 | // We don't support LTO with 2 with different StrictVTablePointers |
1035 | // FIXME: we could support it by stripping all the information introduced |
1036 | // by StrictVTablePointers. |
1037 | |
1038 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "StrictVTablePointers" ,Val: 1); |
1039 | |
1040 | llvm::Metadata *Ops[2] = { |
1041 | llvm::MDString::get(Context&: VMContext, Str: "StrictVTablePointers" ), |
1042 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get( |
1043 | Ty: llvm::Type::getInt32Ty(C&: VMContext), V: 1))}; |
1044 | |
1045 | getModule().addModuleFlag(Behavior: llvm::Module::Require, |
1046 | Key: "StrictVTablePointersRequirement" , |
1047 | Val: llvm::MDNode::get(Context&: VMContext, MDs: Ops)); |
1048 | } |
1049 | if (getModuleDebugInfo()) |
1050 | // We support a single version in the linked module. The LLVM |
1051 | // parser will drop debug info with a different version number |
1052 | // (and warn about it, too). |
1053 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "Debug Info Version" , |
1054 | Val: llvm::DEBUG_METADATA_VERSION); |
1055 | |
1056 | // We need to record the widths of enums and wchar_t, so that we can generate |
1057 | // the correct build attributes in the ARM backend. wchar_size is also used by |
1058 | // TargetLibraryInfo. |
1059 | uint64_t WCharWidth = |
1060 | Context.getTypeSizeInChars(T: Context.getWideCharType()).getQuantity(); |
1061 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "wchar_size" , Val: WCharWidth); |
1062 | |
1063 | if (getTriple().isOSzOS()) { |
1064 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, |
1065 | Key: "zos_product_major_version" , |
1066 | Val: uint32_t(CLANG_VERSION_MAJOR)); |
1067 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, |
1068 | Key: "zos_product_minor_version" , |
1069 | Val: uint32_t(CLANG_VERSION_MINOR)); |
1070 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "zos_product_patchlevel" , |
1071 | Val: uint32_t(CLANG_VERSION_PATCHLEVEL)); |
1072 | std::string ProductId = getClangVendor() + "clang" ; |
1073 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_product_id" , |
1074 | Val: llvm::MDString::get(Context&: VMContext, Str: ProductId)); |
1075 | |
1076 | // Record the language because we need it for the PPA2. |
1077 | StringRef lang_str = languageToString( |
1078 | L: LangStandard::getLangStandardForKind(K: LangOpts.LangStd).Language); |
1079 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_cu_language" , |
1080 | Val: llvm::MDString::get(Context&: VMContext, Str: lang_str)); |
1081 | |
1082 | time_t TT = PreprocessorOpts.SourceDateEpoch |
1083 | ? *PreprocessorOpts.SourceDateEpoch |
1084 | : std::time(timer: nullptr); |
1085 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "zos_translation_time" , |
1086 | Val: static_cast<uint64_t>(TT)); |
1087 | |
1088 | // Multiple modes will be supported here. |
1089 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_le_char_mode" , |
1090 | Val: llvm::MDString::get(Context&: VMContext, Str: "ascii" )); |
1091 | } |
1092 | |
1093 | llvm::Triple T = Context.getTargetInfo().getTriple(); |
1094 | if (T.isARM() || T.isThumb()) { |
1095 | // The minimum width of an enum in bytes |
1096 | uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4; |
1097 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "min_enum_size" , Val: EnumWidth); |
1098 | } |
1099 | |
1100 | if (T.isRISCV()) { |
1101 | StringRef ABIStr = Target.getABI(); |
1102 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1103 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "target-abi" , |
1104 | Val: llvm::MDString::get(Context&: Ctx, Str: ABIStr)); |
1105 | |
1106 | // Add the canonical ISA string as metadata so the backend can set the ELF |
1107 | // attributes correctly. We use AppendUnique so LTO will keep all of the |
1108 | // unique ISA strings that were linked together. |
1109 | const std::vector<std::string> &Features = |
1110 | getTarget().getTargetOpts().Features; |
1111 | auto ParseResult = |
1112 | llvm::RISCVISAInfo::parseFeatures(XLen: T.isRISCV64() ? 64 : 32, Features); |
1113 | if (!errorToBool(Err: ParseResult.takeError())) |
1114 | getModule().addModuleFlag( |
1115 | Behavior: llvm::Module::AppendUnique, Key: "riscv-isa" , |
1116 | Val: llvm::MDNode::get( |
1117 | Context&: Ctx, MDs: llvm::MDString::get(Context&: Ctx, Str: (*ParseResult)->toString()))); |
1118 | } |
1119 | |
1120 | if (CodeGenOpts.SanitizeCfiCrossDso) { |
1121 | // Indicate that we want cross-DSO control flow integrity checks. |
1122 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "Cross-DSO CFI" , Val: 1); |
1123 | } |
1124 | |
1125 | if (CodeGenOpts.WholeProgramVTables) { |
1126 | // Indicate whether VFE was enabled for this module, so that the |
1127 | // vcall_visibility metadata added under whole program vtables is handled |
1128 | // appropriately in the optimizer. |
1129 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "Virtual Function Elim" , |
1130 | Val: CodeGenOpts.VirtualFunctionElimination); |
1131 | } |
1132 | |
1133 | if (LangOpts.Sanitize.has(K: SanitizerKind::CFIICall)) { |
1134 | getModule().addModuleFlag(Behavior: llvm::Module::Override, |
1135 | Key: "CFI Canonical Jump Tables" , |
1136 | Val: CodeGenOpts.SanitizeCfiCanonicalJumpTables); |
1137 | } |
1138 | |
1139 | if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI)) { |
1140 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi" , Val: 1); |
1141 | // KCFI assumes patchable-function-prefix is the same for all indirectly |
1142 | // called functions. Store the expected offset for code generation. |
1143 | if (CodeGenOpts.PatchableFunctionEntryOffset) |
1144 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi-offset" , |
1145 | Val: CodeGenOpts.PatchableFunctionEntryOffset); |
1146 | } |
1147 | |
1148 | if (CodeGenOpts.CFProtectionReturn && |
1149 | Target.checkCFProtectionReturnSupported(Diags&: getDiags())) { |
1150 | // Indicate that we want to instrument return control flow protection. |
1151 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "cf-protection-return" , |
1152 | Val: 1); |
1153 | } |
1154 | |
1155 | if (CodeGenOpts.CFProtectionBranch && |
1156 | Target.checkCFProtectionBranchSupported(Diags&: getDiags())) { |
1157 | // Indicate that we want to instrument branch control flow protection. |
1158 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "cf-protection-branch" , |
1159 | Val: 1); |
1160 | } |
1161 | |
1162 | if (CodeGenOpts.FunctionReturnThunks) |
1163 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "function_return_thunk_extern" , Val: 1); |
1164 | |
1165 | if (CodeGenOpts.IndirectBranchCSPrefix) |
1166 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "indirect_branch_cs_prefix" , Val: 1); |
1167 | |
1168 | // Add module metadata for return address signing (ignoring |
1169 | // non-leaf/all) and stack tagging. These are actually turned on by function |
1170 | // attributes, but we use module metadata to emit build attributes. This is |
1171 | // needed for LTO, where the function attributes are inside bitcode |
1172 | // serialised into a global variable by the time build attributes are |
1173 | // emitted, so we can't access them. LTO objects could be compiled with |
1174 | // different flags therefore module flags are set to "Min" behavior to achieve |
1175 | // the same end result of the normal build where e.g BTI is off if any object |
1176 | // doesn't support it. |
1177 | if (Context.getTargetInfo().hasFeature(Feature: "ptrauth" ) && |
1178 | LangOpts.getSignReturnAddressScope() != |
1179 | LangOptions::SignReturnAddressScopeKind::None) |
1180 | getModule().addModuleFlag(Behavior: llvm::Module::Override, |
1181 | Key: "sign-return-address-buildattr" , Val: 1); |
1182 | if (LangOpts.Sanitize.has(K: SanitizerKind::MemtagStack)) |
1183 | getModule().addModuleFlag(Behavior: llvm::Module::Override, |
1184 | Key: "tag-stack-memory-buildattr" , Val: 1); |
1185 | |
1186 | if (T.isARM() || T.isThumb() || T.isAArch64()) { |
1187 | if (LangOpts.BranchTargetEnforcement) |
1188 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "branch-target-enforcement" , |
1189 | Val: 1); |
1190 | if (LangOpts.BranchProtectionPAuthLR) |
1191 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "branch-protection-pauth-lr" , |
1192 | Val: 1); |
1193 | if (LangOpts.GuardedControlStack) |
1194 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "guarded-control-stack" , Val: 1); |
1195 | if (LangOpts.hasSignReturnAddress()) |
1196 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "sign-return-address" , Val: 1); |
1197 | if (LangOpts.isSignReturnAddressScopeAll()) |
1198 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "sign-return-address-all" , |
1199 | Val: 1); |
1200 | if (!LangOpts.isSignReturnAddressWithAKey()) |
1201 | getModule().addModuleFlag(Behavior: llvm::Module::Min, |
1202 | Key: "sign-return-address-with-bkey" , Val: 1); |
1203 | |
1204 | if (getTriple().isOSLinux()) { |
1205 | assert(getTriple().isOSBinFormatELF()); |
1206 | using namespace llvm::ELF; |
1207 | uint64_t PAuthABIVersion = |
1208 | (LangOpts.PointerAuthIntrinsics |
1209 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INTRINSICS) | |
1210 | (LangOpts.PointerAuthCalls |
1211 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_CALLS) | |
1212 | (LangOpts.PointerAuthReturns |
1213 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_RETURNS) | |
1214 | (LangOpts.PointerAuthAuthTraps |
1215 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_AUTHTRAPS) | |
1216 | (LangOpts.PointerAuthVTPtrAddressDiscrimination |
1217 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRADDRDISCR) | |
1218 | (LangOpts.PointerAuthVTPtrTypeDiscrimination |
1219 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRTYPEDISCR) | |
1220 | (LangOpts.PointerAuthInitFini |
1221 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI); |
1222 | static_assert(AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI == |
1223 | AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_LAST, |
1224 | "Update when new enum items are defined" ); |
1225 | if (PAuthABIVersion != 0) { |
1226 | getModule().addModuleFlag(Behavior: llvm::Module::Error, |
1227 | Key: "aarch64-elf-pauthabi-platform" , |
1228 | Val: AARCH64_PAUTH_PLATFORM_LLVM_LINUX); |
1229 | getModule().addModuleFlag(Behavior: llvm::Module::Error, |
1230 | Key: "aarch64-elf-pauthabi-version" , |
1231 | Val: PAuthABIVersion); |
1232 | } |
1233 | } |
1234 | } |
1235 | |
1236 | if (CodeGenOpts.StackClashProtector) |
1237 | getModule().addModuleFlag( |
1238 | Behavior: llvm::Module::Override, Key: "probe-stack" , |
1239 | Val: llvm::MDString::get(Context&: TheModule.getContext(), Str: "inline-asm" )); |
1240 | |
1241 | if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096) |
1242 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "stack-probe-size" , |
1243 | Val: CodeGenOpts.StackProbeSize); |
1244 | |
1245 | if (!CodeGenOpts.MemoryProfileOutput.empty()) { |
1246 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1247 | getModule().addModuleFlag( |
1248 | Behavior: llvm::Module::Error, Key: "MemProfProfileFilename" , |
1249 | Val: llvm::MDString::get(Context&: Ctx, Str: CodeGenOpts.MemoryProfileOutput)); |
1250 | } |
1251 | |
1252 | if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) { |
1253 | // Indicate whether __nvvm_reflect should be configured to flush denormal |
1254 | // floating point values to 0. (This corresponds to its "__CUDA_FTZ" |
1255 | // property.) |
1256 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "nvvm-reflect-ftz" , |
1257 | Val: CodeGenOpts.FP32DenormalMode.Output != |
1258 | llvm::DenormalMode::IEEE); |
1259 | } |
1260 | |
1261 | if (LangOpts.EHAsynch) |
1262 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "eh-asynch" , Val: 1); |
1263 | |
1264 | // Indicate whether this Module was compiled with -fopenmp |
1265 | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) |
1266 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "openmp" , Val: LangOpts.OpenMP); |
1267 | if (getLangOpts().OpenMPIsTargetDevice) |
1268 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "openmp-device" , |
1269 | Val: LangOpts.OpenMP); |
1270 | |
1271 | // Emit OpenCL specific module metadata: OpenCL/SPIR version. |
1272 | if (LangOpts.OpenCL || (LangOpts.CUDAIsDevice && getTriple().isSPIRV())) { |
1273 | EmitOpenCLMetadata(); |
1274 | // Emit SPIR version. |
1275 | if (getTriple().isSPIR()) { |
1276 | // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the |
1277 | // opencl.spir.version named metadata. |
1278 | // C++ for OpenCL has a distinct mapping for version compatibility with |
1279 | // OpenCL. |
1280 | auto Version = LangOpts.getOpenCLCompatibleVersion(); |
1281 | llvm::Metadata *SPIRVerElts[] = { |
1282 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get( |
1283 | Ty: Int32Ty, V: Version / 100)), |
1284 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get( |
1285 | Ty: Int32Ty, V: (Version / 100 > 1) ? 0 : 2))}; |
1286 | llvm::NamedMDNode *SPIRVerMD = |
1287 | TheModule.getOrInsertNamedMetadata(Name: "opencl.spir.version" ); |
1288 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1289 | SPIRVerMD->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: SPIRVerElts)); |
1290 | } |
1291 | } |
1292 | |
1293 | // HLSL related end of code gen work items. |
1294 | if (LangOpts.HLSL) |
1295 | getHLSLRuntime().finishCodeGen(); |
1296 | |
1297 | if (uint32_t PLevel = Context.getLangOpts().PICLevel) { |
1298 | assert(PLevel < 3 && "Invalid PIC Level" ); |
1299 | getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel)); |
1300 | if (Context.getLangOpts().PIE) |
1301 | getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel)); |
1302 | } |
1303 | |
1304 | if (getCodeGenOpts().CodeModel.size() > 0) { |
1305 | unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel) |
1306 | .Case(S: "tiny" , Value: llvm::CodeModel::Tiny) |
1307 | .Case(S: "small" , Value: llvm::CodeModel::Small) |
1308 | .Case(S: "kernel" , Value: llvm::CodeModel::Kernel) |
1309 | .Case(S: "medium" , Value: llvm::CodeModel::Medium) |
1310 | .Case(S: "large" , Value: llvm::CodeModel::Large) |
1311 | .Default(Value: ~0u); |
1312 | if (CM != ~0u) { |
1313 | llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM); |
1314 | getModule().setCodeModel(codeModel); |
1315 | |
1316 | if ((CM == llvm::CodeModel::Medium || CM == llvm::CodeModel::Large) && |
1317 | Context.getTargetInfo().getTriple().getArch() == |
1318 | llvm::Triple::x86_64) { |
1319 | getModule().setLargeDataThreshold(getCodeGenOpts().LargeDataThreshold); |
1320 | } |
1321 | } |
1322 | } |
1323 | |
1324 | if (CodeGenOpts.NoPLT) |
1325 | getModule().setRtLibUseGOT(); |
1326 | if (getTriple().isOSBinFormatELF() && |
1327 | CodeGenOpts.DirectAccessExternalData != |
1328 | getModule().getDirectAccessExternalData()) { |
1329 | getModule().setDirectAccessExternalData( |
1330 | CodeGenOpts.DirectAccessExternalData); |
1331 | } |
1332 | if (CodeGenOpts.UnwindTables) |
1333 | getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables)); |
1334 | |
1335 | switch (CodeGenOpts.getFramePointer()) { |
1336 | case CodeGenOptions::FramePointerKind::None: |
1337 | // 0 ("none") is the default. |
1338 | break; |
1339 | case CodeGenOptions::FramePointerKind::Reserved: |
1340 | getModule().setFramePointer(llvm::FramePointerKind::Reserved); |
1341 | break; |
1342 | case CodeGenOptions::FramePointerKind::NonLeaf: |
1343 | getModule().setFramePointer(llvm::FramePointerKind::NonLeaf); |
1344 | break; |
1345 | case CodeGenOptions::FramePointerKind::All: |
1346 | getModule().setFramePointer(llvm::FramePointerKind::All); |
1347 | break; |
1348 | } |
1349 | |
1350 | SimplifyPersonality(); |
1351 | |
1352 | if (getCodeGenOpts().EmitDeclMetadata) |
1353 | EmitDeclMetadata(); |
1354 | |
1355 | if (getCodeGenOpts().CoverageNotesFile.size() || |
1356 | getCodeGenOpts().CoverageDataFile.size()) |
1357 | EmitCoverageFile(); |
1358 | |
1359 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
1360 | DI->finalize(); |
1361 | |
1362 | if (getCodeGenOpts().EmitVersionIdentMetadata) |
1363 | EmitVersionIdentMetadata(); |
1364 | |
1365 | if (!getCodeGenOpts().RecordCommandLine.empty()) |
1366 | EmitCommandLineMetadata(); |
1367 | |
1368 | if (!getCodeGenOpts().StackProtectorGuard.empty()) |
1369 | getModule().setStackProtectorGuard(getCodeGenOpts().StackProtectorGuard); |
1370 | if (!getCodeGenOpts().StackProtectorGuardReg.empty()) |
1371 | getModule().setStackProtectorGuardReg( |
1372 | getCodeGenOpts().StackProtectorGuardReg); |
1373 | if (!getCodeGenOpts().StackProtectorGuardSymbol.empty()) |
1374 | getModule().setStackProtectorGuardSymbol( |
1375 | getCodeGenOpts().StackProtectorGuardSymbol); |
1376 | if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX) |
1377 | getModule().setStackProtectorGuardOffset( |
1378 | getCodeGenOpts().StackProtectorGuardOffset); |
1379 | if (getCodeGenOpts().StackAlignment) |
1380 | getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment); |
1381 | if (getCodeGenOpts().SkipRaxSetup) |
1382 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "SkipRaxSetup" , Val: 1); |
1383 | if (getLangOpts().RegCall4) |
1384 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "RegCallv4" , Val: 1); |
1385 | |
1386 | if (getContext().getTargetInfo().getMaxTLSAlign()) |
1387 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "MaxTLSAlign" , |
1388 | Val: getContext().getTargetInfo().getMaxTLSAlign()); |
1389 | |
1390 | getTargetCodeGenInfo().emitTargetGlobals(CGM&: *this); |
1391 | |
1392 | getTargetCodeGenInfo().emitTargetMetadata(CGM&: *this, MangledDeclNames); |
1393 | |
1394 | EmitBackendOptionsMetadata(CodeGenOpts: getCodeGenOpts()); |
1395 | |
1396 | // If there is device offloading code embed it in the host now. |
1397 | EmbedObject(M: &getModule(), CGOpts: CodeGenOpts, Diags&: getDiags()); |
1398 | |
1399 | // Set visibility from DLL storage class |
1400 | // We do this at the end of LLVM IR generation; after any operation |
1401 | // that might affect the DLL storage class or the visibility, and |
1402 | // before anything that might act on these. |
1403 | setVisibilityFromDLLStorageClass(LO: LangOpts, M&: getModule()); |
1404 | |
1405 | // Check the tail call symbols are truly undefined. |
1406 | if (getTriple().isPPC() && !MustTailCallUndefinedGlobals.empty()) { |
1407 | for (auto &I : MustTailCallUndefinedGlobals) { |
1408 | if (!I.first->isDefined()) |
1409 | getDiags().Report(Loc: I.second, DiagID: diag::err_ppc_impossible_musttail) << 2; |
1410 | else { |
1411 | StringRef MangledName = getMangledName(GD: GlobalDecl(I.first)); |
1412 | llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName); |
1413 | if (!Entry || Entry->isWeakForLinker() || |
1414 | Entry->isDeclarationForLinker()) |
1415 | getDiags().Report(Loc: I.second, DiagID: diag::err_ppc_impossible_musttail) << 2; |
1416 | } |
1417 | } |
1418 | } |
1419 | } |
1420 | |
1421 | void CodeGenModule::EmitOpenCLMetadata() { |
1422 | // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the |
1423 | // opencl.ocl.version named metadata node. |
1424 | // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL. |
1425 | auto CLVersion = LangOpts.getOpenCLCompatibleVersion(); |
1426 | |
1427 | auto EmitVersion = [this](StringRef MDName, int Version) { |
1428 | llvm::Metadata *OCLVerElts[] = { |
1429 | llvm::ConstantAsMetadata::get( |
1430 | C: llvm::ConstantInt::get(Ty: Int32Ty, V: Version / 100)), |
1431 | llvm::ConstantAsMetadata::get( |
1432 | C: llvm::ConstantInt::get(Ty: Int32Ty, V: (Version % 100) / 10))}; |
1433 | llvm::NamedMDNode *OCLVerMD = TheModule.getOrInsertNamedMetadata(Name: MDName); |
1434 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1435 | OCLVerMD->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: OCLVerElts)); |
1436 | }; |
1437 | |
1438 | EmitVersion("opencl.ocl.version" , CLVersion); |
1439 | if (LangOpts.OpenCLCPlusPlus) { |
1440 | // In addition to the OpenCL compatible version, emit the C++ version. |
1441 | EmitVersion("opencl.cxx.version" , LangOpts.OpenCLCPlusPlusVersion); |
1442 | } |
1443 | } |
1444 | |
1445 | void CodeGenModule::EmitBackendOptionsMetadata( |
1446 | const CodeGenOptions &CodeGenOpts) { |
1447 | if (getTriple().isRISCV()) { |
1448 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "SmallDataLimit" , |
1449 | Val: CodeGenOpts.SmallDataLimit); |
1450 | } |
1451 | } |
1452 | |
1453 | void CodeGenModule::UpdateCompletedType(const TagDecl *TD) { |
1454 | // Make sure that this type is translated. |
1455 | Types.UpdateCompletedType(TD); |
1456 | } |
1457 | |
1458 | void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) { |
1459 | // Make sure that this type is translated. |
1460 | Types.RefreshTypeCacheForClass(RD); |
1461 | } |
1462 | |
1463 | llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) { |
1464 | if (!TBAA) |
1465 | return nullptr; |
1466 | return TBAA->getTypeInfo(QTy); |
1467 | } |
1468 | |
1469 | TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) { |
1470 | if (!TBAA) |
1471 | return TBAAAccessInfo(); |
1472 | if (getLangOpts().CUDAIsDevice) { |
1473 | // As CUDA builtin surface/texture types are replaced, skip generating TBAA |
1474 | // access info. |
1475 | if (AccessType->isCUDADeviceBuiltinSurfaceType()) { |
1476 | if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() != |
1477 | nullptr) |
1478 | return TBAAAccessInfo(); |
1479 | } else if (AccessType->isCUDADeviceBuiltinTextureType()) { |
1480 | if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() != |
1481 | nullptr) |
1482 | return TBAAAccessInfo(); |
1483 | } |
1484 | } |
1485 | return TBAA->getAccessInfo(AccessType); |
1486 | } |
1487 | |
1488 | TBAAAccessInfo |
1489 | CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) { |
1490 | if (!TBAA) |
1491 | return TBAAAccessInfo(); |
1492 | return TBAA->getVTablePtrAccessInfo(VTablePtrType); |
1493 | } |
1494 | |
1495 | llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) { |
1496 | if (!TBAA) |
1497 | return nullptr; |
1498 | return TBAA->getTBAAStructInfo(QTy); |
1499 | } |
1500 | |
1501 | llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) { |
1502 | if (!TBAA) |
1503 | return nullptr; |
1504 | return TBAA->getBaseTypeInfo(QTy); |
1505 | } |
1506 | |
1507 | llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) { |
1508 | if (!TBAA) |
1509 | return nullptr; |
1510 | return TBAA->getAccessTagInfo(Info); |
1511 | } |
1512 | |
1513 | TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, |
1514 | TBAAAccessInfo TargetInfo) { |
1515 | if (!TBAA) |
1516 | return TBAAAccessInfo(); |
1517 | return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo); |
1518 | } |
1519 | |
1520 | TBAAAccessInfo |
1521 | CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, |
1522 | TBAAAccessInfo InfoB) { |
1523 | if (!TBAA) |
1524 | return TBAAAccessInfo(); |
1525 | return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB); |
1526 | } |
1527 | |
1528 | TBAAAccessInfo |
1529 | CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo, |
1530 | TBAAAccessInfo SrcInfo) { |
1531 | if (!TBAA) |
1532 | return TBAAAccessInfo(); |
1533 | return TBAA->mergeTBAAInfoForConditionalOperator(InfoA: DestInfo, InfoB: SrcInfo); |
1534 | } |
1535 | |
1536 | void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst, |
1537 | TBAAAccessInfo TBAAInfo) { |
1538 | if (llvm::MDNode *Tag = getTBAAAccessTagInfo(Info: TBAAInfo)) |
1539 | Inst->setMetadata(KindID: llvm::LLVMContext::MD_tbaa, Node: Tag); |
1540 | } |
1541 | |
1542 | void CodeGenModule::DecorateInstructionWithInvariantGroup( |
1543 | llvm::Instruction *I, const CXXRecordDecl *RD) { |
1544 | I->setMetadata(KindID: llvm::LLVMContext::MD_invariant_group, |
1545 | Node: llvm::MDNode::get(Context&: getLLVMContext(), MDs: {})); |
1546 | } |
1547 | |
1548 | void CodeGenModule::Error(SourceLocation loc, StringRef message) { |
1549 | unsigned diagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, FormatString: "%0" ); |
1550 | getDiags().Report(Loc: Context.getFullLoc(Loc: loc), DiagID: diagID) << message; |
1551 | } |
1552 | |
1553 | /// ErrorUnsupported - Print out an error that codegen doesn't support the |
1554 | /// specified stmt yet. |
1555 | void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) { |
1556 | unsigned DiagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, |
1557 | FormatString: "cannot compile this %0 yet" ); |
1558 | std::string Msg = Type; |
1559 | getDiags().Report(Loc: Context.getFullLoc(Loc: S->getBeginLoc()), DiagID) |
1560 | << Msg << S->getSourceRange(); |
1561 | } |
1562 | |
1563 | /// ErrorUnsupported - Print out an error that codegen doesn't support the |
1564 | /// specified decl yet. |
1565 | void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) { |
1566 | unsigned DiagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, |
1567 | FormatString: "cannot compile this %0 yet" ); |
1568 | std::string Msg = Type; |
1569 | getDiags().Report(Loc: Context.getFullLoc(Loc: D->getLocation()), DiagID) << Msg; |
1570 | } |
1571 | |
1572 | llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) { |
1573 | return llvm::ConstantInt::get(Ty: SizeTy, V: size.getQuantity()); |
1574 | } |
1575 | |
1576 | void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV, |
1577 | const NamedDecl *D) const { |
1578 | // Internal definitions always have default visibility. |
1579 | if (GV->hasLocalLinkage()) { |
1580 | GV->setVisibility(llvm::GlobalValue::DefaultVisibility); |
1581 | return; |
1582 | } |
1583 | if (!D) |
1584 | return; |
1585 | |
1586 | // Set visibility for definitions, and for declarations if requested globally |
1587 | // or set explicitly. |
1588 | LinkageInfo LV = D->getLinkageAndVisibility(); |
1589 | |
1590 | // OpenMP declare target variables must be visible to the host so they can |
1591 | // be registered. We require protected visibility unless the variable has |
1592 | // the DT_nohost modifier and does not need to be registered. |
1593 | if (Context.getLangOpts().OpenMP && |
1594 | Context.getLangOpts().OpenMPIsTargetDevice && isa<VarDecl>(Val: D) && |
1595 | D->hasAttr<OMPDeclareTargetDeclAttr>() && |
1596 | D->getAttr<OMPDeclareTargetDeclAttr>()->getDevType() != |
1597 | OMPDeclareTargetDeclAttr::DT_NoHost && |
1598 | LV.getVisibility() == HiddenVisibility) { |
1599 | GV->setVisibility(llvm::GlobalValue::ProtectedVisibility); |
1600 | return; |
1601 | } |
1602 | |
1603 | if (GV->hasDLLExportStorageClass() || GV->hasDLLImportStorageClass()) { |
1604 | // Reject incompatible dlllstorage and visibility annotations. |
1605 | if (!LV.isVisibilityExplicit()) |
1606 | return; |
1607 | if (GV->hasDLLExportStorageClass()) { |
1608 | if (LV.getVisibility() == HiddenVisibility) |
1609 | getDiags().Report(Loc: D->getLocation(), |
1610 | DiagID: diag::err_hidden_visibility_dllexport); |
1611 | } else if (LV.getVisibility() != DefaultVisibility) { |
1612 | getDiags().Report(Loc: D->getLocation(), |
1613 | DiagID: diag::err_non_default_visibility_dllimport); |
1614 | } |
1615 | return; |
1616 | } |
1617 | |
1618 | if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls || |
1619 | !GV->isDeclarationForLinker()) |
1620 | GV->setVisibility(GetLLVMVisibility(V: LV.getVisibility())); |
1621 | } |
1622 | |
1623 | static bool shouldAssumeDSOLocal(const CodeGenModule &CGM, |
1624 | llvm::GlobalValue *GV) { |
1625 | if (GV->hasLocalLinkage()) |
1626 | return true; |
1627 | |
1628 | if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage()) |
1629 | return true; |
1630 | |
1631 | // DLLImport explicitly marks the GV as external. |
1632 | if (GV->hasDLLImportStorageClass()) |
1633 | return false; |
1634 | |
1635 | const llvm::Triple &TT = CGM.getTriple(); |
1636 | const auto &CGOpts = CGM.getCodeGenOpts(); |
1637 | if (TT.isWindowsGNUEnvironment()) { |
1638 | // In MinGW, variables without DLLImport can still be automatically |
1639 | // imported from a DLL by the linker; don't mark variables that |
1640 | // potentially could come from another DLL as DSO local. |
1641 | |
1642 | // With EmulatedTLS, TLS variables can be autoimported from other DLLs |
1643 | // (and this actually happens in the public interface of libstdc++), so |
1644 | // such variables can't be marked as DSO local. (Native TLS variables |
1645 | // can't be dllimported at all, though.) |
1646 | if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(Val: GV) && |
1647 | (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS) && |
1648 | CGOpts.AutoImport) |
1649 | return false; |
1650 | } |
1651 | |
1652 | // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols |
1653 | // remain unresolved in the link, they can be resolved to zero, which is |
1654 | // outside the current DSO. |
1655 | if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage()) |
1656 | return false; |
1657 | |
1658 | // Every other GV is local on COFF. |
1659 | // Make an exception for windows OS in the triple: Some firmware builds use |
1660 | // *-win32-macho triples. This (accidentally?) produced windows relocations |
1661 | // without GOT tables in older clang versions; Keep this behaviour. |
1662 | // FIXME: even thread local variables? |
1663 | if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO())) |
1664 | return true; |
1665 | |
1666 | // Only handle COFF and ELF for now. |
1667 | if (!TT.isOSBinFormatELF()) |
1668 | return false; |
1669 | |
1670 | // If this is not an executable, don't assume anything is local. |
1671 | llvm::Reloc::Model RM = CGOpts.RelocationModel; |
1672 | const auto &LOpts = CGM.getLangOpts(); |
1673 | if (RM != llvm::Reloc::Static && !LOpts.PIE) { |
1674 | // On ELF, if -fno-semantic-interposition is specified and the target |
1675 | // supports local aliases, there will be neither CC1 |
1676 | // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set |
1677 | // dso_local on the function if using a local alias is preferable (can avoid |
1678 | // PLT indirection). |
1679 | if (!(isa<llvm::Function>(Val: GV) && GV->canBenefitFromLocalAlias())) |
1680 | return false; |
1681 | return !(CGM.getLangOpts().SemanticInterposition || |
1682 | CGM.getLangOpts().HalfNoSemanticInterposition); |
1683 | } |
1684 | |
1685 | // A definition cannot be preempted from an executable. |
1686 | if (!GV->isDeclarationForLinker()) |
1687 | return true; |
1688 | |
1689 | // Most PIC code sequences that assume that a symbol is local cannot produce a |
1690 | // 0 if it turns out the symbol is undefined. While this is ABI and relocation |
1691 | // depended, it seems worth it to handle it here. |
1692 | if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage()) |
1693 | return false; |
1694 | |
1695 | // PowerPC64 prefers TOC indirection to avoid copy relocations. |
1696 | if (TT.isPPC64()) |
1697 | return false; |
1698 | |
1699 | if (CGOpts.DirectAccessExternalData) { |
1700 | // If -fdirect-access-external-data (default for -fno-pic), set dso_local |
1701 | // for non-thread-local variables. If the symbol is not defined in the |
1702 | // executable, a copy relocation will be needed at link time. dso_local is |
1703 | // excluded for thread-local variables because they generally don't support |
1704 | // copy relocations. |
1705 | if (auto *Var = dyn_cast<llvm::GlobalVariable>(Val: GV)) |
1706 | if (!Var->isThreadLocal()) |
1707 | return true; |
1708 | |
1709 | // -fno-pic sets dso_local on a function declaration to allow direct |
1710 | // accesses when taking its address (similar to a data symbol). If the |
1711 | // function is not defined in the executable, a canonical PLT entry will be |
1712 | // needed at link time. -fno-direct-access-external-data can avoid the |
1713 | // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as |
1714 | // it could just cause trouble without providing perceptible benefits. |
1715 | if (isa<llvm::Function>(Val: GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static) |
1716 | return true; |
1717 | } |
1718 | |
1719 | // If we can use copy relocations we can assume it is local. |
1720 | |
1721 | // Otherwise don't assume it is local. |
1722 | return false; |
1723 | } |
1724 | |
1725 | void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const { |
1726 | GV->setDSOLocal(shouldAssumeDSOLocal(CGM: *this, GV)); |
1727 | } |
1728 | |
1729 | void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV, |
1730 | GlobalDecl GD) const { |
1731 | const auto *D = dyn_cast<NamedDecl>(Val: GD.getDecl()); |
1732 | // C++ destructors have a few C++ ABI specific special cases. |
1733 | if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(Val: D)) { |
1734 | getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, DT: GD.getDtorType()); |
1735 | return; |
1736 | } |
1737 | setDLLImportDLLExport(GV, D); |
1738 | } |
1739 | |
1740 | void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV, |
1741 | const NamedDecl *D) const { |
1742 | if (D && D->isExternallyVisible()) { |
1743 | if (D->hasAttr<DLLImportAttr>()) |
1744 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); |
1745 | else if ((D->hasAttr<DLLExportAttr>() || |
1746 | shouldMapVisibilityToDLLExport(D)) && |
1747 | !GV->isDeclarationForLinker()) |
1748 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass); |
1749 | } |
1750 | } |
1751 | |
1752 | void CodeGenModule::setGVProperties(llvm::GlobalValue *GV, |
1753 | GlobalDecl GD) const { |
1754 | setDLLImportDLLExport(GV, GD); |
1755 | setGVPropertiesAux(GV, D: dyn_cast<NamedDecl>(Val: GD.getDecl())); |
1756 | } |
1757 | |
1758 | void CodeGenModule::setGVProperties(llvm::GlobalValue *GV, |
1759 | const NamedDecl *D) const { |
1760 | setDLLImportDLLExport(GV, D); |
1761 | setGVPropertiesAux(GV, D); |
1762 | } |
1763 | |
1764 | void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV, |
1765 | const NamedDecl *D) const { |
1766 | setGlobalVisibility(GV, D); |
1767 | setDSOLocal(GV); |
1768 | GV->setPartition(CodeGenOpts.SymbolPartition); |
1769 | } |
1770 | |
1771 | static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) { |
1772 | return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S) |
1773 | .Case(S: "global-dynamic" , Value: llvm::GlobalVariable::GeneralDynamicTLSModel) |
1774 | .Case(S: "local-dynamic" , Value: llvm::GlobalVariable::LocalDynamicTLSModel) |
1775 | .Case(S: "initial-exec" , Value: llvm::GlobalVariable::InitialExecTLSModel) |
1776 | .Case(S: "local-exec" , Value: llvm::GlobalVariable::LocalExecTLSModel); |
1777 | } |
1778 | |
1779 | llvm::GlobalVariable::ThreadLocalMode |
1780 | CodeGenModule::GetDefaultLLVMTLSModel() const { |
1781 | switch (CodeGenOpts.getDefaultTLSModel()) { |
1782 | case CodeGenOptions::GeneralDynamicTLSModel: |
1783 | return llvm::GlobalVariable::GeneralDynamicTLSModel; |
1784 | case CodeGenOptions::LocalDynamicTLSModel: |
1785 | return llvm::GlobalVariable::LocalDynamicTLSModel; |
1786 | case CodeGenOptions::InitialExecTLSModel: |
1787 | return llvm::GlobalVariable::InitialExecTLSModel; |
1788 | case CodeGenOptions::LocalExecTLSModel: |
1789 | return llvm::GlobalVariable::LocalExecTLSModel; |
1790 | } |
1791 | llvm_unreachable("Invalid TLS model!" ); |
1792 | } |
1793 | |
1794 | void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const { |
1795 | assert(D.getTLSKind() && "setting TLS mode on non-TLS var!" ); |
1796 | |
1797 | llvm::GlobalValue::ThreadLocalMode TLM; |
1798 | TLM = GetDefaultLLVMTLSModel(); |
1799 | |
1800 | // Override the TLS model if it is explicitly specified. |
1801 | if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) { |
1802 | TLM = GetLLVMTLSModel(S: Attr->getModel()); |
1803 | } |
1804 | |
1805 | GV->setThreadLocalMode(TLM); |
1806 | } |
1807 | |
1808 | static std::string getCPUSpecificMangling(const CodeGenModule &CGM, |
1809 | StringRef Name) { |
1810 | const TargetInfo &Target = CGM.getTarget(); |
1811 | return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str(); |
1812 | } |
1813 | |
1814 | static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM, |
1815 | const CPUSpecificAttr *Attr, |
1816 | unsigned CPUIndex, |
1817 | raw_ostream &Out) { |
1818 | // cpu_specific gets the current name, dispatch gets the resolver if IFunc is |
1819 | // supported. |
1820 | if (Attr) |
1821 | Out << getCPUSpecificMangling(CGM, Name: Attr->getCPUName(Index: CPUIndex)->getName()); |
1822 | else if (CGM.getTarget().supportsIFunc()) |
1823 | Out << ".resolver" ; |
1824 | } |
1825 | |
1826 | // Returns true if GD is a function decl with internal linkage and |
1827 | // needs a unique suffix after the mangled name. |
1828 | static bool isUniqueInternalLinkageDecl(GlobalDecl GD, |
1829 | CodeGenModule &CGM) { |
1830 | const Decl *D = GD.getDecl(); |
1831 | return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(Val: D) && |
1832 | (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage); |
1833 | } |
1834 | |
1835 | static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD, |
1836 | const NamedDecl *ND, |
1837 | bool OmitMultiVersionMangling = false) { |
1838 | SmallString<256> Buffer; |
1839 | llvm::raw_svector_ostream Out(Buffer); |
1840 | MangleContext &MC = CGM.getCXXABI().getMangleContext(); |
1841 | if (!CGM.getModuleNameHash().empty()) |
1842 | MC.needsUniqueInternalLinkageNames(); |
1843 | bool ShouldMangle = MC.shouldMangleDeclName(D: ND); |
1844 | if (ShouldMangle) |
1845 | MC.mangleName(GD: GD.getWithDecl(D: ND), Out); |
1846 | else { |
1847 | IdentifierInfo *II = ND->getIdentifier(); |
1848 | assert(II && "Attempt to mangle unnamed decl." ); |
1849 | const auto *FD = dyn_cast<FunctionDecl>(Val: ND); |
1850 | |
1851 | if (FD && |
1852 | FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) { |
1853 | if (CGM.getLangOpts().RegCall4) |
1854 | Out << "__regcall4__" << II->getName(); |
1855 | else |
1856 | Out << "__regcall3__" << II->getName(); |
1857 | } else if (FD && FD->hasAttr<CUDAGlobalAttr>() && |
1858 | GD.getKernelReferenceKind() == KernelReferenceKind::Stub) { |
1859 | Out << "__device_stub__" << II->getName(); |
1860 | } else { |
1861 | Out << II->getName(); |
1862 | } |
1863 | } |
1864 | |
1865 | // Check if the module name hash should be appended for internal linkage |
1866 | // symbols. This should come before multi-version target suffixes are |
1867 | // appended. This is to keep the name and module hash suffix of the |
1868 | // internal linkage function together. The unique suffix should only be |
1869 | // added when name mangling is done to make sure that the final name can |
1870 | // be properly demangled. For example, for C functions without prototypes, |
1871 | // name mangling is not done and the unique suffix should not be appeneded |
1872 | // then. |
1873 | if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) { |
1874 | assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames && |
1875 | "Hash computed when not explicitly requested" ); |
1876 | Out << CGM.getModuleNameHash(); |
1877 | } |
1878 | |
1879 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) |
1880 | if (FD->isMultiVersion() && !OmitMultiVersionMangling) { |
1881 | switch (FD->getMultiVersionKind()) { |
1882 | case MultiVersionKind::CPUDispatch: |
1883 | case MultiVersionKind::CPUSpecific: |
1884 | AppendCPUSpecificCPUDispatchMangling(CGM, |
1885 | Attr: FD->getAttr<CPUSpecificAttr>(), |
1886 | CPUIndex: GD.getMultiVersionIndex(), Out); |
1887 | break; |
1888 | case MultiVersionKind::Target: { |
1889 | auto *Attr = FD->getAttr<TargetAttr>(); |
1890 | assert(Attr && "Expected TargetAttr to be present " |
1891 | "for attribute mangling" ); |
1892 | const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo(); |
1893 | Info.appendAttributeMangling(Attr, Out); |
1894 | break; |
1895 | } |
1896 | case MultiVersionKind::TargetVersion: { |
1897 | auto *Attr = FD->getAttr<TargetVersionAttr>(); |
1898 | assert(Attr && "Expected TargetVersionAttr to be present " |
1899 | "for attribute mangling" ); |
1900 | const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo(); |
1901 | Info.appendAttributeMangling(Attr, Out); |
1902 | break; |
1903 | } |
1904 | case MultiVersionKind::TargetClones: { |
1905 | auto *Attr = FD->getAttr<TargetClonesAttr>(); |
1906 | assert(Attr && "Expected TargetClonesAttr to be present " |
1907 | "for attribute mangling" ); |
1908 | unsigned Index = GD.getMultiVersionIndex(); |
1909 | const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo(); |
1910 | Info.appendAttributeMangling(Attr, Index, Out); |
1911 | break; |
1912 | } |
1913 | case MultiVersionKind::None: |
1914 | llvm_unreachable("None multiversion type isn't valid here" ); |
1915 | } |
1916 | } |
1917 | |
1918 | // Make unique name for device side static file-scope variable for HIP. |
1919 | if (CGM.getContext().shouldExternalize(D: ND) && |
1920 | CGM.getLangOpts().GPURelocatableDeviceCode && |
1921 | CGM.getLangOpts().CUDAIsDevice) |
1922 | CGM.printPostfixForExternalizedDecl(OS&: Out, D: ND); |
1923 | |
1924 | return std::string(Out.str()); |
1925 | } |
1926 | |
1927 | void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD, |
1928 | const FunctionDecl *FD, |
1929 | StringRef &CurName) { |
1930 | if (!FD->isMultiVersion()) |
1931 | return; |
1932 | |
1933 | // Get the name of what this would be without the 'target' attribute. This |
1934 | // allows us to lookup the version that was emitted when this wasn't a |
1935 | // multiversion function. |
1936 | std::string NonTargetName = |
1937 | getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
1938 | GlobalDecl OtherGD; |
1939 | if (lookupRepresentativeDecl(MangledName: NonTargetName, Result&: OtherGD)) { |
1940 | assert(OtherGD.getCanonicalDecl() |
1941 | .getDecl() |
1942 | ->getAsFunction() |
1943 | ->isMultiVersion() && |
1944 | "Other GD should now be a multiversioned function" ); |
1945 | // OtherFD is the version of this function that was mangled BEFORE |
1946 | // becoming a MultiVersion function. It potentially needs to be updated. |
1947 | const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl() |
1948 | .getDecl() |
1949 | ->getAsFunction() |
1950 | ->getMostRecentDecl(); |
1951 | std::string OtherName = getMangledNameImpl(CGM&: *this, GD: OtherGD, ND: OtherFD); |
1952 | // This is so that if the initial version was already the 'default' |
1953 | // version, we don't try to update it. |
1954 | if (OtherName != NonTargetName) { |
1955 | // Remove instead of erase, since others may have stored the StringRef |
1956 | // to this. |
1957 | const auto ExistingRecord = Manglings.find(Key: NonTargetName); |
1958 | if (ExistingRecord != std::end(cont&: Manglings)) |
1959 | Manglings.remove(KeyValue: &(*ExistingRecord)); |
1960 | auto Result = Manglings.insert(KV: std::make_pair(x&: OtherName, y&: OtherGD)); |
1961 | StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] = |
1962 | Result.first->first(); |
1963 | // If this is the current decl is being created, make sure we update the name. |
1964 | if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl()) |
1965 | CurName = OtherNameRef; |
1966 | if (llvm::GlobalValue *Entry = GetGlobalValue(Ref: NonTargetName)) |
1967 | Entry->setName(OtherName); |
1968 | } |
1969 | } |
1970 | } |
1971 | |
1972 | StringRef CodeGenModule::getMangledName(GlobalDecl GD) { |
1973 | GlobalDecl CanonicalGD = GD.getCanonicalDecl(); |
1974 | |
1975 | // Some ABIs don't have constructor variants. Make sure that base and |
1976 | // complete constructors get mangled the same. |
1977 | if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: CanonicalGD.getDecl())) { |
1978 | if (!getTarget().getCXXABI().hasConstructorVariants()) { |
1979 | CXXCtorType OrigCtorType = GD.getCtorType(); |
1980 | assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete); |
1981 | if (OrigCtorType == Ctor_Base) |
1982 | CanonicalGD = GlobalDecl(CD, Ctor_Complete); |
1983 | } |
1984 | } |
1985 | |
1986 | // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a |
1987 | // static device variable depends on whether the variable is referenced by |
1988 | // a host or device host function. Therefore the mangled name cannot be |
1989 | // cached. |
1990 | if (!LangOpts.CUDAIsDevice || !getContext().mayExternalize(D: GD.getDecl())) { |
1991 | auto FoundName = MangledDeclNames.find(Key: CanonicalGD); |
1992 | if (FoundName != MangledDeclNames.end()) |
1993 | return FoundName->second; |
1994 | } |
1995 | |
1996 | // Keep the first result in the case of a mangling collision. |
1997 | const auto *ND = cast<NamedDecl>(Val: GD.getDecl()); |
1998 | std::string MangledName = getMangledNameImpl(CGM&: *this, GD, ND); |
1999 | |
2000 | // Ensure either we have different ABIs between host and device compilations, |
2001 | // says host compilation following MSVC ABI but device compilation follows |
2002 | // Itanium C++ ABI or, if they follow the same ABI, kernel names after |
2003 | // mangling should be the same after name stubbing. The later checking is |
2004 | // very important as the device kernel name being mangled in host-compilation |
2005 | // is used to resolve the device binaries to be executed. Inconsistent naming |
2006 | // result in undefined behavior. Even though we cannot check that naming |
2007 | // directly between host- and device-compilations, the host- and |
2008 | // device-mangling in host compilation could help catching certain ones. |
2009 | assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() || |
2010 | getContext().shouldExternalize(ND) || getLangOpts().CUDAIsDevice || |
2011 | (getContext().getAuxTargetInfo() && |
2012 | (getContext().getAuxTargetInfo()->getCXXABI() != |
2013 | getContext().getTargetInfo().getCXXABI())) || |
2014 | getCUDARuntime().getDeviceSideName(ND) == |
2015 | getMangledNameImpl( |
2016 | *this, |
2017 | GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel), |
2018 | ND)); |
2019 | |
2020 | auto Result = Manglings.insert(KV: std::make_pair(x&: MangledName, y&: GD)); |
2021 | return MangledDeclNames[CanonicalGD] = Result.first->first(); |
2022 | } |
2023 | |
2024 | StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD, |
2025 | const BlockDecl *BD) { |
2026 | MangleContext &MangleCtx = getCXXABI().getMangleContext(); |
2027 | const Decl *D = GD.getDecl(); |
2028 | |
2029 | SmallString<256> Buffer; |
2030 | llvm::raw_svector_ostream Out(Buffer); |
2031 | if (!D) |
2032 | MangleCtx.mangleGlobalBlock(BD, |
2033 | ID: dyn_cast_or_null<VarDecl>(Val: initializedGlobalDecl.getDecl()), Out); |
2034 | else if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: D)) |
2035 | MangleCtx.mangleCtorBlock(CD, CT: GD.getCtorType(), BD, Out); |
2036 | else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: D)) |
2037 | MangleCtx.mangleDtorBlock(CD: DD, DT: GD.getDtorType(), BD, Out); |
2038 | else |
2039 | MangleCtx.mangleBlock(DC: cast<DeclContext>(Val: D), BD, Out); |
2040 | |
2041 | auto Result = Manglings.insert(KV: std::make_pair(x: Out.str(), y&: BD)); |
2042 | return Result.first->first(); |
2043 | } |
2044 | |
2045 | const GlobalDecl CodeGenModule::getMangledNameDecl(StringRef Name) { |
2046 | auto it = MangledDeclNames.begin(); |
2047 | while (it != MangledDeclNames.end()) { |
2048 | if (it->second == Name) |
2049 | return it->first; |
2050 | it++; |
2051 | } |
2052 | return GlobalDecl(); |
2053 | } |
2054 | |
2055 | llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) { |
2056 | return getModule().getNamedValue(Name); |
2057 | } |
2058 | |
2059 | /// AddGlobalCtor - Add a function to the list that will be called before |
2060 | /// main() runs. |
2061 | void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority, |
2062 | unsigned LexOrder, |
2063 | llvm::Constant *AssociatedData) { |
2064 | // FIXME: Type coercion of void()* types. |
2065 | GlobalCtors.push_back(x: Structor(Priority, LexOrder, Ctor, AssociatedData)); |
2066 | } |
2067 | |
2068 | /// AddGlobalDtor - Add a function to the list that will be called |
2069 | /// when the module is unloaded. |
2070 | void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority, |
2071 | bool IsDtorAttrFunc) { |
2072 | if (CodeGenOpts.RegisterGlobalDtorsWithAtExit && |
2073 | (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) { |
2074 | DtorsUsingAtExit[Priority].push_back(NewVal: Dtor); |
2075 | return; |
2076 | } |
2077 | |
2078 | // FIXME: Type coercion of void()* types. |
2079 | GlobalDtors.push_back(x: Structor(Priority, ~0U, Dtor, nullptr)); |
2080 | } |
2081 | |
2082 | void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) { |
2083 | if (Fns.empty()) return; |
2084 | |
2085 | // Ctor function type is void()*. |
2086 | llvm::FunctionType* CtorFTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false); |
2087 | llvm::Type *CtorPFTy = llvm::PointerType::get(ElementType: CtorFTy, |
2088 | AddressSpace: TheModule.getDataLayout().getProgramAddressSpace()); |
2089 | |
2090 | // Get the type of a ctor entry, { i32, void ()*, i8* }. |
2091 | llvm::StructType *CtorStructTy = llvm::StructType::get( |
2092 | elt1: Int32Ty, elts: CtorPFTy, elts: VoidPtrTy); |
2093 | |
2094 | // Construct the constructor and destructor arrays. |
2095 | ConstantInitBuilder builder(*this); |
2096 | auto ctors = builder.beginArray(eltTy: CtorStructTy); |
2097 | for (const auto &I : Fns) { |
2098 | auto ctor = ctors.beginStruct(ty: CtorStructTy); |
2099 | ctor.addInt(intTy: Int32Ty, value: I.Priority); |
2100 | ctor.add(value: I.Initializer); |
2101 | if (I.AssociatedData) |
2102 | ctor.add(value: I.AssociatedData); |
2103 | else |
2104 | ctor.addNullPointer(ptrTy: VoidPtrTy); |
2105 | ctor.finishAndAddTo(parent&: ctors); |
2106 | } |
2107 | |
2108 | auto list = |
2109 | ctors.finishAndCreateGlobal(args&: GlobalName, args: getPointerAlign(), |
2110 | /*constant*/ args: false, |
2111 | args: llvm::GlobalValue::AppendingLinkage); |
2112 | |
2113 | // The LTO linker doesn't seem to like it when we set an alignment |
2114 | // on appending variables. Take it off as a workaround. |
2115 | list->setAlignment(std::nullopt); |
2116 | |
2117 | Fns.clear(); |
2118 | } |
2119 | |
2120 | llvm::GlobalValue::LinkageTypes |
2121 | CodeGenModule::getFunctionLinkage(GlobalDecl GD) { |
2122 | const auto *D = cast<FunctionDecl>(Val: GD.getDecl()); |
2123 | |
2124 | GVALinkage Linkage = getContext().GetGVALinkageForFunction(FD: D); |
2125 | |
2126 | if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(Val: D)) |
2127 | return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, DT: GD.getDtorType()); |
2128 | |
2129 | return getLLVMLinkageForDeclarator(D, Linkage); |
2130 | } |
2131 | |
2132 | llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) { |
2133 | llvm::MDString *MDS = dyn_cast<llvm::MDString>(Val: MD); |
2134 | if (!MDS) return nullptr; |
2135 | |
2136 | return llvm::ConstantInt::get(Ty: Int64Ty, V: llvm::MD5Hash(Str: MDS->getString())); |
2137 | } |
2138 | |
2139 | llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T) { |
2140 | if (auto *FnType = T->getAs<FunctionProtoType>()) |
2141 | T = getContext().getFunctionType( |
2142 | ResultTy: FnType->getReturnType(), Args: FnType->getParamTypes(), |
2143 | EPI: FnType->getExtProtoInfo().withExceptionSpec(ESI: EST_None)); |
2144 | |
2145 | std::string OutName; |
2146 | llvm::raw_string_ostream Out(OutName); |
2147 | getCXXABI().getMangleContext().mangleCanonicalTypeName( |
2148 | T, Out, NormalizeIntegers: getCodeGenOpts().SanitizeCfiICallNormalizeIntegers); |
2149 | |
2150 | if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers) |
2151 | Out << ".normalized" ; |
2152 | |
2153 | return llvm::ConstantInt::get(Ty: Int32Ty, |
2154 | V: static_cast<uint32_t>(llvm::xxHash64(Data: OutName))); |
2155 | } |
2156 | |
2157 | void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD, |
2158 | const CGFunctionInfo &Info, |
2159 | llvm::Function *F, bool IsThunk) { |
2160 | unsigned CallingConv; |
2161 | llvm::AttributeList PAL; |
2162 | ConstructAttributeList(Name: F->getName(), Info, CalleeInfo: GD, Attrs&: PAL, CallingConv, |
2163 | /*AttrOnCallSite=*/false, IsThunk); |
2164 | if (CallingConv == llvm::CallingConv::X86_VectorCall && |
2165 | getTarget().getTriple().isWindowsArm64EC()) { |
2166 | SourceLocation Loc; |
2167 | if (const Decl *D = GD.getDecl()) |
2168 | Loc = D->getLocation(); |
2169 | |
2170 | Error(loc: Loc, message: "__vectorcall calling convention is not currently supported" ); |
2171 | } |
2172 | F->setAttributes(PAL); |
2173 | F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); |
2174 | } |
2175 | |
2176 | static void removeImageAccessQualifier(std::string& TyName) { |
2177 | std::string ReadOnlyQual("__read_only" ); |
2178 | std::string::size_type ReadOnlyPos = TyName.find(str: ReadOnlyQual); |
2179 | if (ReadOnlyPos != std::string::npos) |
2180 | // "+ 1" for the space after access qualifier. |
2181 | TyName.erase(pos: ReadOnlyPos, n: ReadOnlyQual.size() + 1); |
2182 | else { |
2183 | std::string WriteOnlyQual("__write_only" ); |
2184 | std::string::size_type WriteOnlyPos = TyName.find(str: WriteOnlyQual); |
2185 | if (WriteOnlyPos != std::string::npos) |
2186 | TyName.erase(pos: WriteOnlyPos, n: WriteOnlyQual.size() + 1); |
2187 | else { |
2188 | std::string ReadWriteQual("__read_write" ); |
2189 | std::string::size_type ReadWritePos = TyName.find(str: ReadWriteQual); |
2190 | if (ReadWritePos != std::string::npos) |
2191 | TyName.erase(pos: ReadWritePos, n: ReadWriteQual.size() + 1); |
2192 | } |
2193 | } |
2194 | } |
2195 | |
2196 | // Returns the address space id that should be produced to the |
2197 | // kernel_arg_addr_space metadata. This is always fixed to the ids |
2198 | // as specified in the SPIR 2.0 specification in order to differentiate |
2199 | // for example in clGetKernelArgInfo() implementation between the address |
2200 | // spaces with targets without unique mapping to the OpenCL address spaces |
2201 | // (basically all single AS CPUs). |
2202 | static unsigned ArgInfoAddressSpace(LangAS AS) { |
2203 | switch (AS) { |
2204 | case LangAS::opencl_global: |
2205 | return 1; |
2206 | case LangAS::opencl_constant: |
2207 | return 2; |
2208 | case LangAS::opencl_local: |
2209 | return 3; |
2210 | case LangAS::opencl_generic: |
2211 | return 4; // Not in SPIR 2.0 specs. |
2212 | case LangAS::opencl_global_device: |
2213 | return 5; |
2214 | case LangAS::opencl_global_host: |
2215 | return 6; |
2216 | default: |
2217 | return 0; // Assume private. |
2218 | } |
2219 | } |
2220 | |
2221 | void CodeGenModule::GenKernelArgMetadata(llvm::Function *Fn, |
2222 | const FunctionDecl *FD, |
2223 | CodeGenFunction *CGF) { |
2224 | assert(((FD && CGF) || (!FD && !CGF)) && |
2225 | "Incorrect use - FD and CGF should either be both null or not!" ); |
2226 | // Create MDNodes that represent the kernel arg metadata. |
2227 | // Each MDNode is a list in the form of "key", N number of values which is |
2228 | // the same number of values as their are kernel arguments. |
2229 | |
2230 | const PrintingPolicy &Policy = Context.getPrintingPolicy(); |
2231 | |
2232 | // MDNode for the kernel argument address space qualifiers. |
2233 | SmallVector<llvm::Metadata *, 8> addressQuals; |
2234 | |
2235 | // MDNode for the kernel argument access qualifiers (images only). |
2236 | SmallVector<llvm::Metadata *, 8> accessQuals; |
2237 | |
2238 | // MDNode for the kernel argument type names. |
2239 | SmallVector<llvm::Metadata *, 8> argTypeNames; |
2240 | |
2241 | // MDNode for the kernel argument base type names. |
2242 | SmallVector<llvm::Metadata *, 8> argBaseTypeNames; |
2243 | |
2244 | // MDNode for the kernel argument type qualifiers. |
2245 | SmallVector<llvm::Metadata *, 8> argTypeQuals; |
2246 | |
2247 | // MDNode for the kernel argument names. |
2248 | SmallVector<llvm::Metadata *, 8> argNames; |
2249 | |
2250 | if (FD && CGF) |
2251 | for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) { |
2252 | const ParmVarDecl *parm = FD->getParamDecl(i); |
2253 | // Get argument name. |
2254 | argNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: parm->getName())); |
2255 | |
2256 | if (!getLangOpts().OpenCL) |
2257 | continue; |
2258 | QualType ty = parm->getType(); |
2259 | std::string typeQuals; |
2260 | |
2261 | // Get image and pipe access qualifier: |
2262 | if (ty->isImageType() || ty->isPipeType()) { |
2263 | const Decl *PDecl = parm; |
2264 | if (const auto *TD = ty->getAs<TypedefType>()) |
2265 | PDecl = TD->getDecl(); |
2266 | const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>(); |
2267 | if (A && A->isWriteOnly()) |
2268 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "write_only" )); |
2269 | else if (A && A->isReadWrite()) |
2270 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "read_write" )); |
2271 | else |
2272 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "read_only" )); |
2273 | } else |
2274 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "none" )); |
2275 | |
2276 | auto getTypeSpelling = [&](QualType Ty) { |
2277 | auto typeName = Ty.getUnqualifiedType().getAsString(Policy); |
2278 | |
2279 | if (Ty.isCanonical()) { |
2280 | StringRef typeNameRef = typeName; |
2281 | // Turn "unsigned type" to "utype" |
2282 | if (typeNameRef.consume_front(Prefix: "unsigned " )) |
2283 | return std::string("u" ) + typeNameRef.str(); |
2284 | if (typeNameRef.consume_front(Prefix: "signed " )) |
2285 | return typeNameRef.str(); |
2286 | } |
2287 | |
2288 | return typeName; |
2289 | }; |
2290 | |
2291 | if (ty->isPointerType()) { |
2292 | QualType pointeeTy = ty->getPointeeType(); |
2293 | |
2294 | // Get address qualifier. |
2295 | addressQuals.push_back( |
2296 | Elt: llvm::ConstantAsMetadata::get(C: CGF->Builder.getInt32( |
2297 | C: ArgInfoAddressSpace(AS: pointeeTy.getAddressSpace())))); |
2298 | |
2299 | // Get argument type name. |
2300 | std::string typeName = getTypeSpelling(pointeeTy) + "*" ; |
2301 | std::string baseTypeName = |
2302 | getTypeSpelling(pointeeTy.getCanonicalType()) + "*" ; |
2303 | argTypeNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeName)); |
2304 | argBaseTypeNames.push_back( |
2305 | Elt: llvm::MDString::get(Context&: VMContext, Str: baseTypeName)); |
2306 | |
2307 | // Get argument type qualifiers: |
2308 | if (ty.isRestrictQualified()) |
2309 | typeQuals = "restrict" ; |
2310 | if (pointeeTy.isConstQualified() || |
2311 | (pointeeTy.getAddressSpace() == LangAS::opencl_constant)) |
2312 | typeQuals += typeQuals.empty() ? "const" : " const" ; |
2313 | if (pointeeTy.isVolatileQualified()) |
2314 | typeQuals += typeQuals.empty() ? "volatile" : " volatile" ; |
2315 | } else { |
2316 | uint32_t AddrSpc = 0; |
2317 | bool isPipe = ty->isPipeType(); |
2318 | if (ty->isImageType() || isPipe) |
2319 | AddrSpc = ArgInfoAddressSpace(AS: LangAS::opencl_global); |
2320 | |
2321 | addressQuals.push_back( |
2322 | Elt: llvm::ConstantAsMetadata::get(C: CGF->Builder.getInt32(C: AddrSpc))); |
2323 | |
2324 | // Get argument type name. |
2325 | ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty; |
2326 | std::string typeName = getTypeSpelling(ty); |
2327 | std::string baseTypeName = getTypeSpelling(ty.getCanonicalType()); |
2328 | |
2329 | // Remove access qualifiers on images |
2330 | // (as they are inseparable from type in clang implementation, |
2331 | // but OpenCL spec provides a special query to get access qualifier |
2332 | // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER): |
2333 | if (ty->isImageType()) { |
2334 | removeImageAccessQualifier(TyName&: typeName); |
2335 | removeImageAccessQualifier(TyName&: baseTypeName); |
2336 | } |
2337 | |
2338 | argTypeNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeName)); |
2339 | argBaseTypeNames.push_back( |
2340 | Elt: llvm::MDString::get(Context&: VMContext, Str: baseTypeName)); |
2341 | |
2342 | if (isPipe) |
2343 | typeQuals = "pipe" ; |
2344 | } |
2345 | argTypeQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeQuals)); |
2346 | } |
2347 | |
2348 | if (getLangOpts().OpenCL) { |
2349 | Fn->setMetadata(Kind: "kernel_arg_addr_space" , |
2350 | Node: llvm::MDNode::get(Context&: VMContext, MDs: addressQuals)); |
2351 | Fn->setMetadata(Kind: "kernel_arg_access_qual" , |
2352 | Node: llvm::MDNode::get(Context&: VMContext, MDs: accessQuals)); |
2353 | Fn->setMetadata(Kind: "kernel_arg_type" , |
2354 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argTypeNames)); |
2355 | Fn->setMetadata(Kind: "kernel_arg_base_type" , |
2356 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argBaseTypeNames)); |
2357 | Fn->setMetadata(Kind: "kernel_arg_type_qual" , |
2358 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argTypeQuals)); |
2359 | } |
2360 | if (getCodeGenOpts().EmitOpenCLArgMetadata || |
2361 | getCodeGenOpts().HIPSaveKernelArgName) |
2362 | Fn->setMetadata(Kind: "kernel_arg_name" , |
2363 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argNames)); |
2364 | } |
2365 | |
2366 | /// Determines whether the language options require us to model |
2367 | /// unwind exceptions. We treat -fexceptions as mandating this |
2368 | /// except under the fragile ObjC ABI with only ObjC exceptions |
2369 | /// enabled. This means, for example, that C with -fexceptions |
2370 | /// enables this. |
2371 | static bool hasUnwindExceptions(const LangOptions &LangOpts) { |
2372 | // If exceptions are completely disabled, obviously this is false. |
2373 | if (!LangOpts.Exceptions) return false; |
2374 | |
2375 | // If C++ exceptions are enabled, this is true. |
2376 | if (LangOpts.CXXExceptions) return true; |
2377 | |
2378 | // If ObjC exceptions are enabled, this depends on the ABI. |
2379 | if (LangOpts.ObjCExceptions) { |
2380 | return LangOpts.ObjCRuntime.hasUnwindExceptions(); |
2381 | } |
2382 | |
2383 | return true; |
2384 | } |
2385 | |
2386 | static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM, |
2387 | const CXXMethodDecl *MD) { |
2388 | // Check that the type metadata can ever actually be used by a call. |
2389 | if (!CGM.getCodeGenOpts().LTOUnit || |
2390 | !CGM.HasHiddenLTOVisibility(RD: MD->getParent())) |
2391 | return false; |
2392 | |
2393 | // Only functions whose address can be taken with a member function pointer |
2394 | // need this sort of type metadata. |
2395 | return MD->isImplicitObjectMemberFunction() && !MD->isVirtual() && |
2396 | !isa<CXXConstructorDecl, CXXDestructorDecl>(Val: MD); |
2397 | } |
2398 | |
2399 | SmallVector<const CXXRecordDecl *, 0> |
2400 | CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) { |
2401 | llvm::SetVector<const CXXRecordDecl *> MostBases; |
2402 | |
2403 | std::function<void (const CXXRecordDecl *)> CollectMostBases; |
2404 | CollectMostBases = [&](const CXXRecordDecl *RD) { |
2405 | if (RD->getNumBases() == 0) |
2406 | MostBases.insert(X: RD); |
2407 | for (const CXXBaseSpecifier &B : RD->bases()) |
2408 | CollectMostBases(B.getType()->getAsCXXRecordDecl()); |
2409 | }; |
2410 | CollectMostBases(RD); |
2411 | return MostBases.takeVector(); |
2412 | } |
2413 | |
2414 | void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D, |
2415 | llvm::Function *F) { |
2416 | llvm::AttrBuilder B(F->getContext()); |
2417 | |
2418 | if ((!D || !D->hasAttr<NoUwtableAttr>()) && CodeGenOpts.UnwindTables) |
2419 | B.addUWTableAttr(Kind: llvm::UWTableKind(CodeGenOpts.UnwindTables)); |
2420 | |
2421 | if (CodeGenOpts.StackClashProtector) |
2422 | B.addAttribute(A: "probe-stack" , V: "inline-asm" ); |
2423 | |
2424 | if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096) |
2425 | B.addAttribute(A: "stack-probe-size" , |
2426 | V: std::to_string(val: CodeGenOpts.StackProbeSize)); |
2427 | |
2428 | if (!hasUnwindExceptions(LangOpts)) |
2429 | B.addAttribute(Val: llvm::Attribute::NoUnwind); |
2430 | |
2431 | if (D && D->hasAttr<NoStackProtectorAttr>()) |
2432 | ; // Do nothing. |
2433 | else if (D && D->hasAttr<StrictGuardStackCheckAttr>() && |
2434 | isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPOn)) |
2435 | B.addAttribute(Val: llvm::Attribute::StackProtectStrong); |
2436 | else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPOn)) |
2437 | B.addAttribute(Val: llvm::Attribute::StackProtect); |
2438 | else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPStrong)) |
2439 | B.addAttribute(Val: llvm::Attribute::StackProtectStrong); |
2440 | else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPReq)) |
2441 | B.addAttribute(Val: llvm::Attribute::StackProtectReq); |
2442 | |
2443 | if (!D) { |
2444 | // If we don't have a declaration to control inlining, the function isn't |
2445 | // explicitly marked as alwaysinline for semantic reasons, and inlining is |
2446 | // disabled, mark the function as noinline. |
2447 | if (!F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline) && |
2448 | CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) |
2449 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2450 | |
2451 | F->addFnAttrs(Attrs: B); |
2452 | return; |
2453 | } |
2454 | |
2455 | // Handle SME attributes that apply to function definitions, |
2456 | // rather than to function prototypes. |
2457 | if (D->hasAttr<ArmLocallyStreamingAttr>()) |
2458 | B.addAttribute(A: "aarch64_pstate_sm_body" ); |
2459 | |
2460 | if (auto *Attr = D->getAttr<ArmNewAttr>()) { |
2461 | if (Attr->isNewZA()) |
2462 | B.addAttribute(A: "aarch64_new_za" ); |
2463 | if (Attr->isNewZT0()) |
2464 | B.addAttribute(A: "aarch64_new_zt0" ); |
2465 | } |
2466 | |
2467 | // Track whether we need to add the optnone LLVM attribute, |
2468 | // starting with the default for this optimization level. |
2469 | bool ShouldAddOptNone = |
2470 | !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0; |
2471 | // We can't add optnone in the following cases, it won't pass the verifier. |
2472 | ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>(); |
2473 | ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>(); |
2474 | |
2475 | // Add optnone, but do so only if the function isn't always_inline. |
2476 | if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) && |
2477 | !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) { |
2478 | B.addAttribute(Val: llvm::Attribute::OptimizeNone); |
2479 | |
2480 | // OptimizeNone implies noinline; we should not be inlining such functions. |
2481 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2482 | |
2483 | // We still need to handle naked functions even though optnone subsumes |
2484 | // much of their semantics. |
2485 | if (D->hasAttr<NakedAttr>()) |
2486 | B.addAttribute(Val: llvm::Attribute::Naked); |
2487 | |
2488 | // OptimizeNone wins over OptimizeForSize and MinSize. |
2489 | F->removeFnAttr(Kind: llvm::Attribute::OptimizeForSize); |
2490 | F->removeFnAttr(Kind: llvm::Attribute::MinSize); |
2491 | } else if (D->hasAttr<NakedAttr>()) { |
2492 | // Naked implies noinline: we should not be inlining such functions. |
2493 | B.addAttribute(Val: llvm::Attribute::Naked); |
2494 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2495 | } else if (D->hasAttr<NoDuplicateAttr>()) { |
2496 | B.addAttribute(Val: llvm::Attribute::NoDuplicate); |
2497 | } else if (D->hasAttr<NoInlineAttr>() && !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) { |
2498 | // Add noinline if the function isn't always_inline. |
2499 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2500 | } else if (D->hasAttr<AlwaysInlineAttr>() && |
2501 | !F->hasFnAttribute(Kind: llvm::Attribute::NoInline)) { |
2502 | // (noinline wins over always_inline, and we can't specify both in IR) |
2503 | B.addAttribute(Val: llvm::Attribute::AlwaysInline); |
2504 | } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { |
2505 | // If we're not inlining, then force everything that isn't always_inline to |
2506 | // carry an explicit noinline attribute. |
2507 | if (!F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) |
2508 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2509 | } else { |
2510 | // Otherwise, propagate the inline hint attribute and potentially use its |
2511 | // absence to mark things as noinline. |
2512 | if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) { |
2513 | // Search function and template pattern redeclarations for inline. |
2514 | auto CheckForInline = [](const FunctionDecl *FD) { |
2515 | auto CheckRedeclForInline = [](const FunctionDecl *Redecl) { |
2516 | return Redecl->isInlineSpecified(); |
2517 | }; |
2518 | if (any_of(Range: FD->redecls(), P: CheckRedeclForInline)) |
2519 | return true; |
2520 | const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern(); |
2521 | if (!Pattern) |
2522 | return false; |
2523 | return any_of(Range: Pattern->redecls(), P: CheckRedeclForInline); |
2524 | }; |
2525 | if (CheckForInline(FD)) { |
2526 | B.addAttribute(Val: llvm::Attribute::InlineHint); |
2527 | } else if (CodeGenOpts.getInlining() == |
2528 | CodeGenOptions::OnlyHintInlining && |
2529 | !FD->isInlined() && |
2530 | !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) { |
2531 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2532 | } |
2533 | } |
2534 | } |
2535 | |
2536 | // Add other optimization related attributes if we are optimizing this |
2537 | // function. |
2538 | if (!D->hasAttr<OptimizeNoneAttr>()) { |
2539 | if (D->hasAttr<ColdAttr>()) { |
2540 | if (!ShouldAddOptNone) |
2541 | B.addAttribute(Val: llvm::Attribute::OptimizeForSize); |
2542 | B.addAttribute(Val: llvm::Attribute::Cold); |
2543 | } |
2544 | if (D->hasAttr<HotAttr>()) |
2545 | B.addAttribute(Val: llvm::Attribute::Hot); |
2546 | if (D->hasAttr<MinSizeAttr>()) |
2547 | B.addAttribute(Val: llvm::Attribute::MinSize); |
2548 | } |
2549 | |
2550 | F->addFnAttrs(Attrs: B); |
2551 | |
2552 | unsigned alignment = D->getMaxAlignment() / Context.getCharWidth(); |
2553 | if (alignment) |
2554 | F->setAlignment(llvm::Align(alignment)); |
2555 | |
2556 | if (!D->hasAttr<AlignedAttr>()) |
2557 | if (LangOpts.FunctionAlignment) |
2558 | F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment)); |
2559 | |
2560 | // Some C++ ABIs require 2-byte alignment for member functions, in order to |
2561 | // reserve a bit for differentiating between virtual and non-virtual member |
2562 | // functions. If the current target's C++ ABI requires this and this is a |
2563 | // member function, set its alignment accordingly. |
2564 | if (getTarget().getCXXABI().areMemberFunctionsAligned()) { |
2565 | if (isa<CXXMethodDecl>(Val: D) && F->getPointerAlignment(DL: getDataLayout()) < 2) |
2566 | F->setAlignment(std::max(a: llvm::Align(2), b: F->getAlign().valueOrOne())); |
2567 | } |
2568 | |
2569 | // In the cross-dso CFI mode with canonical jump tables, we want !type |
2570 | // attributes on definitions only. |
2571 | if (CodeGenOpts.SanitizeCfiCrossDso && |
2572 | CodeGenOpts.SanitizeCfiCanonicalJumpTables) { |
2573 | if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) { |
2574 | // Skip available_externally functions. They won't be codegen'ed in the |
2575 | // current module anyway. |
2576 | if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally) |
2577 | CreateFunctionTypeMetadataForIcall(FD, F); |
2578 | } |
2579 | } |
2580 | |
2581 | // Emit type metadata on member functions for member function pointer checks. |
2582 | // These are only ever necessary on definitions; we're guaranteed that the |
2583 | // definition will be present in the LTO unit as a result of LTO visibility. |
2584 | auto *MD = dyn_cast<CXXMethodDecl>(Val: D); |
2585 | if (MD && requiresMemberFunctionPointerTypeMetadata(CGM&: *this, MD)) { |
2586 | for (const CXXRecordDecl *Base : getMostBaseClasses(RD: MD->getParent())) { |
2587 | llvm::Metadata *Id = |
2588 | CreateMetadataIdentifierForType(T: Context.getMemberPointerType( |
2589 | T: MD->getType(), Cls: Context.getRecordType(Decl: Base).getTypePtr())); |
2590 | F->addTypeMetadata(Offset: 0, TypeID: Id); |
2591 | } |
2592 | } |
2593 | } |
2594 | |
2595 | void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) { |
2596 | const Decl *D = GD.getDecl(); |
2597 | if (isa_and_nonnull<NamedDecl>(Val: D)) |
2598 | setGVProperties(GV, GD); |
2599 | else |
2600 | GV->setVisibility(llvm::GlobalValue::DefaultVisibility); |
2601 | |
2602 | if (D && D->hasAttr<UsedAttr>()) |
2603 | addUsedOrCompilerUsedGlobal(GV); |
2604 | |
2605 | if (const auto *VD = dyn_cast_if_present<VarDecl>(Val: D); |
2606 | VD && |
2607 | ((CodeGenOpts.KeepPersistentStorageVariables && |
2608 | (VD->getStorageDuration() == SD_Static || |
2609 | VD->getStorageDuration() == SD_Thread)) || |
2610 | (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static && |
2611 | VD->getType().isConstQualified()))) |
2612 | addUsedOrCompilerUsedGlobal(GV); |
2613 | } |
2614 | |
2615 | bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD, |
2616 | llvm::AttrBuilder &Attrs, |
2617 | bool SetTargetFeatures) { |
2618 | // Add target-cpu and target-features attributes to functions. If |
2619 | // we have a decl for the function and it has a target attribute then |
2620 | // parse that and add it to the feature set. |
2621 | StringRef TargetCPU = getTarget().getTargetOpts().CPU; |
2622 | StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU; |
2623 | std::vector<std::string> Features; |
2624 | const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: GD.getDecl()); |
2625 | FD = FD ? FD->getMostRecentDecl() : FD; |
2626 | const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr; |
2627 | const auto *TV = FD ? FD->getAttr<TargetVersionAttr>() : nullptr; |
2628 | assert((!TD || !TV) && "both target_version and target specified" ); |
2629 | const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr; |
2630 | const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr; |
2631 | bool AddedAttr = false; |
2632 | if (TD || TV || SD || TC) { |
2633 | llvm::StringMap<bool> FeatureMap; |
2634 | getContext().getFunctionFeatureMap(FeatureMap, GD); |
2635 | |
2636 | // Produce the canonical string for this set of features. |
2637 | for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap) |
2638 | Features.push_back(x: (Entry.getValue() ? "+" : "-" ) + Entry.getKey().str()); |
2639 | |
2640 | // Now add the target-cpu and target-features to the function. |
2641 | // While we populated the feature map above, we still need to |
2642 | // get and parse the target attribute so we can get the cpu for |
2643 | // the function. |
2644 | if (TD) { |
2645 | ParsedTargetAttr ParsedAttr = |
2646 | Target.parseTargetAttr(Str: TD->getFeaturesStr()); |
2647 | if (!ParsedAttr.CPU.empty() && |
2648 | getTarget().isValidCPUName(Name: ParsedAttr.CPU)) { |
2649 | TargetCPU = ParsedAttr.CPU; |
2650 | TuneCPU = "" ; // Clear the tune CPU. |
2651 | } |
2652 | if (!ParsedAttr.Tune.empty() && |
2653 | getTarget().isValidCPUName(Name: ParsedAttr.Tune)) |
2654 | TuneCPU = ParsedAttr.Tune; |
2655 | } |
2656 | |
2657 | if (SD) { |
2658 | // Apply the given CPU name as the 'tune-cpu' so that the optimizer can |
2659 | // favor this processor. |
2660 | TuneCPU = SD->getCPUName(Index: GD.getMultiVersionIndex())->getName(); |
2661 | } |
2662 | } else { |
2663 | // Otherwise just add the existing target cpu and target features to the |
2664 | // function. |
2665 | Features = getTarget().getTargetOpts().Features; |
2666 | } |
2667 | |
2668 | if (!TargetCPU.empty()) { |
2669 | Attrs.addAttribute(A: "target-cpu" , V: TargetCPU); |
2670 | AddedAttr = true; |
2671 | } |
2672 | if (!TuneCPU.empty()) { |
2673 | Attrs.addAttribute(A: "tune-cpu" , V: TuneCPU); |
2674 | AddedAttr = true; |
2675 | } |
2676 | if (!Features.empty() && SetTargetFeatures) { |
2677 | llvm::erase_if(C&: Features, P: [&](const std::string& F) { |
2678 | return getTarget().isReadOnlyFeature(Feature: F.substr(pos: 1)); |
2679 | }); |
2680 | llvm::sort(C&: Features); |
2681 | Attrs.addAttribute(A: "target-features" , V: llvm::join(R&: Features, Separator: "," )); |
2682 | AddedAttr = true; |
2683 | } |
2684 | |
2685 | return AddedAttr; |
2686 | } |
2687 | |
2688 | void CodeGenModule::setNonAliasAttributes(GlobalDecl GD, |
2689 | llvm::GlobalObject *GO) { |
2690 | const Decl *D = GD.getDecl(); |
2691 | SetCommonAttributes(GD, GV: GO); |
2692 | |
2693 | if (D) { |
2694 | if (auto *GV = dyn_cast<llvm::GlobalVariable>(Val: GO)) { |
2695 | if (D->hasAttr<RetainAttr>()) |
2696 | addUsedGlobal(GV); |
2697 | if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>()) |
2698 | GV->addAttribute(Kind: "bss-section" , Val: SA->getName()); |
2699 | if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>()) |
2700 | GV->addAttribute(Kind: "data-section" , Val: SA->getName()); |
2701 | if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>()) |
2702 | GV->addAttribute(Kind: "rodata-section" , Val: SA->getName()); |
2703 | if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>()) |
2704 | GV->addAttribute(Kind: "relro-section" , Val: SA->getName()); |
2705 | } |
2706 | |
2707 | if (auto *F = dyn_cast<llvm::Function>(Val: GO)) { |
2708 | if (D->hasAttr<RetainAttr>()) |
2709 | addUsedGlobal(GV: F); |
2710 | if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>()) |
2711 | if (!D->getAttr<SectionAttr>()) |
2712 | F->setSection(SA->getName()); |
2713 | |
2714 | llvm::AttrBuilder Attrs(F->getContext()); |
2715 | if (GetCPUAndFeaturesAttributes(GD, Attrs)) { |
2716 | // We know that GetCPUAndFeaturesAttributes will always have the |
2717 | // newest set, since it has the newest possible FunctionDecl, so the |
2718 | // new ones should replace the old. |
2719 | llvm::AttributeMask RemoveAttrs; |
2720 | RemoveAttrs.addAttribute(A: "target-cpu" ); |
2721 | RemoveAttrs.addAttribute(A: "target-features" ); |
2722 | RemoveAttrs.addAttribute(A: "tune-cpu" ); |
2723 | F->removeFnAttrs(Attrs: RemoveAttrs); |
2724 | F->addFnAttrs(Attrs); |
2725 | } |
2726 | } |
2727 | |
2728 | if (const auto *CSA = D->getAttr<CodeSegAttr>()) |
2729 | GO->setSection(CSA->getName()); |
2730 | else if (const auto *SA = D->getAttr<SectionAttr>()) |
2731 | GO->setSection(SA->getName()); |
2732 | } |
2733 | |
2734 | getTargetCodeGenInfo().setTargetAttributes(D, GV: GO, M&: *this); |
2735 | } |
2736 | |
2737 | void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD, |
2738 | llvm::Function *F, |
2739 | const CGFunctionInfo &FI) { |
2740 | const Decl *D = GD.getDecl(); |
2741 | SetLLVMFunctionAttributes(GD, Info: FI, F, /*IsThunk=*/false); |
2742 | SetLLVMFunctionAttributesForDefinition(D, F); |
2743 | |
2744 | F->setLinkage(llvm::Function::InternalLinkage); |
2745 | |
2746 | setNonAliasAttributes(GD, GO: F); |
2747 | } |
2748 | |
2749 | static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) { |
2750 | // Set linkage and visibility in case we never see a definition. |
2751 | LinkageInfo LV = ND->getLinkageAndVisibility(); |
2752 | // Don't set internal linkage on declarations. |
2753 | // "extern_weak" is overloaded in LLVM; we probably should have |
2754 | // separate linkage types for this. |
2755 | if (isExternallyVisible(L: LV.getLinkage()) && |
2756 | (ND->hasAttr<WeakAttr>() || ND->isWeakImported())) |
2757 | GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage); |
2758 | } |
2759 | |
2760 | void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD, |
2761 | llvm::Function *F) { |
2762 | // Only if we are checking indirect calls. |
2763 | if (!LangOpts.Sanitize.has(K: SanitizerKind::CFIICall)) |
2764 | return; |
2765 | |
2766 | // Non-static class methods are handled via vtable or member function pointer |
2767 | // checks elsewhere. |
2768 | if (isa<CXXMethodDecl>(Val: FD) && !cast<CXXMethodDecl>(Val: FD)->isStatic()) |
2769 | return; |
2770 | |
2771 | llvm::Metadata *MD = CreateMetadataIdentifierForType(T: FD->getType()); |
2772 | F->addTypeMetadata(Offset: 0, TypeID: MD); |
2773 | F->addTypeMetadata(Offset: 0, TypeID: CreateMetadataIdentifierGeneralized(T: FD->getType())); |
2774 | |
2775 | // Emit a hash-based bit set entry for cross-DSO calls. |
2776 | if (CodeGenOpts.SanitizeCfiCrossDso) |
2777 | if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD)) |
2778 | F->addTypeMetadata(Offset: 0, TypeID: llvm::ConstantAsMetadata::get(C: CrossDsoTypeId)); |
2779 | } |
2780 | |
2781 | void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) { |
2782 | llvm::LLVMContext &Ctx = F->getContext(); |
2783 | llvm::MDBuilder MDB(Ctx); |
2784 | F->setMetadata(KindID: llvm::LLVMContext::MD_kcfi_type, |
2785 | Node: llvm::MDNode::get( |
2786 | Context&: Ctx, MDs: MDB.createConstant(C: CreateKCFITypeId(T: FD->getType())))); |
2787 | } |
2788 | |
2789 | static bool allowKCFIIdentifier(StringRef Name) { |
2790 | // KCFI type identifier constants are only necessary for external assembly |
2791 | // functions, which means it's safe to skip unusual names. Subset of |
2792 | // MCAsmInfo::isAcceptableChar() and MCAsmInfoXCOFF::isAcceptableChar(). |
2793 | return llvm::all_of(Range&: Name, P: [](const char &C) { |
2794 | return llvm::isAlnum(C) || C == '_' || C == '.'; |
2795 | }); |
2796 | } |
2797 | |
2798 | void CodeGenModule::finalizeKCFITypes() { |
2799 | llvm::Module &M = getModule(); |
2800 | for (auto &F : M.functions()) { |
2801 | // Remove KCFI type metadata from non-address-taken local functions. |
2802 | bool AddressTaken = F.hasAddressTaken(); |
2803 | if (!AddressTaken && F.hasLocalLinkage()) |
2804 | F.eraseMetadata(KindID: llvm::LLVMContext::MD_kcfi_type); |
2805 | |
2806 | // Generate a constant with the expected KCFI type identifier for all |
2807 | // address-taken function declarations to support annotating indirectly |
2808 | // called assembly functions. |
2809 | if (!AddressTaken || !F.isDeclaration()) |
2810 | continue; |
2811 | |
2812 | const llvm::ConstantInt *Type; |
2813 | if (const llvm::MDNode *MD = F.getMetadata(KindID: llvm::LLVMContext::MD_kcfi_type)) |
2814 | Type = llvm::mdconst::extract<llvm::ConstantInt>(MD: MD->getOperand(I: 0)); |
2815 | else |
2816 | continue; |
2817 | |
2818 | StringRef Name = F.getName(); |
2819 | if (!allowKCFIIdentifier(Name)) |
2820 | continue; |
2821 | |
2822 | std::string Asm = (".weak __kcfi_typeid_" + Name + "\n.set __kcfi_typeid_" + |
2823 | Name + ", " + Twine(Type->getZExtValue()) + "\n" ) |
2824 | .str(); |
2825 | M.appendModuleInlineAsm(Asm); |
2826 | } |
2827 | } |
2828 | |
2829 | void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F, |
2830 | bool IsIncompleteFunction, |
2831 | bool IsThunk) { |
2832 | |
2833 | if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) { |
2834 | // If this is an intrinsic function, set the function's attributes |
2835 | // to the intrinsic's attributes. |
2836 | F->setAttributes(llvm::Intrinsic::getAttributes(C&: getLLVMContext(), id: IID)); |
2837 | return; |
2838 | } |
2839 | |
2840 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
2841 | |
2842 | if (!IsIncompleteFunction) |
2843 | SetLLVMFunctionAttributes(GD, Info: getTypes().arrangeGlobalDeclaration(GD), F, |
2844 | IsThunk); |
2845 | |
2846 | // Add the Returned attribute for "this", except for iOS 5 and earlier |
2847 | // where substantial code, including the libstdc++ dylib, was compiled with |
2848 | // GCC and does not actually return "this". |
2849 | if (!IsThunk && getCXXABI().HasThisReturn(GD) && |
2850 | !(getTriple().isiOS() && getTriple().isOSVersionLT(Major: 6))) { |
2851 | assert(!F->arg_empty() && |
2852 | F->arg_begin()->getType() |
2853 | ->canLosslesslyBitCastTo(F->getReturnType()) && |
2854 | "unexpected this return" ); |
2855 | F->addParamAttr(ArgNo: 0, Kind: llvm::Attribute::Returned); |
2856 | } |
2857 | |
2858 | // Only a few attributes are set on declarations; these may later be |
2859 | // overridden by a definition. |
2860 | |
2861 | setLinkageForGV(GV: F, ND: FD); |
2862 | setGVProperties(GV: F, D: FD); |
2863 | |
2864 | // Setup target-specific attributes. |
2865 | if (!IsIncompleteFunction && F->isDeclaration()) |
2866 | getTargetCodeGenInfo().setTargetAttributes(D: FD, GV: F, M&: *this); |
2867 | |
2868 | if (const auto *CSA = FD->getAttr<CodeSegAttr>()) |
2869 | F->setSection(CSA->getName()); |
2870 | else if (const auto *SA = FD->getAttr<SectionAttr>()) |
2871 | F->setSection(SA->getName()); |
2872 | |
2873 | if (const auto *EA = FD->getAttr<ErrorAttr>()) { |
2874 | if (EA->isError()) |
2875 | F->addFnAttr(Kind: "dontcall-error" , Val: EA->getUserDiagnostic()); |
2876 | else if (EA->isWarning()) |
2877 | F->addFnAttr(Kind: "dontcall-warn" , Val: EA->getUserDiagnostic()); |
2878 | } |
2879 | |
2880 | // If we plan on emitting this inline builtin, we can't treat it as a builtin. |
2881 | if (FD->isInlineBuiltinDeclaration()) { |
2882 | const FunctionDecl *FDBody; |
2883 | bool HasBody = FD->hasBody(Definition&: FDBody); |
2884 | (void)HasBody; |
2885 | assert(HasBody && "Inline builtin declarations should always have an " |
2886 | "available body!" ); |
2887 | if (shouldEmitFunction(GD: FDBody)) |
2888 | F->addFnAttr(Kind: llvm::Attribute::NoBuiltin); |
2889 | } |
2890 | |
2891 | if (FD->isReplaceableGlobalAllocationFunction()) { |
2892 | // A replaceable global allocation function does not act like a builtin by |
2893 | // default, only if it is invoked by a new-expression or delete-expression. |
2894 | F->addFnAttr(Kind: llvm::Attribute::NoBuiltin); |
2895 | } |
2896 | |
2897 | if (isa<CXXConstructorDecl>(Val: FD) || isa<CXXDestructorDecl>(Val: FD)) |
2898 | F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
2899 | else if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD)) |
2900 | if (MD->isVirtual()) |
2901 | F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
2902 | |
2903 | // Don't emit entries for function declarations in the cross-DSO mode. This |
2904 | // is handled with better precision by the receiving DSO. But if jump tables |
2905 | // are non-canonical then we need type metadata in order to produce the local |
2906 | // jump table. |
2907 | if (!CodeGenOpts.SanitizeCfiCrossDso || |
2908 | !CodeGenOpts.SanitizeCfiCanonicalJumpTables) |
2909 | CreateFunctionTypeMetadataForIcall(FD, F); |
2910 | |
2911 | if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI)) |
2912 | setKCFIType(FD, F); |
2913 | |
2914 | if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>()) |
2915 | getOpenMPRuntime().emitDeclareSimdFunction(FD, Fn: F); |
2916 | |
2917 | if (CodeGenOpts.InlineMaxStackSize != UINT_MAX) |
2918 | F->addFnAttr(Kind: "inline-max-stacksize" , Val: llvm::utostr(X: CodeGenOpts.InlineMaxStackSize)); |
2919 | |
2920 | if (const auto *CB = FD->getAttr<CallbackAttr>()) { |
2921 | // Annotate the callback behavior as metadata: |
2922 | // - The callback callee (as argument number). |
2923 | // - The callback payloads (as argument numbers). |
2924 | llvm::LLVMContext &Ctx = F->getContext(); |
2925 | llvm::MDBuilder MDB(Ctx); |
2926 | |
2927 | // The payload indices are all but the first one in the encoding. The first |
2928 | // identifies the callback callee. |
2929 | int CalleeIdx = *CB->encoding_begin(); |
2930 | ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end()); |
2931 | F->addMetadata(KindID: llvm::LLVMContext::MD_callback, |
2932 | MD&: *llvm::MDNode::get(Context&: Ctx, MDs: {MDB.createCallbackEncoding( |
2933 | CalleeArgNo: CalleeIdx, Arguments: PayloadIndices, |
2934 | /* VarArgsArePassed */ false)})); |
2935 | } |
2936 | } |
2937 | |
2938 | void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) { |
2939 | assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) && |
2940 | "Only globals with definition can force usage." ); |
2941 | LLVMUsed.emplace_back(args&: GV); |
2942 | } |
2943 | |
2944 | void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) { |
2945 | assert(!GV->isDeclaration() && |
2946 | "Only globals with definition can force usage." ); |
2947 | LLVMCompilerUsed.emplace_back(args&: GV); |
2948 | } |
2949 | |
2950 | void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) { |
2951 | assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) && |
2952 | "Only globals with definition can force usage." ); |
2953 | if (getTriple().isOSBinFormatELF()) |
2954 | LLVMCompilerUsed.emplace_back(args&: GV); |
2955 | else |
2956 | LLVMUsed.emplace_back(args&: GV); |
2957 | } |
2958 | |
2959 | static void emitUsed(CodeGenModule &CGM, StringRef Name, |
2960 | std::vector<llvm::WeakTrackingVH> &List) { |
2961 | // Don't create llvm.used if there is no need. |
2962 | if (List.empty()) |
2963 | return; |
2964 | |
2965 | // Convert List to what ConstantArray needs. |
2966 | SmallVector<llvm::Constant*, 8> UsedArray; |
2967 | UsedArray.resize(N: List.size()); |
2968 | for (unsigned i = 0, e = List.size(); i != e; ++i) { |
2969 | UsedArray[i] = |
2970 | llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( |
2971 | C: cast<llvm::Constant>(Val: &*List[i]), Ty: CGM.Int8PtrTy); |
2972 | } |
2973 | |
2974 | if (UsedArray.empty()) |
2975 | return; |
2976 | llvm::ArrayType *ATy = llvm::ArrayType::get(ElementType: CGM.Int8PtrTy, NumElements: UsedArray.size()); |
2977 | |
2978 | auto *GV = new llvm::GlobalVariable( |
2979 | CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage, |
2980 | llvm::ConstantArray::get(T: ATy, V: UsedArray), Name); |
2981 | |
2982 | GV->setSection("llvm.metadata" ); |
2983 | } |
2984 | |
2985 | void CodeGenModule::emitLLVMUsed() { |
2986 | emitUsed(CGM&: *this, Name: "llvm.used" , List&: LLVMUsed); |
2987 | emitUsed(CGM&: *this, Name: "llvm.compiler.used" , List&: LLVMCompilerUsed); |
2988 | } |
2989 | |
2990 | void CodeGenModule::AppendLinkerOptions(StringRef Opts) { |
2991 | auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opts); |
2992 | LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: getLLVMContext(), MDs: MDOpts)); |
2993 | } |
2994 | |
2995 | void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) { |
2996 | llvm::SmallString<32> Opt; |
2997 | getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt); |
2998 | if (Opt.empty()) |
2999 | return; |
3000 | auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opt); |
3001 | LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: getLLVMContext(), MDs: MDOpts)); |
3002 | } |
3003 | |
3004 | void CodeGenModule::AddDependentLib(StringRef Lib) { |
3005 | auto &C = getLLVMContext(); |
3006 | if (getTarget().getTriple().isOSBinFormatELF()) { |
3007 | ELFDependentLibraries.push_back( |
3008 | Elt: llvm::MDNode::get(Context&: C, MDs: llvm::MDString::get(Context&: C, Str: Lib))); |
3009 | return; |
3010 | } |
3011 | |
3012 | llvm::SmallString<24> Opt; |
3013 | getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt); |
3014 | auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opt); |
3015 | LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: C, MDs: MDOpts)); |
3016 | } |
3017 | |
3018 | /// Add link options implied by the given module, including modules |
3019 | /// it depends on, using a postorder walk. |
3020 | static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod, |
3021 | SmallVectorImpl<llvm::MDNode *> &Metadata, |
3022 | llvm::SmallPtrSet<Module *, 16> &Visited) { |
3023 | // Import this module's parent. |
3024 | if (Mod->Parent && Visited.insert(Ptr: Mod->Parent).second) { |
3025 | addLinkOptionsPostorder(CGM, Mod: Mod->Parent, Metadata, Visited); |
3026 | } |
3027 | |
3028 | // Import this module's dependencies. |
3029 | for (Module *Import : llvm::reverse(C&: Mod->Imports)) { |
3030 | if (Visited.insert(Ptr: Import).second) |
3031 | addLinkOptionsPostorder(CGM, Mod: Import, Metadata, Visited); |
3032 | } |
3033 | |
3034 | // Add linker options to link against the libraries/frameworks |
3035 | // described by this module. |
3036 | llvm::LLVMContext &Context = CGM.getLLVMContext(); |
3037 | bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF(); |
3038 | |
3039 | // For modules that use export_as for linking, use that module |
3040 | // name instead. |
3041 | if (Mod->UseExportAsModuleLinkName) |
3042 | return; |
3043 | |
3044 | for (const Module::LinkLibrary &LL : llvm::reverse(C&: Mod->LinkLibraries)) { |
3045 | // Link against a framework. Frameworks are currently Darwin only, so we |
3046 | // don't to ask TargetCodeGenInfo for the spelling of the linker option. |
3047 | if (LL.IsFramework) { |
3048 | llvm::Metadata *Args[2] = {llvm::MDString::get(Context, Str: "-framework" ), |
3049 | llvm::MDString::get(Context, Str: LL.Library)}; |
3050 | |
3051 | Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: Args)); |
3052 | continue; |
3053 | } |
3054 | |
3055 | // Link against a library. |
3056 | if (IsELF) { |
3057 | llvm::Metadata *Args[2] = { |
3058 | llvm::MDString::get(Context, Str: "lib" ), |
3059 | llvm::MDString::get(Context, Str: LL.Library), |
3060 | }; |
3061 | Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: Args)); |
3062 | } else { |
3063 | llvm::SmallString<24> Opt; |
3064 | CGM.getTargetCodeGenInfo().getDependentLibraryOption(Lib: LL.Library, Opt); |
3065 | auto *OptString = llvm::MDString::get(Context, Str: Opt); |
3066 | Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: OptString)); |
3067 | } |
3068 | } |
3069 | } |
3070 | |
3071 | void CodeGenModule::EmitModuleInitializers(clang::Module *Primary) { |
3072 | assert(Primary->isNamedModuleUnit() && |
3073 | "We should only emit module initializers for named modules." ); |
3074 | |
3075 | // Emit the initializers in the order that sub-modules appear in the |
3076 | // source, first Global Module Fragments, if present. |
3077 | if (auto GMF = Primary->getGlobalModuleFragment()) { |
3078 | for (Decl *D : getContext().getModuleInitializers(M: GMF)) { |
3079 | if (isa<ImportDecl>(Val: D)) |
3080 | continue; |
3081 | assert(isa<VarDecl>(D) && "GMF initializer decl is not a var?" ); |
3082 | EmitTopLevelDecl(D); |
3083 | } |
3084 | } |
3085 | // Second any associated with the module, itself. |
3086 | for (Decl *D : getContext().getModuleInitializers(M: Primary)) { |
3087 | // Skip import decls, the inits for those are called explicitly. |
3088 | if (isa<ImportDecl>(Val: D)) |
3089 | continue; |
3090 | EmitTopLevelDecl(D); |
3091 | } |
3092 | // Third any associated with the Privat eMOdule Fragment, if present. |
3093 | if (auto PMF = Primary->getPrivateModuleFragment()) { |
3094 | for (Decl *D : getContext().getModuleInitializers(M: PMF)) { |
3095 | // Skip import decls, the inits for those are called explicitly. |
3096 | if (isa<ImportDecl>(Val: D)) |
3097 | continue; |
3098 | assert(isa<VarDecl>(D) && "PMF initializer decl is not a var?" ); |
3099 | EmitTopLevelDecl(D); |
3100 | } |
3101 | } |
3102 | } |
3103 | |
3104 | void CodeGenModule::EmitModuleLinkOptions() { |
3105 | // Collect the set of all of the modules we want to visit to emit link |
3106 | // options, which is essentially the imported modules and all of their |
3107 | // non-explicit child modules. |
3108 | llvm::SetVector<clang::Module *> LinkModules; |
3109 | llvm::SmallPtrSet<clang::Module *, 16> Visited; |
3110 | SmallVector<clang::Module *, 16> Stack; |
3111 | |
3112 | // Seed the stack with imported modules. |
3113 | for (Module *M : ImportedModules) { |
3114 | // Do not add any link flags when an implementation TU of a module imports |
3115 | // a header of that same module. |
3116 | if (M->getTopLevelModuleName() == getLangOpts().CurrentModule && |
3117 | !getLangOpts().isCompilingModule()) |
3118 | continue; |
3119 | if (Visited.insert(Ptr: M).second) |
3120 | Stack.push_back(Elt: M); |
3121 | } |
3122 | |
3123 | // Find all of the modules to import, making a little effort to prune |
3124 | // non-leaf modules. |
3125 | while (!Stack.empty()) { |
3126 | clang::Module *Mod = Stack.pop_back_val(); |
3127 | |
3128 | bool AnyChildren = false; |
3129 | |
3130 | // Visit the submodules of this module. |
3131 | for (const auto &SM : Mod->submodules()) { |
3132 | // Skip explicit children; they need to be explicitly imported to be |
3133 | // linked against. |
3134 | if (SM->IsExplicit) |
3135 | continue; |
3136 | |
3137 | if (Visited.insert(Ptr: SM).second) { |
3138 | Stack.push_back(Elt: SM); |
3139 | AnyChildren = true; |
3140 | } |
3141 | } |
3142 | |
3143 | // We didn't find any children, so add this module to the list of |
3144 | // modules to link against. |
3145 | if (!AnyChildren) { |
3146 | LinkModules.insert(X: Mod); |
3147 | } |
3148 | } |
3149 | |
3150 | // Add link options for all of the imported modules in reverse topological |
3151 | // order. We don't do anything to try to order import link flags with respect |
3152 | // to linker options inserted by things like #pragma comment(). |
3153 | SmallVector<llvm::MDNode *, 16> MetadataArgs; |
3154 | Visited.clear(); |
3155 | for (Module *M : LinkModules) |
3156 | if (Visited.insert(Ptr: M).second) |
3157 | addLinkOptionsPostorder(CGM&: *this, Mod: M, Metadata&: MetadataArgs, Visited); |
3158 | std::reverse(first: MetadataArgs.begin(), last: MetadataArgs.end()); |
3159 | LinkerOptionsMetadata.append(in_start: MetadataArgs.begin(), in_end: MetadataArgs.end()); |
3160 | |
3161 | // Add the linker options metadata flag. |
3162 | auto *NMD = getModule().getOrInsertNamedMetadata(Name: "llvm.linker.options" ); |
3163 | for (auto *MD : LinkerOptionsMetadata) |
3164 | NMD->addOperand(M: MD); |
3165 | } |
3166 | |
3167 | void CodeGenModule::EmitDeferred() { |
3168 | // Emit deferred declare target declarations. |
3169 | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) |
3170 | getOpenMPRuntime().emitDeferredTargetDecls(); |
3171 | |
3172 | // Emit code for any potentially referenced deferred decls. Since a |
3173 | // previously unused static decl may become used during the generation of code |
3174 | // for a static function, iterate until no changes are made. |
3175 | |
3176 | if (!DeferredVTables.empty()) { |
3177 | EmitDeferredVTables(); |
3178 | |
3179 | // Emitting a vtable doesn't directly cause more vtables to |
3180 | // become deferred, although it can cause functions to be |
3181 | // emitted that then need those vtables. |
3182 | assert(DeferredVTables.empty()); |
3183 | } |
3184 | |
3185 | // Emit CUDA/HIP static device variables referenced by host code only. |
3186 | // Note we should not clear CUDADeviceVarODRUsedByHost since it is still |
3187 | // needed for further handling. |
3188 | if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) |
3189 | llvm::append_range(C&: DeferredDeclsToEmit, |
3190 | R&: getContext().CUDADeviceVarODRUsedByHost); |
3191 | |
3192 | // Stop if we're out of both deferred vtables and deferred declarations. |
3193 | if (DeferredDeclsToEmit.empty()) |
3194 | return; |
3195 | |
3196 | // Grab the list of decls to emit. If EmitGlobalDefinition schedules more |
3197 | // work, it will not interfere with this. |
3198 | std::vector<GlobalDecl> CurDeclsToEmit; |
3199 | CurDeclsToEmit.swap(x&: DeferredDeclsToEmit); |
3200 | |
3201 | for (GlobalDecl &D : CurDeclsToEmit) { |
3202 | // We should call GetAddrOfGlobal with IsForDefinition set to true in order |
3203 | // to get GlobalValue with exactly the type we need, not something that |
3204 | // might had been created for another decl with the same mangled name but |
3205 | // different type. |
3206 | llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>( |
3207 | Val: GetAddrOfGlobal(GD: D, IsForDefinition: ForDefinition)); |
3208 | |
3209 | // In case of different address spaces, we may still get a cast, even with |
3210 | // IsForDefinition equal to true. Query mangled names table to get |
3211 | // GlobalValue. |
3212 | if (!GV) |
3213 | GV = GetGlobalValue(Name: getMangledName(GD: D)); |
3214 | |
3215 | // Make sure GetGlobalValue returned non-null. |
3216 | assert(GV); |
3217 | |
3218 | // Check to see if we've already emitted this. This is necessary |
3219 | // for a couple of reasons: first, decls can end up in the |
3220 | // deferred-decls queue multiple times, and second, decls can end |
3221 | // up with definitions in unusual ways (e.g. by an extern inline |
3222 | // function acquiring a strong function redefinition). Just |
3223 | // ignore these cases. |
3224 | if (!GV->isDeclaration()) |
3225 | continue; |
3226 | |
3227 | // If this is OpenMP, check if it is legal to emit this global normally. |
3228 | if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD: D)) |
3229 | continue; |
3230 | |
3231 | // Otherwise, emit the definition and move on to the next one. |
3232 | EmitGlobalDefinition(D, GV); |
3233 | |
3234 | // If we found out that we need to emit more decls, do that recursively. |
3235 | // This has the advantage that the decls are emitted in a DFS and related |
3236 | // ones are close together, which is convenient for testing. |
3237 | if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) { |
3238 | EmitDeferred(); |
3239 | assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty()); |
3240 | } |
3241 | } |
3242 | } |
3243 | |
3244 | void CodeGenModule::EmitVTablesOpportunistically() { |
3245 | // Try to emit external vtables as available_externally if they have emitted |
3246 | // all inlined virtual functions. It runs after EmitDeferred() and therefore |
3247 | // is not allowed to create new references to things that need to be emitted |
3248 | // lazily. Note that it also uses fact that we eagerly emitting RTTI. |
3249 | |
3250 | assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables()) |
3251 | && "Only emit opportunistic vtables with optimizations" ); |
3252 | |
3253 | for (const CXXRecordDecl *RD : OpportunisticVTables) { |
3254 | assert(getVTables().isVTableExternal(RD) && |
3255 | "This queue should only contain external vtables" ); |
3256 | if (getCXXABI().canSpeculativelyEmitVTable(RD)) |
3257 | VTables.GenerateClassData(RD); |
3258 | } |
3259 | OpportunisticVTables.clear(); |
3260 | } |
3261 | |
3262 | void CodeGenModule::EmitGlobalAnnotations() { |
3263 | for (const auto& [MangledName, VD] : DeferredAnnotations) { |
3264 | llvm::GlobalValue *GV = GetGlobalValue(Name: MangledName); |
3265 | if (GV) |
3266 | AddGlobalAnnotations(D: VD, GV); |
3267 | } |
3268 | DeferredAnnotations.clear(); |
3269 | |
3270 | if (Annotations.empty()) |
3271 | return; |
3272 | |
3273 | // Create a new global variable for the ConstantStruct in the Module. |
3274 | llvm::Constant *Array = llvm::ConstantArray::get(T: llvm::ArrayType::get( |
3275 | ElementType: Annotations[0]->getType(), NumElements: Annotations.size()), V: Annotations); |
3276 | auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false, |
3277 | llvm::GlobalValue::AppendingLinkage, |
3278 | Array, "llvm.global.annotations" ); |
3279 | gv->setSection(AnnotationSection); |
3280 | } |
3281 | |
3282 | llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) { |
3283 | llvm::Constant *&AStr = AnnotationStrings[Str]; |
3284 | if (AStr) |
3285 | return AStr; |
3286 | |
3287 | // Not found yet, create a new global. |
3288 | llvm::Constant *s = llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: Str); |
3289 | auto *gv = new llvm::GlobalVariable( |
3290 | getModule(), s->getType(), true, llvm::GlobalValue::PrivateLinkage, s, |
3291 | ".str" , nullptr, llvm::GlobalValue::NotThreadLocal, |
3292 | ConstGlobalsPtrTy->getAddressSpace()); |
3293 | gv->setSection(AnnotationSection); |
3294 | gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3295 | AStr = gv; |
3296 | return gv; |
3297 | } |
3298 | |
3299 | llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) { |
3300 | SourceManager &SM = getContext().getSourceManager(); |
3301 | PresumedLoc PLoc = SM.getPresumedLoc(Loc); |
3302 | if (PLoc.isValid()) |
3303 | return EmitAnnotationString(Str: PLoc.getFilename()); |
3304 | return EmitAnnotationString(Str: SM.getBufferName(Loc)); |
3305 | } |
3306 | |
3307 | llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) { |
3308 | SourceManager &SM = getContext().getSourceManager(); |
3309 | PresumedLoc PLoc = SM.getPresumedLoc(Loc: L); |
3310 | unsigned LineNo = PLoc.isValid() ? PLoc.getLine() : |
3311 | SM.getExpansionLineNumber(Loc: L); |
3312 | return llvm::ConstantInt::get(Ty: Int32Ty, V: LineNo); |
3313 | } |
3314 | |
3315 | llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) { |
3316 | ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()}; |
3317 | if (Exprs.empty()) |
3318 | return llvm::ConstantPointerNull::get(T: ConstGlobalsPtrTy); |
3319 | |
3320 | llvm::FoldingSetNodeID ID; |
3321 | for (Expr *E : Exprs) { |
3322 | ID.Add(x: cast<clang::ConstantExpr>(Val: E)->getAPValueResult()); |
3323 | } |
3324 | llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()]; |
3325 | if (Lookup) |
3326 | return Lookup; |
3327 | |
3328 | llvm::SmallVector<llvm::Constant *, 4> LLVMArgs; |
3329 | LLVMArgs.reserve(N: Exprs.size()); |
3330 | ConstantEmitter ConstEmiter(*this); |
3331 | llvm::transform(Range&: Exprs, d_first: std::back_inserter(x&: LLVMArgs), F: [&](const Expr *E) { |
3332 | const auto *CE = cast<clang::ConstantExpr>(Val: E); |
3333 | return ConstEmiter.emitAbstract(loc: CE->getBeginLoc(), value: CE->getAPValueResult(), |
3334 | T: CE->getType()); |
3335 | }); |
3336 | auto *Struct = llvm::ConstantStruct::getAnon(V: LLVMArgs); |
3337 | auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true, |
3338 | llvm::GlobalValue::PrivateLinkage, Struct, |
3339 | ".args" ); |
3340 | GV->setSection(AnnotationSection); |
3341 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3342 | |
3343 | Lookup = GV; |
3344 | return GV; |
3345 | } |
3346 | |
3347 | llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV, |
3348 | const AnnotateAttr *AA, |
3349 | SourceLocation L) { |
3350 | // Get the globals for file name, annotation, and the line number. |
3351 | llvm::Constant *AnnoGV = EmitAnnotationString(Str: AA->getAnnotation()), |
3352 | *UnitGV = EmitAnnotationUnit(Loc: L), |
3353 | *LineNoCst = EmitAnnotationLineNo(L), |
3354 | *Args = EmitAnnotationArgs(Attr: AA); |
3355 | |
3356 | llvm::Constant *GVInGlobalsAS = GV; |
3357 | if (GV->getAddressSpace() != |
3358 | getDataLayout().getDefaultGlobalsAddressSpace()) { |
3359 | GVInGlobalsAS = llvm::ConstantExpr::getAddrSpaceCast( |
3360 | C: GV, |
3361 | Ty: llvm::PointerType::get( |
3362 | C&: GV->getContext(), AddressSpace: getDataLayout().getDefaultGlobalsAddressSpace())); |
3363 | } |
3364 | |
3365 | // Create the ConstantStruct for the global annotation. |
3366 | llvm::Constant *Fields[] = { |
3367 | GVInGlobalsAS, AnnoGV, UnitGV, LineNoCst, Args, |
3368 | }; |
3369 | return llvm::ConstantStruct::getAnon(V: Fields); |
3370 | } |
3371 | |
3372 | void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D, |
3373 | llvm::GlobalValue *GV) { |
3374 | assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute" ); |
3375 | // Get the struct elements for these annotations. |
3376 | for (const auto *I : D->specific_attrs<AnnotateAttr>()) |
3377 | Annotations.push_back(x: EmitAnnotateAttr(GV, AA: I, L: D->getLocation())); |
3378 | } |
3379 | |
3380 | bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn, |
3381 | SourceLocation Loc) const { |
3382 | const auto &NoSanitizeL = getContext().getNoSanitizeList(); |
3383 | // NoSanitize by function name. |
3384 | if (NoSanitizeL.containsFunction(Mask: Kind, FunctionName: Fn->getName())) |
3385 | return true; |
3386 | // NoSanitize by location. Check "mainfile" prefix. |
3387 | auto &SM = Context.getSourceManager(); |
3388 | FileEntryRef MainFile = *SM.getFileEntryRefForID(FID: SM.getMainFileID()); |
3389 | if (NoSanitizeL.containsMainFile(Mask: Kind, FileName: MainFile.getName())) |
3390 | return true; |
3391 | |
3392 | // Check "src" prefix. |
3393 | if (Loc.isValid()) |
3394 | return NoSanitizeL.containsLocation(Mask: Kind, Loc); |
3395 | // If location is unknown, this may be a compiler-generated function. Assume |
3396 | // it's located in the main file. |
3397 | return NoSanitizeL.containsFile(Mask: Kind, FileName: MainFile.getName()); |
3398 | } |
3399 | |
3400 | bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, |
3401 | llvm::GlobalVariable *GV, |
3402 | SourceLocation Loc, QualType Ty, |
3403 | StringRef Category) const { |
3404 | const auto &NoSanitizeL = getContext().getNoSanitizeList(); |
3405 | if (NoSanitizeL.containsGlobal(Mask: Kind, GlobalName: GV->getName(), Category)) |
3406 | return true; |
3407 | auto &SM = Context.getSourceManager(); |
3408 | if (NoSanitizeL.containsMainFile( |
3409 | Mask: Kind, FileName: SM.getFileEntryRefForID(FID: SM.getMainFileID())->getName(), |
3410 | Category)) |
3411 | return true; |
3412 | if (NoSanitizeL.containsLocation(Mask: Kind, Loc, Category)) |
3413 | return true; |
3414 | |
3415 | // Check global type. |
3416 | if (!Ty.isNull()) { |
3417 | // Drill down the array types: if global variable of a fixed type is |
3418 | // not sanitized, we also don't instrument arrays of them. |
3419 | while (auto AT = dyn_cast<ArrayType>(Val: Ty.getTypePtr())) |
3420 | Ty = AT->getElementType(); |
3421 | Ty = Ty.getCanonicalType().getUnqualifiedType(); |
3422 | // Only record types (classes, structs etc.) are ignored. |
3423 | if (Ty->isRecordType()) { |
3424 | std::string TypeStr = Ty.getAsString(Policy: getContext().getPrintingPolicy()); |
3425 | if (NoSanitizeL.containsType(Mask: Kind, MangledTypeName: TypeStr, Category)) |
3426 | return true; |
3427 | } |
3428 | } |
3429 | return false; |
3430 | } |
3431 | |
3432 | bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc, |
3433 | StringRef Category) const { |
3434 | const auto &XRayFilter = getContext().getXRayFilter(); |
3435 | using ImbueAttr = XRayFunctionFilter::ImbueAttribute; |
3436 | auto Attr = ImbueAttr::NONE; |
3437 | if (Loc.isValid()) |
3438 | Attr = XRayFilter.shouldImbueLocation(Loc, Category); |
3439 | if (Attr == ImbueAttr::NONE) |
3440 | Attr = XRayFilter.shouldImbueFunction(FunctionName: Fn->getName()); |
3441 | switch (Attr) { |
3442 | case ImbueAttr::NONE: |
3443 | return false; |
3444 | case ImbueAttr::ALWAYS: |
3445 | Fn->addFnAttr(Kind: "function-instrument" , Val: "xray-always" ); |
3446 | break; |
3447 | case ImbueAttr::ALWAYS_ARG1: |
3448 | Fn->addFnAttr(Kind: "function-instrument" , Val: "xray-always" ); |
3449 | Fn->addFnAttr(Kind: "xray-log-args" , Val: "1" ); |
3450 | break; |
3451 | case ImbueAttr::NEVER: |
3452 | Fn->addFnAttr(Kind: "function-instrument" , Val: "xray-never" ); |
3453 | break; |
3454 | } |
3455 | return true; |
3456 | } |
3457 | |
3458 | ProfileList::ExclusionType |
3459 | CodeGenModule::isFunctionBlockedByProfileList(llvm::Function *Fn, |
3460 | SourceLocation Loc) const { |
3461 | const auto &ProfileList = getContext().getProfileList(); |
3462 | // If the profile list is empty, then instrument everything. |
3463 | if (ProfileList.isEmpty()) |
3464 | return ProfileList::Allow; |
3465 | CodeGenOptions::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr(); |
3466 | // First, check the function name. |
3467 | if (auto V = ProfileList.isFunctionExcluded(FunctionName: Fn->getName(), Kind)) |
3468 | return *V; |
3469 | // Next, check the source location. |
3470 | if (Loc.isValid()) |
3471 | if (auto V = ProfileList.isLocationExcluded(Loc, Kind)) |
3472 | return *V; |
3473 | // If location is unknown, this may be a compiler-generated function. Assume |
3474 | // it's located in the main file. |
3475 | auto &SM = Context.getSourceManager(); |
3476 | if (auto MainFile = SM.getFileEntryRefForID(FID: SM.getMainFileID())) |
3477 | if (auto V = ProfileList.isFileExcluded(FileName: MainFile->getName(), Kind)) |
3478 | return *V; |
3479 | return ProfileList.getDefault(Kind); |
3480 | } |
3481 | |
3482 | ProfileList::ExclusionType |
3483 | CodeGenModule::isFunctionBlockedFromProfileInstr(llvm::Function *Fn, |
3484 | SourceLocation Loc) const { |
3485 | auto V = isFunctionBlockedByProfileList(Fn, Loc); |
3486 | if (V != ProfileList::Allow) |
3487 | return V; |
3488 | |
3489 | auto NumGroups = getCodeGenOpts().ProfileTotalFunctionGroups; |
3490 | if (NumGroups > 1) { |
3491 | auto Group = llvm::crc32(Data: arrayRefFromStringRef(Input: Fn->getName())) % NumGroups; |
3492 | if (Group != getCodeGenOpts().ProfileSelectedFunctionGroup) |
3493 | return ProfileList::Skip; |
3494 | } |
3495 | return ProfileList::Allow; |
3496 | } |
3497 | |
3498 | bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) { |
3499 | // Never defer when EmitAllDecls is specified. |
3500 | if (LangOpts.EmitAllDecls) |
3501 | return true; |
3502 | |
3503 | const auto *VD = dyn_cast<VarDecl>(Val: Global); |
3504 | if (VD && |
3505 | ((CodeGenOpts.KeepPersistentStorageVariables && |
3506 | (VD->getStorageDuration() == SD_Static || |
3507 | VD->getStorageDuration() == SD_Thread)) || |
3508 | (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static && |
3509 | VD->getType().isConstQualified()))) |
3510 | return true; |
3511 | |
3512 | return getContext().DeclMustBeEmitted(D: Global); |
3513 | } |
3514 | |
3515 | bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { |
3516 | // In OpenMP 5.0 variables and function may be marked as |
3517 | // device_type(host/nohost) and we should not emit them eagerly unless we sure |
3518 | // that they must be emitted on the host/device. To be sure we need to have |
3519 | // seen a declare target with an explicit mentioning of the function, we know |
3520 | // we have if the level of the declare target attribute is -1. Note that we |
3521 | // check somewhere else if we should emit this at all. |
3522 | if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) { |
3523 | std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr = |
3524 | OMPDeclareTargetDeclAttr::getActiveAttr(VD: Global); |
3525 | if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1) |
3526 | return false; |
3527 | } |
3528 | |
3529 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: Global)) { |
3530 | if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) |
3531 | // Implicit template instantiations may change linkage if they are later |
3532 | // explicitly instantiated, so they should not be emitted eagerly. |
3533 | return false; |
3534 | // Defer until all versions have been semantically checked. |
3535 | if (FD->hasAttr<TargetVersionAttr>() && !FD->isMultiVersion()) |
3536 | return false; |
3537 | } |
3538 | if (const auto *VD = dyn_cast<VarDecl>(Val: Global)) { |
3539 | if (Context.getInlineVariableDefinitionKind(VD) == |
3540 | ASTContext::InlineVariableDefinitionKind::WeakUnknown) |
3541 | // A definition of an inline constexpr static data member may change |
3542 | // linkage later if it's redeclared outside the class. |
3543 | return false; |
3544 | if (CXX20ModuleInits && VD->getOwningModule() && |
3545 | !VD->getOwningModule()->isModuleMapModule()) { |
3546 | // For CXX20, module-owned initializers need to be deferred, since it is |
3547 | // not known at this point if they will be run for the current module or |
3548 | // as part of the initializer for an imported one. |
3549 | return false; |
3550 | } |
3551 | } |
3552 | // If OpenMP is enabled and threadprivates must be generated like TLS, delay |
3553 | // codegen for global variables, because they may be marked as threadprivate. |
3554 | if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS && |
3555 | getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Val: Global) && |
3556 | !Global->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: false, ExcludeDtor: false) && |
3557 | !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD: Global)) |
3558 | return false; |
3559 | |
3560 | return true; |
3561 | } |
3562 | |
3563 | ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) { |
3564 | StringRef Name = getMangledName(GD); |
3565 | |
3566 | // The UUID descriptor should be pointer aligned. |
3567 | CharUnits Alignment = CharUnits::fromQuantity(Quantity: PointerAlignInBytes); |
3568 | |
3569 | // Look for an existing global. |
3570 | if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name)) |
3571 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3572 | |
3573 | ConstantEmitter Emitter(*this); |
3574 | llvm::Constant *Init; |
3575 | |
3576 | APValue &V = GD->getAsAPValue(); |
3577 | if (!V.isAbsent()) { |
3578 | // If possible, emit the APValue version of the initializer. In particular, |
3579 | // this gets the type of the constant right. |
3580 | Init = Emitter.emitForInitializer( |
3581 | value: GD->getAsAPValue(), destAddrSpace: GD->getType().getAddressSpace(), destType: GD->getType()); |
3582 | } else { |
3583 | // As a fallback, directly construct the constant. |
3584 | // FIXME: This may get padding wrong under esoteric struct layout rules. |
3585 | // MSVC appears to create a complete type 'struct __s_GUID' that it |
3586 | // presumably uses to represent these constants. |
3587 | MSGuidDecl::Parts Parts = GD->getParts(); |
3588 | llvm::Constant *Fields[4] = { |
3589 | llvm::ConstantInt::get(Ty: Int32Ty, V: Parts.Part1), |
3590 | llvm::ConstantInt::get(Ty: Int16Ty, V: Parts.Part2), |
3591 | llvm::ConstantInt::get(Ty: Int16Ty, V: Parts.Part3), |
3592 | llvm::ConstantDataArray::getRaw( |
3593 | Data: StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), NumElements: 8, |
3594 | ElementTy: Int8Ty)}; |
3595 | Init = llvm::ConstantStruct::getAnon(V: Fields); |
3596 | } |
3597 | |
3598 | auto *GV = new llvm::GlobalVariable( |
3599 | getModule(), Init->getType(), |
3600 | /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name); |
3601 | if (supportsCOMDAT()) |
3602 | GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName())); |
3603 | setDSOLocal(GV); |
3604 | |
3605 | if (!V.isAbsent()) { |
3606 | Emitter.finalize(global: GV); |
3607 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3608 | } |
3609 | |
3610 | llvm::Type *Ty = getTypes().ConvertTypeForMem(T: GD->getType()); |
3611 | return ConstantAddress(GV, Ty, Alignment); |
3612 | } |
3613 | |
3614 | ConstantAddress CodeGenModule::GetAddrOfUnnamedGlobalConstantDecl( |
3615 | const UnnamedGlobalConstantDecl *GCD) { |
3616 | CharUnits Alignment = getContext().getTypeAlignInChars(T: GCD->getType()); |
3617 | |
3618 | llvm::GlobalVariable **Entry = nullptr; |
3619 | Entry = &UnnamedGlobalConstantDeclMap[GCD]; |
3620 | if (*Entry) |
3621 | return ConstantAddress(*Entry, (*Entry)->getValueType(), Alignment); |
3622 | |
3623 | ConstantEmitter Emitter(*this); |
3624 | llvm::Constant *Init; |
3625 | |
3626 | const APValue &V = GCD->getValue(); |
3627 | |
3628 | assert(!V.isAbsent()); |
3629 | Init = Emitter.emitForInitializer(value: V, destAddrSpace: GCD->getType().getAddressSpace(), |
3630 | destType: GCD->getType()); |
3631 | |
3632 | auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(), |
3633 | /*isConstant=*/true, |
3634 | llvm::GlobalValue::PrivateLinkage, Init, |
3635 | ".constant" ); |
3636 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3637 | GV->setAlignment(Alignment.getAsAlign()); |
3638 | |
3639 | Emitter.finalize(global: GV); |
3640 | |
3641 | *Entry = GV; |
3642 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3643 | } |
3644 | |
3645 | ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject( |
3646 | const TemplateParamObjectDecl *TPO) { |
3647 | StringRef Name = getMangledName(GD: TPO); |
3648 | CharUnits Alignment = getNaturalTypeAlignment(T: TPO->getType()); |
3649 | |
3650 | if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name)) |
3651 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3652 | |
3653 | ConstantEmitter Emitter(*this); |
3654 | llvm::Constant *Init = Emitter.emitForInitializer( |
3655 | value: TPO->getValue(), destAddrSpace: TPO->getType().getAddressSpace(), destType: TPO->getType()); |
3656 | |
3657 | if (!Init) { |
3658 | ErrorUnsupported(D: TPO, Type: "template parameter object" ); |
3659 | return ConstantAddress::invalid(); |
3660 | } |
3661 | |
3662 | llvm::GlobalValue::LinkageTypes Linkage = |
3663 | isExternallyVisible(L: TPO->getLinkageAndVisibility().getLinkage()) |
3664 | ? llvm::GlobalValue::LinkOnceODRLinkage |
3665 | : llvm::GlobalValue::InternalLinkage; |
3666 | auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(), |
3667 | /*isConstant=*/true, Linkage, Init, Name); |
3668 | setGVProperties(GV, D: TPO); |
3669 | if (supportsCOMDAT()) |
3670 | GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName())); |
3671 | Emitter.finalize(global: GV); |
3672 | |
3673 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3674 | } |
3675 | |
3676 | ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) { |
3677 | const AliasAttr *AA = VD->getAttr<AliasAttr>(); |
3678 | assert(AA && "No alias?" ); |
3679 | |
3680 | CharUnits Alignment = getContext().getDeclAlign(D: VD); |
3681 | llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: VD->getType()); |
3682 | |
3683 | // See if there is already something with the target's name in the module. |
3684 | llvm::GlobalValue *Entry = GetGlobalValue(Name: AA->getAliasee()); |
3685 | if (Entry) |
3686 | return ConstantAddress(Entry, DeclTy, Alignment); |
3687 | |
3688 | llvm::Constant *Aliasee; |
3689 | if (isa<llvm::FunctionType>(Val: DeclTy)) |
3690 | Aliasee = GetOrCreateLLVMFunction(MangledName: AA->getAliasee(), Ty: DeclTy, |
3691 | D: GlobalDecl(cast<FunctionDecl>(Val: VD)), |
3692 | /*ForVTable=*/false); |
3693 | else |
3694 | Aliasee = GetOrCreateLLVMGlobal(MangledName: AA->getAliasee(), Ty: DeclTy, AddrSpace: LangAS::Default, |
3695 | D: nullptr); |
3696 | |
3697 | auto *F = cast<llvm::GlobalValue>(Val: Aliasee); |
3698 | F->setLinkage(llvm::Function::ExternalWeakLinkage); |
3699 | WeakRefReferences.insert(Ptr: F); |
3700 | |
3701 | return ConstantAddress(Aliasee, DeclTy, Alignment); |
3702 | } |
3703 | |
3704 | template <typename AttrT> static bool hasImplicitAttr(const ValueDecl *D) { |
3705 | if (!D) |
3706 | return false; |
3707 | if (auto *A = D->getAttr<AttrT>()) |
3708 | return A->isImplicit(); |
3709 | return D->isImplicit(); |
3710 | } |
3711 | |
3712 | bool CodeGenModule::shouldEmitCUDAGlobalVar(const VarDecl *Global) const { |
3713 | assert(LangOpts.CUDA && "Should not be called by non-CUDA languages" ); |
3714 | // We need to emit host-side 'shadows' for all global |
3715 | // device-side variables because the CUDA runtime needs their |
3716 | // size and host-side address in order to provide access to |
3717 | // their device-side incarnations. |
3718 | return !LangOpts.CUDAIsDevice || Global->hasAttr<CUDADeviceAttr>() || |
3719 | Global->hasAttr<CUDAConstantAttr>() || |
3720 | Global->hasAttr<CUDASharedAttr>() || |
3721 | Global->getType()->isCUDADeviceBuiltinSurfaceType() || |
3722 | Global->getType()->isCUDADeviceBuiltinTextureType(); |
3723 | } |
3724 | |
3725 | void CodeGenModule::EmitGlobal(GlobalDecl GD) { |
3726 | const auto *Global = cast<ValueDecl>(Val: GD.getDecl()); |
3727 | |
3728 | // Weak references don't produce any output by themselves. |
3729 | if (Global->hasAttr<WeakRefAttr>()) |
3730 | return; |
3731 | |
3732 | // If this is an alias definition (which otherwise looks like a declaration) |
3733 | // emit it now. |
3734 | if (Global->hasAttr<AliasAttr>()) |
3735 | return EmitAliasDefinition(GD); |
3736 | |
3737 | // IFunc like an alias whose value is resolved at runtime by calling resolver. |
3738 | if (Global->hasAttr<IFuncAttr>()) |
3739 | return emitIFuncDefinition(GD); |
3740 | |
3741 | // If this is a cpu_dispatch multiversion function, emit the resolver. |
3742 | if (Global->hasAttr<CPUDispatchAttr>()) |
3743 | return emitCPUDispatchDefinition(GD); |
3744 | |
3745 | // If this is CUDA, be selective about which declarations we emit. |
3746 | // Non-constexpr non-lambda implicit host device functions are not emitted |
3747 | // unless they are used on device side. |
3748 | if (LangOpts.CUDA) { |
3749 | assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) && |
3750 | "Expected Variable or Function" ); |
3751 | if (const auto *VD = dyn_cast<VarDecl>(Val: Global)) { |
3752 | if (!shouldEmitCUDAGlobalVar(Global: VD)) |
3753 | return; |
3754 | } else if (LangOpts.CUDAIsDevice) { |
3755 | const auto *FD = dyn_cast<FunctionDecl>(Val: Global); |
3756 | if ((!Global->hasAttr<CUDADeviceAttr>() || |
3757 | (LangOpts.OffloadImplicitHostDeviceTemplates && |
3758 | hasImplicitAttr<CUDAHostAttr>(D: FD) && |
3759 | hasImplicitAttr<CUDADeviceAttr>(D: FD) && !FD->isConstexpr() && |
3760 | !isLambdaCallOperator(DC: FD) && |
3761 | !getContext().CUDAImplicitHostDeviceFunUsedByDevice.count(V: FD))) && |
3762 | !Global->hasAttr<CUDAGlobalAttr>() && |
3763 | !(LangOpts.HIPStdPar && isa<FunctionDecl>(Val: Global) && |
3764 | !Global->hasAttr<CUDAHostAttr>())) |
3765 | return; |
3766 | // Device-only functions are the only things we skip. |
3767 | } else if (!Global->hasAttr<CUDAHostAttr>() && |
3768 | Global->hasAttr<CUDADeviceAttr>()) |
3769 | return; |
3770 | } |
3771 | |
3772 | if (LangOpts.OpenMP) { |
3773 | // If this is OpenMP, check if it is legal to emit this global normally. |
3774 | if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD)) |
3775 | return; |
3776 | if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Val: Global)) { |
3777 | if (MustBeEmitted(Global)) |
3778 | EmitOMPDeclareReduction(D: DRD); |
3779 | return; |
3780 | } |
3781 | if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Val: Global)) { |
3782 | if (MustBeEmitted(Global)) |
3783 | EmitOMPDeclareMapper(D: DMD); |
3784 | return; |
3785 | } |
3786 | } |
3787 | |
3788 | // Ignore declarations, they will be emitted on their first use. |
3789 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: Global)) { |
3790 | // Update deferred annotations with the latest declaration if the function |
3791 | // function was already used or defined. |
3792 | if (FD->hasAttr<AnnotateAttr>()) { |
3793 | StringRef MangledName = getMangledName(GD); |
3794 | if (GetGlobalValue(Name: MangledName)) |
3795 | DeferredAnnotations[MangledName] = FD; |
3796 | } |
3797 | |
3798 | // Forward declarations are emitted lazily on first use. |
3799 | if (!FD->doesThisDeclarationHaveABody()) { |
3800 | if (!FD->doesDeclarationForceExternallyVisibleDefinition() && |
3801 | (!FD->isMultiVersion() || !getTarget().getTriple().isAArch64())) |
3802 | return; |
3803 | |
3804 | StringRef MangledName = getMangledName(GD); |
3805 | |
3806 | // Compute the function info and LLVM type. |
3807 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
3808 | llvm::Type *Ty = getTypes().GetFunctionType(Info: FI); |
3809 | |
3810 | GetOrCreateLLVMFunction(MangledName, Ty, D: GD, /*ForVTable=*/false, |
3811 | /*DontDefer=*/false); |
3812 | return; |
3813 | } |
3814 | } else { |
3815 | const auto *VD = cast<VarDecl>(Val: Global); |
3816 | assert(VD->isFileVarDecl() && "Cannot emit local var decl as global." ); |
3817 | if (VD->isThisDeclarationADefinition() != VarDecl::Definition && |
3818 | !Context.isMSStaticDataMemberInlineDefinition(VD)) { |
3819 | if (LangOpts.OpenMP) { |
3820 | // Emit declaration of the must-be-emitted declare target variable. |
3821 | if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res = |
3822 | OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) { |
3823 | |
3824 | // If this variable has external storage and doesn't require special |
3825 | // link handling we defer to its canonical definition. |
3826 | if (VD->hasExternalStorage() && |
3827 | Res != OMPDeclareTargetDeclAttr::MT_Link) |
3828 | return; |
3829 | |
3830 | bool UnifiedMemoryEnabled = |
3831 | getOpenMPRuntime().hasRequiresUnifiedSharedMemory(); |
3832 | if ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
3833 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
3834 | !UnifiedMemoryEnabled) { |
3835 | (void)GetAddrOfGlobalVar(D: VD); |
3836 | } else { |
3837 | assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) || |
3838 | ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
3839 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
3840 | UnifiedMemoryEnabled)) && |
3841 | "Link clause or to clause with unified memory expected." ); |
3842 | (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); |
3843 | } |
3844 | |
3845 | return; |
3846 | } |
3847 | } |
3848 | // If this declaration may have caused an inline variable definition to |
3849 | // change linkage, make sure that it's emitted. |
3850 | if (Context.getInlineVariableDefinitionKind(VD) == |
3851 | ASTContext::InlineVariableDefinitionKind::Strong) |
3852 | GetAddrOfGlobalVar(D: VD); |
3853 | return; |
3854 | } |
3855 | } |
3856 | |
3857 | // Defer code generation to first use when possible, e.g. if this is an inline |
3858 | // function. If the global must always be emitted, do it eagerly if possible |
3859 | // to benefit from cache locality. |
3860 | if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) { |
3861 | // Emit the definition if it can't be deferred. |
3862 | EmitGlobalDefinition(D: GD); |
3863 | addEmittedDeferredDecl(GD); |
3864 | return; |
3865 | } |
3866 | |
3867 | // If we're deferring emission of a C++ variable with an |
3868 | // initializer, remember the order in which it appeared in the file. |
3869 | if (getLangOpts().CPlusPlus && isa<VarDecl>(Val: Global) && |
3870 | cast<VarDecl>(Val: Global)->hasInit()) { |
3871 | DelayedCXXInitPosition[Global] = CXXGlobalInits.size(); |
3872 | CXXGlobalInits.push_back(x: nullptr); |
3873 | } |
3874 | |
3875 | StringRef MangledName = getMangledName(GD); |
3876 | if (GetGlobalValue(Name: MangledName) != nullptr) { |
3877 | // The value has already been used and should therefore be emitted. |
3878 | addDeferredDeclToEmit(GD); |
3879 | } else if (MustBeEmitted(Global)) { |
3880 | // The value must be emitted, but cannot be emitted eagerly. |
3881 | assert(!MayBeEmittedEagerly(Global)); |
3882 | addDeferredDeclToEmit(GD); |
3883 | } else { |
3884 | // Otherwise, remember that we saw a deferred decl with this name. The |
3885 | // first use of the mangled name will cause it to move into |
3886 | // DeferredDeclsToEmit. |
3887 | DeferredDecls[MangledName] = GD; |
3888 | } |
3889 | } |
3890 | |
3891 | // Check if T is a class type with a destructor that's not dllimport. |
3892 | static bool HasNonDllImportDtor(QualType T) { |
3893 | if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>()) |
3894 | if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Val: RT->getDecl())) |
3895 | if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>()) |
3896 | return true; |
3897 | |
3898 | return false; |
3899 | } |
3900 | |
3901 | namespace { |
3902 | struct FunctionIsDirectlyRecursive |
3903 | : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> { |
3904 | const StringRef Name; |
3905 | const Builtin::Context &BI; |
3906 | FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) |
3907 | : Name(N), BI(C) {} |
3908 | |
3909 | bool VisitCallExpr(const CallExpr *E) { |
3910 | const FunctionDecl *FD = E->getDirectCallee(); |
3911 | if (!FD) |
3912 | return false; |
3913 | AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>(); |
3914 | if (Attr && Name == Attr->getLabel()) |
3915 | return true; |
3916 | unsigned BuiltinID = FD->getBuiltinID(); |
3917 | if (!BuiltinID || !BI.isLibFunction(ID: BuiltinID)) |
3918 | return false; |
3919 | StringRef BuiltinName = BI.getName(ID: BuiltinID); |
3920 | if (BuiltinName.starts_with(Prefix: "__builtin_" ) && |
3921 | Name == BuiltinName.slice(Start: strlen(s: "__builtin_" ), End: StringRef::npos)) { |
3922 | return true; |
3923 | } |
3924 | return false; |
3925 | } |
3926 | |
3927 | bool VisitStmt(const Stmt *S) { |
3928 | for (const Stmt *Child : S->children()) |
3929 | if (Child && this->Visit(S: Child)) |
3930 | return true; |
3931 | return false; |
3932 | } |
3933 | }; |
3934 | |
3935 | // Make sure we're not referencing non-imported vars or functions. |
3936 | struct DLLImportFunctionVisitor |
3937 | : public RecursiveASTVisitor<DLLImportFunctionVisitor> { |
3938 | bool SafeToInline = true; |
3939 | |
3940 | bool shouldVisitImplicitCode() const { return true; } |
3941 | |
3942 | bool VisitVarDecl(VarDecl *VD) { |
3943 | if (VD->getTLSKind()) { |
3944 | // A thread-local variable cannot be imported. |
3945 | SafeToInline = false; |
3946 | return SafeToInline; |
3947 | } |
3948 | |
3949 | // A variable definition might imply a destructor call. |
3950 | if (VD->isThisDeclarationADefinition()) |
3951 | SafeToInline = !HasNonDllImportDtor(T: VD->getType()); |
3952 | |
3953 | return SafeToInline; |
3954 | } |
3955 | |
3956 | bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { |
3957 | if (const auto *D = E->getTemporary()->getDestructor()) |
3958 | SafeToInline = D->hasAttr<DLLImportAttr>(); |
3959 | return SafeToInline; |
3960 | } |
3961 | |
3962 | bool VisitDeclRefExpr(DeclRefExpr *E) { |
3963 | ValueDecl *VD = E->getDecl(); |
3964 | if (isa<FunctionDecl>(Val: VD)) |
3965 | SafeToInline = VD->hasAttr<DLLImportAttr>(); |
3966 | else if (VarDecl *V = dyn_cast<VarDecl>(Val: VD)) |
3967 | SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>(); |
3968 | return SafeToInline; |
3969 | } |
3970 | |
3971 | bool VisitCXXConstructExpr(CXXConstructExpr *E) { |
3972 | SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>(); |
3973 | return SafeToInline; |
3974 | } |
3975 | |
3976 | bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) { |
3977 | CXXMethodDecl *M = E->getMethodDecl(); |
3978 | if (!M) { |
3979 | // Call through a pointer to member function. This is safe to inline. |
3980 | SafeToInline = true; |
3981 | } else { |
3982 | SafeToInline = M->hasAttr<DLLImportAttr>(); |
3983 | } |
3984 | return SafeToInline; |
3985 | } |
3986 | |
3987 | bool VisitCXXDeleteExpr(CXXDeleteExpr *E) { |
3988 | SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>(); |
3989 | return SafeToInline; |
3990 | } |
3991 | |
3992 | bool VisitCXXNewExpr(CXXNewExpr *E) { |
3993 | SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>(); |
3994 | return SafeToInline; |
3995 | } |
3996 | }; |
3997 | } |
3998 | |
3999 | // isTriviallyRecursive - Check if this function calls another |
4000 | // decl that, because of the asm attribute or the other decl being a builtin, |
4001 | // ends up pointing to itself. |
4002 | bool |
4003 | CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) { |
4004 | StringRef Name; |
4005 | if (getCXXABI().getMangleContext().shouldMangleDeclName(D: FD)) { |
4006 | // asm labels are a special kind of mangling we have to support. |
4007 | AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>(); |
4008 | if (!Attr) |
4009 | return false; |
4010 | Name = Attr->getLabel(); |
4011 | } else { |
4012 | Name = FD->getName(); |
4013 | } |
4014 | |
4015 | FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo); |
4016 | const Stmt *Body = FD->getBody(); |
4017 | return Body ? Walker.Visit(S: Body) : false; |
4018 | } |
4019 | |
4020 | bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) { |
4021 | if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage) |
4022 | return true; |
4023 | |
4024 | const auto *F = cast<FunctionDecl>(Val: GD.getDecl()); |
4025 | if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>()) |
4026 | return false; |
4027 | |
4028 | // We don't import function bodies from other named module units since that |
4029 | // behavior may break ABI compatibility of the current unit. |
4030 | if (const Module *M = F->getOwningModule(); |
4031 | M && M->getTopLevelModule()->isNamedModule() && |
4032 | getContext().getCurrentNamedModule() != M->getTopLevelModule()) { |
4033 | // There are practices to mark template member function as always-inline |
4034 | // and mark the template as extern explicit instantiation but not give |
4035 | // the definition for member function. So we have to emit the function |
4036 | // from explicitly instantiation with always-inline. |
4037 | // |
4038 | // See https://github.com/llvm/llvm-project/issues/86893 for details. |
4039 | // |
4040 | // TODO: Maybe it is better to give it a warning if we call a non-inline |
4041 | // function from other module units which is marked as always-inline. |
4042 | if (!F->isTemplateInstantiation() || !F->hasAttr<AlwaysInlineAttr>()) { |
4043 | return false; |
4044 | } |
4045 | } |
4046 | |
4047 | if (F->hasAttr<NoInlineAttr>()) |
4048 | return false; |
4049 | |
4050 | if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) { |
4051 | // Check whether it would be safe to inline this dllimport function. |
4052 | DLLImportFunctionVisitor Visitor; |
4053 | Visitor.TraverseFunctionDecl(D: const_cast<FunctionDecl*>(F)); |
4054 | if (!Visitor.SafeToInline) |
4055 | return false; |
4056 | |
4057 | if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(Val: F)) { |
4058 | // Implicit destructor invocations aren't captured in the AST, so the |
4059 | // check above can't see them. Check for them manually here. |
4060 | for (const Decl *Member : Dtor->getParent()->decls()) |
4061 | if (isa<FieldDecl>(Val: Member)) |
4062 | if (HasNonDllImportDtor(T: cast<FieldDecl>(Val: Member)->getType())) |
4063 | return false; |
4064 | for (const CXXBaseSpecifier &B : Dtor->getParent()->bases()) |
4065 | if (HasNonDllImportDtor(T: B.getType())) |
4066 | return false; |
4067 | } |
4068 | } |
4069 | |
4070 | // Inline builtins declaration must be emitted. They often are fortified |
4071 | // functions. |
4072 | if (F->isInlineBuiltinDeclaration()) |
4073 | return true; |
4074 | |
4075 | // PR9614. Avoid cases where the source code is lying to us. An available |
4076 | // externally function should have an equivalent function somewhere else, |
4077 | // but a function that calls itself through asm label/`__builtin_` trickery is |
4078 | // clearly not equivalent to the real implementation. |
4079 | // This happens in glibc's btowc and in some configure checks. |
4080 | return !isTriviallyRecursive(FD: F); |
4081 | } |
4082 | |
4083 | bool CodeGenModule::shouldOpportunisticallyEmitVTables() { |
4084 | return CodeGenOpts.OptimizationLevel > 0; |
4085 | } |
4086 | |
4087 | void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD, |
4088 | llvm::GlobalValue *GV) { |
4089 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4090 | |
4091 | if (FD->isCPUSpecificMultiVersion()) { |
4092 | auto *Spec = FD->getAttr<CPUSpecificAttr>(); |
4093 | for (unsigned I = 0; I < Spec->cpus_size(); ++I) |
4094 | EmitGlobalFunctionDefinition(GD: GD.getWithMultiVersionIndex(Index: I), GV: nullptr); |
4095 | } else if (auto *TC = FD->getAttr<TargetClonesAttr>()) { |
4096 | for (unsigned I = 0; I < TC->featuresStrs_size(); ++I) |
4097 | // AArch64 favors the default target version over the clone if any. |
4098 | if ((!TC->isDefaultVersion(Index: I) || !getTarget().getTriple().isAArch64()) && |
4099 | TC->isFirstOfVersion(Index: I)) |
4100 | EmitGlobalFunctionDefinition(GD: GD.getWithMultiVersionIndex(Index: I), GV: nullptr); |
4101 | // Ensure that the resolver function is also emitted. |
4102 | GetOrCreateMultiVersionResolver(GD); |
4103 | } else |
4104 | EmitGlobalFunctionDefinition(GD, GV); |
4105 | |
4106 | // Defer the resolver emission until we can reason whether the TU |
4107 | // contains a default target version implementation. |
4108 | if (FD->isTargetVersionMultiVersion()) |
4109 | AddDeferredMultiVersionResolverToEmit(GD); |
4110 | } |
4111 | |
4112 | void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) { |
4113 | const auto *D = cast<ValueDecl>(Val: GD.getDecl()); |
4114 | |
4115 | PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(), |
4116 | Context.getSourceManager(), |
4117 | "Generating code for declaration" ); |
4118 | |
4119 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) { |
4120 | // At -O0, don't generate IR for functions with available_externally |
4121 | // linkage. |
4122 | if (!shouldEmitFunction(GD)) |
4123 | return; |
4124 | |
4125 | llvm::TimeTraceScope TimeScope("CodeGen Function" , [&]() { |
4126 | std::string Name; |
4127 | llvm::raw_string_ostream OS(Name); |
4128 | FD->getNameForDiagnostic(OS, Policy: getContext().getPrintingPolicy(), |
4129 | /*Qualified=*/true); |
4130 | return Name; |
4131 | }); |
4132 | |
4133 | if (const auto *Method = dyn_cast<CXXMethodDecl>(Val: D)) { |
4134 | // Make sure to emit the definition(s) before we emit the thunks. |
4135 | // This is necessary for the generation of certain thunks. |
4136 | if (isa<CXXConstructorDecl>(Val: Method) || isa<CXXDestructorDecl>(Val: Method)) |
4137 | ABI->emitCXXStructor(GD); |
4138 | else if (FD->isMultiVersion()) |
4139 | EmitMultiVersionFunctionDefinition(GD, GV); |
4140 | else |
4141 | EmitGlobalFunctionDefinition(GD, GV); |
4142 | |
4143 | if (Method->isVirtual()) |
4144 | getVTables().EmitThunks(GD); |
4145 | |
4146 | return; |
4147 | } |
4148 | |
4149 | if (FD->isMultiVersion()) |
4150 | return EmitMultiVersionFunctionDefinition(GD, GV); |
4151 | return EmitGlobalFunctionDefinition(GD, GV); |
4152 | } |
4153 | |
4154 | if (const auto *VD = dyn_cast<VarDecl>(Val: D)) |
4155 | return EmitGlobalVarDefinition(D: VD, IsTentative: !VD->hasDefinition()); |
4156 | |
4157 | llvm_unreachable("Invalid argument to EmitGlobalDefinition()" ); |
4158 | } |
4159 | |
4160 | static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old, |
4161 | llvm::Function *NewFn); |
4162 | |
4163 | static unsigned |
4164 | TargetMVPriority(const TargetInfo &TI, |
4165 | const CodeGenFunction::MultiVersionResolverOption &RO) { |
4166 | unsigned Priority = 0; |
4167 | unsigned NumFeatures = 0; |
4168 | for (StringRef Feat : RO.Conditions.Features) { |
4169 | Priority = std::max(a: Priority, b: TI.multiVersionSortPriority(Name: Feat)); |
4170 | NumFeatures++; |
4171 | } |
4172 | |
4173 | if (!RO.Conditions.Architecture.empty()) |
4174 | Priority = std::max( |
4175 | a: Priority, b: TI.multiVersionSortPriority(Name: RO.Conditions.Architecture)); |
4176 | |
4177 | Priority += TI.multiVersionFeatureCost() * NumFeatures; |
4178 | |
4179 | return Priority; |
4180 | } |
4181 | |
4182 | // Multiversion functions should be at most 'WeakODRLinkage' so that a different |
4183 | // TU can forward declare the function without causing problems. Particularly |
4184 | // in the cases of CPUDispatch, this causes issues. This also makes sure we |
4185 | // work with internal linkage functions, so that the same function name can be |
4186 | // used with internal linkage in multiple TUs. |
4187 | llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM, |
4188 | GlobalDecl GD) { |
4189 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4190 | if (FD->getFormalLinkage() == Linkage::Internal) |
4191 | return llvm::GlobalValue::InternalLinkage; |
4192 | return llvm::GlobalValue::WeakODRLinkage; |
4193 | } |
4194 | |
4195 | void CodeGenModule::emitMultiVersionFunctions() { |
4196 | std::vector<GlobalDecl> MVFuncsToEmit; |
4197 | MultiVersionFuncs.swap(x&: MVFuncsToEmit); |
4198 | for (GlobalDecl GD : MVFuncsToEmit) { |
4199 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4200 | assert(FD && "Expected a FunctionDecl" ); |
4201 | |
4202 | auto createFunction = [&](const FunctionDecl *Decl, unsigned MVIdx = 0) { |
4203 | GlobalDecl CurGD{Decl->isDefined() ? Decl->getDefinition() : Decl, MVIdx}; |
4204 | StringRef MangledName = getMangledName(GD: CurGD); |
4205 | llvm::Constant *Func = GetGlobalValue(Name: MangledName); |
4206 | if (!Func) { |
4207 | if (Decl->isDefined()) { |
4208 | EmitGlobalFunctionDefinition(GD: CurGD, GV: nullptr); |
4209 | Func = GetGlobalValue(Name: MangledName); |
4210 | } else { |
4211 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD: CurGD); |
4212 | llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI); |
4213 | Func = GetAddrOfFunction(GD: CurGD, Ty, /*ForVTable=*/false, |
4214 | /*DontDefer=*/false, IsForDefinition: ForDefinition); |
4215 | } |
4216 | assert(Func && "This should have just been created" ); |
4217 | } |
4218 | return cast<llvm::Function>(Val: Func); |
4219 | }; |
4220 | |
4221 | // For AArch64, a resolver is only emitted if a function marked with |
4222 | // target_version("default")) or target_clones() is present and defined |
4223 | // in this TU. For other architectures it is always emitted. |
4224 | bool ShouldEmitResolver = !getTarget().getTriple().isAArch64(); |
4225 | SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options; |
4226 | |
4227 | getContext().forEachMultiversionedFunctionVersion( |
4228 | FD, Pred: [&](const FunctionDecl *CurFD) { |
4229 | llvm::SmallVector<StringRef, 8> Feats; |
4230 | bool IsDefined = CurFD->doesThisDeclarationHaveABody(); |
4231 | |
4232 | if (const auto *TA = CurFD->getAttr<TargetAttr>()) { |
4233 | TA->getAddedFeatures(Out&: Feats); |
4234 | llvm::Function *Func = createFunction(CurFD); |
4235 | Options.emplace_back(Args&: Func, Args: TA->getArchitecture(), Args&: Feats); |
4236 | } else if (const auto *TVA = CurFD->getAttr<TargetVersionAttr>()) { |
4237 | if (TVA->isDefaultVersion() && IsDefined) |
4238 | ShouldEmitResolver = true; |
4239 | TVA->getFeatures(Out&: Feats); |
4240 | llvm::Function *Func = createFunction(CurFD); |
4241 | Options.emplace_back(Args&: Func, /*Architecture*/ Args: "" , Args&: Feats); |
4242 | } else if (const auto *TC = CurFD->getAttr<TargetClonesAttr>()) { |
4243 | if (IsDefined) |
4244 | ShouldEmitResolver = true; |
4245 | for (unsigned I = 0; I < TC->featuresStrs_size(); ++I) { |
4246 | if (!TC->isFirstOfVersion(Index: I)) |
4247 | continue; |
4248 | |
4249 | llvm::Function *Func = createFunction(CurFD, I); |
4250 | StringRef Architecture; |
4251 | Feats.clear(); |
4252 | if (getTarget().getTriple().isAArch64()) |
4253 | TC->getFeatures(Out&: Feats, Index: I); |
4254 | else { |
4255 | StringRef Version = TC->getFeatureStr(Index: I); |
4256 | if (Version.starts_with(Prefix: "arch=" )) |
4257 | Architecture = Version.drop_front(N: sizeof("arch=" ) - 1); |
4258 | else if (Version != "default" ) |
4259 | Feats.push_back(Elt: Version); |
4260 | } |
4261 | Options.emplace_back(Args&: Func, Args&: Architecture, Args&: Feats); |
4262 | } |
4263 | } else |
4264 | llvm_unreachable("unexpected MultiVersionKind" ); |
4265 | }); |
4266 | |
4267 | if (!ShouldEmitResolver) |
4268 | continue; |
4269 | |
4270 | llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD); |
4271 | if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: ResolverConstant)) { |
4272 | ResolverConstant = IFunc->getResolver(); |
4273 | if (FD->isTargetClonesMultiVersion() && |
4274 | !getTarget().getTriple().isAArch64()) { |
4275 | std::string MangledName = getMangledNameImpl( |
4276 | CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
4277 | if (!GetGlobalValue(Name: MangledName + ".ifunc" )) { |
4278 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
4279 | llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI); |
4280 | // In prior versions of Clang, the mangling for ifuncs incorrectly |
4281 | // included an .ifunc suffix. This alias is generated for backward |
4282 | // compatibility. It is deprecated, and may be removed in the future. |
4283 | auto *Alias = llvm::GlobalAlias::create( |
4284 | Ty: DeclTy, AddressSpace: 0, Linkage: getMultiversionLinkage(CGM&: *this, GD), |
4285 | Name: MangledName + ".ifunc" , Aliasee: IFunc, Parent: &getModule()); |
4286 | SetCommonAttributes(GD: FD, GV: Alias); |
4287 | } |
4288 | } |
4289 | } |
4290 | llvm::Function *ResolverFunc = cast<llvm::Function>(Val: ResolverConstant); |
4291 | |
4292 | ResolverFunc->setLinkage(getMultiversionLinkage(CGM&: *this, GD)); |
4293 | |
4294 | if (!ResolverFunc->hasLocalLinkage() && supportsCOMDAT()) |
4295 | ResolverFunc->setComdat( |
4296 | getModule().getOrInsertComdat(Name: ResolverFunc->getName())); |
4297 | |
4298 | const TargetInfo &TI = getTarget(); |
4299 | llvm::stable_sort( |
4300 | Range&: Options, C: [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS, |
4301 | const CodeGenFunction::MultiVersionResolverOption &RHS) { |
4302 | return TargetMVPriority(TI, RO: LHS) > TargetMVPriority(TI, RO: RHS); |
4303 | }); |
4304 | CodeGenFunction CGF(*this); |
4305 | CGF.EmitMultiVersionResolver(Resolver: ResolverFunc, Options); |
4306 | } |
4307 | |
4308 | // Ensure that any additions to the deferred decls list caused by emitting a |
4309 | // variant are emitted. This can happen when the variant itself is inline and |
4310 | // calls a function without linkage. |
4311 | if (!MVFuncsToEmit.empty()) |
4312 | EmitDeferred(); |
4313 | |
4314 | // Ensure that any additions to the multiversion funcs list from either the |
4315 | // deferred decls or the multiversion functions themselves are emitted. |
4316 | if (!MultiVersionFuncs.empty()) |
4317 | emitMultiVersionFunctions(); |
4318 | } |
4319 | |
4320 | static void replaceDeclarationWith(llvm::GlobalValue *Old, |
4321 | llvm::Constant *New) { |
4322 | assert(cast<llvm::Function>(Old)->isDeclaration() && "Not a declaration" ); |
4323 | New->takeName(V: Old); |
4324 | Old->replaceAllUsesWith(V: New); |
4325 | Old->eraseFromParent(); |
4326 | } |
4327 | |
4328 | void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) { |
4329 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4330 | assert(FD && "Not a FunctionDecl?" ); |
4331 | assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?" ); |
4332 | const auto *DD = FD->getAttr<CPUDispatchAttr>(); |
4333 | assert(DD && "Not a cpu_dispatch Function?" ); |
4334 | |
4335 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
4336 | llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI); |
4337 | |
4338 | StringRef ResolverName = getMangledName(GD); |
4339 | UpdateMultiVersionNames(GD, FD, CurName&: ResolverName); |
4340 | |
4341 | llvm::Type *ResolverType; |
4342 | GlobalDecl ResolverGD; |
4343 | if (getTarget().supportsIFunc()) { |
4344 | ResolverType = llvm::FunctionType::get( |
4345 | Result: llvm::PointerType::get(ElementType: DeclTy, |
4346 | AddressSpace: getTypes().getTargetAddressSpace(T: FD->getType())), |
4347 | isVarArg: false); |
4348 | } |
4349 | else { |
4350 | ResolverType = DeclTy; |
4351 | ResolverGD = GD; |
4352 | } |
4353 | |
4354 | auto *ResolverFunc = cast<llvm::Function>(Val: GetOrCreateLLVMFunction( |
4355 | MangledName: ResolverName, Ty: ResolverType, D: ResolverGD, /*ForVTable=*/false)); |
4356 | ResolverFunc->setLinkage(getMultiversionLinkage(CGM&: *this, GD)); |
4357 | if (supportsCOMDAT()) |
4358 | ResolverFunc->setComdat( |
4359 | getModule().getOrInsertComdat(Name: ResolverFunc->getName())); |
4360 | |
4361 | SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options; |
4362 | const TargetInfo &Target = getTarget(); |
4363 | unsigned Index = 0; |
4364 | for (const IdentifierInfo *II : DD->cpus()) { |
4365 | // Get the name of the target function so we can look it up/create it. |
4366 | std::string MangledName = getMangledNameImpl(CGM&: *this, GD, ND: FD, OmitMultiVersionMangling: true) + |
4367 | getCPUSpecificMangling(CGM: *this, Name: II->getName()); |
4368 | |
4369 | llvm::Constant *Func = GetGlobalValue(Name: MangledName); |
4370 | |
4371 | if (!Func) { |
4372 | GlobalDecl ExistingDecl = Manglings.lookup(Key: MangledName); |
4373 | if (ExistingDecl.getDecl() && |
4374 | ExistingDecl.getDecl()->getAsFunction()->isDefined()) { |
4375 | EmitGlobalFunctionDefinition(GD: ExistingDecl, GV: nullptr); |
4376 | Func = GetGlobalValue(Name: MangledName); |
4377 | } else { |
4378 | if (!ExistingDecl.getDecl()) |
4379 | ExistingDecl = GD.getWithMultiVersionIndex(Index); |
4380 | |
4381 | Func = GetOrCreateLLVMFunction( |
4382 | MangledName, Ty: DeclTy, D: ExistingDecl, |
4383 | /*ForVTable=*/false, /*DontDefer=*/true, |
4384 | /*IsThunk=*/false, ExtraAttrs: llvm::AttributeList(), IsForDefinition: ForDefinition); |
4385 | } |
4386 | } |
4387 | |
4388 | llvm::SmallVector<StringRef, 32> Features; |
4389 | Target.getCPUSpecificCPUDispatchFeatures(Name: II->getName(), Features); |
4390 | llvm::transform(Range&: Features, d_first: Features.begin(), |
4391 | F: [](StringRef Str) { return Str.substr(Start: 1); }); |
4392 | llvm::erase_if(C&: Features, P: [&Target](StringRef Feat) { |
4393 | return !Target.validateCpuSupports(Name: Feat); |
4394 | }); |
4395 | Options.emplace_back(Args: cast<llvm::Function>(Val: Func), Args: StringRef{}, Args&: Features); |
4396 | ++Index; |
4397 | } |
4398 | |
4399 | llvm::stable_sort( |
4400 | Range&: Options, C: [](const CodeGenFunction::MultiVersionResolverOption &LHS, |
4401 | const CodeGenFunction::MultiVersionResolverOption &RHS) { |
4402 | return llvm::X86::getCpuSupportsMask(FeatureStrs: LHS.Conditions.Features) > |
4403 | llvm::X86::getCpuSupportsMask(FeatureStrs: RHS.Conditions.Features); |
4404 | }); |
4405 | |
4406 | // If the list contains multiple 'default' versions, such as when it contains |
4407 | // 'pentium' and 'generic', don't emit the call to the generic one (since we |
4408 | // always run on at least a 'pentium'). We do this by deleting the 'least |
4409 | // advanced' (read, lowest mangling letter). |
4410 | while (Options.size() > 1 && |
4411 | llvm::all_of(Range: llvm::X86::getCpuSupportsMask( |
4412 | FeatureStrs: (Options.end() - 2)->Conditions.Features), |
4413 | P: [](auto X) { return X == 0; })) { |
4414 | StringRef LHSName = (Options.end() - 2)->Function->getName(); |
4415 | StringRef RHSName = (Options.end() - 1)->Function->getName(); |
4416 | if (LHSName.compare(RHS: RHSName) < 0) |
4417 | Options.erase(CI: Options.end() - 2); |
4418 | else |
4419 | Options.erase(CI: Options.end() - 1); |
4420 | } |
4421 | |
4422 | CodeGenFunction CGF(*this); |
4423 | CGF.EmitMultiVersionResolver(Resolver: ResolverFunc, Options); |
4424 | |
4425 | if (getTarget().supportsIFunc()) { |
4426 | llvm::GlobalValue::LinkageTypes Linkage = getMultiversionLinkage(CGM&: *this, GD); |
4427 | auto *IFunc = cast<llvm::GlobalValue>(Val: GetOrCreateMultiVersionResolver(GD)); |
4428 | |
4429 | // Fix up function declarations that were created for cpu_specific before |
4430 | // cpu_dispatch was known |
4431 | if (!isa<llvm::GlobalIFunc>(Val: IFunc)) { |
4432 | auto *GI = llvm::GlobalIFunc::create(Ty: DeclTy, AddressSpace: 0, Linkage, Name: "" , Resolver: ResolverFunc, |
4433 | Parent: &getModule()); |
4434 | replaceDeclarationWith(Old: IFunc, New: GI); |
4435 | IFunc = GI; |
4436 | } |
4437 | |
4438 | std::string AliasName = getMangledNameImpl( |
4439 | CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
4440 | llvm::Constant *AliasFunc = GetGlobalValue(Name: AliasName); |
4441 | if (!AliasFunc) { |
4442 | auto *GA = llvm::GlobalAlias::create(Ty: DeclTy, AddressSpace: 0, Linkage, Name: AliasName, Aliasee: IFunc, |
4443 | Parent: &getModule()); |
4444 | SetCommonAttributes(GD, GV: GA); |
4445 | } |
4446 | } |
4447 | } |
4448 | |
4449 | /// Adds a declaration to the list of multi version functions if not present. |
4450 | void CodeGenModule::AddDeferredMultiVersionResolverToEmit(GlobalDecl GD) { |
4451 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4452 | assert(FD && "Not a FunctionDecl?" ); |
4453 | |
4454 | if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) { |
4455 | std::string MangledName = |
4456 | getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
4457 | if (!DeferredResolversToEmit.insert(key: MangledName).second) |
4458 | return; |
4459 | } |
4460 | MultiVersionFuncs.push_back(x: GD); |
4461 | } |
4462 | |
4463 | /// If a dispatcher for the specified mangled name is not in the module, create |
4464 | /// and return it. The dispatcher is either an llvm Function with the specified |
4465 | /// type, or a global ifunc. |
4466 | llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) { |
4467 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4468 | assert(FD && "Not a FunctionDecl?" ); |
4469 | |
4470 | std::string MangledName = |
4471 | getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
4472 | |
4473 | // Holds the name of the resolver, in ifunc mode this is the ifunc (which has |
4474 | // a separate resolver). |
4475 | std::string ResolverName = MangledName; |
4476 | if (getTarget().supportsIFunc()) { |
4477 | switch (FD->getMultiVersionKind()) { |
4478 | case MultiVersionKind::None: |
4479 | llvm_unreachable("unexpected MultiVersionKind::None for resolver" ); |
4480 | case MultiVersionKind::Target: |
4481 | case MultiVersionKind::CPUSpecific: |
4482 | case MultiVersionKind::CPUDispatch: |
4483 | ResolverName += ".ifunc" ; |
4484 | break; |
4485 | case MultiVersionKind::TargetClones: |
4486 | case MultiVersionKind::TargetVersion: |
4487 | break; |
4488 | } |
4489 | } else if (FD->isTargetMultiVersion()) { |
4490 | ResolverName += ".resolver" ; |
4491 | } |
4492 | |
4493 | // If the resolver has already been created, just return it. This lookup may |
4494 | // yield a function declaration instead of a resolver on AArch64. That is |
4495 | // because we didn't know whether a resolver will be generated when we first |
4496 | // encountered a use of the symbol named after this resolver. Therefore, |
4497 | // targets which support ifuncs should not return here unless we actually |
4498 | // found an ifunc. |
4499 | llvm::GlobalValue *ResolverGV = GetGlobalValue(Name: ResolverName); |
4500 | if (ResolverGV && |
4501 | (isa<llvm::GlobalIFunc>(Val: ResolverGV) || !getTarget().supportsIFunc())) |
4502 | return ResolverGV; |
4503 | |
4504 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
4505 | llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI); |
4506 | |
4507 | // The resolver needs to be created. For target and target_clones, defer |
4508 | // creation until the end of the TU. |
4509 | if (FD->isTargetMultiVersion() || FD->isTargetClonesMultiVersion()) |
4510 | AddDeferredMultiVersionResolverToEmit(GD); |
4511 | |
4512 | // For cpu_specific, don't create an ifunc yet because we don't know if the |
4513 | // cpu_dispatch will be emitted in this translation unit. |
4514 | if (getTarget().supportsIFunc() && !FD->isCPUSpecificMultiVersion()) { |
4515 | llvm::Type *ResolverType = llvm::FunctionType::get( |
4516 | Result: llvm::PointerType::get(ElementType: DeclTy, |
4517 | AddressSpace: getTypes().getTargetAddressSpace(T: FD->getType())), |
4518 | isVarArg: false); |
4519 | llvm::Constant *Resolver = GetOrCreateLLVMFunction( |
4520 | MangledName: MangledName + ".resolver" , Ty: ResolverType, D: GlobalDecl{}, |
4521 | /*ForVTable=*/false); |
4522 | llvm::GlobalIFunc *GIF = |
4523 | llvm::GlobalIFunc::create(Ty: DeclTy, AddressSpace: 0, Linkage: getMultiversionLinkage(CGM&: *this, GD), |
4524 | Name: "" , Resolver, Parent: &getModule()); |
4525 | GIF->setName(ResolverName); |
4526 | SetCommonAttributes(GD: FD, GV: GIF); |
4527 | if (ResolverGV) |
4528 | replaceDeclarationWith(Old: ResolverGV, New: GIF); |
4529 | return GIF; |
4530 | } |
4531 | |
4532 | llvm::Constant *Resolver = GetOrCreateLLVMFunction( |
4533 | MangledName: ResolverName, Ty: DeclTy, D: GlobalDecl{}, /*ForVTable=*/false); |
4534 | assert(isa<llvm::GlobalValue>(Resolver) && |
4535 | "Resolver should be created for the first time" ); |
4536 | SetCommonAttributes(GD: FD, GV: cast<llvm::GlobalValue>(Val: Resolver)); |
4537 | if (ResolverGV) |
4538 | replaceDeclarationWith(Old: ResolverGV, New: Resolver); |
4539 | return Resolver; |
4540 | } |
4541 | |
4542 | bool CodeGenModule::shouldDropDLLAttribute(const Decl *D, |
4543 | const llvm::GlobalValue *GV) const { |
4544 | auto SC = GV->getDLLStorageClass(); |
4545 | if (SC == llvm::GlobalValue::DefaultStorageClass) |
4546 | return false; |
4547 | const Decl *MRD = D->getMostRecentDecl(); |
4548 | return (((SC == llvm::GlobalValue::DLLImportStorageClass && |
4549 | !MRD->hasAttr<DLLImportAttr>()) || |
4550 | (SC == llvm::GlobalValue::DLLExportStorageClass && |
4551 | !MRD->hasAttr<DLLExportAttr>())) && |
4552 | !shouldMapVisibilityToDLLExport(D: cast<NamedDecl>(Val: MRD))); |
4553 | } |
4554 | |
4555 | /// GetOrCreateLLVMFunction - If the specified mangled name is not in the |
4556 | /// module, create and return an llvm Function with the specified type. If there |
4557 | /// is something in the module with the specified name, return it potentially |
4558 | /// bitcasted to the right type. |
4559 | /// |
4560 | /// If D is non-null, it specifies a decl that correspond to this. This is used |
4561 | /// to set the attributes on the function when it is first created. |
4562 | llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction( |
4563 | StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable, |
4564 | bool DontDefer, bool IsThunk, llvm::AttributeList , |
4565 | ForDefinition_t IsForDefinition) { |
4566 | const Decl *D = GD.getDecl(); |
4567 | |
4568 | std::string NameWithoutMultiVersionMangling; |
4569 | // Any attempts to use a MultiVersion function should result in retrieving |
4570 | // the iFunc instead. Name Mangling will handle the rest of the changes. |
4571 | if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(Val: D)) { |
4572 | // For the device mark the function as one that should be emitted. |
4573 | if (getLangOpts().OpenMPIsTargetDevice && OpenMPRuntime && |
4574 | !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() && |
4575 | !DontDefer && !IsForDefinition) { |
4576 | if (const FunctionDecl *FDDef = FD->getDefinition()) { |
4577 | GlobalDecl GDDef; |
4578 | if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: FDDef)) |
4579 | GDDef = GlobalDecl(CD, GD.getCtorType()); |
4580 | else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: FDDef)) |
4581 | GDDef = GlobalDecl(DD, GD.getDtorType()); |
4582 | else |
4583 | GDDef = GlobalDecl(FDDef); |
4584 | EmitGlobal(GD: GDDef); |
4585 | } |
4586 | } |
4587 | |
4588 | if (FD->isMultiVersion()) { |
4589 | UpdateMultiVersionNames(GD, FD, CurName&: MangledName); |
4590 | if (!IsForDefinition) { |
4591 | // On AArch64 we do not immediatelly emit an ifunc resolver when a |
4592 | // function is used. Instead we defer the emission until we see a |
4593 | // default definition. In the meantime we just reference the symbol |
4594 | // without FMV mangling (it may or may not be replaced later). |
4595 | if (getTarget().getTriple().isAArch64()) { |
4596 | AddDeferredMultiVersionResolverToEmit(GD); |
4597 | NameWithoutMultiVersionMangling = getMangledNameImpl( |
4598 | CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
4599 | } else |
4600 | return GetOrCreateMultiVersionResolver(GD); |
4601 | } |
4602 | } |
4603 | } |
4604 | |
4605 | if (!NameWithoutMultiVersionMangling.empty()) |
4606 | MangledName = NameWithoutMultiVersionMangling; |
4607 | |
4608 | // Lookup the entry, lazily creating it if necessary. |
4609 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
4610 | if (Entry) { |
4611 | if (WeakRefReferences.erase(Ptr: Entry)) { |
4612 | const FunctionDecl *FD = cast_or_null<FunctionDecl>(Val: D); |
4613 | if (FD && !FD->hasAttr<WeakAttr>()) |
4614 | Entry->setLinkage(llvm::Function::ExternalLinkage); |
4615 | } |
4616 | |
4617 | // Handle dropped DLL attributes. |
4618 | if (D && shouldDropDLLAttribute(D, GV: Entry)) { |
4619 | Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
4620 | setDSOLocal(Entry); |
4621 | } |
4622 | |
4623 | // If there are two attempts to define the same mangled name, issue an |
4624 | // error. |
4625 | if (IsForDefinition && !Entry->isDeclaration()) { |
4626 | GlobalDecl OtherGD; |
4627 | // Check that GD is not yet in DiagnosedConflictingDefinitions is required |
4628 | // to make sure that we issue an error only once. |
4629 | if (lookupRepresentativeDecl(MangledName, Result&: OtherGD) && |
4630 | (GD.getCanonicalDecl().getDecl() != |
4631 | OtherGD.getCanonicalDecl().getDecl()) && |
4632 | DiagnosedConflictingDefinitions.insert(V: GD).second) { |
4633 | getDiags().Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name) |
4634 | << MangledName; |
4635 | getDiags().Report(Loc: OtherGD.getDecl()->getLocation(), |
4636 | DiagID: diag::note_previous_definition); |
4637 | } |
4638 | } |
4639 | |
4640 | if ((isa<llvm::Function>(Val: Entry) || isa<llvm::GlobalAlias>(Val: Entry)) && |
4641 | (Entry->getValueType() == Ty)) { |
4642 | return Entry; |
4643 | } |
4644 | |
4645 | // Make sure the result is of the correct type. |
4646 | // (If function is requested for a definition, we always need to create a new |
4647 | // function, not just return a bitcast.) |
4648 | if (!IsForDefinition) |
4649 | return Entry; |
4650 | } |
4651 | |
4652 | // This function doesn't have a complete type (for example, the return |
4653 | // type is an incomplete struct). Use a fake type instead, and make |
4654 | // sure not to try to set attributes. |
4655 | bool IsIncompleteFunction = false; |
4656 | |
4657 | llvm::FunctionType *FTy; |
4658 | if (isa<llvm::FunctionType>(Val: Ty)) { |
4659 | FTy = cast<llvm::FunctionType>(Val: Ty); |
4660 | } else { |
4661 | FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false); |
4662 | IsIncompleteFunction = true; |
4663 | } |
4664 | |
4665 | llvm::Function *F = |
4666 | llvm::Function::Create(Ty: FTy, Linkage: llvm::Function::ExternalLinkage, |
4667 | N: Entry ? StringRef() : MangledName, M: &getModule()); |
4668 | |
4669 | // Store the declaration associated with this function so it is potentially |
4670 | // updated by further declarations or definitions and emitted at the end. |
4671 | if (D && D->hasAttr<AnnotateAttr>()) |
4672 | DeferredAnnotations[MangledName] = cast<ValueDecl>(Val: D); |
4673 | |
4674 | // If we already created a function with the same mangled name (but different |
4675 | // type) before, take its name and add it to the list of functions to be |
4676 | // replaced with F at the end of CodeGen. |
4677 | // |
4678 | // This happens if there is a prototype for a function (e.g. "int f()") and |
4679 | // then a definition of a different type (e.g. "int f(int x)"). |
4680 | if (Entry) { |
4681 | F->takeName(V: Entry); |
4682 | |
4683 | // This might be an implementation of a function without a prototype, in |
4684 | // which case, try to do special replacement of calls which match the new |
4685 | // prototype. The really key thing here is that we also potentially drop |
4686 | // arguments from the call site so as to make a direct call, which makes the |
4687 | // inliner happier and suppresses a number of optimizer warnings (!) about |
4688 | // dropping arguments. |
4689 | if (!Entry->use_empty()) { |
4690 | ReplaceUsesOfNonProtoTypeWithRealFunction(Old: Entry, NewFn: F); |
4691 | Entry->removeDeadConstantUsers(); |
4692 | } |
4693 | |
4694 | addGlobalValReplacement(GV: Entry, C: F); |
4695 | } |
4696 | |
4697 | assert(F->getName() == MangledName && "name was uniqued!" ); |
4698 | if (D) |
4699 | SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk); |
4700 | if (ExtraAttrs.hasFnAttrs()) { |
4701 | llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs()); |
4702 | F->addFnAttrs(Attrs: B); |
4703 | } |
4704 | |
4705 | if (!DontDefer) { |
4706 | // All MSVC dtors other than the base dtor are linkonce_odr and delegate to |
4707 | // each other bottoming out with the base dtor. Therefore we emit non-base |
4708 | // dtors on usage, even if there is no dtor definition in the TU. |
4709 | if (isa_and_nonnull<CXXDestructorDecl>(Val: D) && |
4710 | getCXXABI().useThunkForDtorVariant(Dtor: cast<CXXDestructorDecl>(Val: D), |
4711 | DT: GD.getDtorType())) |
4712 | addDeferredDeclToEmit(GD); |
4713 | |
4714 | // This is the first use or definition of a mangled name. If there is a |
4715 | // deferred decl with this name, remember that we need to emit it at the end |
4716 | // of the file. |
4717 | auto DDI = DeferredDecls.find(Val: MangledName); |
4718 | if (DDI != DeferredDecls.end()) { |
4719 | // Move the potentially referenced deferred decl to the |
4720 | // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we |
4721 | // don't need it anymore). |
4722 | addDeferredDeclToEmit(GD: DDI->second); |
4723 | DeferredDecls.erase(I: DDI); |
4724 | |
4725 | // Otherwise, there are cases we have to worry about where we're |
4726 | // using a declaration for which we must emit a definition but where |
4727 | // we might not find a top-level definition: |
4728 | // - member functions defined inline in their classes |
4729 | // - friend functions defined inline in some class |
4730 | // - special member functions with implicit definitions |
4731 | // If we ever change our AST traversal to walk into class methods, |
4732 | // this will be unnecessary. |
4733 | // |
4734 | // We also don't emit a definition for a function if it's going to be an |
4735 | // entry in a vtable, unless it's already marked as used. |
4736 | } else if (getLangOpts().CPlusPlus && D) { |
4737 | // Look for a declaration that's lexically in a record. |
4738 | for (const auto *FD = cast<FunctionDecl>(Val: D)->getMostRecentDecl(); FD; |
4739 | FD = FD->getPreviousDecl()) { |
4740 | if (isa<CXXRecordDecl>(Val: FD->getLexicalDeclContext())) { |
4741 | if (FD->doesThisDeclarationHaveABody()) { |
4742 | addDeferredDeclToEmit(GD: GD.getWithDecl(D: FD)); |
4743 | break; |
4744 | } |
4745 | } |
4746 | } |
4747 | } |
4748 | } |
4749 | |
4750 | // Make sure the result is of the requested type. |
4751 | if (!IsIncompleteFunction) { |
4752 | assert(F->getFunctionType() == Ty); |
4753 | return F; |
4754 | } |
4755 | |
4756 | return F; |
4757 | } |
4758 | |
4759 | /// GetAddrOfFunction - Return the address of the given function. If Ty is |
4760 | /// non-null, then this function will use the specified type if it has to |
4761 | /// create it (this occurs when we see a definition of the function). |
4762 | llvm::Constant * |
4763 | CodeGenModule::GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty, bool ForVTable, |
4764 | bool DontDefer, |
4765 | ForDefinition_t IsForDefinition) { |
4766 | // If there was no specific requested type, just convert it now. |
4767 | if (!Ty) { |
4768 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4769 | Ty = getTypes().ConvertType(T: FD->getType()); |
4770 | } |
4771 | |
4772 | // Devirtualized destructor calls may come through here instead of via |
4773 | // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead |
4774 | // of the complete destructor when necessary. |
4775 | if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: GD.getDecl())) { |
4776 | if (getTarget().getCXXABI().isMicrosoft() && |
4777 | GD.getDtorType() == Dtor_Complete && |
4778 | DD->getParent()->getNumVBases() == 0) |
4779 | GD = GlobalDecl(DD, Dtor_Base); |
4780 | } |
4781 | |
4782 | StringRef MangledName = getMangledName(GD); |
4783 | auto *F = GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer, |
4784 | /*IsThunk=*/false, ExtraAttrs: llvm::AttributeList(), |
4785 | IsForDefinition); |
4786 | // Returns kernel handle for HIP kernel stub function. |
4787 | if (LangOpts.CUDA && !LangOpts.CUDAIsDevice && |
4788 | cast<FunctionDecl>(Val: GD.getDecl())->hasAttr<CUDAGlobalAttr>()) { |
4789 | auto *Handle = getCUDARuntime().getKernelHandle( |
4790 | Stub: cast<llvm::Function>(Val: F->stripPointerCasts()), GD); |
4791 | if (IsForDefinition) |
4792 | return F; |
4793 | return Handle; |
4794 | } |
4795 | return F; |
4796 | } |
4797 | |
4798 | llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) { |
4799 | llvm::GlobalValue *F = |
4800 | cast<llvm::GlobalValue>(Val: GetAddrOfFunction(GD: Decl)->stripPointerCasts()); |
4801 | |
4802 | return llvm::NoCFIValue::get(GV: F); |
4803 | } |
4804 | |
4805 | static const FunctionDecl * |
4806 | GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) { |
4807 | TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl(); |
4808 | DeclContext *DC = TranslationUnitDecl::castToDeclContext(D: TUDecl); |
4809 | |
4810 | IdentifierInfo &CII = C.Idents.get(Name); |
4811 | for (const auto *Result : DC->lookup(Name: &CII)) |
4812 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: Result)) |
4813 | return FD; |
4814 | |
4815 | if (!C.getLangOpts().CPlusPlus) |
4816 | return nullptr; |
4817 | |
4818 | // Demangle the premangled name from getTerminateFn() |
4819 | IdentifierInfo &CXXII = |
4820 | (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ" ) |
4821 | ? C.Idents.get(Name: "terminate" ) |
4822 | : C.Idents.get(Name); |
4823 | |
4824 | for (const auto &N : {"__cxxabiv1" , "std" }) { |
4825 | IdentifierInfo &NS = C.Idents.get(Name: N); |
4826 | for (const auto *Result : DC->lookup(Name: &NS)) { |
4827 | const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Val: Result); |
4828 | if (auto *LSD = dyn_cast<LinkageSpecDecl>(Val: Result)) |
4829 | for (const auto *Result : LSD->lookup(Name: &NS)) |
4830 | if ((ND = dyn_cast<NamespaceDecl>(Val: Result))) |
4831 | break; |
4832 | |
4833 | if (ND) |
4834 | for (const auto *Result : ND->lookup(Name: &CXXII)) |
4835 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: Result)) |
4836 | return FD; |
4837 | } |
4838 | } |
4839 | |
4840 | return nullptr; |
4841 | } |
4842 | |
4843 | /// CreateRuntimeFunction - Create a new runtime function with the specified |
4844 | /// type and name. |
4845 | llvm::FunctionCallee |
4846 | CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name, |
4847 | llvm::AttributeList , bool Local, |
4848 | bool AssumeConvergent) { |
4849 | if (AssumeConvergent) { |
4850 | ExtraAttrs = |
4851 | ExtraAttrs.addFnAttribute(C&: VMContext, Kind: llvm::Attribute::Convergent); |
4852 | } |
4853 | |
4854 | llvm::Constant *C = |
4855 | GetOrCreateLLVMFunction(MangledName: Name, Ty: FTy, GD: GlobalDecl(), /*ForVTable=*/false, |
4856 | /*DontDefer=*/false, /*IsThunk=*/false, |
4857 | ExtraAttrs); |
4858 | |
4859 | if (auto *F = dyn_cast<llvm::Function>(Val: C)) { |
4860 | if (F->empty()) { |
4861 | F->setCallingConv(getRuntimeCC()); |
4862 | |
4863 | // In Windows Itanium environments, try to mark runtime functions |
4864 | // dllimport. For Mingw and MSVC, don't. We don't really know if the user |
4865 | // will link their standard library statically or dynamically. Marking |
4866 | // functions imported when they are not imported can cause linker errors |
4867 | // and warnings. |
4868 | if (!Local && getTriple().isWindowsItaniumEnvironment() && |
4869 | !getCodeGenOpts().LTOVisibilityPublicStd) { |
4870 | const FunctionDecl *FD = GetRuntimeFunctionDecl(C&: Context, Name); |
4871 | if (!FD || FD->hasAttr<DLLImportAttr>()) { |
4872 | F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); |
4873 | F->setLinkage(llvm::GlobalValue::ExternalLinkage); |
4874 | } |
4875 | } |
4876 | setDSOLocal(F); |
4877 | // FIXME: We should use CodeGenModule::SetLLVMFunctionAttributes() instead |
4878 | // of trying to approximate the attributes using the LLVM function |
4879 | // signature. This requires revising the API of CreateRuntimeFunction(). |
4880 | markRegisterParameterAttributes(F); |
4881 | } |
4882 | } |
4883 | |
4884 | return {FTy, C}; |
4885 | } |
4886 | |
4887 | /// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module, |
4888 | /// create and return an llvm GlobalVariable with the specified type and address |
4889 | /// space. If there is something in the module with the specified name, return |
4890 | /// it potentially bitcasted to the right type. |
4891 | /// |
4892 | /// If D is non-null, it specifies a decl that correspond to this. This is used |
4893 | /// to set the attributes on the global when it is first created. |
4894 | /// |
4895 | /// If IsForDefinition is true, it is guaranteed that an actual global with |
4896 | /// type Ty will be returned, not conversion of a variable with the same |
4897 | /// mangled name but some other type. |
4898 | llvm::Constant * |
4899 | CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty, |
4900 | LangAS AddrSpace, const VarDecl *D, |
4901 | ForDefinition_t IsForDefinition) { |
4902 | // Lookup the entry, lazily creating it if necessary. |
4903 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
4904 | unsigned TargetAS = getContext().getTargetAddressSpace(AS: AddrSpace); |
4905 | if (Entry) { |
4906 | if (WeakRefReferences.erase(Ptr: Entry)) { |
4907 | if (D && !D->hasAttr<WeakAttr>()) |
4908 | Entry->setLinkage(llvm::Function::ExternalLinkage); |
4909 | } |
4910 | |
4911 | // Handle dropped DLL attributes. |
4912 | if (D && shouldDropDLLAttribute(D, GV: Entry)) |
4913 | Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
4914 | |
4915 | if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D) |
4916 | getOpenMPRuntime().registerTargetGlobalVariable(VD: D, Addr: Entry); |
4917 | |
4918 | if (Entry->getValueType() == Ty && Entry->getAddressSpace() == TargetAS) |
4919 | return Entry; |
4920 | |
4921 | // If there are two attempts to define the same mangled name, issue an |
4922 | // error. |
4923 | if (IsForDefinition && !Entry->isDeclaration()) { |
4924 | GlobalDecl OtherGD; |
4925 | const VarDecl *OtherD; |
4926 | |
4927 | // Check that D is not yet in DiagnosedConflictingDefinitions is required |
4928 | // to make sure that we issue an error only once. |
4929 | if (D && lookupRepresentativeDecl(MangledName, Result&: OtherGD) && |
4930 | (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) && |
4931 | (OtherD = dyn_cast<VarDecl>(Val: OtherGD.getDecl())) && |
4932 | OtherD->hasInit() && |
4933 | DiagnosedConflictingDefinitions.insert(V: D).second) { |
4934 | getDiags().Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name) |
4935 | << MangledName; |
4936 | getDiags().Report(Loc: OtherGD.getDecl()->getLocation(), |
4937 | DiagID: diag::note_previous_definition); |
4938 | } |
4939 | } |
4940 | |
4941 | // Make sure the result is of the correct type. |
4942 | if (Entry->getType()->getAddressSpace() != TargetAS) |
4943 | return llvm::ConstantExpr::getAddrSpaceCast( |
4944 | C: Entry, Ty: llvm::PointerType::get(C&: Ty->getContext(), AddressSpace: TargetAS)); |
4945 | |
4946 | // (If global is requested for a definition, we always need to create a new |
4947 | // global, not just return a bitcast.) |
4948 | if (!IsForDefinition) |
4949 | return Entry; |
4950 | } |
4951 | |
4952 | auto DAddrSpace = GetGlobalVarAddressSpace(D); |
4953 | |
4954 | auto *GV = new llvm::GlobalVariable( |
4955 | getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr, |
4956 | MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal, |
4957 | getContext().getTargetAddressSpace(AS: DAddrSpace)); |
4958 | |
4959 | // If we already created a global with the same mangled name (but different |
4960 | // type) before, take its name and remove it from its parent. |
4961 | if (Entry) { |
4962 | GV->takeName(V: Entry); |
4963 | |
4964 | if (!Entry->use_empty()) { |
4965 | Entry->replaceAllUsesWith(V: GV); |
4966 | } |
4967 | |
4968 | Entry->eraseFromParent(); |
4969 | } |
4970 | |
4971 | // This is the first use or definition of a mangled name. If there is a |
4972 | // deferred decl with this name, remember that we need to emit it at the end |
4973 | // of the file. |
4974 | auto DDI = DeferredDecls.find(Val: MangledName); |
4975 | if (DDI != DeferredDecls.end()) { |
4976 | // Move the potentially referenced deferred decl to the DeferredDeclsToEmit |
4977 | // list, and remove it from DeferredDecls (since we don't need it anymore). |
4978 | addDeferredDeclToEmit(GD: DDI->second); |
4979 | DeferredDecls.erase(I: DDI); |
4980 | } |
4981 | |
4982 | // Handle things which are present even on external declarations. |
4983 | if (D) { |
4984 | if (LangOpts.OpenMP && !LangOpts.OpenMPSimd) |
4985 | getOpenMPRuntime().registerTargetGlobalVariable(VD: D, Addr: GV); |
4986 | |
4987 | // FIXME: This code is overly simple and should be merged with other global |
4988 | // handling. |
4989 | GV->setConstant(D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: false, ExcludeDtor: false)); |
4990 | |
4991 | GV->setAlignment(getContext().getDeclAlign(D).getAsAlign()); |
4992 | |
4993 | setLinkageForGV(GV, ND: D); |
4994 | |
4995 | if (D->getTLSKind()) { |
4996 | if (D->getTLSKind() == VarDecl::TLS_Dynamic) |
4997 | CXXThreadLocals.push_back(x: D); |
4998 | setTLSMode(GV, D: *D); |
4999 | } |
5000 | |
5001 | setGVProperties(GV, D); |
5002 | |
5003 | // If required by the ABI, treat declarations of static data members with |
5004 | // inline initializers as definitions. |
5005 | if (getContext().isMSStaticDataMemberInlineDefinition(VD: D)) { |
5006 | EmitGlobalVarDefinition(D); |
5007 | } |
5008 | |
5009 | // Emit section information for extern variables. |
5010 | if (D->hasExternalStorage()) { |
5011 | if (const SectionAttr *SA = D->getAttr<SectionAttr>()) |
5012 | GV->setSection(SA->getName()); |
5013 | } |
5014 | |
5015 | // Handle XCore specific ABI requirements. |
5016 | if (getTriple().getArch() == llvm::Triple::xcore && |
5017 | D->getLanguageLinkage() == CLanguageLinkage && |
5018 | D->getType().isConstant(Ctx: Context) && |
5019 | isExternallyVisible(L: D->getLinkageAndVisibility().getLinkage())) |
5020 | GV->setSection(".cp.rodata" ); |
5021 | |
5022 | // Handle code model attribute |
5023 | if (const auto *CMA = D->getAttr<CodeModelAttr>()) |
5024 | GV->setCodeModel(CMA->getModel()); |
5025 | |
5026 | // Check if we a have a const declaration with an initializer, we may be |
5027 | // able to emit it as available_externally to expose it's value to the |
5028 | // optimizer. |
5029 | if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() && |
5030 | D->getType().isConstQualified() && !GV->hasInitializer() && |
5031 | !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) { |
5032 | const auto *Record = |
5033 | Context.getBaseElementType(QT: D->getType())->getAsCXXRecordDecl(); |
5034 | bool HasMutableFields = Record && Record->hasMutableFields(); |
5035 | if (!HasMutableFields) { |
5036 | const VarDecl *InitDecl; |
5037 | const Expr *InitExpr = D->getAnyInitializer(D&: InitDecl); |
5038 | if (InitExpr) { |
5039 | ConstantEmitter emitter(*this); |
5040 | llvm::Constant *Init = emitter.tryEmitForInitializer(D: *InitDecl); |
5041 | if (Init) { |
5042 | auto *InitType = Init->getType(); |
5043 | if (GV->getValueType() != InitType) { |
5044 | // The type of the initializer does not match the definition. |
5045 | // This happens when an initializer has a different type from |
5046 | // the type of the global (because of padding at the end of a |
5047 | // structure for instance). |
5048 | GV->setName(StringRef()); |
5049 | // Make a new global with the correct type, this is now guaranteed |
5050 | // to work. |
5051 | auto *NewGV = cast<llvm::GlobalVariable>( |
5052 | Val: GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition) |
5053 | ->stripPointerCasts()); |
5054 | |
5055 | // Erase the old global, since it is no longer used. |
5056 | GV->eraseFromParent(); |
5057 | GV = NewGV; |
5058 | } else { |
5059 | GV->setInitializer(Init); |
5060 | GV->setConstant(true); |
5061 | GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage); |
5062 | } |
5063 | emitter.finalize(global: GV); |
5064 | } |
5065 | } |
5066 | } |
5067 | } |
5068 | } |
5069 | |
5070 | if (D && |
5071 | D->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly) { |
5072 | getTargetCodeGenInfo().setTargetAttributes(D, GV, M&: *this); |
5073 | // External HIP managed variables needed to be recorded for transformation |
5074 | // in both device and host compilations. |
5075 | if (getLangOpts().CUDA && D && D->hasAttr<HIPManagedAttr>() && |
5076 | D->hasExternalStorage()) |
5077 | getCUDARuntime().handleVarRegistration(VD: D, Var&: *GV); |
5078 | } |
5079 | |
5080 | if (D) |
5081 | SanitizerMD->reportGlobal(GV, D: *D); |
5082 | |
5083 | LangAS ExpectedAS = |
5084 | D ? D->getType().getAddressSpace() |
5085 | : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default); |
5086 | assert(getContext().getTargetAddressSpace(ExpectedAS) == TargetAS); |
5087 | if (DAddrSpace != ExpectedAS) { |
5088 | return getTargetCodeGenInfo().performAddrSpaceCast( |
5089 | CGM&: *this, V: GV, SrcAddr: DAddrSpace, DestAddr: ExpectedAS, |
5090 | DestTy: llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: TargetAS)); |
5091 | } |
5092 | |
5093 | return GV; |
5094 | } |
5095 | |
5096 | llvm::Constant * |
5097 | CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { |
5098 | const Decl *D = GD.getDecl(); |
5099 | |
5100 | if (isa<CXXConstructorDecl>(Val: D) || isa<CXXDestructorDecl>(Val: D)) |
5101 | return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr, |
5102 | /*DontDefer=*/false, IsForDefinition); |
5103 | |
5104 | if (isa<CXXMethodDecl>(Val: D)) { |
5105 | auto FInfo = |
5106 | &getTypes().arrangeCXXMethodDeclaration(MD: cast<CXXMethodDecl>(Val: D)); |
5107 | auto Ty = getTypes().GetFunctionType(Info: *FInfo); |
5108 | return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false, |
5109 | IsForDefinition); |
5110 | } |
5111 | |
5112 | if (isa<FunctionDecl>(Val: D)) { |
5113 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
5114 | llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI); |
5115 | return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false, |
5116 | IsForDefinition); |
5117 | } |
5118 | |
5119 | return GetAddrOfGlobalVar(D: cast<VarDecl>(Val: D), /*Ty=*/nullptr, IsForDefinition); |
5120 | } |
5121 | |
5122 | llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable( |
5123 | StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage, |
5124 | llvm::Align Alignment) { |
5125 | llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name); |
5126 | llvm::GlobalVariable *OldGV = nullptr; |
5127 | |
5128 | if (GV) { |
5129 | // Check if the variable has the right type. |
5130 | if (GV->getValueType() == Ty) |
5131 | return GV; |
5132 | |
5133 | // Because C++ name mangling, the only way we can end up with an already |
5134 | // existing global with the same name is if it has been declared extern "C". |
5135 | assert(GV->isDeclaration() && "Declaration has wrong type!" ); |
5136 | OldGV = GV; |
5137 | } |
5138 | |
5139 | // Create a new variable. |
5140 | GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true, |
5141 | Linkage, nullptr, Name); |
5142 | |
5143 | if (OldGV) { |
5144 | // Replace occurrences of the old variable if needed. |
5145 | GV->takeName(V: OldGV); |
5146 | |
5147 | if (!OldGV->use_empty()) { |
5148 | OldGV->replaceAllUsesWith(V: GV); |
5149 | } |
5150 | |
5151 | OldGV->eraseFromParent(); |
5152 | } |
5153 | |
5154 | if (supportsCOMDAT() && GV->isWeakForLinker() && |
5155 | !GV->hasAvailableExternallyLinkage()) |
5156 | GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName())); |
5157 | |
5158 | GV->setAlignment(Alignment); |
5159 | |
5160 | return GV; |
5161 | } |
5162 | |
5163 | /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the |
5164 | /// given global variable. If Ty is non-null and if the global doesn't exist, |
5165 | /// then it will be created with the specified type instead of whatever the |
5166 | /// normal requested type would be. If IsForDefinition is true, it is guaranteed |
5167 | /// that an actual global with type Ty will be returned, not conversion of a |
5168 | /// variable with the same mangled name but some other type. |
5169 | llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D, |
5170 | llvm::Type *Ty, |
5171 | ForDefinition_t IsForDefinition) { |
5172 | assert(D->hasGlobalStorage() && "Not a global variable" ); |
5173 | QualType ASTTy = D->getType(); |
5174 | if (!Ty) |
5175 | Ty = getTypes().ConvertTypeForMem(T: ASTTy); |
5176 | |
5177 | StringRef MangledName = getMangledName(GD: D); |
5178 | return GetOrCreateLLVMGlobal(MangledName, Ty, AddrSpace: ASTTy.getAddressSpace(), D, |
5179 | IsForDefinition); |
5180 | } |
5181 | |
5182 | /// CreateRuntimeVariable - Create a new runtime global variable with the |
5183 | /// specified type and name. |
5184 | llvm::Constant * |
5185 | CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty, |
5186 | StringRef Name) { |
5187 | LangAS AddrSpace = getContext().getLangOpts().OpenCL ? LangAS::opencl_global |
5188 | : LangAS::Default; |
5189 | auto *Ret = GetOrCreateLLVMGlobal(MangledName: Name, Ty, AddrSpace, D: nullptr); |
5190 | setDSOLocal(cast<llvm::GlobalValue>(Val: Ret->stripPointerCasts())); |
5191 | return Ret; |
5192 | } |
5193 | |
5194 | void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) { |
5195 | assert(!D->getInit() && "Cannot emit definite definitions here!" ); |
5196 | |
5197 | StringRef MangledName = getMangledName(GD: D); |
5198 | llvm::GlobalValue *GV = GetGlobalValue(Name: MangledName); |
5199 | |
5200 | // We already have a definition, not declaration, with the same mangled name. |
5201 | // Emitting of declaration is not required (and actually overwrites emitted |
5202 | // definition). |
5203 | if (GV && !GV->isDeclaration()) |
5204 | return; |
5205 | |
5206 | // If we have not seen a reference to this variable yet, place it into the |
5207 | // deferred declarations table to be emitted if needed later. |
5208 | if (!MustBeEmitted(Global: D) && !GV) { |
5209 | DeferredDecls[MangledName] = D; |
5210 | return; |
5211 | } |
5212 | |
5213 | // The tentative definition is the only definition. |
5214 | EmitGlobalVarDefinition(D); |
5215 | } |
5216 | |
5217 | void CodeGenModule::EmitExternalDeclaration(const DeclaratorDecl *D) { |
5218 | if (auto const *V = dyn_cast<const VarDecl>(Val: D)) |
5219 | EmitExternalVarDeclaration(D: V); |
5220 | if (auto const *FD = dyn_cast<const FunctionDecl>(Val: D)) |
5221 | EmitExternalFunctionDeclaration(D: FD); |
5222 | } |
5223 | |
5224 | CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const { |
5225 | return Context.toCharUnitsFromBits( |
5226 | BitSize: getDataLayout().getTypeStoreSizeInBits(Ty)); |
5227 | } |
5228 | |
5229 | LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) { |
5230 | if (LangOpts.OpenCL) { |
5231 | LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global; |
5232 | assert(AS == LangAS::opencl_global || |
5233 | AS == LangAS::opencl_global_device || |
5234 | AS == LangAS::opencl_global_host || |
5235 | AS == LangAS::opencl_constant || |
5236 | AS == LangAS::opencl_local || |
5237 | AS >= LangAS::FirstTargetAddressSpace); |
5238 | return AS; |
5239 | } |
5240 | |
5241 | if (LangOpts.SYCLIsDevice && |
5242 | (!D || D->getType().getAddressSpace() == LangAS::Default)) |
5243 | return LangAS::sycl_global; |
5244 | |
5245 | if (LangOpts.CUDA && LangOpts.CUDAIsDevice) { |
5246 | if (D) { |
5247 | if (D->hasAttr<CUDAConstantAttr>()) |
5248 | return LangAS::cuda_constant; |
5249 | if (D->hasAttr<CUDASharedAttr>()) |
5250 | return LangAS::cuda_shared; |
5251 | if (D->hasAttr<CUDADeviceAttr>()) |
5252 | return LangAS::cuda_device; |
5253 | if (D->getType().isConstQualified()) |
5254 | return LangAS::cuda_constant; |
5255 | } |
5256 | return LangAS::cuda_device; |
5257 | } |
5258 | |
5259 | if (LangOpts.OpenMP) { |
5260 | LangAS AS; |
5261 | if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(VD: D, AS)) |
5262 | return AS; |
5263 | } |
5264 | return getTargetCodeGenInfo().getGlobalVarAddressSpace(CGM&: *this, D); |
5265 | } |
5266 | |
5267 | LangAS CodeGenModule::GetGlobalConstantAddressSpace() const { |
5268 | // OpenCL v1.2 s6.5.3: a string literal is in the constant address space. |
5269 | if (LangOpts.OpenCL) |
5270 | return LangAS::opencl_constant; |
5271 | if (LangOpts.SYCLIsDevice) |
5272 | return LangAS::sycl_global; |
5273 | if (LangOpts.HIP && LangOpts.CUDAIsDevice && getTriple().isSPIRV()) |
5274 | // For HIPSPV map literals to cuda_device (maps to CrossWorkGroup in SPIR-V) |
5275 | // instead of default AS (maps to Generic in SPIR-V). Otherwise, we end up |
5276 | // with OpVariable instructions with Generic storage class which is not |
5277 | // allowed (SPIR-V V1.6 s3.42.8). Also, mapping literals to SPIR-V |
5278 | // UniformConstant storage class is not viable as pointers to it may not be |
5279 | // casted to Generic pointers which are used to model HIP's "flat" pointers. |
5280 | return LangAS::cuda_device; |
5281 | if (auto AS = getTarget().getConstantAddressSpace()) |
5282 | return *AS; |
5283 | return LangAS::Default; |
5284 | } |
5285 | |
5286 | // In address space agnostic languages, string literals are in default address |
5287 | // space in AST. However, certain targets (e.g. amdgcn) request them to be |
5288 | // emitted in constant address space in LLVM IR. To be consistent with other |
5289 | // parts of AST, string literal global variables in constant address space |
5290 | // need to be casted to default address space before being put into address |
5291 | // map and referenced by other part of CodeGen. |
5292 | // In OpenCL, string literals are in constant address space in AST, therefore |
5293 | // they should not be casted to default address space. |
5294 | static llvm::Constant * |
5295 | castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM, |
5296 | llvm::GlobalVariable *GV) { |
5297 | llvm::Constant *Cast = GV; |
5298 | if (!CGM.getLangOpts().OpenCL) { |
5299 | auto AS = CGM.GetGlobalConstantAddressSpace(); |
5300 | if (AS != LangAS::Default) |
5301 | Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast( |
5302 | CGM, V: GV, SrcAddr: AS, DestAddr: LangAS::Default, |
5303 | DestTy: llvm::PointerType::get( |
5304 | C&: CGM.getLLVMContext(), |
5305 | AddressSpace: CGM.getContext().getTargetAddressSpace(AS: LangAS::Default))); |
5306 | } |
5307 | return Cast; |
5308 | } |
5309 | |
5310 | template<typename SomeDecl> |
5311 | void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D, |
5312 | llvm::GlobalValue *GV) { |
5313 | if (!getLangOpts().CPlusPlus) |
5314 | return; |
5315 | |
5316 | // Must have 'used' attribute, or else inline assembly can't rely on |
5317 | // the name existing. |
5318 | if (!D->template hasAttr<UsedAttr>()) |
5319 | return; |
5320 | |
5321 | // Must have internal linkage and an ordinary name. |
5322 | if (!D->getIdentifier() || D->getFormalLinkage() != Linkage::Internal) |
5323 | return; |
5324 | |
5325 | // Must be in an extern "C" context. Entities declared directly within |
5326 | // a record are not extern "C" even if the record is in such a context. |
5327 | const SomeDecl *First = D->getFirstDecl(); |
5328 | if (First->getDeclContext()->isRecord() || !First->isInExternCContext()) |
5329 | return; |
5330 | |
5331 | // OK, this is an internal linkage entity inside an extern "C" linkage |
5332 | // specification. Make a note of that so we can give it the "expected" |
5333 | // mangled name if nothing else is using that name. |
5334 | std::pair<StaticExternCMap::iterator, bool> R = |
5335 | StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV)); |
5336 | |
5337 | // If we have multiple internal linkage entities with the same name |
5338 | // in extern "C" regions, none of them gets that name. |
5339 | if (!R.second) |
5340 | R.first->second = nullptr; |
5341 | } |
5342 | |
5343 | static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) { |
5344 | if (!CGM.supportsCOMDAT()) |
5345 | return false; |
5346 | |
5347 | if (D.hasAttr<SelectAnyAttr>()) |
5348 | return true; |
5349 | |
5350 | GVALinkage Linkage; |
5351 | if (auto *VD = dyn_cast<VarDecl>(Val: &D)) |
5352 | Linkage = CGM.getContext().GetGVALinkageForVariable(VD); |
5353 | else |
5354 | Linkage = CGM.getContext().GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: &D)); |
5355 | |
5356 | switch (Linkage) { |
5357 | case GVA_Internal: |
5358 | case GVA_AvailableExternally: |
5359 | case GVA_StrongExternal: |
5360 | return false; |
5361 | case GVA_DiscardableODR: |
5362 | case GVA_StrongODR: |
5363 | return true; |
5364 | } |
5365 | llvm_unreachable("No such linkage" ); |
5366 | } |
5367 | |
5368 | bool CodeGenModule::supportsCOMDAT() const { |
5369 | return getTriple().supportsCOMDAT(); |
5370 | } |
5371 | |
5372 | void CodeGenModule::maybeSetTrivialComdat(const Decl &D, |
5373 | llvm::GlobalObject &GO) { |
5374 | if (!shouldBeInCOMDAT(CGM&: *this, D)) |
5375 | return; |
5376 | GO.setComdat(TheModule.getOrInsertComdat(Name: GO.getName())); |
5377 | } |
5378 | |
5379 | /// Pass IsTentative as true if you want to create a tentative definition. |
5380 | void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D, |
5381 | bool IsTentative) { |
5382 | // OpenCL global variables of sampler type are translated to function calls, |
5383 | // therefore no need to be translated. |
5384 | QualType ASTTy = D->getType(); |
5385 | if (getLangOpts().OpenCL && ASTTy->isSamplerT()) |
5386 | return; |
5387 | |
5388 | // If this is OpenMP device, check if it is legal to emit this global |
5389 | // normally. |
5390 | if (LangOpts.OpenMPIsTargetDevice && OpenMPRuntime && |
5391 | OpenMPRuntime->emitTargetGlobalVariable(GD: D)) |
5392 | return; |
5393 | |
5394 | llvm::TrackingVH<llvm::Constant> Init; |
5395 | bool NeedsGlobalCtor = false; |
5396 | // Whether the definition of the variable is available externally. |
5397 | // If yes, we shouldn't emit the GloablCtor and GlobalDtor for the variable |
5398 | // since this is the job for its original source. |
5399 | bool IsDefinitionAvailableExternally = |
5400 | getContext().GetGVALinkageForVariable(VD: D) == GVA_AvailableExternally; |
5401 | bool NeedsGlobalDtor = |
5402 | !IsDefinitionAvailableExternally && |
5403 | D->needsDestruction(Ctx: getContext()) == QualType::DK_cxx_destructor; |
5404 | |
5405 | // It is helpless to emit the definition for an available_externally variable |
5406 | // which can't be marked as const. |
5407 | // We don't need to check if it needs global ctor or dtor. See the above |
5408 | // comment for ideas. |
5409 | if (IsDefinitionAvailableExternally && |
5410 | (!D->hasConstantInitialization() || |
5411 | // TODO: Update this when we have interface to check constexpr |
5412 | // destructor. |
5413 | D->needsDestruction(Ctx: getContext()) || |
5414 | !D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: true))) |
5415 | return; |
5416 | |
5417 | const VarDecl *InitDecl; |
5418 | const Expr *InitExpr = D->getAnyInitializer(D&: InitDecl); |
5419 | |
5420 | std::optional<ConstantEmitter> emitter; |
5421 | |
5422 | // CUDA E.2.4.1 "__shared__ variables cannot have an initialization |
5423 | // as part of their declaration." Sema has already checked for |
5424 | // error cases, so we just need to set Init to UndefValue. |
5425 | bool IsCUDASharedVar = |
5426 | getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>(); |
5427 | // Shadows of initialized device-side global variables are also left |
5428 | // undefined. |
5429 | // Managed Variables should be initialized on both host side and device side. |
5430 | bool IsCUDAShadowVar = |
5431 | !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() && |
5432 | (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() || |
5433 | D->hasAttr<CUDASharedAttr>()); |
5434 | bool IsCUDADeviceShadowVar = |
5435 | getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() && |
5436 | (D->getType()->isCUDADeviceBuiltinSurfaceType() || |
5437 | D->getType()->isCUDADeviceBuiltinTextureType()); |
5438 | if (getLangOpts().CUDA && |
5439 | (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar)) |
5440 | Init = llvm::UndefValue::get(T: getTypes().ConvertTypeForMem(T: ASTTy)); |
5441 | else if (D->hasAttr<LoaderUninitializedAttr>()) |
5442 | Init = llvm::UndefValue::get(T: getTypes().ConvertTypeForMem(T: ASTTy)); |
5443 | else if (!InitExpr) { |
5444 | // This is a tentative definition; tentative definitions are |
5445 | // implicitly initialized with { 0 }. |
5446 | // |
5447 | // Note that tentative definitions are only emitted at the end of |
5448 | // a translation unit, so they should never have incomplete |
5449 | // type. In addition, EmitTentativeDefinition makes sure that we |
5450 | // never attempt to emit a tentative definition if a real one |
5451 | // exists. A use may still exists, however, so we still may need |
5452 | // to do a RAUW. |
5453 | assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type" ); |
5454 | Init = EmitNullConstant(T: D->getType()); |
5455 | } else { |
5456 | initializedGlobalDecl = GlobalDecl(D); |
5457 | emitter.emplace(args&: *this); |
5458 | llvm::Constant *Initializer = emitter->tryEmitForInitializer(D: *InitDecl); |
5459 | if (!Initializer) { |
5460 | QualType T = InitExpr->getType(); |
5461 | if (D->getType()->isReferenceType()) |
5462 | T = D->getType(); |
5463 | |
5464 | if (getLangOpts().CPlusPlus) { |
5465 | if (InitDecl->hasFlexibleArrayInit(Ctx: getContext())) |
5466 | ErrorUnsupported(D, Type: "flexible array initializer" ); |
5467 | Init = EmitNullConstant(T); |
5468 | |
5469 | if (!IsDefinitionAvailableExternally) |
5470 | NeedsGlobalCtor = true; |
5471 | } else { |
5472 | ErrorUnsupported(D, Type: "static initializer" ); |
5473 | Init = llvm::UndefValue::get(T: getTypes().ConvertType(T)); |
5474 | } |
5475 | } else { |
5476 | Init = Initializer; |
5477 | // We don't need an initializer, so remove the entry for the delayed |
5478 | // initializer position (just in case this entry was delayed) if we |
5479 | // also don't need to register a destructor. |
5480 | if (getLangOpts().CPlusPlus && !NeedsGlobalDtor) |
5481 | DelayedCXXInitPosition.erase(Val: D); |
5482 | |
5483 | #ifndef NDEBUG |
5484 | CharUnits VarSize = getContext().getTypeSizeInChars(ASTTy) + |
5485 | InitDecl->getFlexibleArrayInitChars(getContext()); |
5486 | CharUnits CstSize = CharUnits::fromQuantity( |
5487 | getDataLayout().getTypeAllocSize(Init->getType())); |
5488 | assert(VarSize == CstSize && "Emitted constant has unexpected size" ); |
5489 | #endif |
5490 | } |
5491 | } |
5492 | |
5493 | llvm::Type* InitType = Init->getType(); |
5494 | llvm::Constant *Entry = |
5495 | GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition: ForDefinition_t(!IsTentative)); |
5496 | |
5497 | // Strip off pointer casts if we got them. |
5498 | Entry = Entry->stripPointerCasts(); |
5499 | |
5500 | // Entry is now either a Function or GlobalVariable. |
5501 | auto *GV = dyn_cast<llvm::GlobalVariable>(Val: Entry); |
5502 | |
5503 | // We have a definition after a declaration with the wrong type. |
5504 | // We must make a new GlobalVariable* and update everything that used OldGV |
5505 | // (a declaration or tentative definition) with the new GlobalVariable* |
5506 | // (which will be a definition). |
5507 | // |
5508 | // This happens if there is a prototype for a global (e.g. |
5509 | // "extern int x[];") and then a definition of a different type (e.g. |
5510 | // "int x[10];"). This also happens when an initializer has a different type |
5511 | // from the type of the global (this happens with unions). |
5512 | if (!GV || GV->getValueType() != InitType || |
5513 | GV->getType()->getAddressSpace() != |
5514 | getContext().getTargetAddressSpace(AS: GetGlobalVarAddressSpace(D))) { |
5515 | |
5516 | // Move the old entry aside so that we'll create a new one. |
5517 | Entry->setName(StringRef()); |
5518 | |
5519 | // Make a new global with the correct type, this is now guaranteed to work. |
5520 | GV = cast<llvm::GlobalVariable>( |
5521 | Val: GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition: ForDefinition_t(!IsTentative)) |
5522 | ->stripPointerCasts()); |
5523 | |
5524 | // Replace all uses of the old global with the new global |
5525 | llvm::Constant *NewPtrForOldDecl = |
5526 | llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(C: GV, |
5527 | Ty: Entry->getType()); |
5528 | Entry->replaceAllUsesWith(V: NewPtrForOldDecl); |
5529 | |
5530 | // Erase the old global, since it is no longer used. |
5531 | cast<llvm::GlobalValue>(Val: Entry)->eraseFromParent(); |
5532 | } |
5533 | |
5534 | MaybeHandleStaticInExternC(D, GV); |
5535 | |
5536 | if (D->hasAttr<AnnotateAttr>()) |
5537 | AddGlobalAnnotations(D, GV); |
5538 | |
5539 | // Set the llvm linkage type as appropriate. |
5540 | llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD: D); |
5541 | |
5542 | // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on |
5543 | // the device. [...]" |
5544 | // CUDA B.2.2 "The __constant__ qualifier, optionally used together with |
5545 | // __device__, declares a variable that: [...] |
5546 | // Is accessible from all the threads within the grid and from the host |
5547 | // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize() |
5548 | // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())." |
5549 | if (LangOpts.CUDA) { |
5550 | if (LangOpts.CUDAIsDevice) { |
5551 | if (Linkage != llvm::GlobalValue::InternalLinkage && |
5552 | (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() || |
5553 | D->getType()->isCUDADeviceBuiltinSurfaceType() || |
5554 | D->getType()->isCUDADeviceBuiltinTextureType())) |
5555 | GV->setExternallyInitialized(true); |
5556 | } else { |
5557 | getCUDARuntime().internalizeDeviceSideVar(D, Linkage); |
5558 | } |
5559 | getCUDARuntime().handleVarRegistration(VD: D, Var&: *GV); |
5560 | } |
5561 | |
5562 | GV->setInitializer(Init); |
5563 | if (emitter) |
5564 | emitter->finalize(global: GV); |
5565 | |
5566 | // If it is safe to mark the global 'constant', do so now. |
5567 | GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor && |
5568 | D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: true)); |
5569 | |
5570 | // If it is in a read-only section, mark it 'constant'. |
5571 | if (const SectionAttr *SA = D->getAttr<SectionAttr>()) { |
5572 | const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()]; |
5573 | if ((SI.SectionFlags & ASTContext::PSF_Write) == 0) |
5574 | GV->setConstant(true); |
5575 | } |
5576 | |
5577 | CharUnits AlignVal = getContext().getDeclAlign(D); |
5578 | // Check for alignment specifed in an 'omp allocate' directive. |
5579 | if (std::optional<CharUnits> AlignValFromAllocate = |
5580 | getOMPAllocateAlignment(VD: D)) |
5581 | AlignVal = *AlignValFromAllocate; |
5582 | GV->setAlignment(AlignVal.getAsAlign()); |
5583 | |
5584 | // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper |
5585 | // function is only defined alongside the variable, not also alongside |
5586 | // callers. Normally, all accesses to a thread_local go through the |
5587 | // thread-wrapper in order to ensure initialization has occurred, underlying |
5588 | // variable will never be used other than the thread-wrapper, so it can be |
5589 | // converted to internal linkage. |
5590 | // |
5591 | // However, if the variable has the 'constinit' attribute, it _can_ be |
5592 | // referenced directly, without calling the thread-wrapper, so the linkage |
5593 | // must not be changed. |
5594 | // |
5595 | // Additionally, if the variable isn't plain external linkage, e.g. if it's |
5596 | // weak or linkonce, the de-duplication semantics are important to preserve, |
5597 | // so we don't change the linkage. |
5598 | if (D->getTLSKind() == VarDecl::TLS_Dynamic && |
5599 | Linkage == llvm::GlobalValue::ExternalLinkage && |
5600 | Context.getTargetInfo().getTriple().isOSDarwin() && |
5601 | !D->hasAttr<ConstInitAttr>()) |
5602 | Linkage = llvm::GlobalValue::InternalLinkage; |
5603 | |
5604 | GV->setLinkage(Linkage); |
5605 | if (D->hasAttr<DLLImportAttr>()) |
5606 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); |
5607 | else if (D->hasAttr<DLLExportAttr>()) |
5608 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass); |
5609 | else |
5610 | GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass); |
5611 | |
5612 | if (Linkage == llvm::GlobalVariable::CommonLinkage) { |
5613 | // common vars aren't constant even if declared const. |
5614 | GV->setConstant(false); |
5615 | // Tentative definition of global variables may be initialized with |
5616 | // non-zero null pointers. In this case they should have weak linkage |
5617 | // since common linkage must have zero initializer and must not have |
5618 | // explicit section therefore cannot have non-zero initial value. |
5619 | if (!GV->getInitializer()->isNullValue()) |
5620 | GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage); |
5621 | } |
5622 | |
5623 | setNonAliasAttributes(GD: D, GO: GV); |
5624 | |
5625 | if (D->getTLSKind() && !GV->isThreadLocal()) { |
5626 | if (D->getTLSKind() == VarDecl::TLS_Dynamic) |
5627 | CXXThreadLocals.push_back(x: D); |
5628 | setTLSMode(GV, D: *D); |
5629 | } |
5630 | |
5631 | maybeSetTrivialComdat(D: *D, GO&: *GV); |
5632 | |
5633 | // Emit the initializer function if necessary. |
5634 | if (NeedsGlobalCtor || NeedsGlobalDtor) |
5635 | EmitCXXGlobalVarDeclInitFunc(D, Addr: GV, PerformInit: NeedsGlobalCtor); |
5636 | |
5637 | SanitizerMD->reportGlobal(GV, D: *D, IsDynInit: NeedsGlobalCtor); |
5638 | |
5639 | // Emit global variable debug information. |
5640 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
5641 | if (getCodeGenOpts().hasReducedDebugInfo()) |
5642 | DI->EmitGlobalVariable(GV, Decl: D); |
5643 | } |
5644 | |
5645 | void CodeGenModule::EmitExternalVarDeclaration(const VarDecl *D) { |
5646 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
5647 | if (getCodeGenOpts().hasReducedDebugInfo()) { |
5648 | QualType ASTTy = D->getType(); |
5649 | llvm::Type *Ty = getTypes().ConvertTypeForMem(T: D->getType()); |
5650 | llvm::Constant *GV = |
5651 | GetOrCreateLLVMGlobal(MangledName: D->getName(), Ty, AddrSpace: ASTTy.getAddressSpace(), D); |
5652 | DI->EmitExternalVariable( |
5653 | GV: cast<llvm::GlobalVariable>(Val: GV->stripPointerCasts()), Decl: D); |
5654 | } |
5655 | } |
5656 | |
5657 | void CodeGenModule::EmitExternalFunctionDeclaration(const FunctionDecl *FD) { |
5658 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
5659 | if (getCodeGenOpts().hasReducedDebugInfo()) { |
5660 | auto *Ty = getTypes().ConvertType(T: FD->getType()); |
5661 | StringRef MangledName = getMangledName(GD: FD); |
5662 | auto *Fn = dyn_cast<llvm::Function>( |
5663 | Val: GetOrCreateLLVMFunction(MangledName, Ty, GD: FD, /* ForVTable */ false)); |
5664 | if (!Fn->getSubprogram()) |
5665 | DI->EmitFunctionDecl(GD: FD, Loc: FD->getLocation(), FnType: FD->getType(), Fn); |
5666 | } |
5667 | } |
5668 | |
5669 | static bool isVarDeclStrongDefinition(const ASTContext &Context, |
5670 | CodeGenModule &CGM, const VarDecl *D, |
5671 | bool NoCommon) { |
5672 | // Don't give variables common linkage if -fno-common was specified unless it |
5673 | // was overridden by a NoCommon attribute. |
5674 | if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>()) |
5675 | return true; |
5676 | |
5677 | // C11 6.9.2/2: |
5678 | // A declaration of an identifier for an object that has file scope without |
5679 | // an initializer, and without a storage-class specifier or with the |
5680 | // storage-class specifier static, constitutes a tentative definition. |
5681 | if (D->getInit() || D->hasExternalStorage()) |
5682 | return true; |
5683 | |
5684 | // A variable cannot be both common and exist in a section. |
5685 | if (D->hasAttr<SectionAttr>()) |
5686 | return true; |
5687 | |
5688 | // A variable cannot be both common and exist in a section. |
5689 | // We don't try to determine which is the right section in the front-end. |
5690 | // If no specialized section name is applicable, it will resort to default. |
5691 | if (D->hasAttr<PragmaClangBSSSectionAttr>() || |
5692 | D->hasAttr<PragmaClangDataSectionAttr>() || |
5693 | D->hasAttr<PragmaClangRelroSectionAttr>() || |
5694 | D->hasAttr<PragmaClangRodataSectionAttr>()) |
5695 | return true; |
5696 | |
5697 | // Thread local vars aren't considered common linkage. |
5698 | if (D->getTLSKind()) |
5699 | return true; |
5700 | |
5701 | // Tentative definitions marked with WeakImportAttr are true definitions. |
5702 | if (D->hasAttr<WeakImportAttr>()) |
5703 | return true; |
5704 | |
5705 | // A variable cannot be both common and exist in a comdat. |
5706 | if (shouldBeInCOMDAT(CGM, D: *D)) |
5707 | return true; |
5708 | |
5709 | // Declarations with a required alignment do not have common linkage in MSVC |
5710 | // mode. |
5711 | if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { |
5712 | if (D->hasAttr<AlignedAttr>()) |
5713 | return true; |
5714 | QualType VarType = D->getType(); |
5715 | if (Context.isAlignmentRequired(T: VarType)) |
5716 | return true; |
5717 | |
5718 | if (const auto *RT = VarType->getAs<RecordType>()) { |
5719 | const RecordDecl *RD = RT->getDecl(); |
5720 | for (const FieldDecl *FD : RD->fields()) { |
5721 | if (FD->isBitField()) |
5722 | continue; |
5723 | if (FD->hasAttr<AlignedAttr>()) |
5724 | return true; |
5725 | if (Context.isAlignmentRequired(T: FD->getType())) |
5726 | return true; |
5727 | } |
5728 | } |
5729 | } |
5730 | |
5731 | // Microsoft's link.exe doesn't support alignments greater than 32 bytes for |
5732 | // common symbols, so symbols with greater alignment requirements cannot be |
5733 | // common. |
5734 | // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two |
5735 | // alignments for common symbols via the aligncomm directive, so this |
5736 | // restriction only applies to MSVC environments. |
5737 | if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() && |
5738 | Context.getTypeAlignIfKnown(T: D->getType()) > |
5739 | Context.toBits(CharSize: CharUnits::fromQuantity(Quantity: 32))) |
5740 | return true; |
5741 | |
5742 | return false; |
5743 | } |
5744 | |
5745 | llvm::GlobalValue::LinkageTypes |
5746 | CodeGenModule::getLLVMLinkageForDeclarator(const DeclaratorDecl *D, |
5747 | GVALinkage Linkage) { |
5748 | if (Linkage == GVA_Internal) |
5749 | return llvm::Function::InternalLinkage; |
5750 | |
5751 | if (D->hasAttr<WeakAttr>()) |
5752 | return llvm::GlobalVariable::WeakAnyLinkage; |
5753 | |
5754 | if (const auto *FD = D->getAsFunction()) |
5755 | if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally) |
5756 | return llvm::GlobalVariable::LinkOnceAnyLinkage; |
5757 | |
5758 | // We are guaranteed to have a strong definition somewhere else, |
5759 | // so we can use available_externally linkage. |
5760 | if (Linkage == GVA_AvailableExternally) |
5761 | return llvm::GlobalValue::AvailableExternallyLinkage; |
5762 | |
5763 | // Note that Apple's kernel linker doesn't support symbol |
5764 | // coalescing, so we need to avoid linkonce and weak linkages there. |
5765 | // Normally, this means we just map to internal, but for explicit |
5766 | // instantiations we'll map to external. |
5767 | |
5768 | // In C++, the compiler has to emit a definition in every translation unit |
5769 | // that references the function. We should use linkonce_odr because |
5770 | // a) if all references in this translation unit are optimized away, we |
5771 | // don't need to codegen it. b) if the function persists, it needs to be |
5772 | // merged with other definitions. c) C++ has the ODR, so we know the |
5773 | // definition is dependable. |
5774 | if (Linkage == GVA_DiscardableODR) |
5775 | return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage |
5776 | : llvm::Function::InternalLinkage; |
5777 | |
5778 | // An explicit instantiation of a template has weak linkage, since |
5779 | // explicit instantiations can occur in multiple translation units |
5780 | // and must all be equivalent. However, we are not allowed to |
5781 | // throw away these explicit instantiations. |
5782 | // |
5783 | // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU, |
5784 | // so say that CUDA templates are either external (for kernels) or internal. |
5785 | // This lets llvm perform aggressive inter-procedural optimizations. For |
5786 | // -fgpu-rdc case, device function calls across multiple TU's are allowed, |
5787 | // therefore we need to follow the normal linkage paradigm. |
5788 | if (Linkage == GVA_StrongODR) { |
5789 | if (getLangOpts().AppleKext) |
5790 | return llvm::Function::ExternalLinkage; |
5791 | if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice && |
5792 | !getLangOpts().GPURelocatableDeviceCode) |
5793 | return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage |
5794 | : llvm::Function::InternalLinkage; |
5795 | return llvm::Function::WeakODRLinkage; |
5796 | } |
5797 | |
5798 | // C++ doesn't have tentative definitions and thus cannot have common |
5799 | // linkage. |
5800 | if (!getLangOpts().CPlusPlus && isa<VarDecl>(Val: D) && |
5801 | !isVarDeclStrongDefinition(Context, CGM&: *this, D: cast<VarDecl>(Val: D), |
5802 | NoCommon: CodeGenOpts.NoCommon)) |
5803 | return llvm::GlobalVariable::CommonLinkage; |
5804 | |
5805 | // selectany symbols are externally visible, so use weak instead of |
5806 | // linkonce. MSVC optimizes away references to const selectany globals, so |
5807 | // all definitions should be the same and ODR linkage should be used. |
5808 | // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx |
5809 | if (D->hasAttr<SelectAnyAttr>()) |
5810 | return llvm::GlobalVariable::WeakODRLinkage; |
5811 | |
5812 | // Otherwise, we have strong external linkage. |
5813 | assert(Linkage == GVA_StrongExternal); |
5814 | return llvm::GlobalVariable::ExternalLinkage; |
5815 | } |
5816 | |
5817 | llvm::GlobalValue::LinkageTypes |
5818 | CodeGenModule::getLLVMLinkageVarDefinition(const VarDecl *VD) { |
5819 | GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD); |
5820 | return getLLVMLinkageForDeclarator(D: VD, Linkage); |
5821 | } |
5822 | |
5823 | /// Replace the uses of a function that was declared with a non-proto type. |
5824 | /// We want to silently drop extra arguments from call sites |
5825 | static void replaceUsesOfNonProtoConstant(llvm::Constant *old, |
5826 | llvm::Function *newFn) { |
5827 | // Fast path. |
5828 | if (old->use_empty()) |
5829 | return; |
5830 | |
5831 | llvm::Type *newRetTy = newFn->getReturnType(); |
5832 | SmallVector<llvm::Value *, 4> newArgs; |
5833 | |
5834 | SmallVector<llvm::CallBase *> callSitesToBeRemovedFromParent; |
5835 | |
5836 | for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end(); |
5837 | ui != ue; ui++) { |
5838 | llvm::User *user = ui->getUser(); |
5839 | |
5840 | // Recognize and replace uses of bitcasts. Most calls to |
5841 | // unprototyped functions will use bitcasts. |
5842 | if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(Val: user)) { |
5843 | if (bitcast->getOpcode() == llvm::Instruction::BitCast) |
5844 | replaceUsesOfNonProtoConstant(old: bitcast, newFn); |
5845 | continue; |
5846 | } |
5847 | |
5848 | // Recognize calls to the function. |
5849 | llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(Val: user); |
5850 | if (!callSite) |
5851 | continue; |
5852 | if (!callSite->isCallee(U: &*ui)) |
5853 | continue; |
5854 | |
5855 | // If the return types don't match exactly, then we can't |
5856 | // transform this call unless it's dead. |
5857 | if (callSite->getType() != newRetTy && !callSite->use_empty()) |
5858 | continue; |
5859 | |
5860 | // Get the call site's attribute list. |
5861 | SmallVector<llvm::AttributeSet, 8> newArgAttrs; |
5862 | llvm::AttributeList oldAttrs = callSite->getAttributes(); |
5863 | |
5864 | // If the function was passed too few arguments, don't transform. |
5865 | unsigned newNumArgs = newFn->arg_size(); |
5866 | if (callSite->arg_size() < newNumArgs) |
5867 | continue; |
5868 | |
5869 | // If extra arguments were passed, we silently drop them. |
5870 | // If any of the types mismatch, we don't transform. |
5871 | unsigned argNo = 0; |
5872 | bool dontTransform = false; |
5873 | for (llvm::Argument &A : newFn->args()) { |
5874 | if (callSite->getArgOperand(i: argNo)->getType() != A.getType()) { |
5875 | dontTransform = true; |
5876 | break; |
5877 | } |
5878 | |
5879 | // Add any parameter attributes. |
5880 | newArgAttrs.push_back(Elt: oldAttrs.getParamAttrs(ArgNo: argNo)); |
5881 | argNo++; |
5882 | } |
5883 | if (dontTransform) |
5884 | continue; |
5885 | |
5886 | // Okay, we can transform this. Create the new call instruction and copy |
5887 | // over the required information. |
5888 | newArgs.append(in_start: callSite->arg_begin(), in_end: callSite->arg_begin() + argNo); |
5889 | |
5890 | // Copy over any operand bundles. |
5891 | SmallVector<llvm::OperandBundleDef, 1> newBundles; |
5892 | callSite->getOperandBundlesAsDefs(Defs&: newBundles); |
5893 | |
5894 | llvm::CallBase *newCall; |
5895 | if (isa<llvm::CallInst>(Val: callSite)) { |
5896 | newCall = |
5897 | llvm::CallInst::Create(Func: newFn, Args: newArgs, Bundles: newBundles, NameStr: "" , InsertBefore: callSite); |
5898 | } else { |
5899 | auto *oldInvoke = cast<llvm::InvokeInst>(Val: callSite); |
5900 | newCall = llvm::InvokeInst::Create(Func: newFn, IfNormal: oldInvoke->getNormalDest(), |
5901 | IfException: oldInvoke->getUnwindDest(), Args: newArgs, |
5902 | Bundles: newBundles, NameStr: "" , InsertBefore: callSite); |
5903 | } |
5904 | newArgs.clear(); // for the next iteration |
5905 | |
5906 | if (!newCall->getType()->isVoidTy()) |
5907 | newCall->takeName(V: callSite); |
5908 | newCall->setAttributes( |
5909 | llvm::AttributeList::get(C&: newFn->getContext(), FnAttrs: oldAttrs.getFnAttrs(), |
5910 | RetAttrs: oldAttrs.getRetAttrs(), ArgAttrs: newArgAttrs)); |
5911 | newCall->setCallingConv(callSite->getCallingConv()); |
5912 | |
5913 | // Finally, remove the old call, replacing any uses with the new one. |
5914 | if (!callSite->use_empty()) |
5915 | callSite->replaceAllUsesWith(V: newCall); |
5916 | |
5917 | // Copy debug location attached to CI. |
5918 | if (callSite->getDebugLoc()) |
5919 | newCall->setDebugLoc(callSite->getDebugLoc()); |
5920 | |
5921 | callSitesToBeRemovedFromParent.push_back(Elt: callSite); |
5922 | } |
5923 | |
5924 | for (auto *callSite : callSitesToBeRemovedFromParent) { |
5925 | callSite->eraseFromParent(); |
5926 | } |
5927 | } |
5928 | |
5929 | /// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we |
5930 | /// implement a function with no prototype, e.g. "int foo() {}". If there are |
5931 | /// existing call uses of the old function in the module, this adjusts them to |
5932 | /// call the new function directly. |
5933 | /// |
5934 | /// This is not just a cleanup: the always_inline pass requires direct calls to |
5935 | /// functions to be able to inline them. If there is a bitcast in the way, it |
5936 | /// won't inline them. Instcombine normally deletes these calls, but it isn't |
5937 | /// run at -O0. |
5938 | static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old, |
5939 | llvm::Function *NewFn) { |
5940 | // If we're redefining a global as a function, don't transform it. |
5941 | if (!isa<llvm::Function>(Val: Old)) return; |
5942 | |
5943 | replaceUsesOfNonProtoConstant(old: Old, newFn: NewFn); |
5944 | } |
5945 | |
5946 | void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { |
5947 | auto DK = VD->isThisDeclarationADefinition(); |
5948 | if ((DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>()) || |
5949 | (LangOpts.CUDA && !shouldEmitCUDAGlobalVar(Global: VD))) |
5950 | return; |
5951 | |
5952 | TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind(); |
5953 | // If we have a definition, this might be a deferred decl. If the |
5954 | // instantiation is explicit, make sure we emit it at the end. |
5955 | if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition) |
5956 | GetAddrOfGlobalVar(D: VD); |
5957 | |
5958 | EmitTopLevelDecl(D: VD); |
5959 | } |
5960 | |
5961 | void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD, |
5962 | llvm::GlobalValue *GV) { |
5963 | const auto *D = cast<FunctionDecl>(Val: GD.getDecl()); |
5964 | |
5965 | // Compute the function info and LLVM type. |
5966 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
5967 | llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI); |
5968 | |
5969 | // Get or create the prototype for the function. |
5970 | if (!GV || (GV->getValueType() != Ty)) |
5971 | GV = cast<llvm::GlobalValue>(Val: GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, |
5972 | /*DontDefer=*/true, |
5973 | IsForDefinition: ForDefinition)); |
5974 | |
5975 | // Already emitted. |
5976 | if (!GV->isDeclaration()) |
5977 | return; |
5978 | |
5979 | // We need to set linkage and visibility on the function before |
5980 | // generating code for it because various parts of IR generation |
5981 | // want to propagate this information down (e.g. to local static |
5982 | // declarations). |
5983 | auto *Fn = cast<llvm::Function>(Val: GV); |
5984 | setFunctionLinkage(GD, F: Fn); |
5985 | |
5986 | // FIXME: this is redundant with part of setFunctionDefinitionAttributes |
5987 | setGVProperties(GV: Fn, GD); |
5988 | |
5989 | MaybeHandleStaticInExternC(D, GV: Fn); |
5990 | |
5991 | maybeSetTrivialComdat(D: *D, GO&: *Fn); |
5992 | |
5993 | CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo: FI); |
5994 | |
5995 | setNonAliasAttributes(GD, GO: Fn); |
5996 | SetLLVMFunctionAttributesForDefinition(D, F: Fn); |
5997 | |
5998 | if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>()) |
5999 | AddGlobalCtor(Ctor: Fn, Priority: CA->getPriority()); |
6000 | if (const DestructorAttr *DA = D->getAttr<DestructorAttr>()) |
6001 | AddGlobalDtor(Dtor: Fn, Priority: DA->getPriority(), IsDtorAttrFunc: true); |
6002 | if (getLangOpts().OpenMP && D->hasAttr<OMPDeclareTargetDeclAttr>()) |
6003 | getOpenMPRuntime().emitDeclareTargetFunction(FD: D, GV); |
6004 | } |
6005 | |
6006 | void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) { |
6007 | const auto *D = cast<ValueDecl>(Val: GD.getDecl()); |
6008 | const AliasAttr *AA = D->getAttr<AliasAttr>(); |
6009 | assert(AA && "Not an alias?" ); |
6010 | |
6011 | StringRef MangledName = getMangledName(GD); |
6012 | |
6013 | if (AA->getAliasee() == MangledName) { |
6014 | Diags.Report(Loc: AA->getLocation(), DiagID: diag::err_cyclic_alias) << 0; |
6015 | return; |
6016 | } |
6017 | |
6018 | // If there is a definition in the module, then it wins over the alias. |
6019 | // This is dubious, but allow it to be safe. Just ignore the alias. |
6020 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
6021 | if (Entry && !Entry->isDeclaration()) |
6022 | return; |
6023 | |
6024 | Aliases.push_back(x: GD); |
6025 | |
6026 | llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: D->getType()); |
6027 | |
6028 | // Create a reference to the named value. This ensures that it is emitted |
6029 | // if a deferred decl. |
6030 | llvm::Constant *Aliasee; |
6031 | llvm::GlobalValue::LinkageTypes LT; |
6032 | if (isa<llvm::FunctionType>(Val: DeclTy)) { |
6033 | Aliasee = GetOrCreateLLVMFunction(MangledName: AA->getAliasee(), Ty: DeclTy, GD, |
6034 | /*ForVTable=*/false); |
6035 | LT = getFunctionLinkage(GD); |
6036 | } else { |
6037 | Aliasee = GetOrCreateLLVMGlobal(MangledName: AA->getAliasee(), Ty: DeclTy, AddrSpace: LangAS::Default, |
6038 | /*D=*/nullptr); |
6039 | if (const auto *VD = dyn_cast<VarDecl>(Val: GD.getDecl())) |
6040 | LT = getLLVMLinkageVarDefinition(VD); |
6041 | else |
6042 | LT = getFunctionLinkage(GD); |
6043 | } |
6044 | |
6045 | // Create the new alias itself, but don't set a name yet. |
6046 | unsigned AS = Aliasee->getType()->getPointerAddressSpace(); |
6047 | auto *GA = |
6048 | llvm::GlobalAlias::create(Ty: DeclTy, AddressSpace: AS, Linkage: LT, Name: "" , Aliasee, Parent: &getModule()); |
6049 | |
6050 | if (Entry) { |
6051 | if (GA->getAliasee() == Entry) { |
6052 | Diags.Report(Loc: AA->getLocation(), DiagID: diag::err_cyclic_alias) << 0; |
6053 | return; |
6054 | } |
6055 | |
6056 | assert(Entry->isDeclaration()); |
6057 | |
6058 | // If there is a declaration in the module, then we had an extern followed |
6059 | // by the alias, as in: |
6060 | // extern int test6(); |
6061 | // ... |
6062 | // int test6() __attribute__((alias("test7"))); |
6063 | // |
6064 | // Remove it and replace uses of it with the alias. |
6065 | GA->takeName(V: Entry); |
6066 | |
6067 | Entry->replaceAllUsesWith(V: GA); |
6068 | Entry->eraseFromParent(); |
6069 | } else { |
6070 | GA->setName(MangledName); |
6071 | } |
6072 | |
6073 | // Set attributes which are particular to an alias; this is a |
6074 | // specialization of the attributes which may be set on a global |
6075 | // variable/function. |
6076 | if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() || |
6077 | D->isWeakImported()) { |
6078 | GA->setLinkage(llvm::Function::WeakAnyLinkage); |
6079 | } |
6080 | |
6081 | if (const auto *VD = dyn_cast<VarDecl>(Val: D)) |
6082 | if (VD->getTLSKind()) |
6083 | setTLSMode(GV: GA, D: *VD); |
6084 | |
6085 | SetCommonAttributes(GD, GV: GA); |
6086 | |
6087 | // Emit global alias debug information. |
6088 | if (isa<VarDecl>(Val: D)) |
6089 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
6090 | DI->EmitGlobalAlias(GV: cast<llvm::GlobalValue>(Val: GA->getAliasee()->stripPointerCasts()), Decl: GD); |
6091 | } |
6092 | |
6093 | void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) { |
6094 | const auto *D = cast<ValueDecl>(Val: GD.getDecl()); |
6095 | const IFuncAttr *IFA = D->getAttr<IFuncAttr>(); |
6096 | assert(IFA && "Not an ifunc?" ); |
6097 | |
6098 | StringRef MangledName = getMangledName(GD); |
6099 | |
6100 | if (IFA->getResolver() == MangledName) { |
6101 | Diags.Report(Loc: IFA->getLocation(), DiagID: diag::err_cyclic_alias) << 1; |
6102 | return; |
6103 | } |
6104 | |
6105 | // Report an error if some definition overrides ifunc. |
6106 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
6107 | if (Entry && !Entry->isDeclaration()) { |
6108 | GlobalDecl OtherGD; |
6109 | if (lookupRepresentativeDecl(MangledName, Result&: OtherGD) && |
6110 | DiagnosedConflictingDefinitions.insert(V: GD).second) { |
6111 | Diags.Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name) |
6112 | << MangledName; |
6113 | Diags.Report(Loc: OtherGD.getDecl()->getLocation(), |
6114 | DiagID: diag::note_previous_definition); |
6115 | } |
6116 | return; |
6117 | } |
6118 | |
6119 | Aliases.push_back(x: GD); |
6120 | |
6121 | // The resolver might not be visited yet. Specify a dummy non-function type to |
6122 | // indicate IsIncompleteFunction. Either the type is ignored (if the resolver |
6123 | // was emitted) or the whole function will be replaced (if the resolver has |
6124 | // not been emitted). |
6125 | llvm::Constant *Resolver = |
6126 | GetOrCreateLLVMFunction(MangledName: IFA->getResolver(), Ty: VoidTy, GD: {}, |
6127 | /*ForVTable=*/false); |
6128 | llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: D->getType()); |
6129 | llvm::GlobalIFunc *GIF = |
6130 | llvm::GlobalIFunc::create(Ty: DeclTy, AddressSpace: 0, Linkage: llvm::Function::ExternalLinkage, |
6131 | Name: "" , Resolver, Parent: &getModule()); |
6132 | if (Entry) { |
6133 | if (GIF->getResolver() == Entry) { |
6134 | Diags.Report(Loc: IFA->getLocation(), DiagID: diag::err_cyclic_alias) << 1; |
6135 | return; |
6136 | } |
6137 | assert(Entry->isDeclaration()); |
6138 | |
6139 | // If there is a declaration in the module, then we had an extern followed |
6140 | // by the ifunc, as in: |
6141 | // extern int test(); |
6142 | // ... |
6143 | // int test() __attribute__((ifunc("resolver"))); |
6144 | // |
6145 | // Remove it and replace uses of it with the ifunc. |
6146 | GIF->takeName(V: Entry); |
6147 | |
6148 | Entry->replaceAllUsesWith(V: GIF); |
6149 | Entry->eraseFromParent(); |
6150 | } else |
6151 | GIF->setName(MangledName); |
6152 | SetCommonAttributes(GD, GV: GIF); |
6153 | } |
6154 | |
6155 | llvm::Function *CodeGenModule::getIntrinsic(unsigned IID, |
6156 | ArrayRef<llvm::Type*> Tys) { |
6157 | return llvm::Intrinsic::getDeclaration(M: &getModule(), id: (llvm::Intrinsic::ID)IID, |
6158 | Tys); |
6159 | } |
6160 | |
6161 | static llvm::StringMapEntry<llvm::GlobalVariable *> & |
6162 | GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map, |
6163 | const StringLiteral *Literal, bool TargetIsLSB, |
6164 | bool &IsUTF16, unsigned &StringLength) { |
6165 | StringRef String = Literal->getString(); |
6166 | unsigned NumBytes = String.size(); |
6167 | |
6168 | // Check for simple case. |
6169 | if (!Literal->containsNonAsciiOrNull()) { |
6170 | StringLength = NumBytes; |
6171 | return *Map.insert(KV: std::make_pair(x&: String, y: nullptr)).first; |
6172 | } |
6173 | |
6174 | // Otherwise, convert the UTF8 literals into a string of shorts. |
6175 | IsUTF16 = true; |
6176 | |
6177 | SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls. |
6178 | const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); |
6179 | llvm::UTF16 *ToPtr = &ToBuf[0]; |
6180 | |
6181 | (void)llvm::ConvertUTF8toUTF16(sourceStart: &FromPtr, sourceEnd: FromPtr + NumBytes, targetStart: &ToPtr, |
6182 | targetEnd: ToPtr + NumBytes, flags: llvm::strictConversion); |
6183 | |
6184 | // ConvertUTF8toUTF16 returns the length in ToPtr. |
6185 | StringLength = ToPtr - &ToBuf[0]; |
6186 | |
6187 | // Add an explicit null. |
6188 | *ToPtr = 0; |
6189 | return *Map.insert(KV: std::make_pair( |
6190 | x: StringRef(reinterpret_cast<const char *>(ToBuf.data()), |
6191 | (StringLength + 1) * 2), |
6192 | y: nullptr)).first; |
6193 | } |
6194 | |
6195 | ConstantAddress |
6196 | CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) { |
6197 | unsigned StringLength = 0; |
6198 | bool isUTF16 = false; |
6199 | llvm::StringMapEntry<llvm::GlobalVariable *> &Entry = |
6200 | GetConstantCFStringEntry(Map&: CFConstantStringMap, Literal, |
6201 | TargetIsLSB: getDataLayout().isLittleEndian(), IsUTF16&: isUTF16, |
6202 | StringLength); |
6203 | |
6204 | if (auto *C = Entry.second) |
6205 | return ConstantAddress( |
6206 | C, C->getValueType(), CharUnits::fromQuantity(Quantity: C->getAlignment())); |
6207 | |
6208 | const ASTContext &Context = getContext(); |
6209 | const llvm::Triple &Triple = getTriple(); |
6210 | |
6211 | const auto CFRuntime = getLangOpts().CFRuntime; |
6212 | const bool IsSwiftABI = |
6213 | static_cast<unsigned>(CFRuntime) >= |
6214 | static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift); |
6215 | const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1; |
6216 | |
6217 | // If we don't already have it, get __CFConstantStringClassReference. |
6218 | if (!CFConstantStringClassRef) { |
6219 | const char *CFConstantStringClassName = "__CFConstantStringClassReference" ; |
6220 | llvm::Type *Ty = getTypes().ConvertType(T: getContext().IntTy); |
6221 | Ty = llvm::ArrayType::get(ElementType: Ty, NumElements: 0); |
6222 | |
6223 | switch (CFRuntime) { |
6224 | default: break; |
6225 | case LangOptions::CoreFoundationABI::Swift: [[fallthrough]]; |
6226 | case LangOptions::CoreFoundationABI::Swift5_0: |
6227 | CFConstantStringClassName = |
6228 | Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN" |
6229 | : "$s10Foundation19_NSCFConstantStringCN" ; |
6230 | Ty = IntPtrTy; |
6231 | break; |
6232 | case LangOptions::CoreFoundationABI::Swift4_2: |
6233 | CFConstantStringClassName = |
6234 | Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN" |
6235 | : "$S10Foundation19_NSCFConstantStringCN" ; |
6236 | Ty = IntPtrTy; |
6237 | break; |
6238 | case LangOptions::CoreFoundationABI::Swift4_1: |
6239 | CFConstantStringClassName = |
6240 | Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN" |
6241 | : "__T010Foundation19_NSCFConstantStringCN" ; |
6242 | Ty = IntPtrTy; |
6243 | break; |
6244 | } |
6245 | |
6246 | llvm::Constant *C = CreateRuntimeVariable(Ty, Name: CFConstantStringClassName); |
6247 | |
6248 | if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) { |
6249 | llvm::GlobalValue *GV = nullptr; |
6250 | |
6251 | if ((GV = dyn_cast<llvm::GlobalValue>(Val: C))) { |
6252 | IdentifierInfo &II = Context.Idents.get(Name: GV->getName()); |
6253 | TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl(); |
6254 | DeclContext *DC = TranslationUnitDecl::castToDeclContext(D: TUDecl); |
6255 | |
6256 | const VarDecl *VD = nullptr; |
6257 | for (const auto *Result : DC->lookup(Name: &II)) |
6258 | if ((VD = dyn_cast<VarDecl>(Val: Result))) |
6259 | break; |
6260 | |
6261 | if (Triple.isOSBinFormatELF()) { |
6262 | if (!VD) |
6263 | GV->setLinkage(llvm::GlobalValue::ExternalLinkage); |
6264 | } else { |
6265 | GV->setLinkage(llvm::GlobalValue::ExternalLinkage); |
6266 | if (!VD || !VD->hasAttr<DLLExportAttr>()) |
6267 | GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); |
6268 | else |
6269 | GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); |
6270 | } |
6271 | |
6272 | setDSOLocal(GV); |
6273 | } |
6274 | } |
6275 | |
6276 | // Decay array -> ptr |
6277 | CFConstantStringClassRef = |
6278 | IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty) : C; |
6279 | } |
6280 | |
6281 | QualType CFTy = Context.getCFConstantStringType(); |
6282 | |
6283 | auto *STy = cast<llvm::StructType>(Val: getTypes().ConvertType(T: CFTy)); |
6284 | |
6285 | ConstantInitBuilder Builder(*this); |
6286 | auto Fields = Builder.beginStruct(structTy: STy); |
6287 | |
6288 | // Class pointer. |
6289 | Fields.add(value: cast<llvm::Constant>(Val&: CFConstantStringClassRef)); |
6290 | |
6291 | // Flags. |
6292 | if (IsSwiftABI) { |
6293 | Fields.addInt(intTy: IntPtrTy, value: IsSwift4_1 ? 0x05 : 0x01); |
6294 | Fields.addInt(intTy: Int64Ty, value: isUTF16 ? 0x07d0 : 0x07c8); |
6295 | } else { |
6296 | Fields.addInt(intTy: IntTy, value: isUTF16 ? 0x07d0 : 0x07C8); |
6297 | } |
6298 | |
6299 | // String pointer. |
6300 | llvm::Constant *C = nullptr; |
6301 | if (isUTF16) { |
6302 | auto Arr = llvm::ArrayRef( |
6303 | reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())), |
6304 | Entry.first().size() / 2); |
6305 | C = llvm::ConstantDataArray::get(Context&: VMContext, Elts: Arr); |
6306 | } else { |
6307 | C = llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Entry.first()); |
6308 | } |
6309 | |
6310 | // Note: -fwritable-strings doesn't make the backing store strings of |
6311 | // CFStrings writable. |
6312 | auto *GV = |
6313 | new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true, |
6314 | llvm::GlobalValue::PrivateLinkage, C, ".str" ); |
6315 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
6316 | // Don't enforce the target's minimum global alignment, since the only use |
6317 | // of the string is via this class initializer. |
6318 | CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(T: Context.ShortTy) |
6319 | : Context.getTypeAlignInChars(T: Context.CharTy); |
6320 | GV->setAlignment(Align.getAsAlign()); |
6321 | |
6322 | // FIXME: We set the section explicitly to avoid a bug in ld64 224.1. |
6323 | // Without it LLVM can merge the string with a non unnamed_addr one during |
6324 | // LTO. Doing that changes the section it ends in, which surprises ld64. |
6325 | if (Triple.isOSBinFormatMachO()) |
6326 | GV->setSection(isUTF16 ? "__TEXT,__ustring" |
6327 | : "__TEXT,__cstring,cstring_literals" ); |
6328 | // Make sure the literal ends up in .rodata to allow for safe ICF and for |
6329 | // the static linker to adjust permissions to read-only later on. |
6330 | else if (Triple.isOSBinFormatELF()) |
6331 | GV->setSection(".rodata" ); |
6332 | |
6333 | // String. |
6334 | Fields.add(value: GV); |
6335 | |
6336 | // String length. |
6337 | llvm::IntegerType *LengthTy = |
6338 | llvm::IntegerType::get(C&: getModule().getContext(), |
6339 | NumBits: Context.getTargetInfo().getLongWidth()); |
6340 | if (IsSwiftABI) { |
6341 | if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || |
6342 | CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) |
6343 | LengthTy = Int32Ty; |
6344 | else |
6345 | LengthTy = IntPtrTy; |
6346 | } |
6347 | Fields.addInt(intTy: LengthTy, value: StringLength); |
6348 | |
6349 | // Swift ABI requires 8-byte alignment to ensure that the _Atomic(uint64_t) is |
6350 | // properly aligned on 32-bit platforms. |
6351 | CharUnits Alignment = |
6352 | IsSwiftABI ? Context.toCharUnitsFromBits(BitSize: 64) : getPointerAlign(); |
6353 | |
6354 | // The struct. |
6355 | GV = Fields.finishAndCreateGlobal(args: "_unnamed_cfstring_" , args&: Alignment, |
6356 | /*isConstant=*/args: false, |
6357 | args: llvm::GlobalVariable::PrivateLinkage); |
6358 | GV->addAttribute(Kind: "objc_arc_inert" ); |
6359 | switch (Triple.getObjectFormat()) { |
6360 | case llvm::Triple::UnknownObjectFormat: |
6361 | llvm_unreachable("unknown file format" ); |
6362 | case llvm::Triple::DXContainer: |
6363 | case llvm::Triple::GOFF: |
6364 | case llvm::Triple::SPIRV: |
6365 | case llvm::Triple::XCOFF: |
6366 | llvm_unreachable("unimplemented" ); |
6367 | case llvm::Triple::COFF: |
6368 | case llvm::Triple::ELF: |
6369 | case llvm::Triple::Wasm: |
6370 | GV->setSection("cfstring" ); |
6371 | break; |
6372 | case llvm::Triple::MachO: |
6373 | GV->setSection("__DATA,__cfstring" ); |
6374 | break; |
6375 | } |
6376 | Entry.second = GV; |
6377 | |
6378 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
6379 | } |
6380 | |
6381 | bool CodeGenModule::getExpressionLocationsEnabled() const { |
6382 | return !CodeGenOpts.EmitCodeView || CodeGenOpts.DebugColumnInfo; |
6383 | } |
6384 | |
6385 | QualType CodeGenModule::getObjCFastEnumerationStateType() { |
6386 | if (ObjCFastEnumerationStateType.isNull()) { |
6387 | RecordDecl *D = Context.buildImplicitRecord(Name: "__objcFastEnumerationState" ); |
6388 | D->startDefinition(); |
6389 | |
6390 | QualType FieldTypes[] = { |
6391 | Context.UnsignedLongTy, Context.getPointerType(T: Context.getObjCIdType()), |
6392 | Context.getPointerType(T: Context.UnsignedLongTy), |
6393 | Context.getConstantArrayType(EltTy: Context.UnsignedLongTy, ArySize: llvm::APInt(32, 5), |
6394 | SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0)}; |
6395 | |
6396 | for (size_t i = 0; i < 4; ++i) { |
6397 | FieldDecl *Field = FieldDecl::Create(C: Context, |
6398 | DC: D, |
6399 | StartLoc: SourceLocation(), |
6400 | IdLoc: SourceLocation(), Id: nullptr, |
6401 | T: FieldTypes[i], /*TInfo=*/nullptr, |
6402 | /*BitWidth=*/BW: nullptr, |
6403 | /*Mutable=*/false, |
6404 | InitStyle: ICIS_NoInit); |
6405 | Field->setAccess(AS_public); |
6406 | D->addDecl(D: Field); |
6407 | } |
6408 | |
6409 | D->completeDefinition(); |
6410 | ObjCFastEnumerationStateType = Context.getTagDeclType(Decl: D); |
6411 | } |
6412 | |
6413 | return ObjCFastEnumerationStateType; |
6414 | } |
6415 | |
6416 | llvm::Constant * |
6417 | CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) { |
6418 | assert(!E->getType()->isPointerType() && "Strings are always arrays" ); |
6419 | |
6420 | // Don't emit it as the address of the string, emit the string data itself |
6421 | // as an inline array. |
6422 | if (E->getCharByteWidth() == 1) { |
6423 | SmallString<64> Str(E->getString()); |
6424 | |
6425 | // Resize the string to the right size, which is indicated by its type. |
6426 | const ConstantArrayType *CAT = Context.getAsConstantArrayType(T: E->getType()); |
6427 | assert(CAT && "String literal not of constant array type!" ); |
6428 | Str.resize(N: CAT->getZExtSize()); |
6429 | return llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Str, AddNull: false); |
6430 | } |
6431 | |
6432 | auto *AType = cast<llvm::ArrayType>(Val: getTypes().ConvertType(T: E->getType())); |
6433 | llvm::Type *ElemTy = AType->getElementType(); |
6434 | unsigned NumElements = AType->getNumElements(); |
6435 | |
6436 | // Wide strings have either 2-byte or 4-byte elements. |
6437 | if (ElemTy->getPrimitiveSizeInBits() == 16) { |
6438 | SmallVector<uint16_t, 32> Elements; |
6439 | Elements.reserve(N: NumElements); |
6440 | |
6441 | for(unsigned i = 0, e = E->getLength(); i != e; ++i) |
6442 | Elements.push_back(Elt: E->getCodeUnit(i)); |
6443 | Elements.resize(N: NumElements); |
6444 | return llvm::ConstantDataArray::get(Context&: VMContext, Elts&: Elements); |
6445 | } |
6446 | |
6447 | assert(ElemTy->getPrimitiveSizeInBits() == 32); |
6448 | SmallVector<uint32_t, 32> Elements; |
6449 | Elements.reserve(N: NumElements); |
6450 | |
6451 | for(unsigned i = 0, e = E->getLength(); i != e; ++i) |
6452 | Elements.push_back(Elt: E->getCodeUnit(i)); |
6453 | Elements.resize(N: NumElements); |
6454 | return llvm::ConstantDataArray::get(Context&: VMContext, Elts&: Elements); |
6455 | } |
6456 | |
6457 | static llvm::GlobalVariable * |
6458 | GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT, |
6459 | CodeGenModule &CGM, StringRef GlobalName, |
6460 | CharUnits Alignment) { |
6461 | unsigned AddrSpace = CGM.getContext().getTargetAddressSpace( |
6462 | AS: CGM.GetGlobalConstantAddressSpace()); |
6463 | |
6464 | llvm::Module &M = CGM.getModule(); |
6465 | // Create a global variable for this string |
6466 | auto *GV = new llvm::GlobalVariable( |
6467 | M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName, |
6468 | nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace); |
6469 | GV->setAlignment(Alignment.getAsAlign()); |
6470 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
6471 | if (GV->isWeakForLinker()) { |
6472 | assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals" ); |
6473 | GV->setComdat(M.getOrInsertComdat(Name: GV->getName())); |
6474 | } |
6475 | CGM.setDSOLocal(GV); |
6476 | |
6477 | return GV; |
6478 | } |
6479 | |
6480 | /// GetAddrOfConstantStringFromLiteral - Return a pointer to a |
6481 | /// constant array for the given string literal. |
6482 | ConstantAddress |
6483 | CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S, |
6484 | StringRef Name) { |
6485 | CharUnits Alignment = |
6486 | getContext().getAlignOfGlobalVarInChars(T: S->getType(), /*VD=*/nullptr); |
6487 | |
6488 | llvm::Constant *C = GetConstantArrayFromStringLiteral(E: S); |
6489 | llvm::GlobalVariable **Entry = nullptr; |
6490 | if (!LangOpts.WritableStrings) { |
6491 | Entry = &ConstantStringMap[C]; |
6492 | if (auto GV = *Entry) { |
6493 | if (uint64_t(Alignment.getQuantity()) > GV->getAlignment()) |
6494 | GV->setAlignment(Alignment.getAsAlign()); |
6495 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV), |
6496 | GV->getValueType(), Alignment); |
6497 | } |
6498 | } |
6499 | |
6500 | SmallString<256> MangledNameBuffer; |
6501 | StringRef GlobalVariableName; |
6502 | llvm::GlobalValue::LinkageTypes LT; |
6503 | |
6504 | // Mangle the string literal if that's how the ABI merges duplicate strings. |
6505 | // Don't do it if they are writable, since we don't want writes in one TU to |
6506 | // affect strings in another. |
6507 | if (getCXXABI().getMangleContext().shouldMangleStringLiteral(SL: S) && |
6508 | !LangOpts.WritableStrings) { |
6509 | llvm::raw_svector_ostream Out(MangledNameBuffer); |
6510 | getCXXABI().getMangleContext().mangleStringLiteral(SL: S, Out); |
6511 | LT = llvm::GlobalValue::LinkOnceODRLinkage; |
6512 | GlobalVariableName = MangledNameBuffer; |
6513 | } else { |
6514 | LT = llvm::GlobalValue::PrivateLinkage; |
6515 | GlobalVariableName = Name; |
6516 | } |
6517 | |
6518 | auto GV = GenerateStringLiteral(C, LT, CGM&: *this, GlobalName: GlobalVariableName, Alignment); |
6519 | |
6520 | CGDebugInfo *DI = getModuleDebugInfo(); |
6521 | if (DI && getCodeGenOpts().hasReducedDebugInfo()) |
6522 | DI->AddStringLiteralDebugInfo(GV, S); |
6523 | |
6524 | if (Entry) |
6525 | *Entry = GV; |
6526 | |
6527 | SanitizerMD->reportGlobal(GV, Loc: S->getStrTokenLoc(TokNum: 0), Name: "<string literal>" ); |
6528 | |
6529 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV), |
6530 | GV->getValueType(), Alignment); |
6531 | } |
6532 | |
6533 | /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant |
6534 | /// array for the given ObjCEncodeExpr node. |
6535 | ConstantAddress |
6536 | CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) { |
6537 | std::string Str; |
6538 | getContext().getObjCEncodingForType(T: E->getEncodedType(), S&: Str); |
6539 | |
6540 | return GetAddrOfConstantCString(Str); |
6541 | } |
6542 | |
6543 | /// GetAddrOfConstantCString - Returns a pointer to a character array containing |
6544 | /// the literal and a terminating '\0' character. |
6545 | /// The result has pointer to array type. |
6546 | ConstantAddress CodeGenModule::GetAddrOfConstantCString( |
6547 | const std::string &Str, const char *GlobalName) { |
6548 | StringRef StrWithNull(Str.c_str(), Str.size() + 1); |
6549 | CharUnits Alignment = getContext().getAlignOfGlobalVarInChars( |
6550 | T: getContext().CharTy, /*VD=*/nullptr); |
6551 | |
6552 | llvm::Constant *C = |
6553 | llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: StrWithNull, AddNull: false); |
6554 | |
6555 | // Don't share any string literals if strings aren't constant. |
6556 | llvm::GlobalVariable **Entry = nullptr; |
6557 | if (!LangOpts.WritableStrings) { |
6558 | Entry = &ConstantStringMap[C]; |
6559 | if (auto GV = *Entry) { |
6560 | if (uint64_t(Alignment.getQuantity()) > GV->getAlignment()) |
6561 | GV->setAlignment(Alignment.getAsAlign()); |
6562 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV), |
6563 | GV->getValueType(), Alignment); |
6564 | } |
6565 | } |
6566 | |
6567 | // Get the default prefix if a name wasn't specified. |
6568 | if (!GlobalName) |
6569 | GlobalName = ".str" ; |
6570 | // Create a global variable for this. |
6571 | auto GV = GenerateStringLiteral(C, LT: llvm::GlobalValue::PrivateLinkage, CGM&: *this, |
6572 | GlobalName, Alignment); |
6573 | if (Entry) |
6574 | *Entry = GV; |
6575 | |
6576 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV), |
6577 | GV->getValueType(), Alignment); |
6578 | } |
6579 | |
6580 | ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary( |
6581 | const MaterializeTemporaryExpr *E, const Expr *Init) { |
6582 | assert((E->getStorageDuration() == SD_Static || |
6583 | E->getStorageDuration() == SD_Thread) && "not a global temporary" ); |
6584 | const auto *VD = cast<VarDecl>(Val: E->getExtendingDecl()); |
6585 | |
6586 | // If we're not materializing a subobject of the temporary, keep the |
6587 | // cv-qualifiers from the type of the MaterializeTemporaryExpr. |
6588 | QualType MaterializedType = Init->getType(); |
6589 | if (Init == E->getSubExpr()) |
6590 | MaterializedType = E->getType(); |
6591 | |
6592 | CharUnits Align = getContext().getTypeAlignInChars(T: MaterializedType); |
6593 | |
6594 | auto InsertResult = MaterializedGlobalTemporaryMap.insert(KV: {E, nullptr}); |
6595 | if (!InsertResult.second) { |
6596 | // We've seen this before: either we already created it or we're in the |
6597 | // process of doing so. |
6598 | if (!InsertResult.first->second) { |
6599 | // We recursively re-entered this function, probably during emission of |
6600 | // the initializer. Create a placeholder. We'll clean this up in the |
6601 | // outer call, at the end of this function. |
6602 | llvm::Type *Type = getTypes().ConvertTypeForMem(T: MaterializedType); |
6603 | InsertResult.first->second = new llvm::GlobalVariable( |
6604 | getModule(), Type, false, llvm::GlobalVariable::InternalLinkage, |
6605 | nullptr); |
6606 | } |
6607 | return ConstantAddress(InsertResult.first->second, |
6608 | llvm::cast<llvm::GlobalVariable>( |
6609 | Val: InsertResult.first->second->stripPointerCasts()) |
6610 | ->getValueType(), |
6611 | Align); |
6612 | } |
6613 | |
6614 | // FIXME: If an externally-visible declaration extends multiple temporaries, |
6615 | // we need to give each temporary the same name in every translation unit (and |
6616 | // we also need to make the temporaries externally-visible). |
6617 | SmallString<256> Name; |
6618 | llvm::raw_svector_ostream Out(Name); |
6619 | getCXXABI().getMangleContext().mangleReferenceTemporary( |
6620 | D: VD, ManglingNumber: E->getManglingNumber(), Out); |
6621 | |
6622 | APValue *Value = nullptr; |
6623 | if (E->getStorageDuration() == SD_Static && VD->evaluateValue()) { |
6624 | // If the initializer of the extending declaration is a constant |
6625 | // initializer, we should have a cached constant initializer for this |
6626 | // temporary. Note that this might have a different value from the value |
6627 | // computed by evaluating the initializer if the surrounding constant |
6628 | // expression modifies the temporary. |
6629 | Value = E->getOrCreateValue(MayCreate: false); |
6630 | } |
6631 | |
6632 | // Try evaluating it now, it might have a constant initializer. |
6633 | Expr::EvalResult EvalResult; |
6634 | if (!Value && Init->EvaluateAsRValue(Result&: EvalResult, Ctx: getContext()) && |
6635 | !EvalResult.hasSideEffects()) |
6636 | Value = &EvalResult.Val; |
6637 | |
6638 | LangAS AddrSpace = GetGlobalVarAddressSpace(D: VD); |
6639 | |
6640 | std::optional<ConstantEmitter> emitter; |
6641 | llvm::Constant *InitialValue = nullptr; |
6642 | bool Constant = false; |
6643 | llvm::Type *Type; |
6644 | if (Value) { |
6645 | // The temporary has a constant initializer, use it. |
6646 | emitter.emplace(args&: *this); |
6647 | InitialValue = emitter->emitForInitializer(value: *Value, destAddrSpace: AddrSpace, |
6648 | destType: MaterializedType); |
6649 | Constant = |
6650 | MaterializedType.isConstantStorage(Ctx: getContext(), /*ExcludeCtor*/ Value, |
6651 | /*ExcludeDtor*/ false); |
6652 | Type = InitialValue->getType(); |
6653 | } else { |
6654 | // No initializer, the initialization will be provided when we |
6655 | // initialize the declaration which performed lifetime extension. |
6656 | Type = getTypes().ConvertTypeForMem(T: MaterializedType); |
6657 | } |
6658 | |
6659 | // Create a global variable for this lifetime-extended temporary. |
6660 | llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD); |
6661 | if (Linkage == llvm::GlobalVariable::ExternalLinkage) { |
6662 | const VarDecl *InitVD; |
6663 | if (VD->isStaticDataMember() && VD->getAnyInitializer(D&: InitVD) && |
6664 | isa<CXXRecordDecl>(Val: InitVD->getLexicalDeclContext())) { |
6665 | // Temporaries defined inside a class get linkonce_odr linkage because the |
6666 | // class can be defined in multiple translation units. |
6667 | Linkage = llvm::GlobalVariable::LinkOnceODRLinkage; |
6668 | } else { |
6669 | // There is no need for this temporary to have external linkage if the |
6670 | // VarDecl has external linkage. |
6671 | Linkage = llvm::GlobalVariable::InternalLinkage; |
6672 | } |
6673 | } |
6674 | auto TargetAS = getContext().getTargetAddressSpace(AS: AddrSpace); |
6675 | auto *GV = new llvm::GlobalVariable( |
6676 | getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(), |
6677 | /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS); |
6678 | if (emitter) emitter->finalize(global: GV); |
6679 | // Don't assign dllimport or dllexport to local linkage globals. |
6680 | if (!llvm::GlobalValue::isLocalLinkage(Linkage)) { |
6681 | setGVProperties(GV, D: VD); |
6682 | if (GV->getDLLStorageClass() == llvm::GlobalVariable::DLLExportStorageClass) |
6683 | // The reference temporary should never be dllexport. |
6684 | GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass); |
6685 | } |
6686 | GV->setAlignment(Align.getAsAlign()); |
6687 | if (supportsCOMDAT() && GV->isWeakForLinker()) |
6688 | GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName())); |
6689 | if (VD->getTLSKind()) |
6690 | setTLSMode(GV, D: *VD); |
6691 | llvm::Constant *CV = GV; |
6692 | if (AddrSpace != LangAS::Default) |
6693 | CV = getTargetCodeGenInfo().performAddrSpaceCast( |
6694 | CGM&: *this, V: GV, SrcAddr: AddrSpace, DestAddr: LangAS::Default, |
6695 | DestTy: llvm::PointerType::get( |
6696 | C&: getLLVMContext(), |
6697 | AddressSpace: getContext().getTargetAddressSpace(AS: LangAS::Default))); |
6698 | |
6699 | // Update the map with the new temporary. If we created a placeholder above, |
6700 | // replace it with the new global now. |
6701 | llvm::Constant *&Entry = MaterializedGlobalTemporaryMap[E]; |
6702 | if (Entry) { |
6703 | Entry->replaceAllUsesWith(V: CV); |
6704 | llvm::cast<llvm::GlobalVariable>(Val: Entry)->eraseFromParent(); |
6705 | } |
6706 | Entry = CV; |
6707 | |
6708 | return ConstantAddress(CV, Type, Align); |
6709 | } |
6710 | |
6711 | /// EmitObjCPropertyImplementations - Emit information for synthesized |
6712 | /// properties for an implementation. |
6713 | void CodeGenModule::EmitObjCPropertyImplementations(const |
6714 | ObjCImplementationDecl *D) { |
6715 | for (const auto *PID : D->property_impls()) { |
6716 | // Dynamic is just for type-checking. |
6717 | if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) { |
6718 | ObjCPropertyDecl *PD = PID->getPropertyDecl(); |
6719 | |
6720 | // Determine which methods need to be implemented, some may have |
6721 | // been overridden. Note that ::isPropertyAccessor is not the method |
6722 | // we want, that just indicates if the decl came from a |
6723 | // property. What we want to know is if the method is defined in |
6724 | // this implementation. |
6725 | auto *Getter = PID->getGetterMethodDecl(); |
6726 | if (!Getter || Getter->isSynthesizedAccessorStub()) |
6727 | CodeGenFunction(*this).GenerateObjCGetter( |
6728 | IMP: const_cast<ObjCImplementationDecl *>(D), PID); |
6729 | auto *Setter = PID->getSetterMethodDecl(); |
6730 | if (!PD->isReadOnly() && (!Setter || Setter->isSynthesizedAccessorStub())) |
6731 | CodeGenFunction(*this).GenerateObjCSetter( |
6732 | IMP: const_cast<ObjCImplementationDecl *>(D), PID); |
6733 | } |
6734 | } |
6735 | } |
6736 | |
6737 | static bool needsDestructMethod(ObjCImplementationDecl *impl) { |
6738 | const ObjCInterfaceDecl *iface = impl->getClassInterface(); |
6739 | for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin(); |
6740 | ivar; ivar = ivar->getNextIvar()) |
6741 | if (ivar->getType().isDestructedType()) |
6742 | return true; |
6743 | |
6744 | return false; |
6745 | } |
6746 | |
6747 | static bool AllTrivialInitializers(CodeGenModule &CGM, |
6748 | ObjCImplementationDecl *D) { |
6749 | CodeGenFunction CGF(CGM); |
6750 | for (ObjCImplementationDecl::init_iterator B = D->init_begin(), |
6751 | E = D->init_end(); B != E; ++B) { |
6752 | CXXCtorInitializer *CtorInitExp = *B; |
6753 | Expr *Init = CtorInitExp->getInit(); |
6754 | if (!CGF.isTrivialInitializer(Init)) |
6755 | return false; |
6756 | } |
6757 | return true; |
6758 | } |
6759 | |
6760 | /// EmitObjCIvarInitializations - Emit information for ivar initialization |
6761 | /// for an implementation. |
6762 | void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) { |
6763 | // We might need a .cxx_destruct even if we don't have any ivar initializers. |
6764 | if (needsDestructMethod(impl: D)) { |
6765 | const IdentifierInfo *II = &getContext().Idents.get(Name: ".cxx_destruct" ); |
6766 | Selector cxxSelector = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II); |
6767 | ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create( |
6768 | C&: getContext(), beginLoc: D->getLocation(), endLoc: D->getLocation(), SelInfo: cxxSelector, |
6769 | T: getContext().VoidTy, ReturnTInfo: nullptr, contextDecl: D, |
6770 | /*isInstance=*/true, /*isVariadic=*/false, |
6771 | /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false, |
6772 | /*isImplicitlyDeclared=*/true, |
6773 | /*isDefined=*/false, impControl: ObjCImplementationControl::Required); |
6774 | D->addInstanceMethod(method: DTORMethod); |
6775 | CodeGenFunction(*this).GenerateObjCCtorDtorMethod(IMP: D, MD: DTORMethod, ctor: false); |
6776 | D->setHasDestructors(true); |
6777 | } |
6778 | |
6779 | // If the implementation doesn't have any ivar initializers, we don't need |
6780 | // a .cxx_construct. |
6781 | if (D->getNumIvarInitializers() == 0 || |
6782 | AllTrivialInitializers(CGM&: *this, D)) |
6783 | return; |
6784 | |
6785 | const IdentifierInfo *II = &getContext().Idents.get(Name: ".cxx_construct" ); |
6786 | Selector cxxSelector = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II); |
6787 | // The constructor returns 'self'. |
6788 | ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create( |
6789 | C&: getContext(), beginLoc: D->getLocation(), endLoc: D->getLocation(), SelInfo: cxxSelector, |
6790 | T: getContext().getObjCIdType(), ReturnTInfo: nullptr, contextDecl: D, /*isInstance=*/true, |
6791 | /*isVariadic=*/false, |
6792 | /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false, |
6793 | /*isImplicitlyDeclared=*/true, |
6794 | /*isDefined=*/false, impControl: ObjCImplementationControl::Required); |
6795 | D->addInstanceMethod(method: CTORMethod); |
6796 | CodeGenFunction(*this).GenerateObjCCtorDtorMethod(IMP: D, MD: CTORMethod, ctor: true); |
6797 | D->setHasNonZeroConstructors(true); |
6798 | } |
6799 | |
6800 | // EmitLinkageSpec - Emit all declarations in a linkage spec. |
6801 | void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) { |
6802 | if (LSD->getLanguage() != LinkageSpecLanguageIDs::C && |
6803 | LSD->getLanguage() != LinkageSpecLanguageIDs::CXX) { |
6804 | ErrorUnsupported(D: LSD, Type: "linkage spec" ); |
6805 | return; |
6806 | } |
6807 | |
6808 | EmitDeclContext(DC: LSD); |
6809 | } |
6810 | |
6811 | void CodeGenModule::EmitTopLevelStmt(const TopLevelStmtDecl *D) { |
6812 | // Device code should not be at top level. |
6813 | if (LangOpts.CUDA && LangOpts.CUDAIsDevice) |
6814 | return; |
6815 | |
6816 | std::unique_ptr<CodeGenFunction> &CurCGF = |
6817 | GlobalTopLevelStmtBlockInFlight.first; |
6818 | |
6819 | // We emitted a top-level stmt but after it there is initialization. |
6820 | // Stop squashing the top-level stmts into a single function. |
6821 | if (CurCGF && CXXGlobalInits.back() != CurCGF->CurFn) { |
6822 | CurCGF->FinishFunction(EndLoc: D->getEndLoc()); |
6823 | CurCGF = nullptr; |
6824 | } |
6825 | |
6826 | if (!CurCGF) { |
6827 | // void __stmts__N(void) |
6828 | // FIXME: Ask the ABI name mangler to pick a name. |
6829 | std::string Name = "__stmts__" + llvm::utostr(X: CXXGlobalInits.size()); |
6830 | FunctionArgList Args; |
6831 | QualType RetTy = getContext().VoidTy; |
6832 | const CGFunctionInfo &FnInfo = |
6833 | getTypes().arrangeBuiltinFunctionDeclaration(resultType: RetTy, args: Args); |
6834 | llvm::FunctionType *FnTy = getTypes().GetFunctionType(Info: FnInfo); |
6835 | llvm::Function *Fn = llvm::Function::Create( |
6836 | Ty: FnTy, Linkage: llvm::GlobalValue::InternalLinkage, N: Name, M: &getModule()); |
6837 | |
6838 | CurCGF.reset(p: new CodeGenFunction(*this)); |
6839 | GlobalTopLevelStmtBlockInFlight.second = D; |
6840 | CurCGF->StartFunction(GD: GlobalDecl(), RetTy, Fn, FnInfo, Args, |
6841 | Loc: D->getBeginLoc(), StartLoc: D->getBeginLoc()); |
6842 | CXXGlobalInits.push_back(x: Fn); |
6843 | } |
6844 | |
6845 | CurCGF->EmitStmt(S: D->getStmt()); |
6846 | } |
6847 | |
6848 | void CodeGenModule::EmitDeclContext(const DeclContext *DC) { |
6849 | for (auto *I : DC->decls()) { |
6850 | // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope |
6851 | // are themselves considered "top-level", so EmitTopLevelDecl on an |
6852 | // ObjCImplDecl does not recursively visit them. We need to do that in |
6853 | // case they're nested inside another construct (LinkageSpecDecl / |
6854 | // ExportDecl) that does stop them from being considered "top-level". |
6855 | if (auto *OID = dyn_cast<ObjCImplDecl>(Val: I)) { |
6856 | for (auto *M : OID->methods()) |
6857 | EmitTopLevelDecl(D: M); |
6858 | } |
6859 | |
6860 | EmitTopLevelDecl(D: I); |
6861 | } |
6862 | } |
6863 | |
6864 | /// EmitTopLevelDecl - Emit code for a single top level declaration. |
6865 | void CodeGenModule::EmitTopLevelDecl(Decl *D) { |
6866 | // Ignore dependent declarations. |
6867 | if (D->isTemplated()) |
6868 | return; |
6869 | |
6870 | // Consteval function shouldn't be emitted. |
6871 | if (auto *FD = dyn_cast<FunctionDecl>(Val: D); FD && FD->isImmediateFunction()) |
6872 | return; |
6873 | |
6874 | switch (D->getKind()) { |
6875 | case Decl::CXXConversion: |
6876 | case Decl::CXXMethod: |
6877 | case Decl::Function: |
6878 | EmitGlobal(GD: cast<FunctionDecl>(Val: D)); |
6879 | // Always provide some coverage mapping |
6880 | // even for the functions that aren't emitted. |
6881 | AddDeferredUnusedCoverageMapping(D); |
6882 | break; |
6883 | |
6884 | case Decl::CXXDeductionGuide: |
6885 | // Function-like, but does not result in code emission. |
6886 | break; |
6887 | |
6888 | case Decl::Var: |
6889 | case Decl::Decomposition: |
6890 | case Decl::VarTemplateSpecialization: |
6891 | EmitGlobal(GD: cast<VarDecl>(Val: D)); |
6892 | if (auto *DD = dyn_cast<DecompositionDecl>(Val: D)) |
6893 | for (auto *B : DD->bindings()) |
6894 | if (auto *HD = B->getHoldingVar()) |
6895 | EmitGlobal(GD: HD); |
6896 | break; |
6897 | |
6898 | // Indirect fields from global anonymous structs and unions can be |
6899 | // ignored; only the actual variable requires IR gen support. |
6900 | case Decl::IndirectField: |
6901 | break; |
6902 | |
6903 | // C++ Decls |
6904 | case Decl::Namespace: |
6905 | EmitDeclContext(DC: cast<NamespaceDecl>(Val: D)); |
6906 | break; |
6907 | case Decl::ClassTemplateSpecialization: { |
6908 | const auto *Spec = cast<ClassTemplateSpecializationDecl>(Val: D); |
6909 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
6910 | if (Spec->getSpecializationKind() == |
6911 | TSK_ExplicitInstantiationDefinition && |
6912 | Spec->hasDefinition()) |
6913 | DI->completeTemplateDefinition(SD: *Spec); |
6914 | } [[fallthrough]]; |
6915 | case Decl::CXXRecord: { |
6916 | CXXRecordDecl *CRD = cast<CXXRecordDecl>(Val: D); |
6917 | if (CGDebugInfo *DI = getModuleDebugInfo()) { |
6918 | if (CRD->hasDefinition()) |
6919 | DI->EmitAndRetainType(Ty: getContext().getRecordType(Decl: cast<RecordDecl>(Val: D))); |
6920 | if (auto *ES = D->getASTContext().getExternalSource()) |
6921 | if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never) |
6922 | DI->completeUnusedClass(D: *CRD); |
6923 | } |
6924 | // Emit any static data members, they may be definitions. |
6925 | for (auto *I : CRD->decls()) |
6926 | if (isa<VarDecl>(Val: I) || isa<CXXRecordDecl>(Val: I)) |
6927 | EmitTopLevelDecl(D: I); |
6928 | break; |
6929 | } |
6930 | // No code generation needed. |
6931 | case Decl::UsingShadow: |
6932 | case Decl::ClassTemplate: |
6933 | case Decl::VarTemplate: |
6934 | case Decl::Concept: |
6935 | case Decl::VarTemplatePartialSpecialization: |
6936 | case Decl::FunctionTemplate: |
6937 | case Decl::TypeAliasTemplate: |
6938 | case Decl::Block: |
6939 | case Decl::Empty: |
6940 | case Decl::Binding: |
6941 | break; |
6942 | case Decl::Using: // using X; [C++] |
6943 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
6944 | DI->EmitUsingDecl(UD: cast<UsingDecl>(Val&: *D)); |
6945 | break; |
6946 | case Decl::UsingEnum: // using enum X; [C++] |
6947 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
6948 | DI->EmitUsingEnumDecl(UD: cast<UsingEnumDecl>(Val&: *D)); |
6949 | break; |
6950 | case Decl::NamespaceAlias: |
6951 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
6952 | DI->EmitNamespaceAlias(NA: cast<NamespaceAliasDecl>(Val&: *D)); |
6953 | break; |
6954 | case Decl::UsingDirective: // using namespace X; [C++] |
6955 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
6956 | DI->EmitUsingDirective(UD: cast<UsingDirectiveDecl>(Val&: *D)); |
6957 | break; |
6958 | case Decl::CXXConstructor: |
6959 | getCXXABI().EmitCXXConstructors(D: cast<CXXConstructorDecl>(Val: D)); |
6960 | break; |
6961 | case Decl::CXXDestructor: |
6962 | getCXXABI().EmitCXXDestructors(D: cast<CXXDestructorDecl>(Val: D)); |
6963 | break; |
6964 | |
6965 | case Decl::StaticAssert: |
6966 | // Nothing to do. |
6967 | break; |
6968 | |
6969 | // Objective-C Decls |
6970 | |
6971 | // Forward declarations, no (immediate) code generation. |
6972 | case Decl::ObjCInterface: |
6973 | case Decl::ObjCCategory: |
6974 | break; |
6975 | |
6976 | case Decl::ObjCProtocol: { |
6977 | auto *Proto = cast<ObjCProtocolDecl>(Val: D); |
6978 | if (Proto->isThisDeclarationADefinition()) |
6979 | ObjCRuntime->GenerateProtocol(OPD: Proto); |
6980 | break; |
6981 | } |
6982 | |
6983 | case Decl::ObjCCategoryImpl: |
6984 | // Categories have properties but don't support synthesize so we |
6985 | // can ignore them here. |
6986 | ObjCRuntime->GenerateCategory(OCD: cast<ObjCCategoryImplDecl>(Val: D)); |
6987 | break; |
6988 | |
6989 | case Decl::ObjCImplementation: { |
6990 | auto *OMD = cast<ObjCImplementationDecl>(Val: D); |
6991 | EmitObjCPropertyImplementations(D: OMD); |
6992 | EmitObjCIvarInitializations(D: OMD); |
6993 | ObjCRuntime->GenerateClass(OID: OMD); |
6994 | // Emit global variable debug information. |
6995 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
6996 | if (getCodeGenOpts().hasReducedDebugInfo()) |
6997 | DI->getOrCreateInterfaceType(Ty: getContext().getObjCInterfaceType( |
6998 | Decl: OMD->getClassInterface()), Loc: OMD->getLocation()); |
6999 | break; |
7000 | } |
7001 | case Decl::ObjCMethod: { |
7002 | auto *OMD = cast<ObjCMethodDecl>(Val: D); |
7003 | // If this is not a prototype, emit the body. |
7004 | if (OMD->getBody()) |
7005 | CodeGenFunction(*this).GenerateObjCMethod(OMD); |
7006 | break; |
7007 | } |
7008 | case Decl::ObjCCompatibleAlias: |
7009 | ObjCRuntime->RegisterAlias(OAD: cast<ObjCCompatibleAliasDecl>(Val: D)); |
7010 | break; |
7011 | |
7012 | case Decl::PragmaComment: { |
7013 | const auto *PCD = cast<PragmaCommentDecl>(Val: D); |
7014 | switch (PCD->getCommentKind()) { |
7015 | case PCK_Unknown: |
7016 | llvm_unreachable("unexpected pragma comment kind" ); |
7017 | case PCK_Linker: |
7018 | AppendLinkerOptions(Opts: PCD->getArg()); |
7019 | break; |
7020 | case PCK_Lib: |
7021 | AddDependentLib(Lib: PCD->getArg()); |
7022 | break; |
7023 | case PCK_Compiler: |
7024 | case PCK_ExeStr: |
7025 | case PCK_User: |
7026 | break; // We ignore all of these. |
7027 | } |
7028 | break; |
7029 | } |
7030 | |
7031 | case Decl::PragmaDetectMismatch: { |
7032 | const auto *PDMD = cast<PragmaDetectMismatchDecl>(Val: D); |
7033 | AddDetectMismatch(Name: PDMD->getName(), Value: PDMD->getValue()); |
7034 | break; |
7035 | } |
7036 | |
7037 | case Decl::LinkageSpec: |
7038 | EmitLinkageSpec(LSD: cast<LinkageSpecDecl>(Val: D)); |
7039 | break; |
7040 | |
7041 | case Decl::FileScopeAsm: { |
7042 | // File-scope asm is ignored during device-side CUDA compilation. |
7043 | if (LangOpts.CUDA && LangOpts.CUDAIsDevice) |
7044 | break; |
7045 | // File-scope asm is ignored during device-side OpenMP compilation. |
7046 | if (LangOpts.OpenMPIsTargetDevice) |
7047 | break; |
7048 | // File-scope asm is ignored during device-side SYCL compilation. |
7049 | if (LangOpts.SYCLIsDevice) |
7050 | break; |
7051 | auto *AD = cast<FileScopeAsmDecl>(Val: D); |
7052 | getModule().appendModuleInlineAsm(Asm: AD->getAsmString()->getString()); |
7053 | break; |
7054 | } |
7055 | |
7056 | case Decl::TopLevelStmt: |
7057 | EmitTopLevelStmt(D: cast<TopLevelStmtDecl>(Val: D)); |
7058 | break; |
7059 | |
7060 | case Decl::Import: { |
7061 | auto *Import = cast<ImportDecl>(Val: D); |
7062 | |
7063 | // If we've already imported this module, we're done. |
7064 | if (!ImportedModules.insert(X: Import->getImportedModule())) |
7065 | break; |
7066 | |
7067 | // Emit debug information for direct imports. |
7068 | if (!Import->getImportedOwningModule()) { |
7069 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7070 | DI->EmitImportDecl(ID: *Import); |
7071 | } |
7072 | |
7073 | // For C++ standard modules we are done - we will call the module |
7074 | // initializer for imported modules, and that will likewise call those for |
7075 | // any imports it has. |
7076 | if (CXX20ModuleInits && Import->getImportedOwningModule() && |
7077 | !Import->getImportedOwningModule()->isModuleMapModule()) |
7078 | break; |
7079 | |
7080 | // For clang C++ module map modules the initializers for sub-modules are |
7081 | // emitted here. |
7082 | |
7083 | // Find all of the submodules and emit the module initializers. |
7084 | llvm::SmallPtrSet<clang::Module *, 16> Visited; |
7085 | SmallVector<clang::Module *, 16> Stack; |
7086 | Visited.insert(Ptr: Import->getImportedModule()); |
7087 | Stack.push_back(Elt: Import->getImportedModule()); |
7088 | |
7089 | while (!Stack.empty()) { |
7090 | clang::Module *Mod = Stack.pop_back_val(); |
7091 | if (!EmittedModuleInitializers.insert(Ptr: Mod).second) |
7092 | continue; |
7093 | |
7094 | for (auto *D : Context.getModuleInitializers(M: Mod)) |
7095 | EmitTopLevelDecl(D); |
7096 | |
7097 | // Visit the submodules of this module. |
7098 | for (auto *Submodule : Mod->submodules()) { |
7099 | // Skip explicit children; they need to be explicitly imported to emit |
7100 | // the initializers. |
7101 | if (Submodule->IsExplicit) |
7102 | continue; |
7103 | |
7104 | if (Visited.insert(Ptr: Submodule).second) |
7105 | Stack.push_back(Elt: Submodule); |
7106 | } |
7107 | } |
7108 | break; |
7109 | } |
7110 | |
7111 | case Decl::Export: |
7112 | EmitDeclContext(DC: cast<ExportDecl>(Val: D)); |
7113 | break; |
7114 | |
7115 | case Decl::OMPThreadPrivate: |
7116 | EmitOMPThreadPrivateDecl(D: cast<OMPThreadPrivateDecl>(Val: D)); |
7117 | break; |
7118 | |
7119 | case Decl::OMPAllocate: |
7120 | EmitOMPAllocateDecl(D: cast<OMPAllocateDecl>(Val: D)); |
7121 | break; |
7122 | |
7123 | case Decl::OMPDeclareReduction: |
7124 | EmitOMPDeclareReduction(D: cast<OMPDeclareReductionDecl>(Val: D)); |
7125 | break; |
7126 | |
7127 | case Decl::OMPDeclareMapper: |
7128 | EmitOMPDeclareMapper(D: cast<OMPDeclareMapperDecl>(Val: D)); |
7129 | break; |
7130 | |
7131 | case Decl::OMPRequires: |
7132 | EmitOMPRequiresDecl(D: cast<OMPRequiresDecl>(Val: D)); |
7133 | break; |
7134 | |
7135 | case Decl::Typedef: |
7136 | case Decl::TypeAlias: // using foo = bar; [C++11] |
7137 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7138 | DI->EmitAndRetainType( |
7139 | Ty: getContext().getTypedefType(Decl: cast<TypedefNameDecl>(Val: D))); |
7140 | break; |
7141 | |
7142 | case Decl::Record: |
7143 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7144 | if (cast<RecordDecl>(Val: D)->getDefinition()) |
7145 | DI->EmitAndRetainType(Ty: getContext().getRecordType(Decl: cast<RecordDecl>(Val: D))); |
7146 | break; |
7147 | |
7148 | case Decl::Enum: |
7149 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7150 | if (cast<EnumDecl>(Val: D)->getDefinition()) |
7151 | DI->EmitAndRetainType(Ty: getContext().getEnumType(Decl: cast<EnumDecl>(Val: D))); |
7152 | break; |
7153 | |
7154 | case Decl::HLSLBuffer: |
7155 | getHLSLRuntime().addBuffer(D: cast<HLSLBufferDecl>(Val: D)); |
7156 | break; |
7157 | |
7158 | default: |
7159 | // Make sure we handled everything we should, every other kind is a |
7160 | // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind |
7161 | // function. Need to recode Decl::Kind to do that easily. |
7162 | assert(isa<TypeDecl>(D) && "Unsupported decl kind" ); |
7163 | break; |
7164 | } |
7165 | } |
7166 | |
7167 | void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) { |
7168 | // Do we need to generate coverage mapping? |
7169 | if (!CodeGenOpts.CoverageMapping) |
7170 | return; |
7171 | switch (D->getKind()) { |
7172 | case Decl::CXXConversion: |
7173 | case Decl::CXXMethod: |
7174 | case Decl::Function: |
7175 | case Decl::ObjCMethod: |
7176 | case Decl::CXXConstructor: |
7177 | case Decl::CXXDestructor: { |
7178 | if (!cast<FunctionDecl>(Val: D)->doesThisDeclarationHaveABody()) |
7179 | break; |
7180 | SourceManager &SM = getContext().getSourceManager(); |
7181 | if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(SpellingLoc: D->getBeginLoc())) |
7182 | break; |
7183 | if (!llvm::coverage::SystemHeadersCoverage && |
7184 | SM.isInSystemHeader(Loc: D->getBeginLoc())) |
7185 | break; |
7186 | DeferredEmptyCoverageMappingDecls.try_emplace(Key: D, Args: true); |
7187 | break; |
7188 | } |
7189 | default: |
7190 | break; |
7191 | }; |
7192 | } |
7193 | |
7194 | void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) { |
7195 | // Do we need to generate coverage mapping? |
7196 | if (!CodeGenOpts.CoverageMapping) |
7197 | return; |
7198 | if (const auto *Fn = dyn_cast<FunctionDecl>(Val: D)) { |
7199 | if (Fn->isTemplateInstantiation()) |
7200 | ClearUnusedCoverageMapping(D: Fn->getTemplateInstantiationPattern()); |
7201 | } |
7202 | DeferredEmptyCoverageMappingDecls.insert_or_assign(Key: D, Val: false); |
7203 | } |
7204 | |
7205 | void CodeGenModule::EmitDeferredUnusedCoverageMappings() { |
7206 | // We call takeVector() here to avoid use-after-free. |
7207 | // FIXME: DeferredEmptyCoverageMappingDecls is getting mutated because |
7208 | // we deserialize function bodies to emit coverage info for them, and that |
7209 | // deserializes more declarations. How should we handle that case? |
7210 | for (const auto &Entry : DeferredEmptyCoverageMappingDecls.takeVector()) { |
7211 | if (!Entry.second) |
7212 | continue; |
7213 | const Decl *D = Entry.first; |
7214 | switch (D->getKind()) { |
7215 | case Decl::CXXConversion: |
7216 | case Decl::CXXMethod: |
7217 | case Decl::Function: |
7218 | case Decl::ObjCMethod: { |
7219 | CodeGenPGO PGO(*this); |
7220 | GlobalDecl GD(cast<FunctionDecl>(Val: D)); |
7221 | PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD), |
7222 | Linkage: getFunctionLinkage(GD)); |
7223 | break; |
7224 | } |
7225 | case Decl::CXXConstructor: { |
7226 | CodeGenPGO PGO(*this); |
7227 | GlobalDecl GD(cast<CXXConstructorDecl>(Val: D), Ctor_Base); |
7228 | PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD), |
7229 | Linkage: getFunctionLinkage(GD)); |
7230 | break; |
7231 | } |
7232 | case Decl::CXXDestructor: { |
7233 | CodeGenPGO PGO(*this); |
7234 | GlobalDecl GD(cast<CXXDestructorDecl>(Val: D), Dtor_Base); |
7235 | PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD), |
7236 | Linkage: getFunctionLinkage(GD)); |
7237 | break; |
7238 | } |
7239 | default: |
7240 | break; |
7241 | }; |
7242 | } |
7243 | } |
7244 | |
7245 | void CodeGenModule::EmitMainVoidAlias() { |
7246 | // In order to transition away from "__original_main" gracefully, emit an |
7247 | // alias for "main" in the no-argument case so that libc can detect when |
7248 | // new-style no-argument main is in used. |
7249 | if (llvm::Function *F = getModule().getFunction(Name: "main" )) { |
7250 | if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() && |
7251 | F->getReturnType()->isIntegerTy(Bitwidth: Context.getTargetInfo().getIntWidth())) { |
7252 | auto *GA = llvm::GlobalAlias::create(Name: "__main_void" , Aliasee: F); |
7253 | GA->setVisibility(llvm::GlobalValue::HiddenVisibility); |
7254 | } |
7255 | } |
7256 | } |
7257 | |
7258 | /// Turns the given pointer into a constant. |
7259 | static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context, |
7260 | const void *Ptr) { |
7261 | uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr); |
7262 | llvm::Type *i64 = llvm::Type::getInt64Ty(C&: Context); |
7263 | return llvm::ConstantInt::get(Ty: i64, V: PtrInt); |
7264 | } |
7265 | |
7266 | static void EmitGlobalDeclMetadata(CodeGenModule &CGM, |
7267 | llvm::NamedMDNode *&GlobalMetadata, |
7268 | GlobalDecl D, |
7269 | llvm::GlobalValue *Addr) { |
7270 | if (!GlobalMetadata) |
7271 | GlobalMetadata = |
7272 | CGM.getModule().getOrInsertNamedMetadata(Name: "clang.global.decl.ptrs" ); |
7273 | |
7274 | // TODO: should we report variant information for ctors/dtors? |
7275 | llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(C: Addr), |
7276 | llvm::ConstantAsMetadata::get(C: GetPointerConstant( |
7277 | Context&: CGM.getLLVMContext(), Ptr: D.getDecl()))}; |
7278 | GlobalMetadata->addOperand(M: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: Ops)); |
7279 | } |
7280 | |
7281 | bool CodeGenModule::CheckAndReplaceExternCIFuncs(llvm::GlobalValue *Elem, |
7282 | llvm::GlobalValue *CppFunc) { |
7283 | // Store the list of ifuncs we need to replace uses in. |
7284 | llvm::SmallVector<llvm::GlobalIFunc *> IFuncs; |
7285 | // List of ConstantExprs that we should be able to delete when we're done |
7286 | // here. |
7287 | llvm::SmallVector<llvm::ConstantExpr *> CEs; |
7288 | |
7289 | // It isn't valid to replace the extern-C ifuncs if all we find is itself! |
7290 | if (Elem == CppFunc) |
7291 | return false; |
7292 | |
7293 | // First make sure that all users of this are ifuncs (or ifuncs via a |
7294 | // bitcast), and collect the list of ifuncs and CEs so we can work on them |
7295 | // later. |
7296 | for (llvm::User *User : Elem->users()) { |
7297 | // Users can either be a bitcast ConstExpr that is used by the ifuncs, OR an |
7298 | // ifunc directly. In any other case, just give up, as we don't know what we |
7299 | // could break by changing those. |
7300 | if (auto *ConstExpr = dyn_cast<llvm::ConstantExpr>(Val: User)) { |
7301 | if (ConstExpr->getOpcode() != llvm::Instruction::BitCast) |
7302 | return false; |
7303 | |
7304 | for (llvm::User *CEUser : ConstExpr->users()) { |
7305 | if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: CEUser)) { |
7306 | IFuncs.push_back(Elt: IFunc); |
7307 | } else { |
7308 | return false; |
7309 | } |
7310 | } |
7311 | CEs.push_back(Elt: ConstExpr); |
7312 | } else if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: User)) { |
7313 | IFuncs.push_back(Elt: IFunc); |
7314 | } else { |
7315 | // This user is one we don't know how to handle, so fail redirection. This |
7316 | // will result in an ifunc retaining a resolver name that will ultimately |
7317 | // fail to be resolved to a defined function. |
7318 | return false; |
7319 | } |
7320 | } |
7321 | |
7322 | // Now we know this is a valid case where we can do this alias replacement, we |
7323 | // need to remove all of the references to Elem (and the bitcasts!) so we can |
7324 | // delete it. |
7325 | for (llvm::GlobalIFunc *IFunc : IFuncs) |
7326 | IFunc->setResolver(nullptr); |
7327 | for (llvm::ConstantExpr *ConstExpr : CEs) |
7328 | ConstExpr->destroyConstant(); |
7329 | |
7330 | // We should now be out of uses for the 'old' version of this function, so we |
7331 | // can erase it as well. |
7332 | Elem->eraseFromParent(); |
7333 | |
7334 | for (llvm::GlobalIFunc *IFunc : IFuncs) { |
7335 | // The type of the resolver is always just a function-type that returns the |
7336 | // type of the IFunc, so create that here. If the type of the actual |
7337 | // resolver doesn't match, it just gets bitcast to the right thing. |
7338 | auto *ResolverTy = |
7339 | llvm::FunctionType::get(Result: IFunc->getType(), /*isVarArg*/ false); |
7340 | llvm::Constant *Resolver = GetOrCreateLLVMFunction( |
7341 | MangledName: CppFunc->getName(), Ty: ResolverTy, GD: {}, /*ForVTable*/ false); |
7342 | IFunc->setResolver(Resolver); |
7343 | } |
7344 | return true; |
7345 | } |
7346 | |
7347 | /// For each function which is declared within an extern "C" region and marked |
7348 | /// as 'used', but has internal linkage, create an alias from the unmangled |
7349 | /// name to the mangled name if possible. People expect to be able to refer |
7350 | /// to such functions with an unmangled name from inline assembly within the |
7351 | /// same translation unit. |
7352 | void CodeGenModule::EmitStaticExternCAliases() { |
7353 | if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases()) |
7354 | return; |
7355 | for (auto &I : StaticExternCValues) { |
7356 | const IdentifierInfo *Name = I.first; |
7357 | llvm::GlobalValue *Val = I.second; |
7358 | |
7359 | // If Val is null, that implies there were multiple declarations that each |
7360 | // had a claim to the unmangled name. In this case, generation of the alias |
7361 | // is suppressed. See CodeGenModule::MaybeHandleStaticInExternC. |
7362 | if (!Val) |
7363 | break; |
7364 | |
7365 | llvm::GlobalValue *ExistingElem = |
7366 | getModule().getNamedValue(Name: Name->getName()); |
7367 | |
7368 | // If there is either not something already by this name, or we were able to |
7369 | // replace all uses from IFuncs, create the alias. |
7370 | if (!ExistingElem || CheckAndReplaceExternCIFuncs(Elem: ExistingElem, CppFunc: Val)) |
7371 | addCompilerUsedGlobal(GV: llvm::GlobalAlias::create(Name: Name->getName(), Aliasee: Val)); |
7372 | } |
7373 | } |
7374 | |
7375 | bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName, |
7376 | GlobalDecl &Result) const { |
7377 | auto Res = Manglings.find(Key: MangledName); |
7378 | if (Res == Manglings.end()) |
7379 | return false; |
7380 | Result = Res->getValue(); |
7381 | return true; |
7382 | } |
7383 | |
7384 | /// Emits metadata nodes associating all the global values in the |
7385 | /// current module with the Decls they came from. This is useful for |
7386 | /// projects using IR gen as a subroutine. |
7387 | /// |
7388 | /// Since there's currently no way to associate an MDNode directly |
7389 | /// with an llvm::GlobalValue, we create a global named metadata |
7390 | /// with the name 'clang.global.decl.ptrs'. |
7391 | void CodeGenModule::EmitDeclMetadata() { |
7392 | llvm::NamedMDNode *GlobalMetadata = nullptr; |
7393 | |
7394 | for (auto &I : MangledDeclNames) { |
7395 | llvm::GlobalValue *Addr = getModule().getNamedValue(Name: I.second); |
7396 | // Some mangled names don't necessarily have an associated GlobalValue |
7397 | // in this module, e.g. if we mangled it for DebugInfo. |
7398 | if (Addr) |
7399 | EmitGlobalDeclMetadata(CGM&: *this, GlobalMetadata, D: I.first, Addr); |
7400 | } |
7401 | } |
7402 | |
7403 | /// Emits metadata nodes for all the local variables in the current |
7404 | /// function. |
7405 | void CodeGenFunction::EmitDeclMetadata() { |
7406 | if (LocalDeclMap.empty()) return; |
7407 | |
7408 | llvm::LLVMContext &Context = getLLVMContext(); |
7409 | |
7410 | // Find the unique metadata ID for this name. |
7411 | unsigned DeclPtrKind = Context.getMDKindID(Name: "clang.decl.ptr" ); |
7412 | |
7413 | llvm::NamedMDNode *GlobalMetadata = nullptr; |
7414 | |
7415 | for (auto &I : LocalDeclMap) { |
7416 | const Decl *D = I.first; |
7417 | llvm::Value *Addr = I.second.emitRawPointer(CGF&: *this); |
7418 | if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Val: Addr)) { |
7419 | llvm::Value *DAddr = GetPointerConstant(Context&: getLLVMContext(), Ptr: D); |
7420 | Alloca->setMetadata( |
7421 | KindID: DeclPtrKind, Node: llvm::MDNode::get( |
7422 | Context, MDs: llvm::ValueAsMetadata::getConstant(C: DAddr))); |
7423 | } else if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: Addr)) { |
7424 | GlobalDecl GD = GlobalDecl(cast<VarDecl>(Val: D)); |
7425 | EmitGlobalDeclMetadata(CGM, GlobalMetadata, D: GD, Addr: GV); |
7426 | } |
7427 | } |
7428 | } |
7429 | |
7430 | void CodeGenModule::EmitVersionIdentMetadata() { |
7431 | llvm::NamedMDNode *IdentMetadata = |
7432 | TheModule.getOrInsertNamedMetadata(Name: "llvm.ident" ); |
7433 | std::string Version = getClangFullVersion(); |
7434 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
7435 | |
7436 | llvm::Metadata *IdentNode[] = {llvm::MDString::get(Context&: Ctx, Str: Version)}; |
7437 | IdentMetadata->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: IdentNode)); |
7438 | } |
7439 | |
7440 | void CodeGenModule::EmitCommandLineMetadata() { |
7441 | llvm::NamedMDNode *CommandLineMetadata = |
7442 | TheModule.getOrInsertNamedMetadata(Name: "llvm.commandline" ); |
7443 | std::string CommandLine = getCodeGenOpts().RecordCommandLine; |
7444 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
7445 | |
7446 | llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Context&: Ctx, Str: CommandLine)}; |
7447 | CommandLineMetadata->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: CommandLineNode)); |
7448 | } |
7449 | |
7450 | void CodeGenModule::EmitCoverageFile() { |
7451 | llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata(Name: "llvm.dbg.cu" ); |
7452 | if (!CUNode) |
7453 | return; |
7454 | |
7455 | llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata(Name: "llvm.gcov" ); |
7456 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
7457 | auto *CoverageDataFile = |
7458 | llvm::MDString::get(Context&: Ctx, Str: getCodeGenOpts().CoverageDataFile); |
7459 | auto *CoverageNotesFile = |
7460 | llvm::MDString::get(Context&: Ctx, Str: getCodeGenOpts().CoverageNotesFile); |
7461 | for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) { |
7462 | llvm::MDNode *CU = CUNode->getOperand(i); |
7463 | llvm::Metadata *Elts[] = {CoverageNotesFile, CoverageDataFile, CU}; |
7464 | GCov->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: Elts)); |
7465 | } |
7466 | } |
7467 | |
7468 | llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty, |
7469 | bool ForEH) { |
7470 | // Return a bogus pointer if RTTI is disabled, unless it's for EH. |
7471 | // FIXME: should we even be calling this method if RTTI is disabled |
7472 | // and it's not for EH? |
7473 | if (!shouldEmitRTTI(ForEH)) |
7474 | return llvm::Constant::getNullValue(Ty: GlobalsInt8PtrTy); |
7475 | |
7476 | if (ForEH && Ty->isObjCObjectPointerType() && |
7477 | LangOpts.ObjCRuntime.isGNUFamily()) |
7478 | return ObjCRuntime->GetEHType(T: Ty); |
7479 | |
7480 | return getCXXABI().getAddrOfRTTIDescriptor(Ty); |
7481 | } |
7482 | |
7483 | void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) { |
7484 | // Do not emit threadprivates in simd-only mode. |
7485 | if (LangOpts.OpenMP && LangOpts.OpenMPSimd) |
7486 | return; |
7487 | for (auto RefExpr : D->varlists()) { |
7488 | auto *VD = cast<VarDecl>(Val: cast<DeclRefExpr>(Val: RefExpr)->getDecl()); |
7489 | bool PerformInit = |
7490 | VD->getAnyInitializer() && |
7491 | !VD->getAnyInitializer()->isConstantInitializer(Ctx&: getContext(), |
7492 | /*ForRef=*/false); |
7493 | |
7494 | Address Addr(GetAddrOfGlobalVar(D: VD), |
7495 | getTypes().ConvertTypeForMem(T: VD->getType()), |
7496 | getContext().getDeclAlign(D: VD)); |
7497 | if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition( |
7498 | VD, VDAddr: Addr, Loc: RefExpr->getBeginLoc(), PerformInit)) |
7499 | CXXGlobalInits.push_back(x: InitFunction); |
7500 | } |
7501 | } |
7502 | |
7503 | llvm::Metadata * |
7504 | CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map, |
7505 | StringRef Suffix) { |
7506 | if (auto *FnType = T->getAs<FunctionProtoType>()) |
7507 | T = getContext().getFunctionType( |
7508 | ResultTy: FnType->getReturnType(), Args: FnType->getParamTypes(), |
7509 | EPI: FnType->getExtProtoInfo().withExceptionSpec(ESI: EST_None)); |
7510 | |
7511 | llvm::Metadata *&InternalId = Map[T.getCanonicalType()]; |
7512 | if (InternalId) |
7513 | return InternalId; |
7514 | |
7515 | if (isExternallyVisible(L: T->getLinkage())) { |
7516 | std::string OutName; |
7517 | llvm::raw_string_ostream Out(OutName); |
7518 | getCXXABI().getMangleContext().mangleCanonicalTypeName( |
7519 | T, Out, NormalizeIntegers: getCodeGenOpts().SanitizeCfiICallNormalizeIntegers); |
7520 | |
7521 | if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers) |
7522 | Out << ".normalized" ; |
7523 | |
7524 | Out << Suffix; |
7525 | |
7526 | InternalId = llvm::MDString::get(Context&: getLLVMContext(), Str: Out.str()); |
7527 | } else { |
7528 | InternalId = llvm::MDNode::getDistinct(Context&: getLLVMContext(), |
7529 | MDs: llvm::ArrayRef<llvm::Metadata *>()); |
7530 | } |
7531 | |
7532 | return InternalId; |
7533 | } |
7534 | |
7535 | llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) { |
7536 | return CreateMetadataIdentifierImpl(T, Map&: MetadataIdMap, Suffix: "" ); |
7537 | } |
7538 | |
7539 | llvm::Metadata * |
7540 | CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) { |
7541 | return CreateMetadataIdentifierImpl(T, Map&: VirtualMetadataIdMap, Suffix: ".virtual" ); |
7542 | } |
7543 | |
7544 | // Generalize pointer types to a void pointer with the qualifiers of the |
7545 | // originally pointed-to type, e.g. 'const char *' and 'char * const *' |
7546 | // generalize to 'const void *' while 'char *' and 'const char **' generalize to |
7547 | // 'void *'. |
7548 | static QualType GeneralizeType(ASTContext &Ctx, QualType Ty) { |
7549 | if (!Ty->isPointerType()) |
7550 | return Ty; |
7551 | |
7552 | return Ctx.getPointerType( |
7553 | T: QualType(Ctx.VoidTy).withCVRQualifiers( |
7554 | CVR: Ty->getPointeeType().getCVRQualifiers())); |
7555 | } |
7556 | |
7557 | // Apply type generalization to a FunctionType's return and argument types |
7558 | static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty) { |
7559 | if (auto *FnType = Ty->getAs<FunctionProtoType>()) { |
7560 | SmallVector<QualType, 8> GeneralizedParams; |
7561 | for (auto &Param : FnType->param_types()) |
7562 | GeneralizedParams.push_back(Elt: GeneralizeType(Ctx, Ty: Param)); |
7563 | |
7564 | return Ctx.getFunctionType( |
7565 | ResultTy: GeneralizeType(Ctx, Ty: FnType->getReturnType()), |
7566 | Args: GeneralizedParams, EPI: FnType->getExtProtoInfo()); |
7567 | } |
7568 | |
7569 | if (auto *FnType = Ty->getAs<FunctionNoProtoType>()) |
7570 | return Ctx.getFunctionNoProtoType( |
7571 | ResultTy: GeneralizeType(Ctx, Ty: FnType->getReturnType())); |
7572 | |
7573 | llvm_unreachable("Encountered unknown FunctionType" ); |
7574 | } |
7575 | |
7576 | llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) { |
7577 | return CreateMetadataIdentifierImpl(T: GeneralizeFunctionType(Ctx&: getContext(), Ty: T), |
7578 | Map&: GeneralizedMetadataIdMap, Suffix: ".generalized" ); |
7579 | } |
7580 | |
7581 | /// Returns whether this module needs the "all-vtables" type identifier. |
7582 | bool CodeGenModule::NeedAllVtablesTypeId() const { |
7583 | // Returns true if at least one of vtable-based CFI checkers is enabled and |
7584 | // is not in the trapping mode. |
7585 | return ((LangOpts.Sanitize.has(K: SanitizerKind::CFIVCall) && |
7586 | !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFIVCall)) || |
7587 | (LangOpts.Sanitize.has(K: SanitizerKind::CFINVCall) && |
7588 | !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFINVCall)) || |
7589 | (LangOpts.Sanitize.has(K: SanitizerKind::CFIDerivedCast) && |
7590 | !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFIDerivedCast)) || |
7591 | (LangOpts.Sanitize.has(K: SanitizerKind::CFIUnrelatedCast) && |
7592 | !CodeGenOpts.SanitizeTrap.has(K: SanitizerKind::CFIUnrelatedCast))); |
7593 | } |
7594 | |
7595 | void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable, |
7596 | CharUnits Offset, |
7597 | const CXXRecordDecl *RD) { |
7598 | llvm::Metadata *MD = |
7599 | CreateMetadataIdentifierForType(T: QualType(RD->getTypeForDecl(), 0)); |
7600 | VTable->addTypeMetadata(Offset: Offset.getQuantity(), TypeID: MD); |
7601 | |
7602 | if (CodeGenOpts.SanitizeCfiCrossDso) |
7603 | if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD)) |
7604 | VTable->addTypeMetadata(Offset: Offset.getQuantity(), |
7605 | TypeID: llvm::ConstantAsMetadata::get(C: CrossDsoTypeId)); |
7606 | |
7607 | if (NeedAllVtablesTypeId()) { |
7608 | llvm::Metadata *MD = llvm::MDString::get(Context&: getLLVMContext(), Str: "all-vtables" ); |
7609 | VTable->addTypeMetadata(Offset: Offset.getQuantity(), TypeID: MD); |
7610 | } |
7611 | } |
7612 | |
7613 | llvm::SanitizerStatReport &CodeGenModule::getSanStats() { |
7614 | if (!SanStats) |
7615 | SanStats = std::make_unique<llvm::SanitizerStatReport>(args: &getModule()); |
7616 | |
7617 | return *SanStats; |
7618 | } |
7619 | |
7620 | llvm::Value * |
7621 | CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E, |
7622 | CodeGenFunction &CGF) { |
7623 | llvm::Constant *C = ConstantEmitter(CGF).emitAbstract(E, T: E->getType()); |
7624 | auto *SamplerT = getOpenCLRuntime().getSamplerType(T: E->getType().getTypePtr()); |
7625 | auto *FTy = llvm::FunctionType::get(Result: SamplerT, Params: {C->getType()}, isVarArg: false); |
7626 | auto *Call = CGF.EmitRuntimeCall( |
7627 | callee: CreateRuntimeFunction(FTy, Name: "__translate_sampler_initializer" ), args: {C}); |
7628 | return Call; |
7629 | } |
7630 | |
7631 | CharUnits CodeGenModule::getNaturalPointeeTypeAlignment( |
7632 | QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) { |
7633 | return getNaturalTypeAlignment(T: T->getPointeeType(), BaseInfo, TBAAInfo, |
7634 | /* forPointeeType= */ true); |
7635 | } |
7636 | |
7637 | CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T, |
7638 | LValueBaseInfo *BaseInfo, |
7639 | TBAAAccessInfo *TBAAInfo, |
7640 | bool forPointeeType) { |
7641 | if (TBAAInfo) |
7642 | *TBAAInfo = getTBAAAccessInfo(AccessType: T); |
7643 | |
7644 | // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But |
7645 | // that doesn't return the information we need to compute BaseInfo. |
7646 | |
7647 | // Honor alignment typedef attributes even on incomplete types. |
7648 | // We also honor them straight for C++ class types, even as pointees; |
7649 | // there's an expressivity gap here. |
7650 | if (auto TT = T->getAs<TypedefType>()) { |
7651 | if (auto Align = TT->getDecl()->getMaxAlignment()) { |
7652 | if (BaseInfo) |
7653 | *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType); |
7654 | return getContext().toCharUnitsFromBits(BitSize: Align); |
7655 | } |
7656 | } |
7657 | |
7658 | bool AlignForArray = T->isArrayType(); |
7659 | |
7660 | // Analyze the base element type, so we don't get confused by incomplete |
7661 | // array types. |
7662 | T = getContext().getBaseElementType(QT: T); |
7663 | |
7664 | if (T->isIncompleteType()) { |
7665 | // We could try to replicate the logic from |
7666 | // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the |
7667 | // type is incomplete, so it's impossible to test. We could try to reuse |
7668 | // getTypeAlignIfKnown, but that doesn't return the information we need |
7669 | // to set BaseInfo. So just ignore the possibility that the alignment is |
7670 | // greater than one. |
7671 | if (BaseInfo) |
7672 | *BaseInfo = LValueBaseInfo(AlignmentSource::Type); |
7673 | return CharUnits::One(); |
7674 | } |
7675 | |
7676 | if (BaseInfo) |
7677 | *BaseInfo = LValueBaseInfo(AlignmentSource::Type); |
7678 | |
7679 | CharUnits Alignment; |
7680 | const CXXRecordDecl *RD; |
7681 | if (T.getQualifiers().hasUnaligned()) { |
7682 | Alignment = CharUnits::One(); |
7683 | } else if (forPointeeType && !AlignForArray && |
7684 | (RD = T->getAsCXXRecordDecl())) { |
7685 | // For C++ class pointees, we don't know whether we're pointing at a |
7686 | // base or a complete object, so we generally need to use the |
7687 | // non-virtual alignment. |
7688 | Alignment = getClassPointerAlignment(CD: RD); |
7689 | } else { |
7690 | Alignment = getContext().getTypeAlignInChars(T); |
7691 | } |
7692 | |
7693 | // Cap to the global maximum type alignment unless the alignment |
7694 | // was somehow explicit on the type. |
7695 | if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) { |
7696 | if (Alignment.getQuantity() > MaxAlign && |
7697 | !getContext().isAlignmentRequired(T)) |
7698 | Alignment = CharUnits::fromQuantity(Quantity: MaxAlign); |
7699 | } |
7700 | return Alignment; |
7701 | } |
7702 | |
7703 | bool CodeGenModule::stopAutoInit() { |
7704 | unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter; |
7705 | if (StopAfter) { |
7706 | // This number is positive only when -ftrivial-auto-var-init-stop-after=* is |
7707 | // used |
7708 | if (NumAutoVarInit >= StopAfter) { |
7709 | return true; |
7710 | } |
7711 | if (!NumAutoVarInit) { |
7712 | unsigned DiagID = getDiags().getCustomDiagID( |
7713 | L: DiagnosticsEngine::Warning, |
7714 | FormatString: "-ftrivial-auto-var-init-stop-after=%0 has been enabled to limit the " |
7715 | "number of times ftrivial-auto-var-init=%1 gets applied." ); |
7716 | getDiags().Report(DiagID) |
7717 | << StopAfter |
7718 | << (getContext().getLangOpts().getTrivialAutoVarInit() == |
7719 | LangOptions::TrivialAutoVarInitKind::Zero |
7720 | ? "zero" |
7721 | : "pattern" ); |
7722 | } |
7723 | ++NumAutoVarInit; |
7724 | } |
7725 | return false; |
7726 | } |
7727 | |
7728 | void CodeGenModule::printPostfixForExternalizedDecl(llvm::raw_ostream &OS, |
7729 | const Decl *D) const { |
7730 | // ptxas does not allow '.' in symbol names. On the other hand, HIP prefers |
7731 | // postfix beginning with '.' since the symbol name can be demangled. |
7732 | if (LangOpts.HIP) |
7733 | OS << (isa<VarDecl>(Val: D) ? ".static." : ".intern." ); |
7734 | else |
7735 | OS << (isa<VarDecl>(Val: D) ? "__static__" : "__intern__" ); |
7736 | |
7737 | // If the CUID is not specified we try to generate a unique postfix. |
7738 | if (getLangOpts().CUID.empty()) { |
7739 | SourceManager &SM = getContext().getSourceManager(); |
7740 | PresumedLoc PLoc = SM.getPresumedLoc(Loc: D->getLocation()); |
7741 | assert(PLoc.isValid() && "Source location is expected to be valid." ); |
7742 | |
7743 | // Get the hash of the user defined macros. |
7744 | llvm::MD5 Hash; |
7745 | llvm::MD5::MD5Result Result; |
7746 | for (const auto &Arg : PreprocessorOpts.Macros) |
7747 | Hash.update(Str: Arg.first); |
7748 | Hash.final(Result); |
7749 | |
7750 | // Get the UniqueID for the file containing the decl. |
7751 | llvm::sys::fs::UniqueID ID; |
7752 | if (llvm::sys::fs::getUniqueID(Path: PLoc.getFilename(), Result&: ID)) { |
7753 | PLoc = SM.getPresumedLoc(Loc: D->getLocation(), /*UseLineDirectives=*/false); |
7754 | assert(PLoc.isValid() && "Source location is expected to be valid." ); |
7755 | if (auto EC = llvm::sys::fs::getUniqueID(Path: PLoc.getFilename(), Result&: ID)) |
7756 | SM.getDiagnostics().Report(DiagID: diag::err_cannot_open_file) |
7757 | << PLoc.getFilename() << EC.message(); |
7758 | } |
7759 | OS << llvm::format(Fmt: "%x" , Vals: ID.getFile()) << llvm::format(Fmt: "%x" , Vals: ID.getDevice()) |
7760 | << "_" << llvm::utohexstr(X: Result.low(), /*LowerCase=*/true, /*Width=*/8); |
7761 | } else { |
7762 | OS << getContext().getCUIDHash(); |
7763 | } |
7764 | } |
7765 | |
7766 | void CodeGenModule::moveLazyEmissionStates(CodeGenModule *NewBuilder) { |
7767 | assert(DeferredDeclsToEmit.empty() && |
7768 | "Should have emitted all decls deferred to emit." ); |
7769 | assert(NewBuilder->DeferredDecls.empty() && |
7770 | "Newly created module should not have deferred decls" ); |
7771 | NewBuilder->DeferredDecls = std::move(DeferredDecls); |
7772 | assert(EmittedDeferredDecls.empty() && |
7773 | "Still have (unmerged) EmittedDeferredDecls deferred decls" ); |
7774 | |
7775 | assert(NewBuilder->DeferredVTables.empty() && |
7776 | "Newly created module should not have deferred vtables" ); |
7777 | NewBuilder->DeferredVTables = std::move(DeferredVTables); |
7778 | |
7779 | assert(NewBuilder->MangledDeclNames.empty() && |
7780 | "Newly created module should not have mangled decl names" ); |
7781 | assert(NewBuilder->Manglings.empty() && |
7782 | "Newly created module should not have manglings" ); |
7783 | NewBuilder->Manglings = std::move(Manglings); |
7784 | |
7785 | NewBuilder->WeakRefReferences = std::move(WeakRefReferences); |
7786 | |
7787 | NewBuilder->TBAA = std::move(TBAA); |
7788 | |
7789 | NewBuilder->ABI->MangleCtx = std::move(ABI->MangleCtx); |
7790 | } |
7791 | |