//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-module state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenModule.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGOpenMPRuntimeGPU.h"
#include "CodeGenFunction.h"
#include "CodeGenPGO.h"
#include "ConstantEmitter.h"
#include "CoverageMappingGen.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
#include "clang/CodeGen/BackendUtil.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/Support/CRC.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/xxhash.h"
#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/TargetParser/X86TargetParser.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <optional>
#include <set>

using namespace clang;
using namespace CodeGen;

static llvm::cl::opt<bool> LimitedCoverage(
    "limited-coverage-experimental", llvm::cl::Hidden,
    llvm::cl::desc("Emit limited coverage mapping information (experimental)"));

static const char AnnotationSection[] = "llvm.metadata";

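// Create the C++ ABI implementation for this module, based on the C++ ABI
// kind selected for the target.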
static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
  switch (CGM.getContext().getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(CGM);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(CGM);
  }

  llvm_unreachable("invalid C++ ABI kind");
}

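// Select the TargetCodeGenInfo implementation for the module's target triple
// and ABI options; targets without a specialized implementation fall back to
// the default one.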
static std::unique_ptr<TargetCodeGenInfo>
createTargetCodeGenInfo(CodeGenModule &CGM) {
  const TargetInfo &Target = CGM.getTarget();
  const llvm::Triple &Triple = Target.getTriple();
  const CodeGenOptions &CodeGenOpts = CGM.getCodeGenOpts();

  switch (Triple.getArch()) {
  default:
    return createDefaultTargetCodeGenInfo(CGM);

  case llvm::Triple::m68k:
    return createM68kTargetCodeGenInfo(CGM);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return createPNaClTargetCodeGenInfo(CGM);
    else if (Triple.getOS() == llvm::Triple::Win32)
      return createWindowsMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true);
    return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true);

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/false);

  case llvm::Triple::avr: {
    // For passing parameters, R8~R25 are used on avr, and R18~R25 are used
    // on avrtiny. For passing return value, R18~R25 are used on avr, and
    // R22~R25 are used on avrtiny.
    unsigned NPR = Target.getABI() == "avrtiny" ? 6 : 18;
    unsigned NRR = Target.getABI() == "avrtiny" ? 4 : 8;
    return createAVRTargetCodeGenInfo(CGM, NPR, NRR);
  }

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be: {
    AArch64ABIKind Kind = AArch64ABIKind::AAPCS;
    if (Target.getABI() == "darwinpcs")
      Kind = AArch64ABIKind::DarwinPCS;
    else if (Triple.isOSWindows())
      return createWindowsAArch64TargetCodeGenInfo(CGM, AArch64ABIKind::Win64);
    else if (Target.getABI() == "aapcs-soft")
      Kind = AArch64ABIKind::AAPCSSoft;
    else if (Target.getABI() == "pauthtest")
      Kind = AArch64ABIKind::PAuthTest;

    return createAArch64TargetCodeGenInfo(CGM, Kind);
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64: {
    WebAssemblyABIKind Kind = WebAssemblyABIKind::MVP;
    if (Target.getABI() == "experimental-mv")
      Kind = WebAssemblyABIKind::ExperimentalMV;
    return createWebAssemblyTargetCodeGenInfo(CGM, Kind);
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32)
      return createWindowsARMTargetCodeGenInfo(CGM, ARMABIKind::AAPCS_VFP);

    ARMABIKind Kind = ARMABIKind::AAPCS;
    StringRef ABIStr = Target.getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIKind::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIKind::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" && Triple.isHardFloatABI()))
      Kind = ARMABIKind::AAPCS_VFP;

    return createARMTargetCodeGenInfo(CGM, Kind);
  }

  case llvm::Triple::ppc: {
    if (Triple.isOSAIX())
      return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/false);

    bool IsSoftFloat =
        CodeGenOpts.FloatABI == "soft" || Target.hasFeature("spe");
    return createPPC32TargetCodeGenInfo(CGM, IsSoftFloat);
  }
  case llvm::Triple::ppcle: {
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
    return createPPC32TargetCodeGenInfo(CGM, IsSoftFloat);
  }
  case llvm::Triple::ppc64:
    if (Triple.isOSAIX())
      return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/true);

    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv1;
      if (Target.getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIKind::ELFv2;
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, IsSoftFloat);
    }
    return createPPC64TargetCodeGenInfo(CGM);
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv2;
    if (Target.getABI() == "elfv1")
      Kind = PPC64_SVR4_ABIKind::ELFv1;
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, IsSoftFloat);
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return createNVPTXTargetCodeGenInfo(CGM);

  case llvm::Triple::msp430:
    return createMSP430TargetCodeGenInfo(CGM);

  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64: {
    StringRef ABIStr = Target.getABI();
    unsigned XLen = Target.getPointerWidth(LangAS::Default);
    unsigned ABIFLen = 0;
    if (ABIStr.ends_with("f"))
      ABIFLen = 32;
    else if (ABIStr.ends_with("d"))
      ABIFLen = 64;
    bool EABI = ABIStr.ends_with("e");
    return createRISCVTargetCodeGenInfo(CGM, XLen, ABIFLen, EABI);
  }

  case llvm::Triple::systemz: {
    bool SoftFloat = CodeGenOpts.FloatABI == "soft";
    bool HasVector = !SoftFloat && Target.getABI() == "vector";
    return createSystemZTargetCodeGenInfo(CGM, HasVector, SoftFloat);
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return createTCETargetCodeGenInfo(CGM);

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return createWinX86_32TargetCodeGenInfo(
          CGM, IsDarwinVectorABI, IsWin32FloatStructABI,
          CodeGenOpts.NumRegisterParameters);
    }
    return createX86_32TargetCodeGenInfo(
        CGM, IsDarwinVectorABI, IsWin32FloatStructABI,
        CodeGenOpts.NumRegisterParameters, CodeGenOpts.FloatABI == "soft");
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = Target.getABI();
    X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512
                               : ABI == "avx"  ? X86AVXABILevel::AVX
                                               : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::UEFI:
    case llvm::Triple::Win32:
      return createWinX86_64TargetCodeGenInfo(CGM, AVXLevel);
    default:
      return createX86_64TargetCodeGenInfo(CGM, AVXLevel);
    }
  }
  case llvm::Triple::hexagon:
    return createHexagonTargetCodeGenInfo(CGM);
  case llvm::Triple::lanai:
    return createLanaiTargetCodeGenInfo(CGM);
  case llvm::Triple::r600:
    return createAMDGPUTargetCodeGenInfo(CGM);
  case llvm::Triple::amdgcn:
    return createAMDGPUTargetCodeGenInfo(CGM);
  case llvm::Triple::sparc:
    return createSparcV8TargetCodeGenInfo(CGM);
  case llvm::Triple::sparcv9:
    return createSparcV9TargetCodeGenInfo(CGM);
  case llvm::Triple::xcore:
    return createXCoreTargetCodeGenInfo(CGM);
  case llvm::Triple::arc:
    return createARCTargetCodeGenInfo(CGM);
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return createCommonSPIRTargetCodeGenInfo(CGM);
  case llvm::Triple::spirv32:
  case llvm::Triple::spirv64:
  case llvm::Triple::spirv:
    return createSPIRVTargetCodeGenInfo(CGM);
  case llvm::Triple::dxil:
    return createDirectXTargetCodeGenInfo(CGM);
  case llvm::Triple::ve:
    return createVETargetCodeGenInfo(CGM);
  case llvm::Triple::csky: {
    bool IsSoftFloat = !Target.hasFeature("hard-float-abi");
    bool hasFP64 =
        Target.hasFeature("fpuv2_df") || Target.hasFeature("fpuv3_df");
    return createCSKYTargetCodeGenInfo(CGM, IsSoftFloat ? 0
                                            : hasFP64   ? 64
                                                        : 32);
  }
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return createBPFTargetCodeGenInfo(CGM);
  case llvm::Triple::loongarch32:
  case llvm::Triple::loongarch64: {
    StringRef ABIStr = Target.getABI();
    unsigned ABIFRLen = 0;
    if (ABIStr.ends_with("f"))
      ABIFRLen = 32;
    else if (ABIStr.ends_with("d"))
      ABIFRLen = 64;
    return createLoongArchTargetCodeGenInfo(
        CGM, Target.getPointerWidth(LangAS::Default), ABIFRLen);
  }
  }
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (!TheTargetCodeGenInfo)
    TheTargetCodeGenInfo = createTargetCodeGenInfo(*this);
  return *TheTargetCodeGenInfo;
}

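// In builds with assertions, verify that the alignments Clang assumes for
// fundamental types agree with the ABI alignments encoded in the target's
// LLVM data layout string.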
static void checkDataLayoutConsistency(const TargetInfo &Target,
                                       llvm::LLVMContext &Context,
                                       const LangOptions &Opts) {
#ifndef NDEBUG
  // Don't verify non-standard ABI configurations.
  if (Opts.AlignDouble || Opts.OpenCL || Opts.HLSL)
    return;

  llvm::Triple Triple = Target.getTriple();
  llvm::DataLayout DL(Target.getDataLayoutString());
  auto Check = [&](const char *Name, llvm::Type *Ty, unsigned Alignment) {
    llvm::Align DLAlign = DL.getABITypeAlign(Ty);
    llvm::Align ClangAlign(Alignment / 8);
    if (DLAlign != ClangAlign) {
      llvm::errs() << "For target " << Triple.str() << " type " << Name
                   << " mapping to " << *Ty << " has data layout alignment "
                   << DLAlign.value() << " while clang specifies "
                   << ClangAlign.value() << "\n";
      abort();
    }
  };

  Check("bool", llvm::Type::getIntNTy(Context, Target.BoolWidth),
        Target.BoolAlign);
  Check("short", llvm::Type::getIntNTy(Context, Target.ShortWidth),
        Target.ShortAlign);
  Check("int", llvm::Type::getIntNTy(Context, Target.IntWidth),
        Target.IntAlign);
  Check("long", llvm::Type::getIntNTy(Context, Target.LongWidth),
        Target.LongAlign);
  // FIXME: M68k specifies incorrect long long alignment in both LLVM and Clang.
  if (Triple.getArch() != llvm::Triple::m68k)
    Check("long long", llvm::Type::getIntNTy(Context, Target.LongLongWidth),
          Target.LongLongAlign);
  // FIXME: There are int128 alignment mismatches on multiple targets.
  if (Target.hasInt128Type() && !Target.getTargetOpts().ForceEnableInt128 &&
      !Triple.isAMDGPU() && !Triple.isSPIRV() &&
      Triple.getArch() != llvm::Triple::ve)
    Check("__int128", llvm::Type::getIntNTy(Context, 128), Target.Int128Align);

  if (Target.hasFloat16Type())
    Check("half", llvm::Type::getFloatingPointTy(Context, *Target.HalfFormat),
          Target.HalfAlign);
  if (Target.hasBFloat16Type())
    Check("bfloat", llvm::Type::getBFloatTy(Context), Target.BFloat16Align);
  Check("float", llvm::Type::getFloatingPointTy(Context, *Target.FloatFormat),
        Target.FloatAlign);
  // FIXME: AIX specifies wrong double alignment in DataLayout
  if (!Triple.isOSAIX()) {
    Check("double",
          llvm::Type::getFloatingPointTy(Context, *Target.DoubleFormat),
          Target.DoubleAlign);
    Check("long double",
          llvm::Type::getFloatingPointTy(Context, *Target.LongDoubleFormat),
          Target.LongDoubleAlign);
  }
  // FIXME: Wasm has a mismatch in f128 alignment between Clang and LLVM.
  if (Target.hasFloat128Type() && !Triple.isWasm())
    Check("__float128", llvm::Type::getFP128Ty(Context), Target.Float128Align);
  if (Target.hasIbm128Type())
    Check("__ibm128", llvm::Type::getPPC_FP128Ty(Context), Target.Ibm128Align);

  Check("void*", llvm::PointerType::getUnqual(Context), Target.PointerAlign);
#endif
}

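// The constructor caches commonly used LLVM types and target-dependent
// parameters, and creates the language runtimes (ObjC, OpenCL, OpenMP, CUDA,
// HLSL) required by the active language options.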
CodeGenModule::CodeGenModule(ASTContext &C,
                             IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
                             const HeaderSearchOptions &HSO,
                             const PreprocessorOptions &PPO,
                             const CodeGenOptions &CGO, llvm::Module &M,
                             DiagnosticsEngine &diags,
                             CoverageSourceInfo *CoverageInfo)
    : Context(C), LangOpts(C.getLangOpts()), FS(FS), HeaderSearchOpts(HSO),
      PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
      Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
      VMContext(M.getContext()), VTables(*this), StackHandler(diags),
      SanitizerMD(new SanitizerMetadata(*this)),
      AtomicOpts(Target.getAtomicOpts()) {

  // Initialize the type cache.
  Types.reset(new CodeGenTypes(*this));
  llvm::LLVMContext &LLVMContext = M.getContext();
  VoidTy = llvm::Type::getVoidTy(LLVMContext);
  Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
  Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
  HalfTy = llvm::Type::getHalfTy(LLVMContext);
  BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
  FloatTy = llvm::Type::getFloatTy(LLVMContext);
  DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
  PointerWidthInBits = C.getTargetInfo().getPointerWidth(LangAS::Default);
  PointerAlignInBytes =
      C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(LangAS::Default))
          .getQuantity();
  SizeSizeInBytes =
      C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
  IntAlignInBytes =
      C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
  CharTy =
      llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getCharWidth());
  IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
  IntPtrTy = llvm::IntegerType::get(LLVMContext,
                                    C.getTargetInfo().getMaxPointerWidth());
  Int8PtrTy = llvm::PointerType::get(LLVMContext,
                                     C.getTargetAddressSpace(LangAS::Default));
  const llvm::DataLayout &DL = M.getDataLayout();
  AllocaInt8PtrTy =
      llvm::PointerType::get(LLVMContext, DL.getAllocaAddrSpace());
  GlobalsInt8PtrTy =
      llvm::PointerType::get(LLVMContext, DL.getDefaultGlobalsAddressSpace());
  ConstGlobalsPtrTy = llvm::PointerType::get(
      LLVMContext, C.getTargetAddressSpace(GetGlobalConstantAddressSpace()));
  ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();

  // Build C++20 Module initializers.
  // TODO: Add Microsoft here once we know the mangling required for the
  // initializers.
  CXX20ModuleInits =
      LangOpts.CPlusPlusModules && getCXXABI().getMangleContext().getKind() ==
                                       ItaniumMangleContext::MK_Itanium;

  RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();

  if (LangOpts.ObjC)
    createObjCRuntime();
  if (LangOpts.OpenCL)
    createOpenCLRuntime();
  if (LangOpts.OpenMP)
    createOpenMPRuntime();
  if (LangOpts.CUDA)
    createCUDARuntime();
  if (LangOpts.HLSL)
    createHLSLRuntime();

  // Enable TBAA unless it's suppressed. TSan and TySan need TBAA even at O0.
  if (LangOpts.Sanitize.hasOneOf(SanitizerKind::Thread | SanitizerKind::Type) ||
      (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
    TBAA.reset(new CodeGenTBAA(Context, getTypes(), TheModule, CodeGenOpts,
                               getLangOpts()));

  // If debug info or coverage generation is enabled, create the CGDebugInfo
  // object.
  if (CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo ||
      CodeGenOpts.CoverageNotesFile.size() ||
      CodeGenOpts.CoverageDataFile.size())
    DebugInfo.reset(new CGDebugInfo(*this));
  else if (getTriple().isOSWindows())
    // On Windows targets, we want to emit compiler info even if debug info is
    // otherwise disabled. Use a temporary CGDebugInfo instance to emit only
    // basic compiler metadata.
    CGDebugInfo(*this);

  Block.GlobalUniqueCount = 0;

  if (C.getLangOpts().ObjC)
    ObjCData.reset(new ObjCEntrypoints());

  if (CodeGenOpts.hasProfileClangUse()) {
    auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
        CodeGenOpts.ProfileInstrumentUsePath, *FS,
        CodeGenOpts.ProfileRemappingFile);
    // We're checking for profile read errors in CompilerInvocation, so if
    // there was an error it should've already been caught. If it hasn't been
    // somehow, trip an assertion.
    assert(ReaderOrErr);
    PGOReader = std::move(ReaderOrErr.get());
  }

  // If coverage mapping generation is enabled, create the
  // CoverageMappingModuleGen object.
  if (CodeGenOpts.CoverageMapping)
    CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));

  // Generate the module name hash here if needed.
  if (CodeGenOpts.UniqueInternalLinkageNames &&
      !getModule().getSourceFileName().empty()) {
    std::string Path = getModule().getSourceFileName();
    // Check if a path substitution is needed from the MacroPrefixMap.
    for (const auto &Entry : LangOpts.MacroPrefixMap)
      if (Path.rfind(Entry.first, 0) != std::string::npos) {
        Path = Entry.second + Path.substr(Entry.first.size());
        break;
      }
    ModuleNameHash = llvm::getUniqueInternalLinkagePostfix(Path);
  }

  // Record mregparm value now so it is visible through all of codegen.
  if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
    getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
                              CodeGenOpts.NumRegisterParameters);

  // If there are any functions that are marked for Windows secure hot-patching,
  // then build the list of functions now.
  if (!CGO.MSSecureHotPatchFunctionsFile.empty() ||
      !CGO.MSSecureHotPatchFunctionsList.empty()) {
    if (!CGO.MSSecureHotPatchFunctionsFile.empty()) {
      auto BufOrErr =
          llvm::MemoryBuffer::getFile(CGO.MSSecureHotPatchFunctionsFile);
      if (BufOrErr) {
        const llvm::MemoryBuffer &FileBuffer = **BufOrErr;
        for (llvm::line_iterator I(FileBuffer.getMemBufferRef(), true), E;
             I != E; ++I)
          this->MSHotPatchFunctions.push_back(std::string{*I});
      } else {
        auto &DE = Context.getDiagnostics();
        unsigned DiagID =
            DE.getCustomDiagID(DiagnosticsEngine::Error,
                               "failed to open hotpatch functions file "
                               "(-fms-hotpatch-functions-file): %0 : %1");
        DE.Report(DiagID) << CGO.MSSecureHotPatchFunctionsFile
                          << BufOrErr.getError().message();
      }
    }

    for (const auto &FuncName : CGO.MSSecureHotPatchFunctionsList)
      this->MSHotPatchFunctions.push_back(FuncName);

    llvm::sort(this->MSHotPatchFunctions);
  }

  if (!Context.getAuxTargetInfo())
    checkDataLayoutConsistency(Context.getTargetInfo(), LLVMContext, LangOpts);
}

CodeGenModule::~CodeGenModule() {}

void CodeGenModule::createObjCRuntime() {
  // This is just isGNUFamily(), but we want to force implementors of
  // new ABIs to decide how best to do this.
  switch (LangOpts.ObjCRuntime.getKind()) {
  case ObjCRuntime::GNUstep:
  case ObjCRuntime::GCC:
  case ObjCRuntime::ObjFW:
    ObjCRuntime.reset(CreateGNUObjCRuntime(*this));
    return;

  case ObjCRuntime::FragileMacOSX:
  case ObjCRuntime::MacOSX:
  case ObjCRuntime::iOS:
  case ObjCRuntime::WatchOS:
    ObjCRuntime.reset(CreateMacObjCRuntime(*this));
    return;
  }
  llvm_unreachable("bad runtime kind");
}

void CodeGenModule::createOpenCLRuntime() {
  OpenCLRuntime.reset(new CGOpenCLRuntime(*this));
}

void CodeGenModule::createOpenMPRuntime() {
  // Select a specialized code generation class based on the target, if any.
  // If it does not exist use the default implementation.
  switch (getTriple().getArch()) {
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
  case llvm::Triple::amdgcn:
  case llvm::Triple::spirv64:
    assert(
        getLangOpts().OpenMPIsTargetDevice &&
        "OpenMP AMDGPU/NVPTX/SPIRV is only prepared to deal with device code.");
    OpenMPRuntime.reset(new CGOpenMPRuntimeGPU(*this));
    break;
  default:
    if (LangOpts.OpenMPSimd)
      OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
    else
      OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
    break;
  }
}

void CodeGenModule::createCUDARuntime() {
  CUDARuntime.reset(CreateNVCUDARuntime(*this));
}

void CodeGenModule::createHLSLRuntime() {
  HLSLRuntime.reset(new CGHLSLRuntime(*this));
}

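// Record a constant that should replace the global with the given mangled
// name at the end of IR generation; the swap is performed by
// applyReplacements().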
void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
  Replacements[Name] = C;
}

void CodeGenModule::applyReplacements() {
  for (auto &I : Replacements) {
    StringRef MangledName = I.first;
    llvm::Constant *Replacement = I.second;
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    if (!Entry)
      continue;
    auto *OldF = cast<llvm::Function>(Entry);
    auto *NewF = dyn_cast<llvm::Function>(Replacement);
    if (!NewF) {
      if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
        NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
      } else {
        auto *CE = cast<llvm::ConstantExpr>(Replacement);
        assert(CE->getOpcode() == llvm::Instruction::BitCast ||
               CE->getOpcode() == llvm::Instruction::GetElementPtr);
        NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
      }
    }

    // Replace old with new, but keep the old order.
    OldF->replaceAllUsesWith(Replacement);
    if (NewF) {
      NewF->removeFromParent();
      OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
                                                       NewF);
    }
    OldF->eraseFromParent();
  }
}

void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
  GlobalValReplacements.push_back(std::make_pair(GV, C));
}

void CodeGenModule::applyGlobalValReplacements() {
  for (auto &I : GlobalValReplacements) {
    llvm::GlobalValue *GV = I.first;
    llvm::Constant *C = I.second;

    GV->replaceAllUsesWith(C);
    GV->eraseFromParent();
  }
}

// This is only used in aliases that we created and we know they have a
// linear structure.
static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) {
  const llvm::Constant *C;
  if (auto *GA = dyn_cast<llvm::GlobalAlias>(GV))
    C = GA->getAliasee();
  else if (auto *GI = dyn_cast<llvm::GlobalIFunc>(GV))
    C = GI->getResolver();
  else
    return GV;

  const auto *AliaseeGV = dyn_cast<llvm::GlobalValue>(C->stripPointerCasts());
  if (!AliaseeGV)
    return nullptr;

  const llvm::GlobalValue *FinalGV = AliaseeGV->getAliaseeObject();
  if (FinalGV == GV)
    return nullptr;

  return FinalGV;
}

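// Verify that the target of an alias or ifunc attribute is well formed,
// emitting diagnostics (including a fix-it suggesting the mangled name) when
// it is not.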
static bool checkAliasedGlobal(
    const ASTContext &Context, DiagnosticsEngine &Diags, SourceLocation Location,
    bool IsIFunc, const llvm::GlobalValue *Alias, const llvm::GlobalValue *&GV,
    const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames,
    SourceRange AliasRange) {
  GV = getAliasedGlobal(Alias);
  if (!GV) {
    Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
    return false;
  }

  if (GV->hasCommonLinkage()) {
    const llvm::Triple &Triple = Context.getTargetInfo().getTriple();
    if (Triple.getObjectFormat() == llvm::Triple::XCOFF) {
      Diags.Report(Location, diag::err_alias_to_common);
      return false;
    }
  }

  if (GV->isDeclaration()) {
    Diags.Report(Location, diag::err_alias_to_undefined) << IsIFunc << IsIFunc;
    Diags.Report(Location, diag::note_alias_requires_mangled_name)
        << IsIFunc << IsIFunc;
    // Provide a note if the given function is not found and exists as a
    // mangled name.
    for (const auto &[Decl, Name] : MangledDeclNames) {
      if (const auto *ND = dyn_cast<NamedDecl>(Decl.getDecl())) {
        IdentifierInfo *II = ND->getIdentifier();
        if (II && II->getName() == GV->getName()) {
          Diags.Report(Location, diag::note_alias_mangled_name_alternative)
              << Name
              << FixItHint::CreateReplacement(
                     AliasRange,
                     (Twine(IsIFunc ? "ifunc" : "alias") + "(\"" + Name + "\")")
                         .str());
        }
      }
    }
    return false;
  }

  if (IsIFunc) {
    // Check resolver function type.
    const auto *F = dyn_cast<llvm::Function>(GV);
    if (!F) {
      Diags.Report(Location, diag::err_alias_to_undefined)
          << IsIFunc << IsIFunc;
      return false;
    }

    llvm::FunctionType *FTy = F->getFunctionType();
    if (!FTy->getReturnType()->isPointerTy()) {
      Diags.Report(Location, diag::err_ifunc_resolver_return);
      return false;
    }
  }

  return true;
}

// Emit a warning if toc-data attribute is requested for global variables that
// have aliases and remove the toc-data attribute.
static void checkAliasForTocData(llvm::GlobalVariable *GVar,
                                 const CodeGenOptions &CodeGenOpts,
                                 DiagnosticsEngine &Diags,
                                 SourceLocation Location) {
  if (GVar->hasAttribute("toc-data")) {
    auto GVId = GVar->getName();
    // Is this a global variable specified by the user as local?
    if ((llvm::binary_search(CodeGenOpts.TocDataVarsUserSpecified, GVId))) {
      Diags.Report(Location, diag::warn_toc_unsupported_type)
          << GVId << "the variable has an alias";
    }
    llvm::AttributeSet CurrAttributes = GVar->getAttributes();
    llvm::AttributeSet NewAttributes =
        CurrAttributes.removeAttribute(GVar->getContext(), "toc-data");
    GVar->setAttributes(NewAttributes);
  }
}

void CodeGenModule::checkAliases() {
  // Check if the constructed aliases are well formed. It is really unfortunate
  // that we have to do this in CodeGen, but we only construct mangled names
  // and aliases during codegen.
  bool Error = false;
  DiagnosticsEngine &Diags = getDiags();
  for (const GlobalDecl &GD : Aliases) {
    const auto *D = cast<ValueDecl>(GD.getDecl());
    SourceLocation Location;
    SourceRange Range;
    bool IsIFunc = D->hasAttr<IFuncAttr>();
    if (const Attr *A = D->getDefiningAttr()) {
      Location = A->getLocation();
      Range = A->getRange();
    } else
      llvm_unreachable("Not an alias or ifunc?");

    StringRef MangledName = getMangledName(GD);
    llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
    const llvm::GlobalValue *GV = nullptr;
    if (!checkAliasedGlobal(getContext(), Diags, Location, IsIFunc, Alias, GV,
                            MangledDeclNames, Range)) {
      Error = true;
      continue;
    }

    if (getContext().getTargetInfo().getTriple().isOSAIX())
      if (const llvm::GlobalVariable *GVar =
              dyn_cast<const llvm::GlobalVariable>(GV))
        checkAliasForTocData(const_cast<llvm::GlobalVariable *>(GVar),
                             getCodeGenOpts(), Diags, Location);

    llvm::Constant *Aliasee =
        IsIFunc ? cast<llvm::GlobalIFunc>(Alias)->getResolver()
                : cast<llvm::GlobalAlias>(Alias)->getAliasee();

    llvm::GlobalValue *AliaseeGV;
    if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
      AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
    else
      AliaseeGV = cast<llvm::GlobalValue>(Aliasee);

    if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
      StringRef AliasSection = SA->getName();
      if (AliasSection != AliaseeGV->getSection())
        Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
            << AliasSection << IsIFunc << IsIFunc;
    }

    // We have to handle alias to weak aliases in here. LLVM itself disallows
    // this since the object semantics would not match the IL one. For
    // compatibility with gcc we implement it by just pointing the alias
    // to its aliasee's aliasee. We also warn, since the user is probably
    // expecting the link to be weak.
    if (auto *GA = dyn_cast<llvm::GlobalAlias>(AliaseeGV)) {
      if (GA->isInterposable()) {
        Diags.Report(Location, diag::warn_alias_to_weak_alias)
            << GV->getName() << GA->getName() << IsIFunc;
        Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
            GA->getAliasee(), Alias->getType());

        if (IsIFunc)
          cast<llvm::GlobalIFunc>(Alias)->setResolver(Aliasee);
        else
          cast<llvm::GlobalAlias>(Alias)->setAliasee(Aliasee);
      }
    }
    // ifunc resolvers are usually implemented to run before sanitizer
    // initialization. Disable instrumentation to prevent the ordering issue.
    if (IsIFunc)
      cast<llvm::Function>(Aliasee)->addFnAttr(
          llvm::Attribute::DisableSanitizerInstrumentation);
  }
  if (!Error)
    return;

  for (const GlobalDecl &GD : Aliases) {
    StringRef MangledName = getMangledName(GD);
    llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
    Alias->replaceAllUsesWith(llvm::PoisonValue::get(Alias->getType()));
    Alias->eraseFromParent();
  }
}

void CodeGenModule::clear() {
  DeferredDeclsToEmit.clear();
  EmittedDeferredDecls.clear();
  DeferredAnnotations.clear();
  if (OpenMPRuntime)
    OpenMPRuntime->clear();
}

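// Report statistics gathered while applying instrumentation profile data:
// completely unprofiled main files, out-of-date counts, and missing profile
// records.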
void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
                                       StringRef MainFile) {
  if (!hasDiagnostics())
    return;
  if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
    if (MainFile.empty())
      MainFile = "<stdin>";
    Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
  } else {
    if (Mismatched > 0)
      Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Mismatched;

    if (Missing > 0)
      Diags.Report(diag::warn_profile_data_missing) << Visited << Missing;
  }
}

static std::optional<llvm::GlobalValue::VisibilityTypes>
getLLVMVisibility(clang::LangOptions::VisibilityFromDLLStorageClassKinds K) {
  // Map to LLVM visibility.
  switch (K) {
  case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Keep:
    return std::nullopt;
  case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Default:
    return llvm::GlobalValue::DefaultVisibility;
  case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Hidden:
    return llvm::GlobalValue::HiddenVisibility;
  case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Protected:
    return llvm::GlobalValue::ProtectedVisibility;
  }
  llvm_unreachable("unknown option value!");
}

static void
setLLVMVisibility(llvm::GlobalValue &GV,
                  std::optional<llvm::GlobalValue::VisibilityTypes> V) {
  if (!V)
    return;

  // Reset DSO locality before setting the visibility. This removes
  // any effects that visibility options and annotations may have
  // had on the DSO locality. Setting the visibility will implicitly set
  // appropriate globals to DSO Local; however, this will be pessimistic
  // w.r.t. to the normal compiler IRGen.
  GV.setDSOLocal(false);
  GV.setVisibility(*V);
}

static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
                                             llvm::Module &M) {
  if (!LO.VisibilityFromDLLStorageClass)
    return;

  std::optional<llvm::GlobalValue::VisibilityTypes> DLLExportVisibility =
      getLLVMVisibility(LO.getDLLExportVisibility());

  std::optional<llvm::GlobalValue::VisibilityTypes>
      NoDLLStorageClassVisibility =
          getLLVMVisibility(LO.getNoDLLStorageClassVisibility());

  std::optional<llvm::GlobalValue::VisibilityTypes>
      ExternDeclDLLImportVisibility =
          getLLVMVisibility(LO.getExternDeclDLLImportVisibility());

  std::optional<llvm::GlobalValue::VisibilityTypes>
      ExternDeclNoDLLStorageClassVisibility =
          getLLVMVisibility(LO.getExternDeclNoDLLStorageClassVisibility());

  for (llvm::GlobalValue &GV : M.global_values()) {
    if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
      continue;

    if (GV.isDeclarationForLinker())
      setLLVMVisibility(GV, GV.getDLLStorageClass() ==
                                    llvm::GlobalValue::DLLImportStorageClass
                                ? ExternDeclDLLImportVisibility
                                : ExternDeclNoDLLStorageClassVisibility);
    else
      setLLVMVisibility(GV, GV.getDLLStorageClass() ==
                                    llvm::GlobalValue::DLLExportStorageClass
                                ? DLLExportVisibility
                                : NoDLLStorageClassVisibility);

    GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
  }
}

static bool isStackProtectorOn(const LangOptions &LangOpts,
                               const llvm::Triple &Triple,
                               clang::LangOptions::StackProtectorMode Mode) {
  if (Triple.isGPU())
    return false;
  return LangOpts.getStackProtector() == Mode;
}

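// Release finalizes the module: it flushes deferred definitions, emits the
// global ctor/dtor lists, coverage and profile metadata, and the target- and
// language-specific module flags consumed by the backends.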
955 | void CodeGenModule::Release() { |
956 | Module *Primary = getContext().getCurrentNamedModule(); |
957 | if (CXX20ModuleInits && Primary && !Primary->isHeaderLikeModule()) |
958 | EmitModuleInitializers(Primary); |
959 | EmitDeferred(); |
960 | DeferredDecls.insert_range(R&: EmittedDeferredDecls); |
961 | EmittedDeferredDecls.clear(); |
962 | EmitVTablesOpportunistically(); |
963 | applyGlobalValReplacements(); |
964 | applyReplacements(); |
965 | emitMultiVersionFunctions(); |
966 | |
967 | if (Context.getLangOpts().IncrementalExtensions && |
968 | GlobalTopLevelStmtBlockInFlight.first) { |
969 | const TopLevelStmtDecl *TLSD = GlobalTopLevelStmtBlockInFlight.second; |
970 | GlobalTopLevelStmtBlockInFlight.first->FinishFunction(EndLoc: TLSD->getEndLoc()); |
971 | GlobalTopLevelStmtBlockInFlight = {nullptr, nullptr}; |
972 | } |
973 | |
974 | // Module implementations are initialized the same way as a regular TU that |
975 | // imports one or more modules. |
976 | if (CXX20ModuleInits && Primary && Primary->isInterfaceOrPartition()) |
977 | EmitCXXModuleInitFunc(Primary); |
978 | else |
979 | EmitCXXGlobalInitFunc(); |
980 | EmitCXXGlobalCleanUpFunc(); |
981 | registerGlobalDtorsWithAtExit(); |
982 | EmitCXXThreadLocalInitFunc(); |
983 | if (ObjCRuntime) |
984 | if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction()) |
985 | AddGlobalCtor(Ctor: ObjCInitFunction); |
986 | if (Context.getLangOpts().CUDA && CUDARuntime) { |
987 | if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule()) |
988 | AddGlobalCtor(Ctor: CudaCtorFunction); |
989 | } |
990 | if (OpenMPRuntime) { |
991 | OpenMPRuntime->createOffloadEntriesAndInfoMetadata(); |
992 | OpenMPRuntime->clear(); |
993 | } |
994 | if (PGOReader) { |
995 | getModule().setProfileSummary( |
996 | M: PGOReader->getSummary(/* UseCS */ false).getMD(Context&: VMContext), |
997 | Kind: llvm::ProfileSummary::PSK_Instr); |
998 | if (PGOStats.hasDiagnostics()) |
999 | PGOStats.reportDiagnostics(Diags&: getDiags(), MainFile: getCodeGenOpts().MainFileName); |
1000 | } |
1001 | llvm::stable_sort(Range&: GlobalCtors, C: [](const Structor &L, const Structor &R) { |
1002 | return L.LexOrder < R.LexOrder; |
1003 | }); |
1004 | EmitCtorList(Fns&: GlobalCtors, GlobalName: "llvm.global_ctors" ); |
1005 | EmitCtorList(Fns&: GlobalDtors, GlobalName: "llvm.global_dtors" ); |
1006 | EmitGlobalAnnotations(); |
1007 | EmitStaticExternCAliases(); |
1008 | checkAliases(); |
1009 | EmitDeferredUnusedCoverageMappings(); |
1010 | CodeGenPGO(*this).setValueProfilingFlag(getModule()); |
1011 | CodeGenPGO(*this).setProfileVersion(getModule()); |
1012 | if (CoverageMapping) |
1013 | CoverageMapping->emit(); |
1014 | if (CodeGenOpts.SanitizeCfiCrossDso) { |
1015 | CodeGenFunction(*this).EmitCfiCheckFail(); |
1016 | CodeGenFunction(*this).EmitCfiCheckStub(); |
1017 | } |
1018 | if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI)) |
1019 | finalizeKCFITypes(); |
1020 | emitAtAvailableLinkGuard(); |
1021 | if (Context.getTargetInfo().getTriple().isWasm()) |
1022 | EmitMainVoidAlias(); |
1023 | |
1024 | if (getTriple().isAMDGPU() || |
1025 | (getTriple().isSPIRV() && getTriple().getVendor() == llvm::Triple::AMD)) { |
1026 | // Emit amdhsa_code_object_version module flag, which is code object version |
1027 | // times 100. |
1028 | if (getTarget().getTargetOpts().CodeObjectVersion != |
1029 | llvm::CodeObjectVersionKind::COV_None) { |
1030 | getModule().addModuleFlag(Behavior: llvm::Module::Error, |
1031 | Key: "amdhsa_code_object_version" , |
1032 | Val: getTarget().getTargetOpts().CodeObjectVersion); |
1033 | } |
1034 | |
1035 | // Currently, "-mprintf-kind" option is only supported for HIP |
1036 | if (LangOpts.HIP) { |
1037 | auto *MDStr = llvm::MDString::get( |
1038 | Context&: getLLVMContext(), Str: (getTarget().getTargetOpts().AMDGPUPrintfKindVal == |
1039 | TargetOptions::AMDGPUPrintfKind::Hostcall) |
1040 | ? "hostcall" |
1041 | : "buffered" ); |
1042 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "amdgpu_printf_kind" , |
1043 | Val: MDStr); |
1044 | } |
1045 | } |
1046 | |
1047 | // Emit a global array containing all external kernels or device variables |
1048 | // used by host functions and mark it as used for CUDA/HIP. This is necessary |
1049 | // to get kernels or device variables in archives linked in even if these |
1050 | // kernels or device variables are only used in host functions. |
1051 | if (!Context.CUDAExternalDeviceDeclODRUsedByHost.empty()) { |
1052 | SmallVector<llvm::Constant *, 8> UsedArray; |
1053 | for (auto D : Context.CUDAExternalDeviceDeclODRUsedByHost) { |
1054 | GlobalDecl GD; |
1055 | if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) |
1056 | GD = GlobalDecl(FD, KernelReferenceKind::Kernel); |
1057 | else |
1058 | GD = GlobalDecl(D); |
1059 | UsedArray.push_back(Elt: llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( |
1060 | C: GetAddrOfGlobal(GD), Ty: Int8PtrTy)); |
1061 | } |
1062 | |
1063 | llvm::ArrayType *ATy = llvm::ArrayType::get(ElementType: Int8PtrTy, NumElements: UsedArray.size()); |
1064 | |
1065 | auto *GV = new llvm::GlobalVariable( |
1066 | getModule(), ATy, false, llvm::GlobalValue::InternalLinkage, |
1067 | llvm::ConstantArray::get(T: ATy, V: UsedArray), "__clang_gpu_used_external" ); |
1068 | addCompilerUsedGlobal(GV); |
1069 | } |
1070 | if (LangOpts.HIP) { |
1071 | // Emit a unique ID so that host and device binaries from the same |
1072 | // compilation unit can be associated. |
1073 | auto *GV = new llvm::GlobalVariable( |
1074 | getModule(), Int8Ty, false, llvm::GlobalValue::ExternalLinkage, |
1075 | llvm::Constant::getNullValue(Ty: Int8Ty), |
1076 | "__hip_cuid_" + getContext().getCUIDHash()); |
1077 | getSanitizerMetadata()->disableSanitizerForGlobal(GV); |
1078 | addCompilerUsedGlobal(GV); |
1079 | } |
1080 | emitLLVMUsed(); |
1081 | if (SanStats) |
1082 | SanStats->finish(); |
1083 | |
1084 | if (CodeGenOpts.Autolink && |
1085 | (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) { |
1086 | EmitModuleLinkOptions(); |
1087 | } |
1088 | |
1089 | // On ELF we pass the dependent library specifiers directly to the linker |
1090 | // without manipulating them. This is in contrast to other platforms where |
1091 | // they are mapped to a specific linker option by the compiler. This |
1092 | // difference is a result of the greater variety of ELF linkers and the fact |
1093 | // that ELF linkers tend to handle libraries in a more complicated fashion |
1094 | // than on other platforms. This forces us to defer handling the dependent |
1095 | // libs to the linker. |
1096 | // |
1097 | // CUDA/HIP device and host libraries are different. Currently there is no |
1098 | // way to differentiate dependent libraries for host or device. Existing |
1099 | // usage of #pragma comment(lib, *) is intended for host libraries on |
1100 | // Windows. Therefore emit llvm.dependent-libraries only for host. |
1101 | if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) { |
1102 | auto *NMD = getModule().getOrInsertNamedMetadata(Name: "llvm.dependent-libraries" ); |
1103 | for (auto *MD : ELFDependentLibraries) |
1104 | NMD->addOperand(M: MD); |
1105 | } |
1106 | |
1107 | if (CodeGenOpts.DwarfVersion) { |
1108 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "Dwarf Version" , |
1109 | Val: CodeGenOpts.DwarfVersion); |
1110 | } |
1111 | |
1112 | if (CodeGenOpts.Dwarf64) |
1113 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "DWARF64" , Val: 1); |
1114 | |
1115 | if (Context.getLangOpts().SemanticInterposition) |
1116 | // Require various optimization to respect semantic interposition. |
1117 | getModule().setSemanticInterposition(true); |
1118 | |
1119 | if (CodeGenOpts.EmitCodeView) { |
1120 | // Indicate that we want CodeView in the metadata. |
1121 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "CodeView" , Val: 1); |
1122 | } |
1123 | if (CodeGenOpts.CodeViewGHash) { |
1124 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "CodeViewGHash" , Val: 1); |
1125 | } |
1126 | if (CodeGenOpts.ControlFlowGuard) { |
1127 | // Function ID tables and checks for Control Flow Guard (cfguard=2). |
1128 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "cfguard" , Val: 2); |
1129 | } else if (CodeGenOpts.ControlFlowGuardNoChecks) { |
1130 | // Function ID tables for Control Flow Guard (cfguard=1). |
1131 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "cfguard" , Val: 1); |
1132 | } |
1133 | if (CodeGenOpts.EHContGuard) { |
1134 | // Function ID tables for EH Continuation Guard. |
1135 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "ehcontguard" , Val: 1); |
1136 | } |
1137 | if (Context.getLangOpts().Kernel) { |
1138 | // Note if we are compiling with /kernel. |
1139 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "ms-kernel" , Val: 1); |
1140 | } |
1141 | if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) { |
1142 | // We don't support LTO with 2 with different StrictVTablePointers |
1143 | // FIXME: we could support it by stripping all the information introduced |
1144 | // by StrictVTablePointers. |
1145 | |
1146 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "StrictVTablePointers" ,Val: 1); |
1147 | |
1148 | llvm::Metadata *Ops[2] = { |
1149 | llvm::MDString::get(Context&: VMContext, Str: "StrictVTablePointers" ), |
1150 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get( |
1151 | Ty: llvm::Type::getInt32Ty(C&: VMContext), V: 1))}; |
1152 | |
1153 | getModule().addModuleFlag(Behavior: llvm::Module::Require, |
1154 | Key: "StrictVTablePointersRequirement" , |
1155 | Val: llvm::MDNode::get(Context&: VMContext, MDs: Ops)); |
1156 | } |
1157 | if (getModuleDebugInfo() || getTriple().isOSWindows()) |
1158 | // We support a single version in the linked module. The LLVM |
1159 | // parser will drop debug info with a different version number |
1160 | // (and warn about it, too). |
1161 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "Debug Info Version" , |
1162 | Val: llvm::DEBUG_METADATA_VERSION); |
1163 | |
1164 | // We need to record the widths of enums and wchar_t, so that we can generate |
1165 | // the correct build attributes in the ARM backend. wchar_size is also used by |
1166 | // TargetLibraryInfo. |
1167 | uint64_t WCharWidth = |
1168 | Context.getTypeSizeInChars(T: Context.getWideCharType()).getQuantity(); |
1169 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "wchar_size" , Val: WCharWidth); |
1170 | |
1171 | if (getTriple().isOSzOS()) { |
1172 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, |
1173 | Key: "zos_product_major_version" , |
1174 | Val: uint32_t(CLANG_VERSION_MAJOR)); |
1175 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, |
1176 | Key: "zos_product_minor_version" , |
1177 | Val: uint32_t(CLANG_VERSION_MINOR)); |
1178 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "zos_product_patchlevel" , |
1179 | Val: uint32_t(CLANG_VERSION_PATCHLEVEL)); |
1180 | std::string ProductId = getClangVendor() + "clang" ; |
1181 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_product_id" , |
1182 | Val: llvm::MDString::get(Context&: VMContext, Str: ProductId)); |
1183 | |
1184 | // Record the language because we need it for the PPA2. |
1185 | StringRef lang_str = languageToString( |
1186 | L: LangStandard::getLangStandardForKind(K: LangOpts.LangStd).Language); |
1187 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_cu_language" , |
1188 | Val: llvm::MDString::get(Context&: VMContext, Str: lang_str)); |
1189 | |
1190 | time_t TT = PreprocessorOpts.SourceDateEpoch |
1191 | ? *PreprocessorOpts.SourceDateEpoch |
1192 | : std::time(timer: nullptr); |
1193 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "zos_translation_time" , |
1194 | Val: static_cast<uint64_t>(TT)); |
1195 | |
1196 | // Multiple modes will be supported here. |
1197 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "zos_le_char_mode" , |
1198 | Val: llvm::MDString::get(Context&: VMContext, Str: "ascii" )); |
1199 | } |
1200 | |
1201 | llvm::Triple T = Context.getTargetInfo().getTriple(); |
1202 | if (T.isARM() || T.isThumb()) { |
1203 | // The minimum width of an enum in bytes |
1204 | uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4; |
1205 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "min_enum_size" , Val: EnumWidth); |
1206 | } |
1207 | |
1208 | if (T.isRISCV()) { |
1209 | StringRef ABIStr = Target.getABI(); |
1210 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1211 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "target-abi" , |
1212 | Val: llvm::MDString::get(Context&: Ctx, Str: ABIStr)); |
1213 | |
1214 | // Add the canonical ISA string as metadata so the backend can set the ELF |
1215 | // attributes correctly. We use AppendUnique so LTO will keep all of the |
1216 | // unique ISA strings that were linked together. |
1217 | const std::vector<std::string> &Features = |
1218 | getTarget().getTargetOpts().Features; |
1219 | auto ParseResult = |
1220 | llvm::RISCVISAInfo::parseFeatures(XLen: T.isRISCV64() ? 64 : 32, Features); |
1221 | if (!errorToBool(Err: ParseResult.takeError())) |
1222 | getModule().addModuleFlag( |
1223 | Behavior: llvm::Module::AppendUnique, Key: "riscv-isa" , |
1224 | Val: llvm::MDNode::get( |
1225 | Context&: Ctx, MDs: llvm::MDString::get(Context&: Ctx, Str: (*ParseResult)->toString()))); |
1226 | } |
1227 | |
1228 | if (CodeGenOpts.SanitizeCfiCrossDso) { |
1229 | // Indicate that we want cross-DSO control flow integrity checks. |
1230 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "Cross-DSO CFI" , Val: 1); |
1231 | } |
1232 | |
1233 | if (CodeGenOpts.WholeProgramVTables) { |
1234 | // Indicate whether VFE was enabled for this module, so that the |
1235 | // vcall_visibility metadata added under whole program vtables is handled |
1236 | // appropriately in the optimizer. |
1237 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "Virtual Function Elim" , |
1238 | Val: CodeGenOpts.VirtualFunctionElimination); |
1239 | } |
1240 | |
1241 | if (LangOpts.Sanitize.has(K: SanitizerKind::CFIICall)) { |
1242 | getModule().addModuleFlag(Behavior: llvm::Module::Override, |
1243 | Key: "CFI Canonical Jump Tables" , |
1244 | Val: CodeGenOpts.SanitizeCfiCanonicalJumpTables); |
1245 | } |
1246 | |
1247 | if (CodeGenOpts.SanitizeCfiICallNormalizeIntegers) { |
1248 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "cfi-normalize-integers" , |
1249 | Val: 1); |
1250 | } |
1251 | |
1252 | if (!CodeGenOpts.UniqueSourceFileIdentifier.empty()) { |
1253 | getModule().addModuleFlag( |
1254 | Behavior: llvm::Module::Append, Key: "Unique Source File Identifier" , |
1255 | Val: llvm::MDTuple::get( |
1256 | Context&: TheModule.getContext(), |
1257 | MDs: llvm::MDString::get(Context&: TheModule.getContext(), |
1258 | Str: CodeGenOpts.UniqueSourceFileIdentifier))); |
1259 | } |
1260 | |
1261 | if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI)) { |
1262 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi" , Val: 1); |
1263 | // KCFI assumes patchable-function-prefix is the same for all indirectly |
1264 | // called functions. Store the expected offset for code generation. |
1265 | if (CodeGenOpts.PatchableFunctionEntryOffset) |
1266 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi-offset" , |
1267 | Val: CodeGenOpts.PatchableFunctionEntryOffset); |
1268 | if (CodeGenOpts.SanitizeKcfiArity) |
1269 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "kcfi-arity" , Val: 1); |
1270 | } |
1271 | |
1272 | if (CodeGenOpts.CFProtectionReturn && |
1273 | Target.checkCFProtectionReturnSupported(Diags&: getDiags())) { |
1274 | // Indicate that we want to instrument return control flow protection. |
1275 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "cf-protection-return" , |
1276 | Val: 1); |
1277 | } |
1278 | |
1279 | if (CodeGenOpts.CFProtectionBranch && |
1280 | Target.checkCFProtectionBranchSupported(Diags&: getDiags())) { |
1281 | // Indicate that we want to instrument branch control flow protection. |
1282 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "cf-protection-branch" , |
1283 | Val: 1); |
1284 | |
1285 | auto Scheme = CodeGenOpts.getCFBranchLabelScheme(); |
1286 | if (Target.checkCFBranchLabelSchemeSupported(Scheme, Diags&: getDiags())) { |
1287 | if (Scheme == CFBranchLabelSchemeKind::Default) |
1288 | Scheme = Target.getDefaultCFBranchLabelScheme(); |
1289 | getModule().addModuleFlag( |
1290 | Behavior: llvm::Module::Error, Key: "cf-branch-label-scheme" , |
1291 | Val: llvm::MDString::get(Context&: getLLVMContext(), |
1292 | Str: getCFBranchLabelSchemeFlagVal(Scheme))); |
1293 | } |
1294 | } |
1295 | |
1296 | if (CodeGenOpts.FunctionReturnThunks) |
1297 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "function_return_thunk_extern" , Val: 1); |
1298 | |
1299 | if (CodeGenOpts.IndirectBranchCSPrefix) |
1300 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "indirect_branch_cs_prefix" , Val: 1); |
1301 | |
// Add module metadata for return address signing (ignoring
// non-leaf/all) and stack tagging. These are actually turned on by function
// attributes, but we use module metadata to emit build attributes. This is
// needed for LTO, where the function attributes are inside bitcode
// serialised into a global variable by the time build attributes are
// emitted, so we can't access them. LTO objects could be compiled with
// different flags, so module flags are set to "Min" behavior to achieve
// the same end result as a normal build, where e.g. BTI is off if any
// object doesn't support it.
1311 | if (Context.getTargetInfo().hasFeature(Feature: "ptrauth" ) && |
1312 | LangOpts.getSignReturnAddressScope() != |
1313 | LangOptions::SignReturnAddressScopeKind::None) |
1314 | getModule().addModuleFlag(Behavior: llvm::Module::Override, |
1315 | Key: "sign-return-address-buildattr" , Val: 1); |
1316 | if (LangOpts.Sanitize.has(K: SanitizerKind::MemtagStack)) |
1317 | getModule().addModuleFlag(Behavior: llvm::Module::Override, |
1318 | Key: "tag-stack-memory-buildattr" , Val: 1); |
1319 | |
1320 | if (T.isARM() || T.isThumb() || T.isAArch64()) { |
1321 | if (LangOpts.BranchTargetEnforcement) |
1322 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "branch-target-enforcement" , |
1323 | Val: 1); |
1324 | if (LangOpts.BranchProtectionPAuthLR) |
1325 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "branch-protection-pauth-lr" , |
1326 | Val: 1); |
1327 | if (LangOpts.GuardedControlStack) |
1328 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "guarded-control-stack" , Val: 1); |
1329 | if (LangOpts.hasSignReturnAddress()) |
1330 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "sign-return-address" , Val: 1); |
1331 | if (LangOpts.isSignReturnAddressScopeAll()) |
1332 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "sign-return-address-all" , |
1333 | Val: 1); |
1334 | if (!LangOpts.isSignReturnAddressWithAKey()) |
1335 | getModule().addModuleFlag(Behavior: llvm::Module::Min, |
1336 | Key: "sign-return-address-with-bkey" , Val: 1); |
1337 | |
1338 | if (LangOpts.PointerAuthELFGOT) |
1339 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "ptrauth-elf-got" , Val: 1); |
1340 | |
1341 | if (getTriple().isOSLinux()) { |
1342 | if (LangOpts.PointerAuthCalls) |
1343 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "ptrauth-sign-personality" , |
1344 | Val: 1); |
1345 | assert(getTriple().isOSBinFormatELF()); |
1346 | using namespace llvm::ELF; |
1347 | uint64_t PAuthABIVersion = |
1348 | (LangOpts.PointerAuthIntrinsics |
1349 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INTRINSICS) | |
1350 | (LangOpts.PointerAuthCalls |
1351 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_CALLS) | |
1352 | (LangOpts.PointerAuthReturns |
1353 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_RETURNS) | |
1354 | (LangOpts.PointerAuthAuthTraps |
1355 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_AUTHTRAPS) | |
1356 | (LangOpts.PointerAuthVTPtrAddressDiscrimination |
1357 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRADDRDISCR) | |
1358 | (LangOpts.PointerAuthVTPtrTypeDiscrimination |
1359 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRTYPEDISCR) | |
1360 | (LangOpts.PointerAuthInitFini |
1361 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI) | |
1362 | (LangOpts.PointerAuthInitFiniAddressDiscrimination |
1363 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINIADDRDISC) | |
1364 | (LangOpts.PointerAuthELFGOT |
1365 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT) | |
1366 | (LangOpts.PointerAuthIndirectGotos |
1367 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOTOS) | |
1368 | (LangOpts.PointerAuthTypeInfoVTPtrDiscrimination |
1369 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_TYPEINFOVPTRDISCR) | |
1370 | (LangOpts.PointerAuthFunctionTypeDiscrimination |
1371 | << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_FPTRTYPEDISCR); |
1372 | static_assert(AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_FPTRTYPEDISCR == |
1373 | AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_LAST, |
1374 | "Update when new enum items are defined" ); |
1375 | if (PAuthABIVersion != 0) { |
1376 | getModule().addModuleFlag(Behavior: llvm::Module::Error, |
1377 | Key: "aarch64-elf-pauthabi-platform" , |
1378 | Val: AARCH64_PAUTH_PLATFORM_LLVM_LINUX); |
1379 | getModule().addModuleFlag(Behavior: llvm::Module::Error, |
1380 | Key: "aarch64-elf-pauthabi-version" , |
1381 | Val: PAuthABIVersion); |
1382 | } |
1383 | } |
1384 | } |
1385 | |
1386 | if (CodeGenOpts.StackClashProtector) |
1387 | getModule().addModuleFlag( |
1388 | Behavior: llvm::Module::Override, Key: "probe-stack" , |
1389 | Val: llvm::MDString::get(Context&: TheModule.getContext(), Str: "inline-asm" )); |
1390 | |
1391 | if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096) |
1392 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "stack-probe-size" , |
1393 | Val: CodeGenOpts.StackProbeSize); |
1394 | |
1395 | if (!CodeGenOpts.MemoryProfileOutput.empty()) { |
1396 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1397 | getModule().addModuleFlag( |
1398 | Behavior: llvm::Module::Error, Key: "MemProfProfileFilename" , |
1399 | Val: llvm::MDString::get(Context&: Ctx, Str: CodeGenOpts.MemoryProfileOutput)); |
1400 | } |
1401 | |
1402 | if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) { |
1403 | // Indicate whether __nvvm_reflect should be configured to flush denormal |
1404 | // floating point values to 0. (This corresponds to its "__CUDA_FTZ" |
1405 | // property.) |
1406 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "nvvm-reflect-ftz" , |
1407 | Val: CodeGenOpts.FP32DenormalMode.Output != |
1408 | llvm::DenormalMode::IEEE); |
1409 | } |
1410 | |
1411 | if (LangOpts.EHAsynch) |
1412 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "eh-asynch" , Val: 1); |
1413 | |
1414 | // Emit Import Call section. |
1415 | if (CodeGenOpts.ImportCallOptimization) |
1416 | getModule().addModuleFlag(Behavior: llvm::Module::Warning, Key: "import-call-optimization" , |
1417 | Val: 1); |
1418 | |
1419 | // Enable unwind v2 (epilog). |
1420 | if (CodeGenOpts.getWinX64EHUnwindV2() != llvm::WinX64EHUnwindV2Mode::Disabled) |
1421 | getModule().addModuleFlag( |
1422 | Behavior: llvm::Module::Warning, Key: "winx64-eh-unwindv2" , |
1423 | Val: static_cast<unsigned>(CodeGenOpts.getWinX64EHUnwindV2())); |
1424 | |
// Indicate whether this module was compiled with -fopenmp.
1426 | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) |
1427 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "openmp" , Val: LangOpts.OpenMP); |
1428 | if (getLangOpts().OpenMPIsTargetDevice) |
1429 | getModule().addModuleFlag(Behavior: llvm::Module::Max, Key: "openmp-device" , |
1430 | Val: LangOpts.OpenMP); |
1431 | |
1432 | // Emit OpenCL specific module metadata: OpenCL/SPIR version. |
1433 | if (LangOpts.OpenCL || (LangOpts.CUDAIsDevice && getTriple().isSPIRV())) { |
1434 | EmitOpenCLMetadata(); |
1435 | // Emit SPIR version. |
1436 | if (getTriple().isSPIR()) { |
1437 | // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the |
1438 | // opencl.spir.version named metadata. |
1439 | // C++ for OpenCL has a distinct mapping for version compatibility with |
1440 | // OpenCL. |
1441 | auto Version = LangOpts.getOpenCLCompatibleVersion(); |
1442 | llvm::Metadata *SPIRVerElts[] = { |
1443 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get( |
1444 | Ty: Int32Ty, V: Version / 100)), |
1445 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get( |
1446 | Ty: Int32Ty, V: (Version / 100 > 1) ? 0 : 2))}; |
1447 | llvm::NamedMDNode *SPIRVerMD = |
1448 | TheModule.getOrInsertNamedMetadata(Name: "opencl.spir.version" ); |
1449 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1450 | SPIRVerMD->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: SPIRVerElts)); |
1451 | } |
1452 | } |
1453 | |
1454 | // HLSL related end of code gen work items. |
1455 | if (LangOpts.HLSL) |
1456 | getHLSLRuntime().finishCodeGen(); |
1457 | |
1458 | if (uint32_t PLevel = Context.getLangOpts().PICLevel) { |
1459 | assert(PLevel < 3 && "Invalid PIC Level" ); |
1460 | getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel)); |
1461 | if (Context.getLangOpts().PIE) |
1462 | getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel)); |
1463 | } |
1464 | |
1465 | if (getCodeGenOpts().CodeModel.size() > 0) { |
1466 | unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel) |
1467 | .Case(S: "tiny" , Value: llvm::CodeModel::Tiny) |
1468 | .Case(S: "small" , Value: llvm::CodeModel::Small) |
1469 | .Case(S: "kernel" , Value: llvm::CodeModel::Kernel) |
1470 | .Case(S: "medium" , Value: llvm::CodeModel::Medium) |
1471 | .Case(S: "large" , Value: llvm::CodeModel::Large) |
1472 | .Default(Value: ~0u); |
1473 | if (CM != ~0u) { |
1474 | llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM); |
1475 | getModule().setCodeModel(codeModel); |
1476 | |
1477 | if ((CM == llvm::CodeModel::Medium || CM == llvm::CodeModel::Large) && |
1478 | Context.getTargetInfo().getTriple().getArch() == |
1479 | llvm::Triple::x86_64) { |
1480 | getModule().setLargeDataThreshold(getCodeGenOpts().LargeDataThreshold); |
1481 | } |
1482 | } |
1483 | } |
1484 | |
1485 | if (CodeGenOpts.NoPLT) |
1486 | getModule().setRtLibUseGOT(); |
1487 | if (getTriple().isOSBinFormatELF() && |
1488 | CodeGenOpts.DirectAccessExternalData != |
1489 | getModule().getDirectAccessExternalData()) { |
1490 | getModule().setDirectAccessExternalData( |
1491 | CodeGenOpts.DirectAccessExternalData); |
1492 | } |
1493 | if (CodeGenOpts.UnwindTables) |
1494 | getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables)); |
1495 | |
1496 | switch (CodeGenOpts.getFramePointer()) { |
1497 | case CodeGenOptions::FramePointerKind::None: |
1498 | // 0 ("none") is the default. |
1499 | break; |
1500 | case CodeGenOptions::FramePointerKind::Reserved: |
1501 | getModule().setFramePointer(llvm::FramePointerKind::Reserved); |
1502 | break; |
1503 | case CodeGenOptions::FramePointerKind::NonLeaf: |
1504 | getModule().setFramePointer(llvm::FramePointerKind::NonLeaf); |
1505 | break; |
1506 | case CodeGenOptions::FramePointerKind::All: |
1507 | getModule().setFramePointer(llvm::FramePointerKind::All); |
1508 | break; |
1509 | } |
1510 | |
1511 | SimplifyPersonality(); |
1512 | |
1513 | if (getCodeGenOpts().EmitDeclMetadata) |
1514 | EmitDeclMetadata(); |
1515 | |
1516 | if (getCodeGenOpts().CoverageNotesFile.size() || |
1517 | getCodeGenOpts().CoverageDataFile.size()) |
1518 | EmitCoverageFile(); |
1519 | |
1520 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
1521 | DI->finalize(); |
1522 | |
1523 | if (getCodeGenOpts().EmitVersionIdentMetadata) |
1524 | EmitVersionIdentMetadata(); |
1525 | |
1526 | if (!getCodeGenOpts().RecordCommandLine.empty()) |
1527 | EmitCommandLineMetadata(); |
1528 | |
1529 | if (!getCodeGenOpts().StackProtectorGuard.empty()) |
1530 | getModule().setStackProtectorGuard(getCodeGenOpts().StackProtectorGuard); |
1531 | if (!getCodeGenOpts().StackProtectorGuardReg.empty()) |
1532 | getModule().setStackProtectorGuardReg( |
1533 | getCodeGenOpts().StackProtectorGuardReg); |
1534 | if (!getCodeGenOpts().StackProtectorGuardSymbol.empty()) |
1535 | getModule().setStackProtectorGuardSymbol( |
1536 | getCodeGenOpts().StackProtectorGuardSymbol); |
1537 | if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX) |
1538 | getModule().setStackProtectorGuardOffset( |
1539 | getCodeGenOpts().StackProtectorGuardOffset); |
1540 | if (getCodeGenOpts().StackAlignment) |
1541 | getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment); |
1542 | if (getCodeGenOpts().SkipRaxSetup) |
1543 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "SkipRaxSetup" , Val: 1); |
1544 | if (getLangOpts().RegCall4) |
1545 | getModule().addModuleFlag(Behavior: llvm::Module::Override, Key: "RegCallv4" , Val: 1); |
1546 | |
1547 | if (getContext().getTargetInfo().getMaxTLSAlign()) |
1548 | getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "MaxTLSAlign" , |
1549 | Val: getContext().getTargetInfo().getMaxTLSAlign()); |
1550 | |
1551 | getTargetCodeGenInfo().emitTargetGlobals(CGM&: *this); |
1552 | |
1553 | getTargetCodeGenInfo().emitTargetMetadata(CGM&: *this, MangledDeclNames); |
1554 | |
1555 | EmitBackendOptionsMetadata(CodeGenOpts: getCodeGenOpts()); |
1556 | |
1557 | // If there is device offloading code embed it in the host now. |
1558 | EmbedObject(M: &getModule(), CGOpts: CodeGenOpts, Diags&: getDiags()); |
1559 | |
// Set visibility from DLL storage class.
// We do this at the end of LLVM IR generation, after any operation
// that might affect the DLL storage class or the visibility, and
// before anything that might act on these.
1564 | setVisibilityFromDLLStorageClass(LO: LangOpts, M&: getModule()); |
1565 | |
1566 | // Check the tail call symbols are truly undefined. |
1567 | if (getTriple().isPPC() && !MustTailCallUndefinedGlobals.empty()) { |
1568 | for (auto &I : MustTailCallUndefinedGlobals) { |
1569 | if (!I.first->isDefined()) |
1570 | getDiags().Report(Loc: I.second, DiagID: diag::err_ppc_impossible_musttail) << 2; |
1571 | else { |
1572 | StringRef MangledName = getMangledName(GD: GlobalDecl(I.first)); |
1573 | llvm::GlobalValue *Entry = GetGlobalValue(Ref: MangledName); |
1574 | if (!Entry || Entry->isWeakForLinker() || |
1575 | Entry->isDeclarationForLinker()) |
1576 | getDiags().Report(Loc: I.second, DiagID: diag::err_ppc_impossible_musttail) << 2; |
1577 | } |
1578 | } |
1579 | } |
1580 | } |
1581 | |
1582 | void CodeGenModule::EmitOpenCLMetadata() { |
1583 | // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the |
1584 | // opencl.ocl.version named metadata node. |
1585 | // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL. |
1586 | auto CLVersion = LangOpts.getOpenCLCompatibleVersion(); |
1587 | |
1588 | auto EmitVersion = [this](StringRef MDName, int Version) { |
1589 | llvm::Metadata *OCLVerElts[] = { |
1590 | llvm::ConstantAsMetadata::get( |
1591 | C: llvm::ConstantInt::get(Ty: Int32Ty, V: Version / 100)), |
1592 | llvm::ConstantAsMetadata::get( |
1593 | C: llvm::ConstantInt::get(Ty: Int32Ty, V: (Version % 100) / 10))}; |
1594 | llvm::NamedMDNode *OCLVerMD = TheModule.getOrInsertNamedMetadata(Name: MDName); |
1595 | llvm::LLVMContext &Ctx = TheModule.getContext(); |
1596 | OCLVerMD->addOperand(M: llvm::MDNode::get(Context&: Ctx, MDs: OCLVerElts)); |
1597 | }; |
1598 | |
1599 | EmitVersion("opencl.ocl.version" , CLVersion); |
1600 | if (LangOpts.OpenCLCPlusPlus) { |
1601 | // In addition to the OpenCL compatible version, emit the C++ version. |
1602 | EmitVersion("opencl.cxx.version" , LangOpts.OpenCLCPlusPlusVersion); |
1603 | } |
1604 | } |
1605 | |
1606 | void CodeGenModule::EmitBackendOptionsMetadata( |
1607 | const CodeGenOptions &CodeGenOpts) { |
1608 | if (getTriple().isRISCV()) { |
1609 | getModule().addModuleFlag(Behavior: llvm::Module::Min, Key: "SmallDataLimit" , |
1610 | Val: CodeGenOpts.SmallDataLimit); |
1611 | } |
1612 | } |
1613 | |
1614 | void CodeGenModule::UpdateCompletedType(const TagDecl *TD) { |
1615 | // Make sure that this type is translated. |
1616 | getTypes().UpdateCompletedType(TD); |
1617 | } |
1618 | |
1619 | void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) { |
1620 | // Make sure that this type is translated. |
1621 | getTypes().RefreshTypeCacheForClass(RD); |
1622 | } |
1623 | |
1624 | llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) { |
1625 | if (!TBAA) |
1626 | return nullptr; |
1627 | return TBAA->getTypeInfo(QTy); |
1628 | } |
1629 | |
1630 | TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) { |
1631 | if (!TBAA) |
1632 | return TBAAAccessInfo(); |
1633 | if (getLangOpts().CUDAIsDevice) { |
1634 | // As CUDA builtin surface/texture types are replaced, skip generating TBAA |
1635 | // access info. |
1636 | if (AccessType->isCUDADeviceBuiltinSurfaceType()) { |
1637 | if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() != |
1638 | nullptr) |
1639 | return TBAAAccessInfo(); |
1640 | } else if (AccessType->isCUDADeviceBuiltinTextureType()) { |
1641 | if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() != |
1642 | nullptr) |
1643 | return TBAAAccessInfo(); |
1644 | } |
1645 | } |
1646 | return TBAA->getAccessInfo(AccessType); |
1647 | } |
1648 | |
1649 | TBAAAccessInfo |
1650 | CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) { |
1651 | if (!TBAA) |
1652 | return TBAAAccessInfo(); |
1653 | return TBAA->getVTablePtrAccessInfo(VTablePtrType); |
1654 | } |
1655 | |
1656 | llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) { |
1657 | if (!TBAA) |
1658 | return nullptr; |
1659 | return TBAA->getTBAAStructInfo(QTy); |
1660 | } |
1661 | |
1662 | llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) { |
1663 | if (!TBAA) |
1664 | return nullptr; |
1665 | return TBAA->getBaseTypeInfo(QTy); |
1666 | } |
1667 | |
1668 | llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) { |
1669 | if (!TBAA) |
1670 | return nullptr; |
1671 | return TBAA->getAccessTagInfo(Info); |
1672 | } |
1673 | |
1674 | TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, |
1675 | TBAAAccessInfo TargetInfo) { |
1676 | if (!TBAA) |
1677 | return TBAAAccessInfo(); |
1678 | return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo); |
1679 | } |
1680 | |
1681 | TBAAAccessInfo |
1682 | CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, |
1683 | TBAAAccessInfo InfoB) { |
1684 | if (!TBAA) |
1685 | return TBAAAccessInfo(); |
1686 | return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB); |
1687 | } |
1688 | |
1689 | TBAAAccessInfo |
1690 | CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo, |
1691 | TBAAAccessInfo SrcInfo) { |
1692 | if (!TBAA) |
1693 | return TBAAAccessInfo(); |
1694 | return TBAA->mergeTBAAInfoForConditionalOperator(InfoA: DestInfo, InfoB: SrcInfo); |
1695 | } |
1696 | |
1697 | void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst, |
1698 | TBAAAccessInfo TBAAInfo) { |
1699 | if (llvm::MDNode *Tag = getTBAAAccessTagInfo(Info: TBAAInfo)) |
1700 | Inst->setMetadata(KindID: llvm::LLVMContext::MD_tbaa, Node: Tag); |
1701 | } |
1702 | |
1703 | void CodeGenModule::DecorateInstructionWithInvariantGroup( |
1704 | llvm::Instruction *I, const CXXRecordDecl *RD) { |
1705 | I->setMetadata(KindID: llvm::LLVMContext::MD_invariant_group, |
1706 | Node: llvm::MDNode::get(Context&: getLLVMContext(), MDs: {})); |
1707 | } |
1708 | |
1709 | void CodeGenModule::Error(SourceLocation loc, StringRef message) { |
1710 | unsigned diagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, FormatString: "%0" ); |
1711 | getDiags().Report(Loc: Context.getFullLoc(Loc: loc), DiagID: diagID) << message; |
1712 | } |
1713 | |
1714 | /// ErrorUnsupported - Print out an error that codegen doesn't support the |
1715 | /// specified stmt yet. |
1716 | void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) { |
1717 | unsigned DiagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, |
1718 | FormatString: "cannot compile this %0 yet" ); |
1719 | std::string Msg = Type; |
1720 | getDiags().Report(Loc: Context.getFullLoc(Loc: S->getBeginLoc()), DiagID) |
1721 | << Msg << S->getSourceRange(); |
1722 | } |
1723 | |
1724 | /// ErrorUnsupported - Print out an error that codegen doesn't support the |
1725 | /// specified decl yet. |
1726 | void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) { |
1727 | unsigned DiagID = getDiags().getCustomDiagID(L: DiagnosticsEngine::Error, |
1728 | FormatString: "cannot compile this %0 yet" ); |
1729 | std::string Msg = Type; |
1730 | getDiags().Report(Loc: Context.getFullLoc(Loc: D->getLocation()), DiagID) << Msg; |
1731 | } |
1732 | |
1733 | void CodeGenModule::runWithSufficientStackSpace(SourceLocation Loc, |
1734 | llvm::function_ref<void()> Fn) { |
1735 | StackHandler.runWithSufficientStackSpace(Loc, Fn); |
1736 | } |
1737 | |
1738 | llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) { |
1739 | return llvm::ConstantInt::get(Ty: SizeTy, V: size.getQuantity()); |
1740 | } |
1741 | |
1742 | void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV, |
1743 | const NamedDecl *D) const { |
1744 | // Internal definitions always have default visibility. |
1745 | if (GV->hasLocalLinkage()) { |
1746 | GV->setVisibility(llvm::GlobalValue::DefaultVisibility); |
1747 | return; |
1748 | } |
1749 | if (!D) |
1750 | return; |
1751 | |
1752 | // Set visibility for definitions, and for declarations if requested globally |
1753 | // or set explicitly. |
1754 | LinkageInfo LV = D->getLinkageAndVisibility(); |
1755 | |
1756 | // OpenMP declare target variables must be visible to the host so they can |
1757 | // be registered. We require protected visibility unless the variable has |
1758 | // the DT_nohost modifier and does not need to be registered. |
1759 | if (Context.getLangOpts().OpenMP && |
1760 | Context.getLangOpts().OpenMPIsTargetDevice && isa<VarDecl>(Val: D) && |
1761 | D->hasAttr<OMPDeclareTargetDeclAttr>() && |
1762 | D->getAttr<OMPDeclareTargetDeclAttr>()->getDevType() != |
1763 | OMPDeclareTargetDeclAttr::DT_NoHost && |
1764 | LV.getVisibility() == HiddenVisibility) { |
1765 | GV->setVisibility(llvm::GlobalValue::ProtectedVisibility); |
1766 | return; |
1767 | } |
1768 | |
1769 | if (Context.getLangOpts().HLSL && !D->isInExportDeclContext()) { |
1770 | GV->setVisibility(llvm::GlobalValue::HiddenVisibility); |
1771 | return; |
1772 | } |
1773 | |
1774 | if (GV->hasDLLExportStorageClass() || GV->hasDLLImportStorageClass()) { |
// Reject incompatible dllstorage and visibility annotations.
1776 | if (!LV.isVisibilityExplicit()) |
1777 | return; |
1778 | if (GV->hasDLLExportStorageClass()) { |
1779 | if (LV.getVisibility() == HiddenVisibility) |
1780 | getDiags().Report(Loc: D->getLocation(), |
1781 | DiagID: diag::err_hidden_visibility_dllexport); |
1782 | } else if (LV.getVisibility() != DefaultVisibility) { |
1783 | getDiags().Report(Loc: D->getLocation(), |
1784 | DiagID: diag::err_non_default_visibility_dllimport); |
1785 | } |
1786 | return; |
1787 | } |
1788 | |
1789 | if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls || |
1790 | !GV->isDeclarationForLinker()) |
1791 | GV->setVisibility(GetLLVMVisibility(V: LV.getVisibility())); |
1792 | } |
1793 | |
1794 | static bool shouldAssumeDSOLocal(const CodeGenModule &CGM, |
1795 | llvm::GlobalValue *GV) { |
1796 | if (GV->hasLocalLinkage()) |
1797 | return true; |
1798 | |
1799 | if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage()) |
1800 | return true; |
1801 | |
1802 | // DLLImport explicitly marks the GV as external. |
1803 | if (GV->hasDLLImportStorageClass()) |
1804 | return false; |
1805 | |
1806 | const llvm::Triple &TT = CGM.getTriple(); |
1807 | const auto &CGOpts = CGM.getCodeGenOpts(); |
1808 | if (TT.isOSCygMing()) { |
1809 | // In MinGW, variables without DLLImport can still be automatically |
1810 | // imported from a DLL by the linker; don't mark variables that |
1811 | // potentially could come from another DLL as DSO local. |
1812 | |
1813 | // With EmulatedTLS, TLS variables can be autoimported from other DLLs |
1814 | // (and this actually happens in the public interface of libstdc++), so |
1815 | // such variables can't be marked as DSO local. (Native TLS variables |
1816 | // can't be dllimported at all, though.) |
1817 | if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(Val: GV) && |
1818 | (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS) && |
1819 | CGOpts.AutoImport) |
1820 | return false; |
1821 | } |
1822 | |
1823 | // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols |
1824 | // remain unresolved in the link, they can be resolved to zero, which is |
1825 | // outside the current DSO. |
1826 | if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage()) |
1827 | return false; |
1828 | |
1829 | // Every other GV is local on COFF. |
// Make an exception for Windows OS in the triple: some firmware builds use
// *-win32-macho triples. This (accidentally?) produced Windows relocations
// without GOT tables in older clang versions; keep this behaviour.
1833 | // FIXME: even thread local variables? |
1834 | if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO())) |
1835 | return true; |
1836 | |
1837 | // Only handle COFF and ELF for now. |
1838 | if (!TT.isOSBinFormatELF()) |
1839 | return false; |
1840 | |
1841 | // If this is not an executable, don't assume anything is local. |
1842 | llvm::Reloc::Model RM = CGOpts.RelocationModel; |
1843 | const auto &LOpts = CGM.getLangOpts(); |
1844 | if (RM != llvm::Reloc::Static && !LOpts.PIE) { |
1845 | // On ELF, if -fno-semantic-interposition is specified and the target |
1846 | // supports local aliases, there will be neither CC1 |
1847 | // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set |
1848 | // dso_local on the function if using a local alias is preferable (can avoid |
1849 | // PLT indirection). |
1850 | if (!(isa<llvm::Function>(Val: GV) && GV->canBenefitFromLocalAlias())) |
1851 | return false; |
1852 | return !(CGM.getLangOpts().SemanticInterposition || |
1853 | CGM.getLangOpts().HalfNoSemanticInterposition); |
1854 | } |
1855 | |
1856 | // A definition cannot be preempted from an executable. |
1857 | if (!GV->isDeclarationForLinker()) |
1858 | return true; |
1859 | |
// Most PIC code sequences that assume that a symbol is local cannot produce a
// 0 if it turns out the symbol is undefined. While this is ABI- and
// relocation-dependent, it seems worth it to handle it here.
1863 | if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage()) |
1864 | return false; |
1865 | |
1866 | // PowerPC64 prefers TOC indirection to avoid copy relocations. |
1867 | if (TT.isPPC64()) |
1868 | return false; |
1869 | |
1870 | if (CGOpts.DirectAccessExternalData) { |
1871 | // If -fdirect-access-external-data (default for -fno-pic), set dso_local |
1872 | // for non-thread-local variables. If the symbol is not defined in the |
1873 | // executable, a copy relocation will be needed at link time. dso_local is |
1874 | // excluded for thread-local variables because they generally don't support |
1875 | // copy relocations. |
1876 | if (auto *Var = dyn_cast<llvm::GlobalVariable>(Val: GV)) |
1877 | if (!Var->isThreadLocal()) |
1878 | return true; |
1879 | |
1880 | // -fno-pic sets dso_local on a function declaration to allow direct |
1881 | // accesses when taking its address (similar to a data symbol). If the |
1882 | // function is not defined in the executable, a canonical PLT entry will be |
1883 | // needed at link time. -fno-direct-access-external-data can avoid the |
1884 | // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as |
1885 | // it could just cause trouble without providing perceptible benefits. |
1886 | if (isa<llvm::Function>(Val: GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static) |
1887 | return true; |
1888 | } |
1889 | |
1890 | // If we can use copy relocations we can assume it is local. |
1891 | |
1892 | // Otherwise don't assume it is local. |
1893 | return false; |
1894 | } |
1895 | |
1896 | void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const { |
1897 | GV->setDSOLocal(shouldAssumeDSOLocal(CGM: *this, GV)); |
1898 | } |
1899 | |
1900 | void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV, |
1901 | GlobalDecl GD) const { |
1902 | const auto *D = dyn_cast<NamedDecl>(Val: GD.getDecl()); |
1903 | // C++ destructors have a few C++ ABI specific special cases. |
1904 | if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(Val: D)) { |
1905 | getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, DT: GD.getDtorType()); |
1906 | return; |
1907 | } |
1908 | setDLLImportDLLExport(GV, D); |
1909 | } |
1910 | |
1911 | void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV, |
1912 | const NamedDecl *D) const { |
1913 | if (D && D->isExternallyVisible()) { |
1914 | if (D->hasAttr<DLLImportAttr>()) |
1915 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); |
1916 | else if ((D->hasAttr<DLLExportAttr>() || |
1917 | shouldMapVisibilityToDLLExport(D)) && |
1918 | !GV->isDeclarationForLinker()) |
1919 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass); |
1920 | } |
1921 | } |
1922 | |
1923 | void CodeGenModule::setGVProperties(llvm::GlobalValue *GV, |
1924 | GlobalDecl GD) const { |
1925 | setDLLImportDLLExport(GV, GD); |
1926 | setGVPropertiesAux(GV, D: dyn_cast<NamedDecl>(Val: GD.getDecl())); |
1927 | } |
1928 | |
1929 | void CodeGenModule::setGVProperties(llvm::GlobalValue *GV, |
1930 | const NamedDecl *D) const { |
1931 | setDLLImportDLLExport(GV, D); |
1932 | setGVPropertiesAux(GV, D); |
1933 | } |
1934 | |
1935 | void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV, |
1936 | const NamedDecl *D) const { |
1937 | setGlobalVisibility(GV, D); |
1938 | setDSOLocal(GV); |
1939 | GV->setPartition(CodeGenOpts.SymbolPartition); |
1940 | } |
1941 | |
1942 | static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) { |
1943 | return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S) |
1944 | .Case(S: "global-dynamic" , Value: llvm::GlobalVariable::GeneralDynamicTLSModel) |
1945 | .Case(S: "local-dynamic" , Value: llvm::GlobalVariable::LocalDynamicTLSModel) |
1946 | .Case(S: "initial-exec" , Value: llvm::GlobalVariable::InitialExecTLSModel) |
1947 | .Case(S: "local-exec" , Value: llvm::GlobalVariable::LocalExecTLSModel); |
1948 | } |
1949 | |
1950 | llvm::GlobalVariable::ThreadLocalMode |
1951 | CodeGenModule::GetDefaultLLVMTLSModel() const { |
1952 | switch (CodeGenOpts.getDefaultTLSModel()) { |
1953 | case CodeGenOptions::GeneralDynamicTLSModel: |
1954 | return llvm::GlobalVariable::GeneralDynamicTLSModel; |
1955 | case CodeGenOptions::LocalDynamicTLSModel: |
1956 | return llvm::GlobalVariable::LocalDynamicTLSModel; |
1957 | case CodeGenOptions::InitialExecTLSModel: |
1958 | return llvm::GlobalVariable::InitialExecTLSModel; |
1959 | case CodeGenOptions::LocalExecTLSModel: |
1960 | return llvm::GlobalVariable::LocalExecTLSModel; |
1961 | } |
1962 | llvm_unreachable("Invalid TLS model!" ); |
1963 | } |
1964 | |
1965 | void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const { |
1966 | assert(D.getTLSKind() && "setting TLS mode on non-TLS var!" ); |
1967 | |
1968 | llvm::GlobalValue::ThreadLocalMode TLM; |
1969 | TLM = GetDefaultLLVMTLSModel(); |
1970 | |
1971 | // Override the TLS model if it is explicitly specified. |
1972 | if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) { |
1973 | TLM = GetLLVMTLSModel(S: Attr->getModel()); |
1974 | } |
1975 | |
1976 | GV->setThreadLocalMode(TLM); |
1977 | } |
1978 | |
1979 | static std::string getCPUSpecificMangling(const CodeGenModule &CGM, |
1980 | StringRef Name) { |
1981 | const TargetInfo &Target = CGM.getTarget(); |
1982 | return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str(); |
1983 | } |
1984 | |
1985 | static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM, |
1986 | const CPUSpecificAttr *Attr, |
1987 | unsigned CPUIndex, |
1988 | raw_ostream &Out) { |
1989 | // cpu_specific gets the current name, dispatch gets the resolver if IFunc is |
1990 | // supported. |
1991 | if (Attr) |
1992 | Out << getCPUSpecificMangling(CGM, Name: Attr->getCPUName(Index: CPUIndex)->getName()); |
1993 | else if (CGM.getTarget().supportsIFunc()) |
1994 | Out << ".resolver" ; |
1995 | } |
1996 | |
1997 | // Returns true if GD is a function decl with internal linkage and |
1998 | // needs a unique suffix after the mangled name. |
1999 | static bool isUniqueInternalLinkageDecl(GlobalDecl GD, |
2000 | CodeGenModule &CGM) { |
2001 | const Decl *D = GD.getDecl(); |
2002 | return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(Val: D) && |
2003 | (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage); |
2004 | } |
2005 | |
2006 | static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD, |
2007 | const NamedDecl *ND, |
2008 | bool OmitMultiVersionMangling = false) { |
2009 | SmallString<256> Buffer; |
2010 | llvm::raw_svector_ostream Out(Buffer); |
2011 | MangleContext &MC = CGM.getCXXABI().getMangleContext(); |
2012 | if (!CGM.getModuleNameHash().empty()) |
2013 | MC.needsUniqueInternalLinkageNames(); |
2014 | bool ShouldMangle = MC.shouldMangleDeclName(D: ND); |
2015 | if (ShouldMangle) |
2016 | MC.mangleName(GD: GD.getWithDecl(D: ND), Out); |
2017 | else { |
2018 | IdentifierInfo *II = ND->getIdentifier(); |
2019 | assert(II && "Attempt to mangle unnamed decl." ); |
2020 | const auto *FD = dyn_cast<FunctionDecl>(Val: ND); |
2021 | |
2022 | if (FD && |
2023 | FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) { |
2024 | if (CGM.getLangOpts().RegCall4) |
2025 | Out << "__regcall4__" << II->getName(); |
2026 | else |
2027 | Out << "__regcall3__" << II->getName(); |
2028 | } else if (FD && FD->hasAttr<CUDAGlobalAttr>() && |
2029 | GD.getKernelReferenceKind() == KernelReferenceKind::Stub) { |
2030 | Out << "__device_stub__" << II->getName(); |
2031 | } else if (FD && |
2032 | DeviceKernelAttr::isOpenCLSpelling( |
2033 | A: FD->getAttr<DeviceKernelAttr>()) && |
2034 | GD.getKernelReferenceKind() == KernelReferenceKind::Stub) { |
2035 | Out << "__clang_ocl_kern_imp_" << II->getName(); |
2036 | } else { |
2037 | Out << II->getName(); |
2038 | } |
2039 | } |
2040 | |
2041 | // Check if the module name hash should be appended for internal linkage |
2042 | // symbols. This should come before multi-version target suffixes are |
2043 | // appended. This is to keep the name and module hash suffix of the |
2044 | // internal linkage function together. The unique suffix should only be |
2045 | // added when name mangling is done to make sure that the final name can |
2046 | // be properly demangled. For example, for C functions without prototypes, |
// name mangling is not done and the unique suffix should not be appended
2048 | // then. |
2049 | if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) { |
2050 | assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames && |
2051 | "Hash computed when not explicitly requested" ); |
2052 | Out << CGM.getModuleNameHash(); |
2053 | } |
2054 | |
2055 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) |
2056 | if (FD->isMultiVersion() && !OmitMultiVersionMangling) { |
2057 | switch (FD->getMultiVersionKind()) { |
2058 | case MultiVersionKind::CPUDispatch: |
2059 | case MultiVersionKind::CPUSpecific: |
2060 | AppendCPUSpecificCPUDispatchMangling(CGM, |
2061 | Attr: FD->getAttr<CPUSpecificAttr>(), |
2062 | CPUIndex: GD.getMultiVersionIndex(), Out); |
2063 | break; |
2064 | case MultiVersionKind::Target: { |
2065 | auto *Attr = FD->getAttr<TargetAttr>(); |
2066 | assert(Attr && "Expected TargetAttr to be present " |
2067 | "for attribute mangling" ); |
2068 | const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo(); |
2069 | Info.appendAttributeMangling(Attr, Out); |
2070 | break; |
2071 | } |
2072 | case MultiVersionKind::TargetVersion: { |
2073 | auto *Attr = FD->getAttr<TargetVersionAttr>(); |
2074 | assert(Attr && "Expected TargetVersionAttr to be present " |
2075 | "for attribute mangling" ); |
2076 | const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo(); |
2077 | Info.appendAttributeMangling(Attr, Out); |
2078 | break; |
2079 | } |
2080 | case MultiVersionKind::TargetClones: { |
2081 | auto *Attr = FD->getAttr<TargetClonesAttr>(); |
2082 | assert(Attr && "Expected TargetClonesAttr to be present " |
2083 | "for attribute mangling" ); |
2084 | unsigned Index = GD.getMultiVersionIndex(); |
2085 | const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo(); |
2086 | Info.appendAttributeMangling(Attr, Index, Out); |
2087 | break; |
2088 | } |
2089 | case MultiVersionKind::None: |
2090 | llvm_unreachable("None multiversion type isn't valid here" ); |
2091 | } |
2092 | } |
2093 | |
// Make a unique name for a device-side static file-scope variable for HIP.
2095 | if (CGM.getContext().shouldExternalize(D: ND) && |
2096 | CGM.getLangOpts().GPURelocatableDeviceCode && |
2097 | CGM.getLangOpts().CUDAIsDevice) |
2098 | CGM.printPostfixForExternalizedDecl(OS&: Out, D: ND); |
2099 | |
2100 | return std::string(Out.str()); |
2101 | } |
2102 | |
2103 | void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD, |
2104 | const FunctionDecl *FD, |
2105 | StringRef &CurName) { |
2106 | if (!FD->isMultiVersion()) |
2107 | return; |
2108 | |
2109 | // Get the name of what this would be without the 'target' attribute. This |
// allows us to look up the version that was emitted when this wasn't a
2111 | // multiversion function. |
2112 | std::string NonTargetName = |
2113 | getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
2114 | GlobalDecl OtherGD; |
2115 | if (lookupRepresentativeDecl(MangledName: NonTargetName, Result&: OtherGD)) { |
2116 | assert(OtherGD.getCanonicalDecl() |
2117 | .getDecl() |
2118 | ->getAsFunction() |
2119 | ->isMultiVersion() && |
2120 | "Other GD should now be a multiversioned function" ); |
2121 | // OtherFD is the version of this function that was mangled BEFORE |
2122 | // becoming a MultiVersion function. It potentially needs to be updated. |
2123 | const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl() |
2124 | .getDecl() |
2125 | ->getAsFunction() |
2126 | ->getMostRecentDecl(); |
2127 | std::string OtherName = getMangledNameImpl(CGM&: *this, GD: OtherGD, ND: OtherFD); |
2128 | // This is so that if the initial version was already the 'default' |
2129 | // version, we don't try to update it. |
2130 | if (OtherName != NonTargetName) { |
2131 | // Remove instead of erase, since others may have stored the StringRef |
2132 | // to this. |
2133 | const auto ExistingRecord = Manglings.find(Key: NonTargetName); |
2134 | if (ExistingRecord != std::end(cont&: Manglings)) |
2135 | Manglings.remove(KeyValue: &(*ExistingRecord)); |
2136 | auto Result = Manglings.insert(KV: std::make_pair(x&: OtherName, y&: OtherGD)); |
2137 | StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] = |
2138 | Result.first->first(); |
// If this is the decl currently being created, make sure we update the name.
2140 | if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl()) |
2141 | CurName = OtherNameRef; |
2142 | if (llvm::GlobalValue *Entry = GetGlobalValue(Ref: NonTargetName)) |
2143 | Entry->setName(OtherName); |
2144 | } |
2145 | } |
2146 | } |
2147 | |
2148 | StringRef CodeGenModule::getMangledName(GlobalDecl GD) { |
2149 | GlobalDecl CanonicalGD = GD.getCanonicalDecl(); |
2150 | |
2151 | // Some ABIs don't have constructor variants. Make sure that base and |
2152 | // complete constructors get mangled the same. |
2153 | if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: CanonicalGD.getDecl())) { |
2154 | if (!getTarget().getCXXABI().hasConstructorVariants()) { |
2155 | CXXCtorType OrigCtorType = GD.getCtorType(); |
2156 | assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete); |
2157 | if (OrigCtorType == Ctor_Base) |
2158 | CanonicalGD = GlobalDecl(CD, Ctor_Complete); |
2159 | } |
2160 | } |
2161 | |
// In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
// static device variable depends on whether the variable is referenced by
// a host or a device function. Therefore the mangled name cannot be
// cached.
2166 | if (!LangOpts.CUDAIsDevice || !getContext().mayExternalize(D: GD.getDecl())) { |
2167 | auto FoundName = MangledDeclNames.find(Key: CanonicalGD); |
2168 | if (FoundName != MangledDeclNames.end()) |
2169 | return FoundName->second; |
2170 | } |
2171 | |
2172 | // Keep the first result in the case of a mangling collision. |
2173 | const auto *ND = cast<NamedDecl>(Val: GD.getDecl()); |
2174 | std::string MangledName = getMangledNameImpl(CGM&: *this, GD, ND); |
2175 | |
// Ensure either that we have different ABIs between host and device
// compilations (say, host compilation follows the MSVC ABI while device
// compilation follows the Itanium C++ ABI) or, if they follow the same ABI,
// that kernel names after mangling are the same after name stubbing. The
// latter check is very important, as the device kernel name mangled in the
// host compilation is used to resolve the device binaries to be executed.
// Inconsistent naming results in undefined behavior. Even though we cannot
// check the naming directly between host and device compilations, comparing
// the host- and device-mangling in the host compilation can help catch
// certain mismatches.
2185 | assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() || |
2186 | getContext().shouldExternalize(ND) || getLangOpts().CUDAIsDevice || |
2187 | (getContext().getAuxTargetInfo() && |
2188 | (getContext().getAuxTargetInfo()->getCXXABI() != |
2189 | getContext().getTargetInfo().getCXXABI())) || |
2190 | getCUDARuntime().getDeviceSideName(ND) == |
2191 | getMangledNameImpl( |
2192 | *this, |
2193 | GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel), |
2194 | ND)); |
2195 | |
2196 | // This invariant should hold true in the future. |
2197 | // Prior work: |
2198 | // https://discourse.llvm.org/t/rfc-clang-diagnostic-for-demangling-failures/82835/8 |
2199 | // https://github.com/llvm/llvm-project/issues/111345 |
2200 | // assert(!((StringRef(MangledName).starts_with("_Z") || |
2201 | // StringRef(MangledName).starts_with("?")) && |
2202 | // !GD.getDecl()->hasAttr<AsmLabelAttr>() && |
2203 | // llvm::demangle(MangledName) == MangledName) && |
2204 | // "LLVM demangler must demangle clang-generated names"); |
2205 | |
2206 | auto Result = Manglings.insert(KV: std::make_pair(x&: MangledName, y&: GD)); |
2207 | return MangledDeclNames[CanonicalGD] = Result.first->first(); |
2208 | } |
2209 | |
2210 | StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD, |
2211 | const BlockDecl *BD) { |
2212 | MangleContext &MangleCtx = getCXXABI().getMangleContext(); |
2213 | const Decl *D = GD.getDecl(); |
2214 | |
2215 | SmallString<256> Buffer; |
2216 | llvm::raw_svector_ostream Out(Buffer); |
2217 | if (!D) |
2218 | MangleCtx.mangleGlobalBlock(BD, |
2219 | ID: dyn_cast_or_null<VarDecl>(Val: initializedGlobalDecl.getDecl()), Out); |
2220 | else if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: D)) |
2221 | MangleCtx.mangleCtorBlock(CD, CT: GD.getCtorType(), BD, Out); |
2222 | else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: D)) |
2223 | MangleCtx.mangleDtorBlock(CD: DD, DT: GD.getDtorType(), BD, Out); |
2224 | else |
2225 | MangleCtx.mangleBlock(DC: cast<DeclContext>(Val: D), BD, Out); |
2226 | |
2227 | auto Result = Manglings.insert(KV: std::make_pair(x: Out.str(), y&: BD)); |
2228 | return Result.first->first(); |
2229 | } |
2230 | |
2231 | const GlobalDecl CodeGenModule::getMangledNameDecl(StringRef Name) { |
2232 | auto it = MangledDeclNames.begin(); |
2233 | while (it != MangledDeclNames.end()) { |
2234 | if (it->second == Name) |
2235 | return it->first; |
2236 | it++; |
2237 | } |
2238 | return GlobalDecl(); |
2239 | } |
2240 | |
2241 | llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) { |
2242 | return getModule().getNamedValue(Name); |
2243 | } |
2244 | |
2245 | /// AddGlobalCtor - Add a function to the list that will be called before |
2246 | /// main() runs. |
2247 | void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority, |
2248 | unsigned LexOrder, |
2249 | llvm::Constant *AssociatedData) { |
2250 | // FIXME: Type coercion of void()* types. |
2251 | GlobalCtors.push_back(x: Structor(Priority, LexOrder, Ctor, AssociatedData)); |
2252 | } |
2253 | |
2254 | /// AddGlobalDtor - Add a function to the list that will be called |
2255 | /// when the module is unloaded. |
2256 | void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority, |
2257 | bool IsDtorAttrFunc) { |
2258 | if (CodeGenOpts.RegisterGlobalDtorsWithAtExit && |
2259 | (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) { |
2260 | DtorsUsingAtExit[Priority].push_back(NewVal: Dtor); |
2261 | return; |
2262 | } |
2263 | |
2264 | // FIXME: Type coercion of void()* types. |
2265 | GlobalDtors.push_back(x: Structor(Priority, ~0U, Dtor, nullptr)); |
2266 | } |
2267 | |
2268 | void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) { |
2269 | if (Fns.empty()) return; |
2270 | |
2271 | const PointerAuthSchema &InitFiniAuthSchema = |
2272 | getCodeGenOpts().PointerAuth.InitFiniPointers; |
2273 | |
2274 | // Ctor function type is ptr. |
2275 | llvm::PointerType *PtrTy = llvm::PointerType::get( |
2276 | C&: getLLVMContext(), AddressSpace: TheModule.getDataLayout().getProgramAddressSpace()); |
2277 | |
2278 | // Get the type of a ctor entry, { i32, ptr, ptr }. |
2279 | llvm::StructType *CtorStructTy = llvm::StructType::get(elt1: Int32Ty, elts: PtrTy, elts: PtrTy); |
2280 | |
2281 | // Construct the constructor and destructor arrays. |
2282 | ConstantInitBuilder Builder(*this); |
2283 | auto Ctors = Builder.beginArray(eltTy: CtorStructTy); |
2284 | for (const auto &I : Fns) { |
2285 | auto Ctor = Ctors.beginStruct(ty: CtorStructTy); |
2286 | Ctor.addInt(intTy: Int32Ty, value: I.Priority); |
2287 | if (InitFiniAuthSchema) { |
2288 | llvm::Constant *StorageAddress = |
2289 | (InitFiniAuthSchema.isAddressDiscriminated() |
2290 | ? llvm::ConstantExpr::getIntToPtr( |
2291 | C: llvm::ConstantInt::get( |
2292 | Ty: IntPtrTy, |
2293 | V: llvm::ConstantPtrAuth::AddrDiscriminator_CtorsDtors), |
2294 | Ty: PtrTy) |
2295 | : nullptr); |
2296 | llvm::Constant *SignedCtorPtr = getConstantSignedPointer( |
2297 | Pointer: I.Initializer, Key: InitFiniAuthSchema.getKey(), StorageAddress, |
2298 | OtherDiscriminator: llvm::ConstantInt::get( |
2299 | Ty: SizeTy, V: InitFiniAuthSchema.getConstantDiscrimination())); |
2300 | Ctor.add(value: SignedCtorPtr); |
2301 | } else { |
2302 | Ctor.add(value: I.Initializer); |
2303 | } |
2304 | if (I.AssociatedData) |
2305 | Ctor.add(value: I.AssociatedData); |
2306 | else |
2307 | Ctor.addNullPointer(ptrTy: PtrTy); |
2308 | Ctor.finishAndAddTo(parent&: Ctors); |
2309 | } |
2310 | |
2311 | auto List = Ctors.finishAndCreateGlobal(args&: GlobalName, args: getPointerAlign(), |
2312 | /*constant*/ args: false, |
2313 | args: llvm::GlobalValue::AppendingLinkage); |
2314 | |
2315 | // The LTO linker doesn't seem to like it when we set an alignment |
2316 | // on appending variables. Take it off as a workaround. |
2317 | List->setAlignment(std::nullopt); |
2318 | |
2319 | Fns.clear(); |
2320 | } |
2321 | |
2322 | llvm::GlobalValue::LinkageTypes |
2323 | CodeGenModule::getFunctionLinkage(GlobalDecl GD) { |
2324 | const auto *D = cast<FunctionDecl>(Val: GD.getDecl()); |
2325 | |
2326 | GVALinkage Linkage = getContext().GetGVALinkageForFunction(FD: D); |
2327 | |
2328 | if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(Val: D)) |
2329 | return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, DT: GD.getDtorType()); |
2330 | |
2331 | return getLLVMLinkageForDeclarator(D, Linkage); |
2332 | } |
2333 | |
2334 | llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) { |
2335 | llvm::MDString *MDS = dyn_cast<llvm::MDString>(Val: MD); |
2336 | if (!MDS) return nullptr; |
2337 | |
2338 | return llvm::ConstantInt::get(Ty: Int64Ty, V: llvm::MD5Hash(Str: MDS->getString())); |
2339 | } |
2340 | |
2341 | llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T) { |
2342 | if (auto *FnType = T->getAs<FunctionProtoType>()) |
2343 | T = getContext().getFunctionType( |
2344 | ResultTy: FnType->getReturnType(), Args: FnType->getParamTypes(), |
2345 | EPI: FnType->getExtProtoInfo().withExceptionSpec(ESI: EST_None)); |
2346 | |
2347 | std::string OutName; |
2348 | llvm::raw_string_ostream Out(OutName); |
2349 | getCXXABI().getMangleContext().mangleCanonicalTypeName( |
2350 | T, Out, NormalizeIntegers: getCodeGenOpts().SanitizeCfiICallNormalizeIntegers); |
2351 | |
2352 | if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers) |
2353 | Out << ".normalized" ; |
2354 | |
2355 | return llvm::ConstantInt::get(Ty: Int32Ty, |
2356 | V: static_cast<uint32_t>(llvm::xxHash64(Data: OutName))); |
2357 | } |
2358 | |
2359 | void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD, |
2360 | const CGFunctionInfo &Info, |
2361 | llvm::Function *F, bool IsThunk) { |
2362 | unsigned CallingConv; |
2363 | llvm::AttributeList PAL; |
2364 | ConstructAttributeList(Name: F->getName(), Info, CalleeInfo: GD, Attrs&: PAL, CallingConv, |
2365 | /*AttrOnCallSite=*/false, IsThunk); |
2366 | if (CallingConv == llvm::CallingConv::X86_VectorCall && |
2367 | getTarget().getTriple().isWindowsArm64EC()) { |
2368 | SourceLocation Loc; |
2369 | if (const Decl *D = GD.getDecl()) |
2370 | Loc = D->getLocation(); |
2371 | |
2372 | Error(loc: Loc, message: "__vectorcall calling convention is not currently supported" ); |
2373 | } |
2374 | F->setAttributes(PAL); |
2375 | F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); |
2376 | } |
2377 | |
2378 | static void removeImageAccessQualifier(std::string& TyName) { |
2379 | std::string ReadOnlyQual("__read_only" ); |
2380 | std::string::size_type ReadOnlyPos = TyName.find(str: ReadOnlyQual); |
2381 | if (ReadOnlyPos != std::string::npos) |
2382 | // "+ 1" for the space after access qualifier. |
2383 | TyName.erase(pos: ReadOnlyPos, n: ReadOnlyQual.size() + 1); |
2384 | else { |
2385 | std::string WriteOnlyQual("__write_only" ); |
2386 | std::string::size_type WriteOnlyPos = TyName.find(str: WriteOnlyQual); |
2387 | if (WriteOnlyPos != std::string::npos) |
2388 | TyName.erase(pos: WriteOnlyPos, n: WriteOnlyQual.size() + 1); |
2389 | else { |
2390 | std::string ReadWriteQual("__read_write" ); |
2391 | std::string::size_type ReadWritePos = TyName.find(str: ReadWriteQual); |
2392 | if (ReadWritePos != std::string::npos) |
2393 | TyName.erase(pos: ReadWritePos, n: ReadWriteQual.size() + 1); |
2394 | } |
2395 | } |
2396 | } |
2397 | |
// Returns the address space id that should be produced for the
// kernel_arg_addr_space metadata. This is always fixed to the ids specified
// in the SPIR 2.0 specification so that, for example, a clGetKernelArgInfo()
// implementation can differentiate between the address spaces on targets
// without a unique mapping to the OpenCL address spaces (basically all
// single-AS CPUs).
2404 | static unsigned ArgInfoAddressSpace(LangAS AS) { |
2405 | switch (AS) { |
2406 | case LangAS::opencl_global: |
2407 | return 1; |
2408 | case LangAS::opencl_constant: |
2409 | return 2; |
2410 | case LangAS::opencl_local: |
2411 | return 3; |
2412 | case LangAS::opencl_generic: |
2413 | return 4; // Not in SPIR 2.0 specs. |
2414 | case LangAS::opencl_global_device: |
2415 | return 5; |
2416 | case LangAS::opencl_global_host: |
2417 | return 6; |
2418 | default: |
2419 | return 0; // Assume private. |
2420 | } |
2421 | } |
2422 | |
2423 | void CodeGenModule::GenKernelArgMetadata(llvm::Function *Fn, |
2424 | const FunctionDecl *FD, |
2425 | CodeGenFunction *CGF) { |
2426 | assert(((FD && CGF) || (!FD && !CGF)) && |
2427 | "Incorrect use - FD and CGF should either be both null or not!" ); |
2428 | // Create MDNodes that represent the kernel arg metadata. |
// Each MDNode is a list of the form "key" followed by N values, where N is
// the number of kernel arguments.
2431 | |
2432 | const PrintingPolicy &Policy = Context.getPrintingPolicy(); |
2433 | |
2434 | // MDNode for the kernel argument address space qualifiers. |
2435 | SmallVector<llvm::Metadata *, 8> addressQuals; |
2436 | |
2437 | // MDNode for the kernel argument access qualifiers (images only). |
2438 | SmallVector<llvm::Metadata *, 8> accessQuals; |
2439 | |
2440 | // MDNode for the kernel argument type names. |
2441 | SmallVector<llvm::Metadata *, 8> argTypeNames; |
2442 | |
2443 | // MDNode for the kernel argument base type names. |
2444 | SmallVector<llvm::Metadata *, 8> argBaseTypeNames; |
2445 | |
2446 | // MDNode for the kernel argument type qualifiers. |
2447 | SmallVector<llvm::Metadata *, 8> argTypeQuals; |
2448 | |
2449 | // MDNode for the kernel argument names. |
2450 | SmallVector<llvm::Metadata *, 8> argNames; |
2451 | |
2452 | if (FD && CGF) |
2453 | for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) { |
2454 | const ParmVarDecl *parm = FD->getParamDecl(i); |
2455 | // Get argument name. |
2456 | argNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: parm->getName())); |
2457 | |
2458 | if (!getLangOpts().OpenCL) |
2459 | continue; |
2460 | QualType ty = parm->getType(); |
2461 | std::string typeQuals; |
2462 | |
2463 | // Get image and pipe access qualifier: |
2464 | if (ty->isImageType() || ty->isPipeType()) { |
2465 | const Decl *PDecl = parm; |
2466 | if (const auto *TD = ty->getAs<TypedefType>()) |
2467 | PDecl = TD->getDecl(); |
2468 | const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>(); |
2469 | if (A && A->isWriteOnly()) |
2470 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "write_only" )); |
2471 | else if (A && A->isReadWrite()) |
2472 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "read_write" )); |
2473 | else |
2474 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "read_only" )); |
2475 | } else |
2476 | accessQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: "none" )); |
2477 | |
2478 | auto getTypeSpelling = [&](QualType Ty) { |
2479 | auto typeName = Ty.getUnqualifiedType().getAsString(Policy); |
2480 | |
2481 | if (Ty.isCanonical()) { |
2482 | StringRef typeNameRef = typeName; |
2483 | // Turn "unsigned type" to "utype" |
2484 | if (typeNameRef.consume_front(Prefix: "unsigned " )) |
2485 | return std::string("u" ) + typeNameRef.str(); |
2486 | if (typeNameRef.consume_front(Prefix: "signed " )) |
2487 | return typeNameRef.str(); |
2488 | } |
2489 | |
2490 | return typeName; |
2491 | }; |
2492 | |
2493 | if (ty->isPointerType()) { |
2494 | QualType pointeeTy = ty->getPointeeType(); |
2495 | |
2496 | // Get address qualifier. |
2497 | addressQuals.push_back( |
2498 | Elt: llvm::ConstantAsMetadata::get(C: CGF->Builder.getInt32( |
2499 | C: ArgInfoAddressSpace(AS: pointeeTy.getAddressSpace())))); |
2500 | |
2501 | // Get argument type name. |
2502 | std::string typeName = getTypeSpelling(pointeeTy) + "*" ; |
2503 | std::string baseTypeName = |
2504 | getTypeSpelling(pointeeTy.getCanonicalType()) + "*" ; |
2505 | argTypeNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeName)); |
2506 | argBaseTypeNames.push_back( |
2507 | Elt: llvm::MDString::get(Context&: VMContext, Str: baseTypeName)); |
2508 | |
2509 | // Get argument type qualifiers: |
2510 | if (ty.isRestrictQualified()) |
2511 | typeQuals = "restrict" ; |
2512 | if (pointeeTy.isConstQualified() || |
2513 | (pointeeTy.getAddressSpace() == LangAS::opencl_constant)) |
2514 | typeQuals += typeQuals.empty() ? "const" : " const" ; |
2515 | if (pointeeTy.isVolatileQualified()) |
2516 | typeQuals += typeQuals.empty() ? "volatile" : " volatile" ; |
2517 | } else { |
2518 | uint32_t AddrSpc = 0; |
2519 | bool isPipe = ty->isPipeType(); |
2520 | if (ty->isImageType() || isPipe) |
2521 | AddrSpc = ArgInfoAddressSpace(AS: LangAS::opencl_global); |
2522 | |
2523 | addressQuals.push_back( |
2524 | Elt: llvm::ConstantAsMetadata::get(C: CGF->Builder.getInt32(C: AddrSpc))); |
2525 | |
2526 | // Get argument type name. |
2527 | ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty; |
2528 | std::string typeName = getTypeSpelling(ty); |
2529 | std::string baseTypeName = getTypeSpelling(ty.getCanonicalType()); |
2530 | |
// Remove access qualifiers on images (they are inseparable from the type
// in Clang's implementation, and the OpenCL spec instead provides a
// dedicated query for the access qualifier via clGetKernelArgInfo with
// CL_KERNEL_ARG_ACCESS_QUALIFIER):
2535 | if (ty->isImageType()) { |
2536 | removeImageAccessQualifier(TyName&: typeName); |
2537 | removeImageAccessQualifier(TyName&: baseTypeName); |
2538 | } |
2539 | |
2540 | argTypeNames.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeName)); |
2541 | argBaseTypeNames.push_back( |
2542 | Elt: llvm::MDString::get(Context&: VMContext, Str: baseTypeName)); |
2543 | |
2544 | if (isPipe) |
2545 | typeQuals = "pipe" ; |
2546 | } |
2547 | argTypeQuals.push_back(Elt: llvm::MDString::get(Context&: VMContext, Str: typeQuals)); |
2548 | } |
2549 | |
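// Illustrative sketch (not literal output): for a kernel such as
//   kernel void k(global int *p, read_only image2d_t img)
// the nodes attached below look roughly like
//   !kernel_arg_addr_space !{i32 1, i32 1}
//   !kernel_arg_access_qual !{!"none", !"read_only"}
//   !kernel_arg_type !{!"int*", !"image2d_t"}
//   !kernel_arg_type_qual !{!"", !""}
// plus !kernel_arg_name when argument-name metadata is enabled.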
2550 | if (getLangOpts().OpenCL) { |
2551 | Fn->setMetadata(Kind: "kernel_arg_addr_space" , |
2552 | Node: llvm::MDNode::get(Context&: VMContext, MDs: addressQuals)); |
2553 | Fn->setMetadata(Kind: "kernel_arg_access_qual" , |
2554 | Node: llvm::MDNode::get(Context&: VMContext, MDs: accessQuals)); |
2555 | Fn->setMetadata(Kind: "kernel_arg_type" , |
2556 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argTypeNames)); |
2557 | Fn->setMetadata(Kind: "kernel_arg_base_type" , |
2558 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argBaseTypeNames)); |
2559 | Fn->setMetadata(Kind: "kernel_arg_type_qual" , |
2560 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argTypeQuals)); |
2561 | } |
2562 | if (getCodeGenOpts().EmitOpenCLArgMetadata || |
2563 | getCodeGenOpts().HIPSaveKernelArgName) |
2564 | Fn->setMetadata(Kind: "kernel_arg_name" , |
2565 | Node: llvm::MDNode::get(Context&: VMContext, MDs: argNames)); |
2566 | } |
2567 | |
2568 | /// Determines whether the language options require us to model |
2569 | /// unwind exceptions. We treat -fexceptions as mandating this |
2570 | /// except under the fragile ObjC ABI with only ObjC exceptions |
2571 | /// enabled. This means, for example, that C with -fexceptions |
2572 | /// enables this. |
2573 | static bool hasUnwindExceptions(const LangOptions &LangOpts) { |
2574 | // If exceptions are completely disabled, obviously this is false. |
2575 | if (!LangOpts.Exceptions) return false; |
2576 | |
2577 | // If C++ exceptions are enabled, this is true. |
2578 | if (LangOpts.CXXExceptions) return true; |
2579 | |
2580 | // If ObjC exceptions are enabled, this depends on the ABI. |
2581 | if (LangOpts.ObjCExceptions) { |
2582 | return LangOpts.ObjCRuntime.hasUnwindExceptions(); |
2583 | } |
2584 | |
2585 | return true; |
2586 | } |
2587 | |
2588 | static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM, |
2589 | const CXXMethodDecl *MD) { |
2590 | // Check that the type metadata can ever actually be used by a call. |
2591 | if (!CGM.getCodeGenOpts().LTOUnit || |
2592 | !CGM.HasHiddenLTOVisibility(RD: MD->getParent())) |
2593 | return false; |
2594 | |
2595 | // Only functions whose address can be taken with a member function pointer |
2596 | // need this sort of type metadata. |
2597 | return MD->isImplicitObjectMemberFunction() && !MD->isVirtual() && |
2598 | !isa<CXXConstructorDecl, CXXDestructorDecl>(Val: MD); |
2599 | } |
2600 | |
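// Returns the set of "most base" classes of RD, i.e. the classes reachable
// from RD that themselves have no bases. For example (illustrative), given
//   struct A {}; struct B {}; struct C : A, B {}; struct D : C {};
// getMostBaseClasses(D) yields {A, B}.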
2601 | SmallVector<const CXXRecordDecl *, 0> |
2602 | CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) { |
2603 | llvm::SetVector<const CXXRecordDecl *> MostBases; |
2604 | |
2605 | std::function<void (const CXXRecordDecl *)> CollectMostBases; |
2606 | CollectMostBases = [&](const CXXRecordDecl *RD) { |
2607 | if (RD->getNumBases() == 0) |
2608 | MostBases.insert(X: RD); |
2609 | for (const CXXBaseSpecifier &B : RD->bases()) |
2610 | CollectMostBases(B.getType()->getAsCXXRecordDecl()); |
2611 | }; |
2612 | CollectMostBases(RD); |
2613 | return MostBases.takeVector(); |
2614 | } |
2615 | |
2616 | void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D, |
2617 | llvm::Function *F) { |
2618 | llvm::AttrBuilder B(F->getContext()); |
2619 | |
2620 | if ((!D || !D->hasAttr<NoUwtableAttr>()) && CodeGenOpts.UnwindTables) |
2621 | B.addUWTableAttr(Kind: llvm::UWTableKind(CodeGenOpts.UnwindTables)); |
2622 | |
2623 | if (CodeGenOpts.StackClashProtector) |
2624 | B.addAttribute(A: "probe-stack" , V: "inline-asm" ); |
2625 | |
2626 | if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096) |
2627 | B.addAttribute(A: "stack-probe-size" , |
2628 | V: std::to_string(val: CodeGenOpts.StackProbeSize)); |
2629 | |
2630 | if (!hasUnwindExceptions(LangOpts)) |
2631 | B.addAttribute(Val: llvm::Attribute::NoUnwind); |
2632 | |
2633 | if (D && D->hasAttr<NoStackProtectorAttr>()) |
2634 | ; // Do nothing. |
2635 | else if (D && D->hasAttr<StrictGuardStackCheckAttr>() && |
2636 | isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPOn)) |
2637 | B.addAttribute(Val: llvm::Attribute::StackProtectStrong); |
2638 | else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPOn)) |
2639 | B.addAttribute(Val: llvm::Attribute::StackProtect); |
2640 | else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPStrong)) |
2641 | B.addAttribute(Val: llvm::Attribute::StackProtectStrong); |
2642 | else if (isStackProtectorOn(LangOpts, Triple: getTriple(), Mode: LangOptions::SSPReq)) |
2643 | B.addAttribute(Val: llvm::Attribute::StackProtectReq); |
2644 | |
2645 | if (!D) { |
2646 | // Non-entry HLSL functions must always be inlined. |
2647 | if (getLangOpts().HLSL && !F->hasFnAttribute(Kind: llvm::Attribute::NoInline)) |
2648 | B.addAttribute(Val: llvm::Attribute::AlwaysInline); |
// If we don't have a declaration to control inlining, the function isn't
// explicitly marked as alwaysinline for semantic reasons, and inlining is
// disabled, then mark the function as noinline.
2652 | else if (!F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline) && |
2653 | CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) |
2654 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2655 | |
2656 | F->addFnAttrs(Attrs: B); |
2657 | return; |
2658 | } |
2659 | |
2660 | // Handle SME attributes that apply to function definitions, |
2661 | // rather than to function prototypes. |
2662 | if (D->hasAttr<ArmLocallyStreamingAttr>()) |
2663 | B.addAttribute(A: "aarch64_pstate_sm_body" ); |
2664 | |
2665 | if (auto *Attr = D->getAttr<ArmNewAttr>()) { |
2666 | if (Attr->isNewZA()) |
2667 | B.addAttribute(A: "aarch64_new_za" ); |
2668 | if (Attr->isNewZT0()) |
2669 | B.addAttribute(A: "aarch64_new_zt0" ); |
2670 | } |
2671 | |
2672 | // Track whether we need to add the optnone LLVM attribute, |
2673 | // starting with the default for this optimization level. |
2674 | bool ShouldAddOptNone = |
2675 | !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0; |
// We can't add optnone in the following cases; it won't pass the verifier.
2677 | ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>(); |
2678 | ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>(); |
2679 | |
2680 | // Non-entry HLSL functions must always be inlined. |
2681 | if (getLangOpts().HLSL && !F->hasFnAttribute(Kind: llvm::Attribute::NoInline) && |
2682 | !D->hasAttr<NoInlineAttr>()) { |
2683 | B.addAttribute(Val: llvm::Attribute::AlwaysInline); |
2684 | } else if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) && |
2685 | !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) { |
2686 | // Add optnone, but do so only if the function isn't always_inline. |
2687 | B.addAttribute(Val: llvm::Attribute::OptimizeNone); |
2688 | |
2689 | // OptimizeNone implies noinline; we should not be inlining such functions. |
2690 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2691 | |
2692 | // We still need to handle naked functions even though optnone subsumes |
2693 | // much of their semantics. |
2694 | if (D->hasAttr<NakedAttr>()) |
2695 | B.addAttribute(Val: llvm::Attribute::Naked); |
2696 | |
2697 | // OptimizeNone wins over OptimizeForSize and MinSize. |
2698 | F->removeFnAttr(Kind: llvm::Attribute::OptimizeForSize); |
2699 | F->removeFnAttr(Kind: llvm::Attribute::MinSize); |
2700 | } else if (D->hasAttr<NakedAttr>()) { |
2701 | // Naked implies noinline: we should not be inlining such functions. |
2702 | B.addAttribute(Val: llvm::Attribute::Naked); |
2703 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2704 | } else if (D->hasAttr<NoDuplicateAttr>()) { |
2705 | B.addAttribute(Val: llvm::Attribute::NoDuplicate); |
2706 | } else if (D->hasAttr<NoInlineAttr>() && |
2707 | !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) { |
2708 | // Add noinline if the function isn't always_inline. |
2709 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2710 | } else if (D->hasAttr<AlwaysInlineAttr>() && |
2711 | !F->hasFnAttribute(Kind: llvm::Attribute::NoInline)) { |
2712 | // (noinline wins over always_inline, and we can't specify both in IR) |
2713 | B.addAttribute(Val: llvm::Attribute::AlwaysInline); |
2714 | } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { |
2715 | // If we're not inlining, then force everything that isn't always_inline to |
2716 | // carry an explicit noinline attribute. |
2717 | if (!F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) |
2718 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2719 | } else { |
2720 | // Otherwise, propagate the inline hint attribute and potentially use its |
2721 | // absence to mark things as noinline. |
2722 | if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) { |
2723 | // Search function and template pattern redeclarations for inline. |
2724 | auto CheckForInline = [](const FunctionDecl *FD) { |
2725 | auto CheckRedeclForInline = [](const FunctionDecl *Redecl) { |
2726 | return Redecl->isInlineSpecified(); |
2727 | }; |
2728 | if (any_of(Range: FD->redecls(), P: CheckRedeclForInline)) |
2729 | return true; |
2730 | const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern(); |
2731 | if (!Pattern) |
2732 | return false; |
2733 | return any_of(Range: Pattern->redecls(), P: CheckRedeclForInline); |
2734 | }; |
2735 | if (CheckForInline(FD)) { |
2736 | B.addAttribute(Val: llvm::Attribute::InlineHint); |
2737 | } else if (CodeGenOpts.getInlining() == |
2738 | CodeGenOptions::OnlyHintInlining && |
2739 | !FD->isInlined() && |
2740 | !F->hasFnAttribute(Kind: llvm::Attribute::AlwaysInline)) { |
2741 | B.addAttribute(Val: llvm::Attribute::NoInline); |
2742 | } |
2743 | } |
2744 | } |
2745 | |
2746 | // Add other optimization related attributes if we are optimizing this |
2747 | // function. |
2748 | if (!D->hasAttr<OptimizeNoneAttr>()) { |
2749 | if (D->hasAttr<ColdAttr>()) { |
2750 | if (!ShouldAddOptNone) |
2751 | B.addAttribute(Val: llvm::Attribute::OptimizeForSize); |
2752 | B.addAttribute(Val: llvm::Attribute::Cold); |
2753 | } |
2754 | if (D->hasAttr<HotAttr>()) |
2755 | B.addAttribute(Val: llvm::Attribute::Hot); |
2756 | if (D->hasAttr<MinSizeAttr>()) |
2757 | B.addAttribute(Val: llvm::Attribute::MinSize); |
2758 | } |
2759 | |
2760 | F->addFnAttrs(Attrs: B); |
2761 | |
2762 | unsigned alignment = D->getMaxAlignment() / Context.getCharWidth(); |
2763 | if (alignment) |
2764 | F->setAlignment(llvm::Align(alignment)); |
2765 | |
2766 | if (!D->hasAttr<AlignedAttr>()) |
2767 | if (LangOpts.FunctionAlignment) |
2768 | F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment)); |
2769 | |
2770 | // Some C++ ABIs require 2-byte alignment for member functions, in order to |
2771 | // reserve a bit for differentiating between virtual and non-virtual member |
2772 | // functions. If the current target's C++ ABI requires this and this is a |
2773 | // member function, set its alignment accordingly. |
2774 | if (getTarget().getCXXABI().areMemberFunctionsAligned()) { |
2775 | if (isa<CXXMethodDecl>(Val: D) && F->getPointerAlignment(DL: getDataLayout()) < 2) |
2776 | F->setAlignment(std::max(a: llvm::Align(2), b: F->getAlign().valueOrOne())); |
2777 | } |
2778 | |
2779 | // In the cross-dso CFI mode with canonical jump tables, we want !type |
2780 | // attributes on definitions only. |
2781 | if (CodeGenOpts.SanitizeCfiCrossDso && |
2782 | CodeGenOpts.SanitizeCfiCanonicalJumpTables) { |
2783 | if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) { |
2784 | // Skip available_externally functions. They won't be codegen'ed in the |
2785 | // current module anyway. |
2786 | if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally) |
2787 | createFunctionTypeMetadataForIcall(FD, F); |
2788 | } |
2789 | } |
2790 | |
2791 | // Emit type metadata on member functions for member function pointer checks. |
2792 | // These are only ever necessary on definitions; we're guaranteed that the |
2793 | // definition will be present in the LTO unit as a result of LTO visibility. |
2794 | auto *MD = dyn_cast<CXXMethodDecl>(Val: D); |
2795 | if (MD && requiresMemberFunctionPointerTypeMetadata(CGM&: *this, MD)) { |
2796 | for (const CXXRecordDecl *Base : getMostBaseClasses(RD: MD->getParent())) { |
2797 | llvm::Metadata *Id = |
2798 | CreateMetadataIdentifierForType(T: Context.getMemberPointerType( |
2799 | T: MD->getType(), /*Qualifier=*/nullptr, Cls: Base)); |
2800 | F->addTypeMetadata(Offset: 0, TypeID: Id); |
2801 | } |
2802 | } |
2803 | } |
2804 | |
2805 | void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) { |
2806 | const Decl *D = GD.getDecl(); |
2807 | if (isa_and_nonnull<NamedDecl>(Val: D)) |
2808 | setGVProperties(GV, GD); |
2809 | else |
2810 | GV->setVisibility(llvm::GlobalValue::DefaultVisibility); |
2811 | |
2812 | if (D && D->hasAttr<UsedAttr>()) |
2813 | addUsedOrCompilerUsedGlobal(GV); |
2814 | |
2815 | if (const auto *VD = dyn_cast_if_present<VarDecl>(Val: D); |
2816 | VD && |
2817 | ((CodeGenOpts.KeepPersistentStorageVariables && |
2818 | (VD->getStorageDuration() == SD_Static || |
2819 | VD->getStorageDuration() == SD_Thread)) || |
2820 | (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static && |
2821 | VD->getType().isConstQualified()))) |
2822 | addUsedOrCompilerUsedGlobal(GV); |
2823 | } |
2824 | |
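// Illustrative example (assuming an x86 target): a function declared with
//   __attribute__((target("arch=skylake,avx2")))
// typically ends up with "target-cpu"="skylake" and a "target-features"
// string containing "+avx2" along with the other implied features.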
2825 | bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD, |
2826 | llvm::AttrBuilder &Attrs, |
2827 | bool SetTargetFeatures) { |
2828 | // Add target-cpu and target-features attributes to functions. If |
2829 | // we have a decl for the function and it has a target attribute then |
2830 | // parse that and add it to the feature set. |
2831 | StringRef TargetCPU = getTarget().getTargetOpts().CPU; |
2832 | StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU; |
2833 | std::vector<std::string> Features; |
2834 | const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: GD.getDecl()); |
2835 | FD = FD ? FD->getMostRecentDecl() : FD; |
2836 | const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr; |
2837 | const auto *TV = FD ? FD->getAttr<TargetVersionAttr>() : nullptr; |
2838 | assert((!TD || !TV) && "both target_version and target specified" ); |
2839 | const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr; |
2840 | const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr; |
2841 | bool AddedAttr = false; |
2842 | if (TD || TV || SD || TC) { |
2843 | llvm::StringMap<bool> FeatureMap; |
2844 | getContext().getFunctionFeatureMap(FeatureMap, GD); |
2845 | |
2846 | // Produce the canonical string for this set of features. |
2847 | for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap) |
2848 | Features.push_back(x: (Entry.getValue() ? "+" : "-" ) + Entry.getKey().str()); |
2849 | |
2850 | // Now add the target-cpu and target-features to the function. |
2851 | // While we populated the feature map above, we still need to |
2852 | // get and parse the target attribute so we can get the cpu for |
2853 | // the function. |
2854 | if (TD) { |
2855 | ParsedTargetAttr ParsedAttr = |
2856 | Target.parseTargetAttr(Str: TD->getFeaturesStr()); |
2857 | if (!ParsedAttr.CPU.empty() && |
2858 | getTarget().isValidCPUName(Name: ParsedAttr.CPU)) { |
2859 | TargetCPU = ParsedAttr.CPU; |
2860 | TuneCPU = "" ; // Clear the tune CPU. |
2861 | } |
2862 | if (!ParsedAttr.Tune.empty() && |
2863 | getTarget().isValidCPUName(Name: ParsedAttr.Tune)) |
2864 | TuneCPU = ParsedAttr.Tune; |
2865 | } |
2866 | |
2867 | if (SD) { |
2868 | // Apply the given CPU name as the 'tune-cpu' so that the optimizer can |
2869 | // favor this processor. |
2870 | TuneCPU = SD->getCPUName(Index: GD.getMultiVersionIndex())->getName(); |
2871 | } |
2872 | } else { |
2873 | // Otherwise just add the existing target cpu and target features to the |
2874 | // function. |
2875 | Features = getTarget().getTargetOpts().Features; |
2876 | } |
2877 | |
2878 | if (!TargetCPU.empty()) { |
2879 | Attrs.addAttribute(A: "target-cpu" , V: TargetCPU); |
2880 | AddedAttr = true; |
2881 | } |
2882 | if (!TuneCPU.empty()) { |
2883 | Attrs.addAttribute(A: "tune-cpu" , V: TuneCPU); |
2884 | AddedAttr = true; |
2885 | } |
2886 | if (!Features.empty() && SetTargetFeatures) { |
2887 | llvm::erase_if(C&: Features, P: [&](const std::string& F) { |
2888 | return getTarget().isReadOnlyFeature(Feature: F.substr(pos: 1)); |
2889 | }); |
2890 | llvm::sort(C&: Features); |
2891 | Attrs.addAttribute(A: "target-features" , V: llvm::join(R&: Features, Separator: "," )); |
2892 | AddedAttr = true; |
2893 | } |
2894 | // Add metadata for AArch64 Function Multi Versioning. |
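// e.g., a version declared with __attribute__((target_version("sve2")))
// would typically get "fmv-features"="sve2", while the default version gets
// an empty "fmv-features" attribute (illustrative, per the logic below).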
2895 | if (getTarget().getTriple().isAArch64()) { |
2896 | llvm::SmallVector<StringRef, 8> Feats; |
2897 | bool IsDefault = false; |
2898 | if (TV) { |
2899 | IsDefault = TV->isDefaultVersion(); |
2900 | TV->getFeatures(Out&: Feats); |
2901 | } else if (TC) { |
2902 | IsDefault = TC->isDefaultVersion(Index: GD.getMultiVersionIndex()); |
2903 | TC->getFeatures(Out&: Feats, Index: GD.getMultiVersionIndex()); |
2904 | } |
2905 | if (IsDefault) { |
2906 | Attrs.addAttribute(A: "fmv-features" ); |
2907 | AddedAttr = true; |
2908 | } else if (!Feats.empty()) { |
2909 | // Sort features and remove duplicates. |
2910 | std::set<StringRef> OrderedFeats(Feats.begin(), Feats.end()); |
2911 | std::string FMVFeatures; |
2912 | for (StringRef F : OrderedFeats) |
2913 | FMVFeatures.append(str: "," + F.str()); |
2914 | Attrs.addAttribute(A: "fmv-features" , V: FMVFeatures.substr(pos: 1)); |
2915 | AddedAttr = true; |
2916 | } |
2917 | } |
2918 | return AddedAttr; |
2919 | } |
2920 | |
2921 | void CodeGenModule::setNonAliasAttributes(GlobalDecl GD, |
2922 | llvm::GlobalObject *GO) { |
2923 | const Decl *D = GD.getDecl(); |
2924 | SetCommonAttributes(GD, GV: GO); |
2925 | |
2926 | if (D) { |
2927 | if (auto *GV = dyn_cast<llvm::GlobalVariable>(Val: GO)) { |
2928 | if (D->hasAttr<RetainAttr>()) |
2929 | addUsedGlobal(GV); |
2930 | if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>()) |
2931 | GV->addAttribute(Kind: "bss-section" , Val: SA->getName()); |
2932 | if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>()) |
2933 | GV->addAttribute(Kind: "data-section" , Val: SA->getName()); |
2934 | if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>()) |
2935 | GV->addAttribute(Kind: "rodata-section" , Val: SA->getName()); |
2936 | if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>()) |
2937 | GV->addAttribute(Kind: "relro-section" , Val: SA->getName()); |
2938 | } |
2939 | |
2940 | if (auto *F = dyn_cast<llvm::Function>(Val: GO)) { |
2941 | if (D->hasAttr<RetainAttr>()) |
2942 | addUsedGlobal(GV: F); |
2943 | if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>()) |
2944 | if (!D->getAttr<SectionAttr>()) |
2945 | F->setSection(SA->getName()); |
2946 | |
2947 | llvm::AttrBuilder Attrs(F->getContext()); |
2948 | if (GetCPUAndFeaturesAttributes(GD, Attrs)) { |
2949 | // We know that GetCPUAndFeaturesAttributes will always have the |
2950 | // newest set, since it has the newest possible FunctionDecl, so the |
2951 | // new ones should replace the old. |
2952 | llvm::AttributeMask RemoveAttrs; |
2953 | RemoveAttrs.addAttribute(A: "target-cpu" ); |
2954 | RemoveAttrs.addAttribute(A: "target-features" ); |
2955 | RemoveAttrs.addAttribute(A: "fmv-features" ); |
2956 | RemoveAttrs.addAttribute(A: "tune-cpu" ); |
2957 | F->removeFnAttrs(Attrs: RemoveAttrs); |
2958 | F->addFnAttrs(Attrs); |
2959 | } |
2960 | } |
2961 | |
2962 | if (const auto *CSA = D->getAttr<CodeSegAttr>()) |
2963 | GO->setSection(CSA->getName()); |
2964 | else if (const auto *SA = D->getAttr<SectionAttr>()) |
2965 | GO->setSection(SA->getName()); |
2966 | } |
2967 | |
2968 | getTargetCodeGenInfo().setTargetAttributes(D, GV: GO, M&: *this); |
2969 | } |
2970 | |
2971 | void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD, |
2972 | llvm::Function *F, |
2973 | const CGFunctionInfo &FI) { |
2974 | const Decl *D = GD.getDecl(); |
2975 | SetLLVMFunctionAttributes(GD, Info: FI, F, /*IsThunk=*/false); |
2976 | SetLLVMFunctionAttributesForDefinition(D, F); |
2977 | |
2978 | F->setLinkage(llvm::Function::InternalLinkage); |
2979 | |
2980 | setNonAliasAttributes(GD, GO: F); |
2981 | } |
2982 | |
2983 | static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) { |
2984 | // Set linkage and visibility in case we never see a definition. |
2985 | LinkageInfo LV = ND->getLinkageAndVisibility(); |
2986 | // Don't set internal linkage on declarations. |
2987 | // "extern_weak" is overloaded in LLVM; we probably should have |
2988 | // separate linkage types for this. |
2989 | if (isExternallyVisible(L: LV.getLinkage()) && |
2990 | (ND->hasAttr<WeakAttr>() || ND->isWeakImported())) |
2991 | GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage); |
2992 | } |
2993 | |
2994 | void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD, |
2995 | llvm::Function *F) { |
2996 | // Only if we are checking indirect calls. |
2997 | if (!LangOpts.Sanitize.has(K: SanitizerKind::CFIICall)) |
2998 | return; |
2999 | |
3000 | // Non-static class methods are handled via vtable or member function pointer |
3001 | // checks elsewhere. |
3002 | if (isa<CXXMethodDecl>(Val: FD) && !cast<CXXMethodDecl>(Val: FD)->isStatic()) |
3003 | return; |
3004 | |
3005 | llvm::Metadata *MD = CreateMetadataIdentifierForType(T: FD->getType()); |
3006 | F->addTypeMetadata(Offset: 0, TypeID: MD); |
3007 | F->addTypeMetadata(Offset: 0, TypeID: CreateMetadataIdentifierGeneralized(T: FD->getType())); |
3008 | |
3009 | // Emit a hash-based bit set entry for cross-DSO calls. |
3010 | if (CodeGenOpts.SanitizeCfiCrossDso) |
3011 | if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD)) |
3012 | F->addTypeMetadata(Offset: 0, TypeID: llvm::ConstantAsMetadata::get(C: CrossDsoTypeId)); |
3013 | } |
3014 | |
3015 | void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) { |
3016 | llvm::LLVMContext &Ctx = F->getContext(); |
3017 | llvm::MDBuilder MDB(Ctx); |
3018 | F->setMetadata(KindID: llvm::LLVMContext::MD_kcfi_type, |
3019 | Node: llvm::MDNode::get( |
3020 | Context&: Ctx, MDs: MDB.createConstant(C: CreateKCFITypeId(T: FD->getType())))); |
3021 | } |
3022 | |
3023 | static bool allowKCFIIdentifier(StringRef Name) { |
3024 | // KCFI type identifier constants are only necessary for external assembly |
3025 | // functions, which means it's safe to skip unusual names. Subset of |
3026 | // MCAsmInfo::isAcceptableChar() and MCAsmInfoXCOFF::isAcceptableChar(). |
3027 | return llvm::all_of(Range&: Name, P: [](const char &C) { |
3028 | return llvm::isAlnum(C) || C == '_' || C == '.'; |
3029 | }); |
3030 | } |
3031 | |
3032 | void CodeGenModule::finalizeKCFITypes() { |
3033 | llvm::Module &M = getModule(); |
3034 | for (auto &F : M.functions()) { |
3035 | // Remove KCFI type metadata from non-address-taken local functions. |
3036 | bool AddressTaken = F.hasAddressTaken(); |
3037 | if (!AddressTaken && F.hasLocalLinkage()) |
3038 | F.eraseMetadata(KindID: llvm::LLVMContext::MD_kcfi_type); |
3039 | |
3040 | // Generate a constant with the expected KCFI type identifier for all |
3041 | // address-taken function declarations to support annotating indirectly |
3042 | // called assembly functions. |
3043 | if (!AddressTaken || !F.isDeclaration()) |
3044 | continue; |
3045 | |
3046 | const llvm::ConstantInt *Type; |
3047 | if (const llvm::MDNode *MD = F.getMetadata(KindID: llvm::LLVMContext::MD_kcfi_type)) |
3048 | Type = llvm::mdconst::extract<llvm::ConstantInt>(MD: MD->getOperand(I: 0)); |
3049 | else |
3050 | continue; |
3051 | |
3052 | StringRef Name = F.getName(); |
3053 | if (!allowKCFIIdentifier(Name)) |
3054 | continue; |
3055 | |
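// For example (illustrative), an address-taken declaration of an assembly
// function "foo" whose !kcfi_type constant is 0x12345678 gets module-level
// inline asm roughly of the form:
//   .weak __kcfi_typeid_foo
//   .set __kcfi_typeid_foo, 305419896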
3056 | std::string Asm = (".weak __kcfi_typeid_" + Name + "\n.set __kcfi_typeid_" + |
3057 | Name + ", " + Twine(Type->getZExtValue()) + "\n" ) |
3058 | .str(); |
3059 | M.appendModuleInlineAsm(Asm); |
3060 | } |
3061 | } |
3062 | |
3063 | void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F, |
3064 | bool IsIncompleteFunction, |
3065 | bool IsThunk) { |
3066 | |
3067 | if (F->getIntrinsicID() != llvm::Intrinsic::not_intrinsic) { |
3068 | // If this is an intrinsic function, the attributes will have been set |
3069 | // when the function was created. |
3070 | return; |
3071 | } |
3072 | |
3073 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
3074 | |
3075 | if (!IsIncompleteFunction) |
3076 | SetLLVMFunctionAttributes(GD, Info: getTypes().arrangeGlobalDeclaration(GD), F, |
3077 | IsThunk); |
3078 | |
3079 | // Add the Returned attribute for "this", except for iOS 5 and earlier |
3080 | // where substantial code, including the libstdc++ dylib, was compiled with |
3081 | // GCC and does not actually return "this". |
3082 | if (!IsThunk && getCXXABI().HasThisReturn(GD) && |
3083 | !(getTriple().isiOS() && getTriple().isOSVersionLT(Major: 6))) { |
3084 | assert(!F->arg_empty() && |
3085 | F->arg_begin()->getType() |
3086 | ->canLosslesslyBitCastTo(F->getReturnType()) && |
3087 | "unexpected this return" ); |
3088 | F->addParamAttr(ArgNo: 0, Kind: llvm::Attribute::Returned); |
3089 | } |
3090 | |
3091 | // Only a few attributes are set on declarations; these may later be |
3092 | // overridden by a definition. |
3093 | |
3094 | setLinkageForGV(GV: F, ND: FD); |
3095 | setGVProperties(GV: F, D: FD); |
3096 | |
3097 | // Setup target-specific attributes. |
3098 | if (!IsIncompleteFunction && F->isDeclaration()) |
3099 | getTargetCodeGenInfo().setTargetAttributes(D: FD, GV: F, M&: *this); |
3100 | |
3101 | if (const auto *CSA = FD->getAttr<CodeSegAttr>()) |
3102 | F->setSection(CSA->getName()); |
3103 | else if (const auto *SA = FD->getAttr<SectionAttr>()) |
3104 | F->setSection(SA->getName()); |
3105 | |
3106 | if (const auto *EA = FD->getAttr<ErrorAttr>()) { |
3107 | if (EA->isError()) |
3108 | F->addFnAttr(Kind: "dontcall-error" , Val: EA->getUserDiagnostic()); |
3109 | else if (EA->isWarning()) |
3110 | F->addFnAttr(Kind: "dontcall-warn" , Val: EA->getUserDiagnostic()); |
3111 | } |
3112 | |
3113 | // If we plan on emitting this inline builtin, we can't treat it as a builtin. |
3114 | if (FD->isInlineBuiltinDeclaration()) { |
3115 | const FunctionDecl *FDBody; |
3116 | bool HasBody = FD->hasBody(Definition&: FDBody); |
3117 | (void)HasBody; |
3118 | assert(HasBody && "Inline builtin declarations should always have an " |
3119 | "available body!" ); |
3120 | if (shouldEmitFunction(GD: FDBody)) |
3121 | F->addFnAttr(Kind: llvm::Attribute::NoBuiltin); |
3122 | } |
3123 | |
3124 | if (FD->isReplaceableGlobalAllocationFunction()) { |
// A replaceable global allocation function does not act like a builtin by
// default; it does so only when invoked by a new-expression or
// delete-expression.
3127 | F->addFnAttr(Kind: llvm::Attribute::NoBuiltin); |
3128 | } |
3129 | |
3130 | if (isa<CXXConstructorDecl>(Val: FD) || isa<CXXDestructorDecl>(Val: FD)) |
3131 | F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3132 | else if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD)) |
3133 | if (MD->isVirtual()) |
3134 | F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3135 | |
3136 | // Don't emit entries for function declarations in the cross-DSO mode. This |
3137 | // is handled with better precision by the receiving DSO. But if jump tables |
3138 | // are non-canonical then we need type metadata in order to produce the local |
3139 | // jump table. |
3140 | if (!CodeGenOpts.SanitizeCfiCrossDso || |
3141 | !CodeGenOpts.SanitizeCfiCanonicalJumpTables) |
3142 | createFunctionTypeMetadataForIcall(FD, F); |
3143 | |
3144 | if (LangOpts.Sanitize.has(K: SanitizerKind::KCFI)) |
3145 | setKCFIType(FD, F); |
3146 | |
3147 | if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>()) |
3148 | getOpenMPRuntime().emitDeclareSimdFunction(FD, Fn: F); |
3149 | |
3150 | if (CodeGenOpts.InlineMaxStackSize != UINT_MAX) |
3151 | F->addFnAttr(Kind: "inline-max-stacksize" , Val: llvm::utostr(X: CodeGenOpts.InlineMaxStackSize)); |
3152 | |
3153 | if (const auto *CB = FD->getAttr<CallbackAttr>()) { |
3154 | // Annotate the callback behavior as metadata: |
3155 | // - The callback callee (as argument number). |
3156 | // - The callback payloads (as argument numbers). |
3157 | llvm::LLVMContext &Ctx = F->getContext(); |
3158 | llvm::MDBuilder MDB(Ctx); |
3159 | |
3160 | // The payload indices are all but the first one in the encoding. The first |
3161 | // identifies the callback callee. |
3162 | int CalleeIdx = *CB->encoding_begin(); |
3163 | ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end()); |
3164 | F->addMetadata(KindID: llvm::LLVMContext::MD_callback, |
3165 | MD&: *llvm::MDNode::get(Context&: Ctx, MDs: {MDB.createCallbackEncoding( |
3166 | CalleeArgNo: CalleeIdx, Arguments: PayloadIndices, |
3167 | /* VarArgsArePassed */ false)})); |
3168 | } |
3169 | } |
3170 | |
3171 | void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) { |
3172 | assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) && |
3173 | "Only globals with definition can force usage." ); |
3174 | LLVMUsed.emplace_back(args&: GV); |
3175 | } |
3176 | |
3177 | void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) { |
3178 | assert(!GV->isDeclaration() && |
3179 | "Only globals with definition can force usage." ); |
3180 | LLVMCompilerUsed.emplace_back(args&: GV); |
3181 | } |
3182 | |
3183 | void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) { |
3184 | assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) && |
3185 | "Only globals with definition can force usage." ); |
3186 | if (getTriple().isOSBinFormatELF()) |
3187 | LLVMCompilerUsed.emplace_back(args&: GV); |
3188 | else |
3189 | LLVMUsed.emplace_back(args&: GV); |
3190 | } |
3191 | |
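// Emits the given list as an appending global in the "llvm.metadata" section,
// e.g. (roughly): @llvm.used = appending global [2 x ptr] [ptr @f, ptr @g],
// section "llvm.metadata".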
3192 | static void emitUsed(CodeGenModule &CGM, StringRef Name, |
3193 | std::vector<llvm::WeakTrackingVH> &List) { |
3194 | // Don't create llvm.used if there is no need. |
3195 | if (List.empty()) |
3196 | return; |
3197 | |
3198 | // Convert List to what ConstantArray needs. |
3199 | SmallVector<llvm::Constant*, 8> UsedArray; |
3200 | UsedArray.resize(N: List.size()); |
3201 | for (unsigned i = 0, e = List.size(); i != e; ++i) { |
3202 | UsedArray[i] = |
3203 | llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( |
3204 | C: cast<llvm::Constant>(Val: &*List[i]), Ty: CGM.Int8PtrTy); |
3205 | } |
3206 | |
3207 | if (UsedArray.empty()) |
3208 | return; |
3209 | llvm::ArrayType *ATy = llvm::ArrayType::get(ElementType: CGM.Int8PtrTy, NumElements: UsedArray.size()); |
3210 | |
3211 | auto *GV = new llvm::GlobalVariable( |
3212 | CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage, |
3213 | llvm::ConstantArray::get(T: ATy, V: UsedArray), Name); |
3214 | |
3215 | GV->setSection("llvm.metadata" ); |
3216 | } |
3217 | |
3218 | void CodeGenModule::emitLLVMUsed() { |
3219 | emitUsed(CGM&: *this, Name: "llvm.used" , List&: LLVMUsed); |
3220 | emitUsed(CGM&: *this, Name: "llvm.compiler.used" , List&: LLVMCompilerUsed); |
3221 | } |
3222 | |
3223 | void CodeGenModule::AppendLinkerOptions(StringRef Opts) { |
3224 | auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opts); |
3225 | LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: getLLVMContext(), MDs: MDOpts)); |
3226 | } |
3227 | |
3228 | void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) { |
3229 | llvm::SmallString<32> Opt; |
3230 | getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt); |
3231 | if (Opt.empty()) |
3232 | return; |
3233 | auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opt); |
3234 | LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: getLLVMContext(), MDs: MDOpts)); |
3235 | } |
3236 | |
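// Records a dependent library, e.g. from #pragma comment(lib, "m"). On ELF
// this is collected as !llvm.dependent-libraries metadata; elsewhere it is
// lowered to a target-specific linker option (roughly "/DEFAULTLIB:m.lib" on
// COFF or "-lm" on Darwin) and appended to !llvm.linker.options.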
3237 | void CodeGenModule::AddDependentLib(StringRef Lib) { |
3238 | auto &C = getLLVMContext(); |
3239 | if (getTarget().getTriple().isOSBinFormatELF()) { |
3240 | ELFDependentLibraries.push_back( |
3241 | Elt: llvm::MDNode::get(Context&: C, MDs: llvm::MDString::get(Context&: C, Str: Lib))); |
3242 | return; |
3243 | } |
3244 | |
3245 | llvm::SmallString<24> Opt; |
3246 | getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt); |
3247 | auto *MDOpts = llvm::MDString::get(Context&: getLLVMContext(), Str: Opt); |
3248 | LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context&: C, MDs: MDOpts)); |
3249 | } |
3250 | |
3251 | /// Add link options implied by the given module, including modules |
3252 | /// it depends on, using a postorder walk. |
3253 | static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod, |
3254 | SmallVectorImpl<llvm::MDNode *> &Metadata, |
3255 | llvm::SmallPtrSet<Module *, 16> &Visited) { |
3256 | // Import this module's parent. |
3257 | if (Mod->Parent && Visited.insert(Ptr: Mod->Parent).second) { |
3258 | addLinkOptionsPostorder(CGM, Mod: Mod->Parent, Metadata, Visited); |
3259 | } |
3260 | |
3261 | // Import this module's dependencies. |
3262 | for (Module *Import : llvm::reverse(C&: Mod->Imports)) { |
3263 | if (Visited.insert(Ptr: Import).second) |
3264 | addLinkOptionsPostorder(CGM, Mod: Import, Metadata, Visited); |
3265 | } |
3266 | |
3267 | // Add linker options to link against the libraries/frameworks |
3268 | // described by this module. |
3269 | llvm::LLVMContext &Context = CGM.getLLVMContext(); |
3270 | bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF(); |
3271 | |
3272 | // For modules that use export_as for linking, use that module |
3273 | // name instead. |
3274 | if (Mod->UseExportAsModuleLinkName) |
3275 | return; |
3276 | |
3277 | for (const Module::LinkLibrary &LL : llvm::reverse(C&: Mod->LinkLibraries)) { |
// Link against a framework. Frameworks are currently Darwin only, so we
// don't need to ask TargetCodeGenInfo for the spelling of the linker option.
3280 | if (LL.IsFramework) { |
3281 | llvm::Metadata *Args[2] = {llvm::MDString::get(Context, Str: "-framework" ), |
3282 | llvm::MDString::get(Context, Str: LL.Library)}; |
3283 | |
3284 | Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: Args)); |
3285 | continue; |
3286 | } |
3287 | |
3288 | // Link against a library. |
3289 | if (IsELF) { |
3290 | llvm::Metadata *Args[2] = { |
3291 | llvm::MDString::get(Context, Str: "lib" ), |
3292 | llvm::MDString::get(Context, Str: LL.Library), |
3293 | }; |
3294 | Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: Args)); |
3295 | } else { |
3296 | llvm::SmallString<24> Opt; |
3297 | CGM.getTargetCodeGenInfo().getDependentLibraryOption(Lib: LL.Library, Opt); |
3298 | auto *OptString = llvm::MDString::get(Context, Str: Opt); |
3299 | Metadata.push_back(Elt: llvm::MDNode::get(Context, MDs: OptString)); |
3300 | } |
3301 | } |
3302 | } |
3303 | |
3304 | void CodeGenModule::EmitModuleInitializers(clang::Module *Primary) { |
3305 | assert(Primary->isNamedModuleUnit() && |
3306 | "We should only emit module initializers for named modules." ); |
3307 | |
// Emit the initializers in the order that sub-modules appear in the
// source: first the Global Module Fragment, if present.
3310 | if (auto GMF = Primary->getGlobalModuleFragment()) { |
3311 | for (Decl *D : getContext().getModuleInitializers(M: GMF)) { |
3312 | if (isa<ImportDecl>(Val: D)) |
3313 | continue; |
3314 | assert(isa<VarDecl>(D) && "GMF initializer decl is not a var?" ); |
3315 | EmitTopLevelDecl(D); |
3316 | } |
3317 | } |
// Second, any associated with the module itself.
3319 | for (Decl *D : getContext().getModuleInitializers(M: Primary)) { |
3320 | // Skip import decls, the inits for those are called explicitly. |
3321 | if (isa<ImportDecl>(Val: D)) |
3322 | continue; |
3323 | EmitTopLevelDecl(D); |
3324 | } |
// Third, any associated with the Private Module Fragment, if present.
3326 | if (auto PMF = Primary->getPrivateModuleFragment()) { |
3327 | for (Decl *D : getContext().getModuleInitializers(M: PMF)) { |
3328 | // Skip import decls, the inits for those are called explicitly. |
3329 | if (isa<ImportDecl>(Val: D)) |
3330 | continue; |
3331 | assert(isa<VarDecl>(D) && "PMF initializer decl is not a var?" ); |
3332 | EmitTopLevelDecl(D); |
3333 | } |
3334 | } |
3335 | } |
3336 | |
3337 | void CodeGenModule::EmitModuleLinkOptions() { |
3338 | // Collect the set of all of the modules we want to visit to emit link |
3339 | // options, which is essentially the imported modules and all of their |
3340 | // non-explicit child modules. |
3341 | llvm::SetVector<clang::Module *> LinkModules; |
3342 | llvm::SmallPtrSet<clang::Module *, 16> Visited; |
3343 | SmallVector<clang::Module *, 16> Stack; |
3344 | |
3345 | // Seed the stack with imported modules. |
3346 | for (Module *M : ImportedModules) { |
3347 | // Do not add any link flags when an implementation TU of a module imports |
3348 | // a header of that same module. |
3349 | if (M->getTopLevelModuleName() == getLangOpts().CurrentModule && |
3350 | !getLangOpts().isCompilingModule()) |
3351 | continue; |
3352 | if (Visited.insert(Ptr: M).second) |
3353 | Stack.push_back(Elt: M); |
3354 | } |
3355 | |
3356 | // Find all of the modules to import, making a little effort to prune |
3357 | // non-leaf modules. |
3358 | while (!Stack.empty()) { |
3359 | clang::Module *Mod = Stack.pop_back_val(); |
3360 | |
3361 | bool AnyChildren = false; |
3362 | |
3363 | // Visit the submodules of this module. |
3364 | for (const auto &SM : Mod->submodules()) { |
3365 | // Skip explicit children; they need to be explicitly imported to be |
3366 | // linked against. |
3367 | if (SM->IsExplicit) |
3368 | continue; |
3369 | |
3370 | if (Visited.insert(Ptr: SM).second) { |
3371 | Stack.push_back(Elt: SM); |
3372 | AnyChildren = true; |
3373 | } |
3374 | } |
3375 | |
3376 | // We didn't find any children, so add this module to the list of |
3377 | // modules to link against. |
3378 | if (!AnyChildren) { |
3379 | LinkModules.insert(X: Mod); |
3380 | } |
3381 | } |
3382 | |
3383 | // Add link options for all of the imported modules in reverse topological |
3384 | // order. We don't do anything to try to order import link flags with respect |
3385 | // to linker options inserted by things like #pragma comment(). |
3386 | SmallVector<llvm::MDNode *, 16> MetadataArgs; |
3387 | Visited.clear(); |
3388 | for (Module *M : LinkModules) |
3389 | if (Visited.insert(Ptr: M).second) |
3390 | addLinkOptionsPostorder(CGM&: *this, Mod: M, Metadata&: MetadataArgs, Visited); |
3391 | std::reverse(first: MetadataArgs.begin(), last: MetadataArgs.end()); |
3392 | LinkerOptionsMetadata.append(in_start: MetadataArgs.begin(), in_end: MetadataArgs.end()); |
3393 | |
3394 | // Add the linker options metadata flag. |
3395 | if (!LinkerOptionsMetadata.empty()) { |
3396 | auto *NMD = getModule().getOrInsertNamedMetadata(Name: "llvm.linker.options" ); |
3397 | for (auto *MD : LinkerOptionsMetadata) |
3398 | NMD->addOperand(M: MD); |
3399 | } |
3400 | } |
3401 | |
3402 | void CodeGenModule::EmitDeferred() { |
3403 | // Emit deferred declare target declarations. |
3404 | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) |
3405 | getOpenMPRuntime().emitDeferredTargetDecls(); |
3406 | |
3407 | // Emit code for any potentially referenced deferred decls. Since a |
3408 | // previously unused static decl may become used during the generation of code |
3409 | // for a static function, iterate until no changes are made. |
3410 | |
3411 | if (!DeferredVTables.empty()) { |
3412 | EmitDeferredVTables(); |
3413 | |
3414 | // Emitting a vtable doesn't directly cause more vtables to |
3415 | // become deferred, although it can cause functions to be |
3416 | // emitted that then need those vtables. |
3417 | assert(DeferredVTables.empty()); |
3418 | } |
3419 | |
3420 | // Emit CUDA/HIP static device variables referenced by host code only. |
3421 | // Note we should not clear CUDADeviceVarODRUsedByHost since it is still |
3422 | // needed for further handling. |
3423 | if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) |
3424 | llvm::append_range(C&: DeferredDeclsToEmit, |
3425 | R&: getContext().CUDADeviceVarODRUsedByHost); |
3426 | |
3427 | // Stop if we're out of both deferred vtables and deferred declarations. |
3428 | if (DeferredDeclsToEmit.empty()) |
3429 | return; |
3430 | |
3431 | // Grab the list of decls to emit. If EmitGlobalDefinition schedules more |
3432 | // work, it will not interfere with this. |
3433 | std::vector<GlobalDecl> CurDeclsToEmit; |
3434 | CurDeclsToEmit.swap(x&: DeferredDeclsToEmit); |
3435 | |
3436 | for (GlobalDecl &D : CurDeclsToEmit) { |
3437 | // Functions declared with the sycl_kernel_entry_point attribute are |
3438 | // emitted normally during host compilation. During device compilation, |
3439 | // a SYCL kernel caller offload entry point function is generated and |
3440 | // emitted in place of each of these functions. |
3441 | if (const auto *FD = D.getDecl()->getAsFunction()) { |
3442 | if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>() && |
3443 | FD->isDefined()) { |
3444 | // Functions with an invalid sycl_kernel_entry_point attribute are |
3445 | // ignored during device compilation. |
3446 | if (!FD->getAttr<SYCLKernelEntryPointAttr>()->isInvalidAttr()) { |
3447 | // Generate and emit the SYCL kernel caller function. |
3448 | EmitSYCLKernelCaller(KernelEntryPointFn: FD, Ctx&: getContext()); |
3449 | // Recurse to emit any symbols directly or indirectly referenced |
3450 | // by the SYCL kernel caller function. |
3451 | EmitDeferred(); |
3452 | } |
3453 | // Do not emit the sycl_kernel_entry_point attributed function. |
3454 | continue; |
3455 | } |
3456 | } |
3457 | |
// We should call GetAddrOfGlobal with IsForDefinition set to true in order
// to get a GlobalValue with exactly the type we need, not something that
// might have been created for another decl with the same mangled name but
// a different type.
3462 | llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>( |
3463 | Val: GetAddrOfGlobal(GD: D, IsForDefinition: ForDefinition)); |
3464 | |
3465 | // In case of different address spaces, we may still get a cast, even with |
3466 | // IsForDefinition equal to true. Query mangled names table to get |
3467 | // GlobalValue. |
3468 | if (!GV) |
3469 | GV = GetGlobalValue(Name: getMangledName(GD: D)); |
3470 | |
3471 | // Make sure GetGlobalValue returned non-null. |
3472 | assert(GV); |
3473 | |
3474 | // Check to see if we've already emitted this. This is necessary |
3475 | // for a couple of reasons: first, decls can end up in the |
3476 | // deferred-decls queue multiple times, and second, decls can end |
3477 | // up with definitions in unusual ways (e.g. by an extern inline |
3478 | // function acquiring a strong function redefinition). Just |
3479 | // ignore these cases. |
3480 | if (!GV->isDeclaration()) |
3481 | continue; |
3482 | |
3483 | // If this is OpenMP, check if it is legal to emit this global normally. |
3484 | if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD: D)) |
3485 | continue; |
3486 | |
3487 | // Otherwise, emit the definition and move on to the next one. |
3488 | EmitGlobalDefinition(D, GV); |
3489 | |
3490 | // If we found out that we need to emit more decls, do that recursively. |
3491 | // This has the advantage that the decls are emitted in a DFS and related |
3492 | // ones are close together, which is convenient for testing. |
3493 | if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) { |
3494 | EmitDeferred(); |
3495 | assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty()); |
3496 | } |
3497 | } |
3498 | } |
3499 | |
3500 | void CodeGenModule::EmitVTablesOpportunistically() { |
// Try to emit external vtables as available_externally if all of their
// inline virtual functions have been emitted. This runs after EmitDeferred()
// and therefore is not allowed to create new references to things that need
// to be emitted lazily. Note that it also relies on the fact that we emit
// RTTI eagerly.
3505 | |
3506 | assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables()) |
3507 | && "Only emit opportunistic vtables with optimizations" ); |
3508 | |
3509 | for (const CXXRecordDecl *RD : OpportunisticVTables) { |
3510 | assert(getVTables().isVTableExternal(RD) && |
3511 | "This queue should only contain external vtables" ); |
3512 | if (getCXXABI().canSpeculativelyEmitVTable(RD)) |
3513 | VTables.GenerateClassData(RD); |
3514 | } |
3515 | OpportunisticVTables.clear(); |
3516 | } |
3517 | |
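// Each llvm.global.annotations entry produced below is a struct of the form
// { annotated global, annotation string, source file name, line number,
//   extra args } (see EmitAnnotateAttr further down), typically driven by
// __attribute__((annotate("..."))).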
3518 | void CodeGenModule::EmitGlobalAnnotations() { |
3519 | for (const auto& [MangledName, VD] : DeferredAnnotations) { |
3520 | llvm::GlobalValue *GV = GetGlobalValue(Name: MangledName); |
3521 | if (GV) |
3522 | AddGlobalAnnotations(D: VD, GV); |
3523 | } |
3524 | DeferredAnnotations.clear(); |
3525 | |
3526 | if (Annotations.empty()) |
3527 | return; |
3528 | |
3529 | // Create a new global variable for the ConstantStruct in the Module. |
3530 | llvm::Constant *Array = llvm::ConstantArray::get(T: llvm::ArrayType::get( |
3531 | ElementType: Annotations[0]->getType(), NumElements: Annotations.size()), V: Annotations); |
3532 | auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false, |
3533 | llvm::GlobalValue::AppendingLinkage, |
3534 | Array, "llvm.global.annotations" ); |
3535 | gv->setSection(AnnotationSection); |
3536 | } |
3537 | |
3538 | llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) { |
3539 | llvm::Constant *&AStr = AnnotationStrings[Str]; |
3540 | if (AStr) |
3541 | return AStr; |
3542 | |
3543 | // Not found yet, create a new global. |
3544 | llvm::Constant *s = llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: Str); |
3545 | auto *gv = new llvm::GlobalVariable( |
3546 | getModule(), s->getType(), true, llvm::GlobalValue::PrivateLinkage, s, |
3547 | ".str" , nullptr, llvm::GlobalValue::NotThreadLocal, |
3548 | ConstGlobalsPtrTy->getAddressSpace()); |
3549 | gv->setSection(AnnotationSection); |
3550 | gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3551 | AStr = gv; |
3552 | return gv; |
3553 | } |
3554 | |
3555 | llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) { |
3556 | SourceManager &SM = getContext().getSourceManager(); |
3557 | PresumedLoc PLoc = SM.getPresumedLoc(Loc); |
3558 | if (PLoc.isValid()) |
3559 | return EmitAnnotationString(Str: PLoc.getFilename()); |
3560 | return EmitAnnotationString(Str: SM.getBufferName(Loc)); |
3561 | } |
3562 | |
3563 | llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) { |
3564 | SourceManager &SM = getContext().getSourceManager(); |
3565 | PresumedLoc PLoc = SM.getPresumedLoc(Loc: L); |
3566 | unsigned LineNo = PLoc.isValid() ? PLoc.getLine() : |
3567 | SM.getExpansionLineNumber(Loc: L); |
3568 | return llvm::ConstantInt::get(Ty: Int32Ty, V: LineNo); |
3569 | } |
3570 | |
3571 | llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) { |
3572 | ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()}; |
3573 | if (Exprs.empty()) |
3574 | return llvm::ConstantPointerNull::get(T: ConstGlobalsPtrTy); |
3575 | |
3576 | llvm::FoldingSetNodeID ID; |
3577 | for (Expr *E : Exprs) { |
3578 | ID.Add(x: cast<clang::ConstantExpr>(Val: E)->getAPValueResult()); |
3579 | } |
3580 | llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()]; |
3581 | if (Lookup) |
3582 | return Lookup; |
3583 | |
3584 | llvm::SmallVector<llvm::Constant *, 4> LLVMArgs; |
3585 | LLVMArgs.reserve(N: Exprs.size()); |
3586 | ConstantEmitter ConstEmiter(*this); |
3587 | llvm::transform(Range&: Exprs, d_first: std::back_inserter(x&: LLVMArgs), F: [&](const Expr *E) { |
3588 | const auto *CE = cast<clang::ConstantExpr>(Val: E); |
3589 | return ConstEmiter.emitAbstract(loc: CE->getBeginLoc(), value: CE->getAPValueResult(), |
3590 | T: CE->getType()); |
3591 | }); |
3592 | auto *Struct = llvm::ConstantStruct::getAnon(V: LLVMArgs); |
3593 | auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true, |
3594 | llvm::GlobalValue::PrivateLinkage, Struct, |
3595 | ".args" ); |
3596 | GV->setSection(AnnotationSection); |
3597 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3598 | |
3599 | Lookup = GV; |
3600 | return GV; |
3601 | } |
3602 | |
3603 | llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV, |
3604 | const AnnotateAttr *AA, |
3605 | SourceLocation L) { |
3606 | // Get the globals for file name, annotation, and the line number. |
3607 | llvm::Constant *AnnoGV = EmitAnnotationString(Str: AA->getAnnotation()), |
3608 | *UnitGV = EmitAnnotationUnit(Loc: L), |
3609 | *LineNoCst = EmitAnnotationLineNo(L), |
3610 | *Args = EmitAnnotationArgs(Attr: AA); |
3611 | |
3612 | llvm::Constant *GVInGlobalsAS = GV; |
3613 | if (GV->getAddressSpace() != |
3614 | getDataLayout().getDefaultGlobalsAddressSpace()) { |
3615 | GVInGlobalsAS = llvm::ConstantExpr::getAddrSpaceCast( |
3616 | C: GV, |
3617 | Ty: llvm::PointerType::get( |
3618 | C&: GV->getContext(), AddressSpace: getDataLayout().getDefaultGlobalsAddressSpace())); |
3619 | } |
3620 | |
3621 | // Create the ConstantStruct for the global annotation. |
3622 | llvm::Constant *Fields[] = { |
3623 | GVInGlobalsAS, AnnoGV, UnitGV, LineNoCst, Args, |
3624 | }; |
3625 | return llvm::ConstantStruct::getAnon(V: Fields); |
3626 | } |
3627 | |
3628 | void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D, |
3629 | llvm::GlobalValue *GV) { |
3630 | assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute" ); |
3631 | // Get the struct elements for these annotations. |
3632 | for (const auto *I : D->specific_attrs<AnnotateAttr>()) |
3633 | Annotations.push_back(x: EmitAnnotateAttr(GV, AA: I, L: D->getLocation())); |
3634 | } |
3635 | |
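// The no-sanitize list uses the sanitizer special-case-list format; e.g.
// (illustrative) entries such as "fun:foo*", "src:path/to/file.cpp" or
// "mainfile:*generated.cpp" suppress instrumentation for matching functions,
// source locations, or entire main files respectively.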
3636 | bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn, |
3637 | SourceLocation Loc) const { |
3638 | const auto &NoSanitizeL = getContext().getNoSanitizeList(); |
3639 | // NoSanitize by function name. |
3640 | if (NoSanitizeL.containsFunction(Mask: Kind, FunctionName: Fn->getName())) |
3641 | return true; |
3642 | // NoSanitize by location. Check "mainfile" prefix. |
3643 | auto &SM = Context.getSourceManager(); |
3644 | FileEntryRef MainFile = *SM.getFileEntryRefForID(FID: SM.getMainFileID()); |
3645 | if (NoSanitizeL.containsMainFile(Mask: Kind, FileName: MainFile.getName())) |
3646 | return true; |
3647 | |
3648 | // Check "src" prefix. |
3649 | if (Loc.isValid()) |
3650 | return NoSanitizeL.containsLocation(Mask: Kind, Loc); |
3651 | // If location is unknown, this may be a compiler-generated function. Assume |
3652 | // it's located in the main file. |
3653 | return NoSanitizeL.containsFile(Mask: Kind, FileName: MainFile.getName()); |
3654 | } |
3655 | |
3656 | bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, |
3657 | llvm::GlobalVariable *GV, |
3658 | SourceLocation Loc, QualType Ty, |
3659 | StringRef Category) const { |
3660 | const auto &NoSanitizeL = getContext().getNoSanitizeList(); |
3661 | if (NoSanitizeL.containsGlobal(Mask: Kind, GlobalName: GV->getName(), Category)) |
3662 | return true; |
3663 | auto &SM = Context.getSourceManager(); |
3664 | if (NoSanitizeL.containsMainFile( |
3665 | Mask: Kind, FileName: SM.getFileEntryRefForID(FID: SM.getMainFileID())->getName(), |
3666 | Category)) |
3667 | return true; |
3668 | if (NoSanitizeL.containsLocation(Mask: Kind, Loc, Category)) |
3669 | return true; |
3670 | |
3671 | // Check global type. |
3672 | if (!Ty.isNull()) { |
// Drill down through array types: if a global variable of a given type is
// not sanitized, we also don't instrument arrays of that type.
3675 | while (auto AT = dyn_cast<ArrayType>(Val: Ty.getTypePtr())) |
3676 | Ty = AT->getElementType(); |
3677 | Ty = Ty.getCanonicalType().getUnqualifiedType(); |
3678 | // Only record types (classes, structs etc.) are ignored. |
3679 | if (Ty->isRecordType()) { |
3680 | std::string TypeStr = Ty.getAsString(Policy: getContext().getPrintingPolicy()); |
3681 | if (NoSanitizeL.containsType(Mask: Kind, MangledTypeName: TypeStr, Category)) |
3682 | return true; |
3683 | } |
3684 | } |
3685 | return false; |
3686 | } |
3687 | |
3688 | bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc, |
3689 | StringRef Category) const { |
3690 | const auto &XRayFilter = getContext().getXRayFilter(); |
3691 | using ImbueAttr = XRayFunctionFilter::ImbueAttribute; |
3692 | auto Attr = ImbueAttr::NONE; |
3693 | if (Loc.isValid()) |
3694 | Attr = XRayFilter.shouldImbueLocation(Loc, Category); |
3695 | if (Attr == ImbueAttr::NONE) |
3696 | Attr = XRayFilter.shouldImbueFunction(FunctionName: Fn->getName()); |
3697 | switch (Attr) { |
3698 | case ImbueAttr::NONE: |
3699 | return false; |
3700 | case ImbueAttr::ALWAYS: |
3701 | Fn->addFnAttr(Kind: "function-instrument" , Val: "xray-always" ); |
3702 | break; |
3703 | case ImbueAttr::ALWAYS_ARG1: |
3704 | Fn->addFnAttr(Kind: "function-instrument" , Val: "xray-always" ); |
3705 | Fn->addFnAttr(Kind: "xray-log-args" , Val: "1" ); |
3706 | break; |
3707 | case ImbueAttr::NEVER: |
3708 | Fn->addFnAttr(Kind: "function-instrument" , Val: "xray-never" ); |
3709 | break; |
3710 | } |
3711 | return true; |
3712 | } |
3713 | |
3714 | ProfileList::ExclusionType |
3715 | CodeGenModule::isFunctionBlockedByProfileList(llvm::Function *Fn, |
3716 | SourceLocation Loc) const { |
3717 | const auto &ProfileList = getContext().getProfileList(); |
3718 | // If the profile list is empty, then instrument everything. |
3719 | if (ProfileList.isEmpty()) |
3720 | return ProfileList::Allow; |
3721 | llvm::driver::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr(); |
3722 | // First, check the function name. |
3723 | if (auto V = ProfileList.isFunctionExcluded(FunctionName: Fn->getName(), Kind)) |
3724 | return *V; |
3725 | // Next, check the source location. |
3726 | if (Loc.isValid()) |
3727 | if (auto V = ProfileList.isLocationExcluded(Loc, Kind)) |
3728 | return *V; |
3729 | // If location is unknown, this may be a compiler-generated function. Assume |
3730 | // it's located in the main file. |
3731 | auto &SM = Context.getSourceManager(); |
3732 | if (auto MainFile = SM.getFileEntryRefForID(FID: SM.getMainFileID())) |
3733 | if (auto V = ProfileList.isFileExcluded(FileName: MainFile->getName(), Kind)) |
3734 | return *V; |
3735 | return ProfileList.getDefault(Kind); |
3736 | } |
3737 | |
3738 | ProfileList::ExclusionType |
3739 | CodeGenModule::isFunctionBlockedFromProfileInstr(llvm::Function *Fn, |
3740 | SourceLocation Loc) const { |
3741 | auto V = isFunctionBlockedByProfileList(Fn, Loc); |
3742 | if (V != ProfileList::Allow) |
3743 | return V; |
3744 | |
3745 | auto NumGroups = getCodeGenOpts().ProfileTotalFunctionGroups; |
3746 | if (NumGroups > 1) { |
3747 | auto Group = llvm::crc32(Data: arrayRefFromStringRef(Input: Fn->getName())) % NumGroups; |
3748 | if (Group != getCodeGenOpts().ProfileSelectedFunctionGroup) |
3749 | return ProfileList::Skip; |
3750 | } |
3751 | return ProfileList::Allow; |
3752 | } |
3753 | |
3754 | bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) { |
3755 | // Never defer when EmitAllDecls is specified. |
3756 | if (LangOpts.EmitAllDecls) |
3757 | return true; |
3758 | |
3759 | const auto *VD = dyn_cast<VarDecl>(Val: Global); |
3760 | if (VD && |
3761 | ((CodeGenOpts.KeepPersistentStorageVariables && |
3762 | (VD->getStorageDuration() == SD_Static || |
3763 | VD->getStorageDuration() == SD_Thread)) || |
3764 | (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static && |
3765 | VD->getType().isConstQualified()))) |
3766 | return true; |
3767 | |
3768 | return getContext().DeclMustBeEmitted(D: Global); |
3769 | } |
3770 | |
3771 | bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { |
// In OpenMP 5.0, variables and functions may be marked as
// device_type(host/nohost), and we should not emit them eagerly unless we
// are sure that they must be emitted on the host/device. To be sure, we need
// to have seen a declare target directive that explicitly mentions the
// function; we know we have if the level of the declare target attribute is
// -1. Note that we check elsewhere whether we should emit this at all.
3778 | if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) { |
3779 | std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr = |
3780 | OMPDeclareTargetDeclAttr::getActiveAttr(VD: Global); |
3781 | if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1) |
3782 | return false; |
3783 | } |
3784 | |
3785 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: Global)) { |
3786 | if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) |
3787 | // Implicit template instantiations may change linkage if they are later |
3788 | // explicitly instantiated, so they should not be emitted eagerly. |
3789 | return false; |
3790 | // Defer until all versions have been semantically checked. |
3791 | if (FD->hasAttr<TargetVersionAttr>() && !FD->isMultiVersion()) |
3792 | return false; |
3793 | // Defer emission of SYCL kernel entry point functions during device |
3794 | // compilation. |
3795 | if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>()) |
3796 | return false; |
3797 | } |
3798 | if (const auto *VD = dyn_cast<VarDecl>(Val: Global)) { |
3799 | if (Context.getInlineVariableDefinitionKind(VD) == |
3800 | ASTContext::InlineVariableDefinitionKind::WeakUnknown) |
3801 | // A definition of an inline constexpr static data member may change |
3802 | // linkage later if it's redeclared outside the class. |
3803 | return false; |
3804 | if (CXX20ModuleInits && VD->getOwningModule() && |
3805 | !VD->getOwningModule()->isModuleMapModule()) { |
3806 | // For C++20, module-owned initializers need to be deferred, since it is
3807 | // not known at this point if they will be run for the current module or |
3808 | // as part of the initializer for an imported one. |
3809 | return false; |
3810 | } |
3811 | } |
3812 | // If OpenMP is enabled and threadprivates must be generated like TLS, delay |
3813 | // codegen for global variables, because they may be marked as threadprivate. |
3814 | if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS && |
3815 | getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Val: Global) && |
3816 | !Global->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: false, ExcludeDtor: false) && |
3817 | !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD: Global)) |
3818 | return false; |
3819 | |
3820 | return true; |
3821 | } |
3822 | |
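// An MSGuidDecl represents a GUID constant, such as the operand of the
// Microsoft '__uuidof' extension. The descriptor is emitted linkonce_odr so
// that identical GUIDs from different TUs share a single definition.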
3823 | ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) { |
3824 | StringRef Name = getMangledName(GD); |
3825 | |
3826 | // The UUID descriptor should be pointer aligned. |
3827 | CharUnits Alignment = CharUnits::fromQuantity(Quantity: PointerAlignInBytes); |
3828 | |
3829 | // Look for an existing global. |
3830 | if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name)) |
3831 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3832 | |
3833 | ConstantEmitter Emitter(*this); |
3834 | llvm::Constant *Init; |
3835 | |
3836 | APValue &V = GD->getAsAPValue(); |
3837 | if (!V.isAbsent()) { |
3838 | // If possible, emit the APValue version of the initializer. In particular, |
3839 | // this gets the type of the constant right. |
3840 | Init = Emitter.emitForInitializer( |
3841 | value: GD->getAsAPValue(), destAddrSpace: GD->getType().getAddressSpace(), destType: GD->getType()); |
3842 | } else { |
3843 | // As a fallback, directly construct the constant. |
3844 | // FIXME: This may get padding wrong under esoteric struct layout rules. |
3845 | // MSVC appears to create a complete type 'struct __s_GUID' that it |
3846 | // presumably uses to represent these constants. |
3847 | MSGuidDecl::Parts Parts = GD->getParts(); |
3848 | llvm::Constant *Fields[4] = { |
3849 | llvm::ConstantInt::get(Ty: Int32Ty, V: Parts.Part1), |
3850 | llvm::ConstantInt::get(Ty: Int16Ty, V: Parts.Part2), |
3851 | llvm::ConstantInt::get(Ty: Int16Ty, V: Parts.Part3), |
3852 | llvm::ConstantDataArray::getRaw( |
3853 | Data: StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), NumElements: 8, |
3854 | ElementTy: Int8Ty)}; |
3855 | Init = llvm::ConstantStruct::getAnon(V: Fields); |
3856 | } |
3857 | |
3858 | auto *GV = new llvm::GlobalVariable( |
3859 | getModule(), Init->getType(), |
3860 | /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name); |
3861 | if (supportsCOMDAT()) |
3862 | GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName())); |
3863 | setDSOLocal(GV); |
3864 | |
3865 | if (!V.isAbsent()) { |
3866 | Emitter.finalize(global: GV); |
3867 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3868 | } |
3869 | |
3870 | llvm::Type *Ty = getTypes().ConvertTypeForMem(T: GD->getType()); |
3871 | return ConstantAddress(GV, Ty, Alignment); |
3872 | } |
3873 | |
3874 | ConstantAddress CodeGenModule::GetAddrOfUnnamedGlobalConstantDecl( |
3875 | const UnnamedGlobalConstantDecl *GCD) { |
3876 | CharUnits Alignment = getContext().getTypeAlignInChars(T: GCD->getType()); |
3877 | |
3878 | llvm::GlobalVariable **Entry = nullptr; |
3879 | Entry = &UnnamedGlobalConstantDeclMap[GCD]; |
3880 | if (*Entry) |
3881 | return ConstantAddress(*Entry, (*Entry)->getValueType(), Alignment); |
3882 | |
3883 | ConstantEmitter Emitter(*this); |
3884 | llvm::Constant *Init; |
3885 | |
3886 | const APValue &V = GCD->getValue(); |
3887 | |
3888 | assert(!V.isAbsent()); |
3889 | Init = Emitter.emitForInitializer(value: V, destAddrSpace: GCD->getType().getAddressSpace(), |
3890 | destType: GCD->getType()); |
3891 | |
3892 | auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(), |
3893 | /*isConstant=*/true, |
3894 | llvm::GlobalValue::PrivateLinkage, Init, |
3895 | ".constant" ); |
3896 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3897 | GV->setAlignment(Alignment.getAsAlign()); |
3898 | |
3899 | Emitter.finalize(global: GV); |
3900 | |
3901 | *Entry = GV; |
3902 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3903 | } |
3904 | |
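// A TemplateParamObjectDecl is the object that backs a class-type non-type
// template argument (C++20). Every TU must agree on its address, hence the
// linkonce_odr linkage when the object is externally visible.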
3905 | ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject( |
3906 | const TemplateParamObjectDecl *TPO) { |
3907 | StringRef Name = getMangledName(GD: TPO); |
3908 | CharUnits Alignment = getNaturalTypeAlignment(T: TPO->getType()); |
3909 | |
3910 | if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name)) |
3911 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3912 | |
3913 | ConstantEmitter Emitter(*this); |
3914 | llvm::Constant *Init = Emitter.emitForInitializer( |
3915 | value: TPO->getValue(), destAddrSpace: TPO->getType().getAddressSpace(), destType: TPO->getType()); |
3916 | |
3917 | if (!Init) { |
3918 | ErrorUnsupported(D: TPO, Type: "template parameter object" ); |
3919 | return ConstantAddress::invalid(); |
3920 | } |
3921 | |
3922 | llvm::GlobalValue::LinkageTypes Linkage = |
3923 | isExternallyVisible(L: TPO->getLinkageAndVisibility().getLinkage()) |
3924 | ? llvm::GlobalValue::LinkOnceODRLinkage |
3925 | : llvm::GlobalValue::InternalLinkage; |
3926 | auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(), |
3927 | /*isConstant=*/true, Linkage, Init, Name); |
3928 | setGVProperties(GV, D: TPO); |
3929 | if (supportsCOMDAT() && Linkage == llvm::GlobalValue::LinkOnceODRLinkage) |
3930 | GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName())); |
3931 | Emitter.finalize(global: GV); |
3932 | |
3933 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
3934 | } |
3935 | |
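// GetWeakRefReference handles declarations carrying the GNU 'weakref'
// attribute, e.g. (illustrative, not code from this TU):
//
//   static int f(void) __attribute__((weakref("g")));
//
// The aliasee is created (or looked up) and given extern_weak linkage; the
// weakref declaration itself produces no output (see EmitGlobal below).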
3936 | ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) { |
3937 | const AliasAttr *AA = VD->getAttr<AliasAttr>(); |
3938 | assert(AA && "No alias?" ); |
3939 | |
3940 | CharUnits Alignment = getContext().getDeclAlign(D: VD); |
3941 | llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: VD->getType()); |
3942 | |
3943 | // See if there is already something with the target's name in the module. |
3944 | llvm::GlobalValue *Entry = GetGlobalValue(Name: AA->getAliasee()); |
3945 | if (Entry) |
3946 | return ConstantAddress(Entry, DeclTy, Alignment); |
3947 | |
3948 | llvm::Constant *Aliasee; |
3949 | if (isa<llvm::FunctionType>(Val: DeclTy)) |
3950 | Aliasee = GetOrCreateLLVMFunction(MangledName: AA->getAliasee(), Ty: DeclTy, |
3951 | D: GlobalDecl(cast<FunctionDecl>(Val: VD)), |
3952 | /*ForVTable=*/false); |
3953 | else |
3954 | Aliasee = GetOrCreateLLVMGlobal(MangledName: AA->getAliasee(), Ty: DeclTy, AddrSpace: LangAS::Default, |
3955 | D: nullptr); |
3956 | |
3957 | auto *F = cast<llvm::GlobalValue>(Val: Aliasee); |
3958 | F->setLinkage(llvm::Function::ExternalWeakLinkage); |
3959 | WeakRefReferences.insert(Ptr: F); |
3960 | |
3961 | return ConstantAddress(Aliasee, DeclTy, Alignment); |
3962 | } |
3963 | |
3964 | template <typename AttrT> static bool hasImplicitAttr(const ValueDecl *D) { |
3965 | if (!D) |
3966 | return false; |
3967 | if (auto *A = D->getAttr<AttrT>()) |
3968 | return A->isImplicit(); |
3969 | return D->isImplicit(); |
3970 | } |
3971 | |
3972 | bool CodeGenModule::shouldEmitCUDAGlobalVar(const VarDecl *Global) const { |
3973 | assert(LangOpts.CUDA && "Should not be called by non-CUDA languages" ); |
3974 | // We need to emit host-side 'shadows' for all global |
3975 | // device-side variables because the CUDA runtime needs their |
3976 | // size and host-side address in order to provide access to |
3977 | // their device-side incarnations. |
3978 | return !LangOpts.CUDAIsDevice || Global->hasAttr<CUDADeviceAttr>() || |
3979 | Global->hasAttr<CUDAConstantAttr>() || |
3980 | Global->hasAttr<CUDASharedAttr>() || |
3981 | Global->getType()->isCUDADeviceBuiltinSurfaceType() || |
3982 | Global->getType()->isCUDADeviceBuiltinTextureType(); |
3983 | } |
3984 | |
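// EmitGlobal is the main entry point for emitting a ValueDecl. It handles
// aliases, ifuncs and cpu_dispatch resolvers directly, filters declarations
// for CUDA and OpenMP offloading, and otherwise either emits the definition
// eagerly or records it in the deferred-decl tables so it is emitted on first
// use.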
3985 | void CodeGenModule::EmitGlobal(GlobalDecl GD) { |
3986 | const auto *Global = cast<ValueDecl>(Val: GD.getDecl()); |
3987 | |
3988 | // Weak references don't produce any output by themselves. |
3989 | if (Global->hasAttr<WeakRefAttr>()) |
3990 | return; |
3991 | |
3992 | // If this is an alias definition (which otherwise looks like a declaration),
3993 | // emit it now.
3994 | if (Global->hasAttr<AliasAttr>()) |
3995 | return EmitAliasDefinition(GD); |
3996 | |
3997 | // An ifunc is like an alias whose value is resolved at runtime by a resolver.
3998 | if (Global->hasAttr<IFuncAttr>()) |
3999 | return emitIFuncDefinition(GD); |
4000 | |
4001 | // If this is a cpu_dispatch multiversion function, emit the resolver. |
4002 | if (Global->hasAttr<CPUDispatchAttr>()) |
4003 | return emitCPUDispatchDefinition(GD); |
4004 | |
4005 | // If this is CUDA, be selective about which declarations we emit. |
4006 | // Non-constexpr non-lambda implicit host device functions are not emitted |
4007 | // unless they are used on device side. |
4008 | if (LangOpts.CUDA) { |
4009 | assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) && |
4010 | "Expected Variable or Function" ); |
4011 | if (const auto *VD = dyn_cast<VarDecl>(Val: Global)) { |
4012 | if (!shouldEmitCUDAGlobalVar(Global: VD)) |
4013 | return; |
4014 | } else if (LangOpts.CUDAIsDevice) { |
4015 | const auto *FD = dyn_cast<FunctionDecl>(Val: Global); |
4016 | if ((!Global->hasAttr<CUDADeviceAttr>() || |
4017 | (LangOpts.OffloadImplicitHostDeviceTemplates && |
4018 | hasImplicitAttr<CUDAHostAttr>(D: FD) && |
4019 | hasImplicitAttr<CUDADeviceAttr>(D: FD) && !FD->isConstexpr() && |
4020 | !isLambdaCallOperator(DC: FD) && |
4021 | !getContext().CUDAImplicitHostDeviceFunUsedByDevice.count(V: FD))) && |
4022 | !Global->hasAttr<CUDAGlobalAttr>() && |
4023 | !(LangOpts.HIPStdPar && isa<FunctionDecl>(Val: Global) && |
4024 | !Global->hasAttr<CUDAHostAttr>())) |
4025 | return; |
4026 | // Device-only functions are the only things we skip. |
4027 | } else if (!Global->hasAttr<CUDAHostAttr>() && |
4028 | Global->hasAttr<CUDADeviceAttr>()) |
4029 | return; |
4030 | } |
4031 | |
4032 | if (LangOpts.OpenMP) { |
4033 | // If this is OpenMP, check if it is legal to emit this global normally. |
4034 | if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD)) |
4035 | return; |
4036 | if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Val: Global)) { |
4037 | if (MustBeEmitted(Global)) |
4038 | EmitOMPDeclareReduction(D: DRD); |
4039 | return; |
4040 | } |
4041 | if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Val: Global)) { |
4042 | if (MustBeEmitted(Global)) |
4043 | EmitOMPDeclareMapper(D: DMD); |
4044 | return; |
4045 | } |
4046 | } |
4047 | |
4048 | // Ignore declarations, they will be emitted on their first use. |
4049 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: Global)) { |
4050 | if (DeviceKernelAttr::isOpenCLSpelling(A: FD->getAttr<DeviceKernelAttr>()) && |
4051 | FD->doesThisDeclarationHaveABody()) |
4052 | addDeferredDeclToEmit(GD: GlobalDecl(FD, KernelReferenceKind::Stub)); |
4053 | |
4054 | // Update deferred annotations with the latest declaration if the function
4055 | // was already used or defined.
4056 | if (FD->hasAttr<AnnotateAttr>()) { |
4057 | StringRef MangledName = getMangledName(GD); |
4058 | if (GetGlobalValue(Name: MangledName)) |
4059 | DeferredAnnotations[MangledName] = FD; |
4060 | } |
4061 | |
4062 | // Forward declarations are emitted lazily on first use. |
4063 | if (!FD->doesThisDeclarationHaveABody()) { |
4064 | if (!FD->doesDeclarationForceExternallyVisibleDefinition() && |
4065 | (!FD->isMultiVersion() || !getTarget().getTriple().isAArch64())) |
4066 | return; |
4067 | |
4068 | StringRef MangledName = getMangledName(GD); |
4069 | |
4070 | // Compute the function info and LLVM type. |
4071 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
4072 | llvm::Type *Ty = getTypes().GetFunctionType(Info: FI); |
4073 | |
4074 | GetOrCreateLLVMFunction(MangledName, Ty, D: GD, /*ForVTable=*/false, |
4075 | /*DontDefer=*/false); |
4076 | return; |
4077 | } |
4078 | } else { |
4079 | const auto *VD = cast<VarDecl>(Val: Global); |
4080 | assert(VD->isFileVarDecl() && "Cannot emit local var decl as global." ); |
4081 | if (VD->isThisDeclarationADefinition() != VarDecl::Definition && |
4082 | !Context.isMSStaticDataMemberInlineDefinition(VD)) { |
4083 | if (LangOpts.OpenMP) { |
4084 | // Emit declaration of the must-be-emitted declare target variable. |
4085 | if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res = |
4086 | OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) { |
4087 | |
4088 | // If this variable has external storage and doesn't require special
4089 | // link handling, we defer to its canonical definition.
4090 | if (VD->hasExternalStorage() && |
4091 | Res != OMPDeclareTargetDeclAttr::MT_Link) |
4092 | return; |
4093 | |
4094 | bool UnifiedMemoryEnabled = |
4095 | getOpenMPRuntime().hasRequiresUnifiedSharedMemory(); |
4096 | if ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
4097 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
4098 | !UnifiedMemoryEnabled) { |
4099 | (void)GetAddrOfGlobalVar(D: VD); |
4100 | } else { |
4101 | assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) || |
4102 | ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
4103 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
4104 | UnifiedMemoryEnabled)) && |
4105 | "Link clause or to clause with unified memory expected." ); |
4106 | (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); |
4107 | } |
4108 | |
4109 | return; |
4110 | } |
4111 | } |
4112 | // If this declaration may have caused an inline variable definition to |
4113 | // change linkage, make sure that it's emitted. |
4114 | if (Context.getInlineVariableDefinitionKind(VD) == |
4115 | ASTContext::InlineVariableDefinitionKind::Strong) |
4116 | GetAddrOfGlobalVar(D: VD); |
4117 | return; |
4118 | } |
4119 | } |
4120 | |
4121 | // Defer code generation to first use when possible, e.g. if this is an inline |
4122 | // function. If the global must always be emitted, do it eagerly if possible |
4123 | // to benefit from cache locality. |
4124 | if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) { |
4125 | // Emit the definition if it can't be deferred. |
4126 | EmitGlobalDefinition(D: GD); |
4127 | addEmittedDeferredDecl(GD); |
4128 | return; |
4129 | } |
4130 | |
4131 | // If we're deferring emission of a C++ variable with an |
4132 | // initializer, remember the order in which it appeared in the file. |
4133 | if (getLangOpts().CPlusPlus && isa<VarDecl>(Val: Global) && |
4134 | cast<VarDecl>(Val: Global)->hasInit()) { |
4135 | DelayedCXXInitPosition[Global] = CXXGlobalInits.size(); |
4136 | CXXGlobalInits.push_back(x: nullptr); |
4137 | } |
4138 | |
4139 | StringRef MangledName = getMangledName(GD); |
4140 | if (GetGlobalValue(Name: MangledName) != nullptr) { |
4141 | // The value has already been used and should therefore be emitted. |
4142 | addDeferredDeclToEmit(GD); |
4143 | } else if (MustBeEmitted(Global)) { |
4144 | // The value must be emitted, but cannot be emitted eagerly. |
4145 | assert(!MayBeEmittedEagerly(Global)); |
4146 | addDeferredDeclToEmit(GD); |
4147 | } else { |
4148 | // Otherwise, remember that we saw a deferred decl with this name. The |
4149 | // first use of the mangled name will cause it to move into |
4150 | // DeferredDeclsToEmit. |
4151 | DeferredDecls[MangledName] = GD; |
4152 | } |
4153 | } |
4154 | |
4155 | // Check if T is a class type with a destructor that's not dllimport. |
4156 | static bool HasNonDllImportDtor(QualType T) { |
4157 | if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>()) |
4158 | if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Val: RT->getDecl())) |
4159 | if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>()) |
4160 | return true; |
4161 | |
4162 | return false; |
4163 | } |
4164 | |
4165 | namespace { |
4166 | struct FunctionIsDirectlyRecursive |
4167 | : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> { |
4168 | const StringRef Name; |
4169 | const Builtin::Context &BI; |
4170 | FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) |
4171 | : Name(N), BI(C) {} |
4172 | |
4173 | bool VisitCallExpr(const CallExpr *E) { |
4174 | const FunctionDecl *FD = E->getDirectCallee(); |
4175 | if (!FD) |
4176 | return false; |
4177 | AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>(); |
4178 | if (Attr && Name == Attr->getLabel()) |
4179 | return true; |
4180 | unsigned BuiltinID = FD->getBuiltinID(); |
4181 | if (!BuiltinID || !BI.isLibFunction(ID: BuiltinID)) |
4182 | return false; |
4183 | std::string BuiltinNameStr = BI.getName(ID: BuiltinID); |
4184 | StringRef BuiltinName = BuiltinNameStr; |
4185 | return BuiltinName.consume_front(Prefix: "__builtin_" ) && Name == BuiltinName; |
4186 | } |
4187 | |
4188 | bool VisitStmt(const Stmt *S) { |
4189 | for (const Stmt *Child : S->children()) |
4190 | if (Child && this->Visit(S: Child)) |
4191 | return true; |
4192 | return false; |
4193 | } |
4194 | }; |
4195 | |
4196 | // Make sure we're not referencing non-imported vars or functions. |
4197 | struct DLLImportFunctionVisitor |
4198 | : public RecursiveASTVisitor<DLLImportFunctionVisitor> { |
4199 | bool SafeToInline = true; |
4200 | |
4201 | bool shouldVisitImplicitCode() const { return true; } |
4202 | |
4203 | bool VisitVarDecl(VarDecl *VD) { |
4204 | if (VD->getTLSKind()) { |
4205 | // A thread-local variable cannot be imported. |
4206 | SafeToInline = false; |
4207 | return SafeToInline; |
4208 | } |
4209 | |
4210 | // A variable definition might imply a destructor call. |
4211 | if (VD->isThisDeclarationADefinition()) |
4212 | SafeToInline = !HasNonDllImportDtor(T: VD->getType()); |
4213 | |
4214 | return SafeToInline; |
4215 | } |
4216 | |
4217 | bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { |
4218 | if (const auto *D = E->getTemporary()->getDestructor()) |
4219 | SafeToInline = D->hasAttr<DLLImportAttr>(); |
4220 | return SafeToInline; |
4221 | } |
4222 | |
4223 | bool VisitDeclRefExpr(DeclRefExpr *E) { |
4224 | ValueDecl *VD = E->getDecl(); |
4225 | if (isa<FunctionDecl>(Val: VD)) |
4226 | SafeToInline = VD->hasAttr<DLLImportAttr>(); |
4227 | else if (VarDecl *V = dyn_cast<VarDecl>(Val: VD)) |
4228 | SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>(); |
4229 | return SafeToInline; |
4230 | } |
4231 | |
4232 | bool VisitCXXConstructExpr(CXXConstructExpr *E) { |
4233 | SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>(); |
4234 | return SafeToInline; |
4235 | } |
4236 | |
4237 | bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) { |
4238 | CXXMethodDecl *M = E->getMethodDecl(); |
4239 | if (!M) { |
4240 | // Call through a pointer to member function. This is safe to inline. |
4241 | SafeToInline = true; |
4242 | } else { |
4243 | SafeToInline = M->hasAttr<DLLImportAttr>(); |
4244 | } |
4245 | return SafeToInline; |
4246 | } |
4247 | |
4248 | bool VisitCXXDeleteExpr(CXXDeleteExpr *E) { |
4249 | SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>(); |
4250 | return SafeToInline; |
4251 | } |
4252 | |
4253 | bool VisitCXXNewExpr(CXXNewExpr *E) { |
4254 | SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>(); |
4255 | return SafeToInline; |
4256 | } |
4257 | }; |
4258 | } |
4259 | |
4260 | // isTriviallyRecursive - Check whether this function calls another decl that,
4261 | // because of an asm label attribute or the other decl being a builtin, ends up
4262 | // referring back to this function itself.
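// For example (an illustrative sketch, not code from this TU):
//
//   extern int foo(int) __asm__("real_foo");
//   int real_foo(int x) { return foo(x); }
//
// Here the call to 'foo' inside 'real_foo' resolves back to 'real_foo' itself.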
4263 | bool |
4264 | CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) { |
4265 | StringRef Name; |
4266 | if (getCXXABI().getMangleContext().shouldMangleDeclName(D: FD)) { |
4267 | // asm labels are a special kind of mangling we have to support. |
4268 | AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>(); |
4269 | if (!Attr) |
4270 | return false; |
4271 | Name = Attr->getLabel(); |
4272 | } else { |
4273 | Name = FD->getName(); |
4274 | } |
4275 | |
4276 | FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo); |
4277 | const Stmt *Body = FD->getBody(); |
4278 | return Body ? Walker.Visit(S: Body) : false; |
4279 | } |
4280 | |
4281 | bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) { |
4282 | if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage) |
4283 | return true; |
4284 | |
4285 | const auto *F = cast<FunctionDecl>(Val: GD.getDecl()); |
4286 | // Inline builtin declarations must be emitted. They are often fortified
4287 | // functions.
4288 | if (F->isInlineBuiltinDeclaration()) |
4289 | return true; |
4290 | |
4291 | if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>()) |
4292 | return false; |
4293 | |
4294 | // We don't import function bodies from other named module units since that |
4295 | // behavior may break ABI compatibility of the current unit. |
4296 | if (const Module *M = F->getOwningModule(); |
4297 | M && M->getTopLevelModule()->isNamedModule() && |
4298 | getContext().getCurrentNamedModule() != M->getTopLevelModule()) { |
4299 | // A common practice is to mark a template member function as always-inline
4300 | // and declare the template as an extern explicit instantiation without
4301 | // providing a definition for the member function. So we have to emit such
4302 | // always-inline functions from the explicit instantiation.
4303 | //
4304 | // See https://github.com/llvm/llvm-project/issues/86893 for details.
4305 | //
4306 | // TODO: It may be better to warn when calling a function from another module
4307 | // unit that is marked always-inline but is not defined inline.
4308 | if (!F->isTemplateInstantiation() || !F->hasAttr<AlwaysInlineAttr>()) { |
4309 | return false; |
4310 | } |
4311 | } |
4312 | |
4313 | if (F->hasAttr<NoInlineAttr>()) |
4314 | return false; |
4315 | |
4316 | if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) { |
4317 | // Check whether it would be safe to inline this dllimport function. |
4318 | DLLImportFunctionVisitor Visitor; |
4319 | Visitor.TraverseFunctionDecl(D: const_cast<FunctionDecl*>(F)); |
4320 | if (!Visitor.SafeToInline) |
4321 | return false; |
4322 | |
4323 | if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(Val: F)) { |
4324 | // Implicit destructor invocations aren't captured in the AST, so the |
4325 | // check above can't see them. Check for them manually here. |
4326 | for (const Decl *Member : Dtor->getParent()->decls()) |
4327 | if (isa<FieldDecl>(Val: Member)) |
4328 | if (HasNonDllImportDtor(T: cast<FieldDecl>(Val: Member)->getType())) |
4329 | return false; |
4330 | for (const CXXBaseSpecifier &B : Dtor->getParent()->bases()) |
4331 | if (HasNonDllImportDtor(T: B.getType())) |
4332 | return false; |
4333 | } |
4334 | } |
4335 | |
4336 | // PR9614. Avoid cases where the source code is lying to us. An
4337 | // available_externally function should have an equivalent function somewhere
4338 | // else, but a function that calls itself through asm label/`__builtin_`
4339 | // trickery is clearly not equivalent to the real implementation.
4340 | // This happens in glibc's btowc and in some configure checks.
4341 | return !isTriviallyRecursive(FD: F); |
4342 | } |
4343 | |
4344 | bool CodeGenModule::shouldOpportunisticallyEmitVTables() { |
4345 | return CodeGenOpts.OptimizationLevel > 0; |
4346 | } |
4347 | |
4348 | void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD, |
4349 | llvm::GlobalValue *GV) { |
4350 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4351 | |
4352 | if (FD->isCPUSpecificMultiVersion()) { |
4353 | auto *Spec = FD->getAttr<CPUSpecificAttr>(); |
4354 | for (unsigned I = 0; I < Spec->cpus_size(); ++I) |
4355 | EmitGlobalFunctionDefinition(GD: GD.getWithMultiVersionIndex(Index: I), GV: nullptr); |
4356 | } else if (auto *TC = FD->getAttr<TargetClonesAttr>()) { |
4357 | for (unsigned I = 0; I < TC->featuresStrs_size(); ++I) |
4358 | if (TC->isFirstOfVersion(Index: I)) |
4359 | EmitGlobalFunctionDefinition(GD: GD.getWithMultiVersionIndex(Index: I), GV: nullptr); |
4360 | } else |
4361 | EmitGlobalFunctionDefinition(GD, GV); |
4362 | |
4363 | // Ensure that the resolver function is also emitted. |
4364 | if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) { |
4365 | // On AArch64 defer the resolver emission until the entire TU is processed. |
4366 | if (getTarget().getTriple().isAArch64()) |
4367 | AddDeferredMultiVersionResolverToEmit(GD); |
4368 | else |
4369 | GetOrCreateMultiVersionResolver(GD); |
4370 | } |
4371 | } |
4372 | |
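// EmitGlobalDefinition dispatches the actual definition: C++ constructors and
// destructors go through the C++ ABI, multiversion functions expand into all
// of their versions, virtual methods additionally emit their thunks, and
// variables are handled by EmitGlobalVarDefinition.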
4373 | void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) { |
4374 | const auto *D = cast<ValueDecl>(Val: GD.getDecl()); |
4375 | |
4376 | PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(), |
4377 | Context.getSourceManager(), |
4378 | "Generating code for declaration" ); |
4379 | |
4380 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) { |
4381 | // At -O0, don't generate IR for functions with available_externally |
4382 | // linkage. |
4383 | if (!shouldEmitFunction(GD)) |
4384 | return; |
4385 | |
4386 | llvm::TimeTraceScope TimeScope("CodeGen Function" , [&]() { |
4387 | std::string Name; |
4388 | llvm::raw_string_ostream OS(Name); |
4389 | FD->getNameForDiagnostic(OS, Policy: getContext().getPrintingPolicy(), |
4390 | /*Qualified=*/true); |
4391 | return Name; |
4392 | }); |
4393 | |
4394 | if (const auto *Method = dyn_cast<CXXMethodDecl>(Val: D)) { |
4395 | // Make sure to emit the definition(s) before we emit the thunks. |
4396 | // This is necessary for the generation of certain thunks. |
4397 | if (isa<CXXConstructorDecl>(Val: Method) || isa<CXXDestructorDecl>(Val: Method)) |
4398 | ABI->emitCXXStructor(GD); |
4399 | else if (FD->isMultiVersion()) |
4400 | EmitMultiVersionFunctionDefinition(GD, GV); |
4401 | else |
4402 | EmitGlobalFunctionDefinition(GD, GV); |
4403 | |
4404 | if (Method->isVirtual()) |
4405 | getVTables().EmitThunks(GD); |
4406 | |
4407 | return; |
4408 | } |
4409 | |
4410 | if (FD->isMultiVersion()) |
4411 | return EmitMultiVersionFunctionDefinition(GD, GV); |
4412 | return EmitGlobalFunctionDefinition(GD, GV); |
4413 | } |
4414 | |
4415 | if (const auto *VD = dyn_cast<VarDecl>(Val: D)) |
4416 | return EmitGlobalVarDefinition(D: VD, IsTentative: !VD->hasDefinition()); |
4417 | |
4418 | llvm_unreachable("Invalid argument to EmitGlobalDefinition()" ); |
4419 | } |
4420 | |
4421 | static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old, |
4422 | llvm::Function *NewFn); |
4423 | |
4424 | static uint64_t getFMVPriority(const TargetInfo &TI, |
4425 | const CodeGenFunction::FMVResolverOption &RO) { |
4426 | llvm::SmallVector<StringRef, 8> Features{RO.Features}; |
4427 | if (RO.Architecture) |
4428 | Features.push_back(Elt: *RO.Architecture); |
4429 | return TI.getFMVPriority(Features); |
4430 | } |
4431 | |
4432 | // Multiversion functions should be at most 'WeakODRLinkage' so that a different
4433 | // TU can forward declare the function without causing problems; CPUDispatch in
4434 | // particular runs into issues otherwise. This also makes sure we work with
4435 | // internal linkage functions, so that the same function name can be used with
4436 | // internal linkage in multiple TUs.
4437 | static llvm::GlobalValue::LinkageTypes |
4438 | getMultiversionLinkage(CodeGenModule &CGM, GlobalDecl GD) { |
4439 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4440 | if (FD->getFormalLinkage() == Linkage::Internal) |
4441 | return llvm::GlobalValue::InternalLinkage; |
4442 | return llvm::GlobalValue::WeakODRLinkage; |
4443 | } |
4444 | |
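// emitMultiVersionFunctions emits every recorded version of each multiversion
// function and then populates the resolver: the collected options are sorted
// by descending FMV priority and handed to EmitMultiVersionResolver. Emitting
// a version may append new entries to MultiVersionFuncs, so the list is
// drained again at the end.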
4445 | void CodeGenModule::emitMultiVersionFunctions() { |
4446 | std::vector<GlobalDecl> MVFuncsToEmit; |
4447 | MultiVersionFuncs.swap(x&: MVFuncsToEmit); |
4448 | for (GlobalDecl GD : MVFuncsToEmit) { |
4449 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4450 | assert(FD && "Expected a FunctionDecl" ); |
4451 | |
4452 | auto createFunction = [&](const FunctionDecl *Decl, unsigned MVIdx = 0) { |
4453 | GlobalDecl CurGD{Decl->isDefined() ? Decl->getDefinition() : Decl, MVIdx}; |
4454 | StringRef MangledName = getMangledName(GD: CurGD); |
4455 | llvm::Constant *Func = GetGlobalValue(Name: MangledName); |
4456 | if (!Func) { |
4457 | if (Decl->isDefined()) { |
4458 | EmitGlobalFunctionDefinition(GD: CurGD, GV: nullptr); |
4459 | Func = GetGlobalValue(Name: MangledName); |
4460 | } else { |
4461 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD: CurGD); |
4462 | llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI); |
4463 | Func = GetAddrOfFunction(GD: CurGD, Ty, /*ForVTable=*/false, |
4464 | /*DontDefer=*/false, IsForDefinition: ForDefinition); |
4465 | } |
4466 | assert(Func && "This should have just been created" ); |
4467 | } |
4468 | return cast<llvm::Function>(Val: Func); |
4469 | }; |
4470 | |
4471 | // For AArch64, a resolver is only emitted if a function marked with
4472 | // target_version("default") or target_clones("default") is defined
4473 | // in this TU. For other architectures it is always emitted.
4474 | bool ShouldEmitResolver = !getTarget().getTriple().isAArch64(); |
4475 | SmallVector<CodeGenFunction::FMVResolverOption, 10> Options; |
4476 | |
4477 | getContext().forEachMultiversionedFunctionVersion( |
4478 | FD, Pred: [&](const FunctionDecl *CurFD) { |
4479 | llvm::SmallVector<StringRef, 8> Feats; |
4480 | bool IsDefined = CurFD->getDefinition() != nullptr; |
4481 | |
4482 | if (const auto *TA = CurFD->getAttr<TargetAttr>()) { |
4483 | assert(getTarget().getTriple().isX86() && "Unsupported target" ); |
4484 | TA->getX86AddedFeatures(Out&: Feats); |
4485 | llvm::Function *Func = createFunction(CurFD); |
4486 | Options.emplace_back(Args&: Func, Args&: Feats, Args: TA->getX86Architecture()); |
4487 | } else if (const auto *TVA = CurFD->getAttr<TargetVersionAttr>()) { |
4488 | if (TVA->isDefaultVersion() && IsDefined) |
4489 | ShouldEmitResolver = true; |
4490 | llvm::Function *Func = createFunction(CurFD); |
4491 | char Delim = getTarget().getTriple().isAArch64() ? '+' : ','; |
4492 | TVA->getFeatures(Out&: Feats, Delim); |
4493 | Options.emplace_back(Args&: Func, Args&: Feats); |
4494 | } else if (const auto *TC = CurFD->getAttr<TargetClonesAttr>()) { |
4495 | for (unsigned I = 0; I < TC->featuresStrs_size(); ++I) { |
4496 | if (!TC->isFirstOfVersion(Index: I)) |
4497 | continue; |
4498 | if (TC->isDefaultVersion(Index: I) && IsDefined) |
4499 | ShouldEmitResolver = true; |
4500 | llvm::Function *Func = createFunction(CurFD, I); |
4501 | Feats.clear(); |
4502 | if (getTarget().getTriple().isX86()) { |
4503 | TC->getX86Feature(Out&: Feats, Index: I); |
4504 | Options.emplace_back(Args&: Func, Args&: Feats, Args: TC->getX86Architecture(Index: I)); |
4505 | } else { |
4506 | char Delim = getTarget().getTriple().isAArch64() ? '+' : ','; |
4507 | TC->getFeatures(Out&: Feats, Index: I, Delim); |
4508 | Options.emplace_back(Args&: Func, Args&: Feats); |
4509 | } |
4510 | } |
4511 | } else |
4512 | llvm_unreachable("unexpected MultiVersionKind" ); |
4513 | }); |
4514 | |
4515 | if (!ShouldEmitResolver) |
4516 | continue; |
4517 | |
4518 | llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD); |
4519 | if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(Val: ResolverConstant)) { |
4520 | ResolverConstant = IFunc->getResolver(); |
4521 | if (FD->isTargetClonesMultiVersion() && |
4522 | !getTarget().getTriple().isAArch64()) { |
4523 | std::string MangledName = getMangledNameImpl( |
4524 | CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
4525 | if (!GetGlobalValue(Name: MangledName + ".ifunc" )) { |
4526 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
4527 | llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI); |
4528 | // In prior versions of Clang, the mangling for ifuncs incorrectly |
4529 | // included an .ifunc suffix. This alias is generated for backward |
4530 | // compatibility. It is deprecated, and may be removed in the future. |
4531 | auto *Alias = llvm::GlobalAlias::create( |
4532 | Ty: DeclTy, AddressSpace: 0, Linkage: getMultiversionLinkage(CGM&: *this, GD), |
4533 | Name: MangledName + ".ifunc" , Aliasee: IFunc, Parent: &getModule()); |
4534 | SetCommonAttributes(GD: FD, GV: Alias); |
4535 | } |
4536 | } |
4537 | } |
4538 | llvm::Function *ResolverFunc = cast<llvm::Function>(Val: ResolverConstant); |
4539 | |
4540 | ResolverFunc->setLinkage(getMultiversionLinkage(CGM&: *this, GD)); |
4541 | |
4542 | if (!ResolverFunc->hasLocalLinkage() && supportsCOMDAT()) |
4543 | ResolverFunc->setComdat( |
4544 | getModule().getOrInsertComdat(Name: ResolverFunc->getName())); |
4545 | |
4546 | const TargetInfo &TI = getTarget(); |
4547 | llvm::stable_sort( |
4548 | Range&: Options, C: [&TI](const CodeGenFunction::FMVResolverOption &LHS, |
4549 | const CodeGenFunction::FMVResolverOption &RHS) { |
4550 | return getFMVPriority(TI, RO: LHS) > getFMVPriority(TI, RO: RHS); |
4551 | }); |
4552 | CodeGenFunction CGF(*this); |
4553 | CGF.EmitMultiVersionResolver(Resolver: ResolverFunc, Options); |
4554 | } |
4555 | |
4556 | // Ensure that any additions to the deferred decls list caused by emitting a |
4557 | // variant are emitted. This can happen when the variant itself is inline and |
4558 | // calls a function without linkage. |
4559 | if (!MVFuncsToEmit.empty()) |
4560 | EmitDeferred(); |
4561 | |
4562 | // Ensure that any additions to the multiversion funcs list from either the |
4563 | // deferred decls or the multiversion functions themselves are emitted. |
4564 | if (!MultiVersionFuncs.empty()) |
4565 | emitMultiVersionFunctions(); |
4566 | } |
4567 | |
4568 | static void replaceDeclarationWith(llvm::GlobalValue *Old, |
4569 | llvm::Constant *New) { |
4570 | assert(cast<llvm::Function>(Old)->isDeclaration() && "Not a declaration" ); |
4571 | New->takeName(V: Old); |
4572 | Old->replaceAllUsesWith(V: New); |
4573 | Old->eraseFromParent(); |
4574 | } |
4575 | |
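// emitCPUDispatchDefinition builds the resolver for a cpu_dispatch function:
// one option per listed CPU, sorted by the X86 cpu-supports mask, with
// redundant 'default' entries pruned. On targets with ifunc support the
// resolver is additionally published through a GlobalIFunc and an unmangled
// alias.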
4576 | void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) { |
4577 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4578 | assert(FD && "Not a FunctionDecl?" ); |
4579 | assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?" ); |
4580 | const auto *DD = FD->getAttr<CPUDispatchAttr>(); |
4581 | assert(DD && "Not a cpu_dispatch Function?" ); |
4582 | |
4583 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
4584 | llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI); |
4585 | |
4586 | StringRef ResolverName = getMangledName(GD); |
4587 | UpdateMultiVersionNames(GD, FD, CurName&: ResolverName); |
4588 | |
4589 | llvm::Type *ResolverType; |
4590 | GlobalDecl ResolverGD; |
4591 | if (getTarget().supportsIFunc()) { |
4592 | ResolverType = llvm::FunctionType::get( |
4593 | Result: llvm::PointerType::get(C&: getLLVMContext(), |
4594 | AddressSpace: getTypes().getTargetAddressSpace(T: FD->getType())), |
4595 | isVarArg: false); |
4596 | } else {
4598 | ResolverType = DeclTy; |
4599 | ResolverGD = GD; |
4600 | } |
4601 | |
4602 | auto *ResolverFunc = cast<llvm::Function>(Val: GetOrCreateLLVMFunction( |
4603 | MangledName: ResolverName, Ty: ResolverType, D: ResolverGD, /*ForVTable=*/false)); |
4604 | ResolverFunc->setLinkage(getMultiversionLinkage(CGM&: *this, GD)); |
4605 | if (supportsCOMDAT()) |
4606 | ResolverFunc->setComdat( |
4607 | getModule().getOrInsertComdat(Name: ResolverFunc->getName())); |
4608 | |
4609 | SmallVector<CodeGenFunction::FMVResolverOption, 10> Options; |
4610 | const TargetInfo &Target = getTarget(); |
4611 | unsigned Index = 0; |
4612 | for (const IdentifierInfo *II : DD->cpus()) { |
4613 | // Get the name of the target function so we can look it up/create it. |
4614 | std::string MangledName = getMangledNameImpl(CGM&: *this, GD, ND: FD, OmitMultiVersionMangling: true) + |
4615 | getCPUSpecificMangling(CGM: *this, Name: II->getName()); |
4616 | |
4617 | llvm::Constant *Func = GetGlobalValue(Name: MangledName); |
4618 | |
4619 | if (!Func) { |
4620 | GlobalDecl ExistingDecl = Manglings.lookup(Key: MangledName); |
4621 | if (ExistingDecl.getDecl() && |
4622 | ExistingDecl.getDecl()->getAsFunction()->isDefined()) { |
4623 | EmitGlobalFunctionDefinition(GD: ExistingDecl, GV: nullptr); |
4624 | Func = GetGlobalValue(Name: MangledName); |
4625 | } else { |
4626 | if (!ExistingDecl.getDecl()) |
4627 | ExistingDecl = GD.getWithMultiVersionIndex(Index); |
4628 | |
4629 | Func = GetOrCreateLLVMFunction( |
4630 | MangledName, Ty: DeclTy, D: ExistingDecl, |
4631 | /*ForVTable=*/false, /*DontDefer=*/true, |
4632 | /*IsThunk=*/false, ExtraAttrs: llvm::AttributeList(), IsForDefinition: ForDefinition); |
4633 | } |
4634 | } |
4635 | |
4636 | llvm::SmallVector<StringRef, 32> Features; |
4637 | Target.getCPUSpecificCPUDispatchFeatures(Name: II->getName(), Features); |
4638 | llvm::transform(Range&: Features, d_first: Features.begin(), |
4639 | F: [](StringRef Str) { return Str.substr(Start: 1); }); |
4640 | llvm::erase_if(C&: Features, P: [&Target](StringRef Feat) { |
4641 | return !Target.validateCpuSupports(Name: Feat); |
4642 | }); |
4643 | Options.emplace_back(Args: cast<llvm::Function>(Val: Func), Args&: Features); |
4644 | ++Index; |
4645 | } |
4646 | |
4647 | llvm::stable_sort(Range&: Options, C: [](const CodeGenFunction::FMVResolverOption &LHS, |
4648 | const CodeGenFunction::FMVResolverOption &RHS) { |
4649 | return llvm::X86::getCpuSupportsMask(FeatureStrs: LHS.Features) > |
4650 | llvm::X86::getCpuSupportsMask(FeatureStrs: RHS.Features); |
4651 | }); |
4652 | |
4653 | // If the list contains multiple 'default' versions, such as when it contains
4654 | // 'pentium' and 'generic', don't emit the call to the generic one (since we
4655 | // always run on at least a 'pentium'). We do this by deleting the 'least
4656 | // advanced' one (that is, the one with the lowest mangling letter).
4657 | while (Options.size() > 1 && llvm::all_of(Range: llvm::X86::getCpuSupportsMask( |
4658 | FeatureStrs: (Options.end() - 2)->Features), |
4659 | P: [](auto X) { return X == 0; })) { |
4660 | StringRef LHSName = (Options.end() - 2)->Function->getName(); |
4661 | StringRef RHSName = (Options.end() - 1)->Function->getName(); |
4662 | if (LHSName.compare(RHS: RHSName) < 0) |
4663 | Options.erase(CI: Options.end() - 2); |
4664 | else |
4665 | Options.erase(CI: Options.end() - 1); |
4666 | } |
4667 | |
4668 | CodeGenFunction CGF(*this); |
4669 | CGF.EmitMultiVersionResolver(Resolver: ResolverFunc, Options); |
4670 | |
4671 | if (getTarget().supportsIFunc()) { |
4672 | llvm::GlobalValue::LinkageTypes Linkage = getMultiversionLinkage(CGM&: *this, GD); |
4673 | auto *IFunc = cast<llvm::GlobalValue>(Val: GetOrCreateMultiVersionResolver(GD)); |
4674 | unsigned AS = IFunc->getType()->getPointerAddressSpace(); |
4675 | |
4676 | // Fix up function declarations that were created for cpu_specific before
4677 | // cpu_dispatch was known.
4678 | if (!isa<llvm::GlobalIFunc>(Val: IFunc)) { |
4679 | auto *GI = llvm::GlobalIFunc::create(Ty: DeclTy, AddressSpace: AS, Linkage, Name: "" , |
4680 | Resolver: ResolverFunc, Parent: &getModule()); |
4681 | replaceDeclarationWith(Old: IFunc, New: GI); |
4682 | IFunc = GI; |
4683 | } |
4684 | |
4685 | std::string AliasName = getMangledNameImpl( |
4686 | CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
4687 | llvm::Constant *AliasFunc = GetGlobalValue(Name: AliasName); |
4688 | if (!AliasFunc) { |
4689 | auto *GA = llvm::GlobalAlias::create(Ty: DeclTy, AddressSpace: AS, Linkage, Name: AliasName, |
4690 | Aliasee: IFunc, Parent: &getModule()); |
4691 | SetCommonAttributes(GD, GV: GA); |
4692 | } |
4693 | } |
4694 | } |
4695 | |
4696 | /// Adds a declaration to the list of multi version functions if not present. |
4697 | void CodeGenModule::AddDeferredMultiVersionResolverToEmit(GlobalDecl GD) { |
4698 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4699 | assert(FD && "Not a FunctionDecl?" ); |
4700 | |
4701 | if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) { |
4702 | std::string MangledName = |
4703 | getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
4704 | if (!DeferredResolversToEmit.insert(key: MangledName).second) |
4705 | return; |
4706 | } |
4707 | MultiVersionFuncs.push_back(x: GD); |
4708 | } |
4709 | |
4710 | /// If a dispatcher for the specified mangled name is not in the module, create |
4711 | /// and return it. The dispatcher is either an llvm Function with the specified |
4712 | /// type, or a global ifunc. |
4713 | llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) { |
4714 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
4715 | assert(FD && "Not a FunctionDecl?" ); |
4716 | |
4717 | std::string MangledName = |
4718 | getMangledNameImpl(CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
4719 | |
4720 | // Holds the name of the resolver; in ifunc mode this is the ifunc (which has
4721 | // a separate resolver).
4722 | std::string ResolverName = MangledName; |
4723 | if (getTarget().supportsIFunc()) { |
4724 | switch (FD->getMultiVersionKind()) { |
4725 | case MultiVersionKind::None: |
4726 | llvm_unreachable("unexpected MultiVersionKind::None for resolver" ); |
4727 | case MultiVersionKind::Target: |
4728 | case MultiVersionKind::CPUSpecific: |
4729 | case MultiVersionKind::CPUDispatch: |
4730 | ResolverName += ".ifunc" ; |
4731 | break; |
4732 | case MultiVersionKind::TargetClones: |
4733 | case MultiVersionKind::TargetVersion: |
4734 | break; |
4735 | } |
4736 | } else if (FD->isTargetMultiVersion()) { |
4737 | ResolverName += ".resolver" ; |
4738 | } |
4739 | |
4740 | bool ShouldReturnIFunc = |
4741 | getTarget().supportsIFunc() && !FD->isCPUSpecificMultiVersion(); |
4742 | |
4743 | // If the resolver has already been created, just return it. This lookup may |
4744 | // yield a function declaration instead of a resolver on AArch64. That is |
4745 | // because we didn't know whether a resolver would be generated when we first
4746 | // encountered a use of the symbol named after this resolver. Therefore, |
4747 | // targets which support ifuncs should not return here unless we actually |
4748 | // found an ifunc. |
4749 | llvm::GlobalValue *ResolverGV = GetGlobalValue(Name: ResolverName); |
4750 | if (ResolverGV && (isa<llvm::GlobalIFunc>(Val: ResolverGV) || !ShouldReturnIFunc)) |
4751 | return ResolverGV; |
4752 | |
4753 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
4754 | llvm::FunctionType *DeclTy = getTypes().GetFunctionType(Info: FI); |
4755 | |
4756 | // The resolver needs to be created. For target and target_clones, defer |
4757 | // creation until the end of the TU. |
4758 | if (FD->isTargetMultiVersion() || FD->isTargetClonesMultiVersion()) |
4759 | AddDeferredMultiVersionResolverToEmit(GD); |
4760 | |
4761 | // For cpu_specific, don't create an ifunc yet because we don't know if the |
4762 | // cpu_dispatch will be emitted in this translation unit. |
4763 | if (ShouldReturnIFunc) { |
4764 | unsigned AS = getTypes().getTargetAddressSpace(T: FD->getType()); |
4765 | llvm::Type *ResolverType = llvm::FunctionType::get( |
4766 | Result: llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: AS), isVarArg: false); |
4767 | llvm::Constant *Resolver = GetOrCreateLLVMFunction( |
4768 | MangledName: MangledName + ".resolver" , Ty: ResolverType, D: GlobalDecl{}, |
4769 | /*ForVTable=*/false); |
4770 | llvm::GlobalIFunc *GIF = |
4771 | llvm::GlobalIFunc::create(Ty: DeclTy, AddressSpace: AS, Linkage: getMultiversionLinkage(CGM&: *this, GD), |
4772 | Name: "" , Resolver, Parent: &getModule()); |
4773 | GIF->setName(ResolverName); |
4774 | SetCommonAttributes(GD: FD, GV: GIF); |
4775 | if (ResolverGV) |
4776 | replaceDeclarationWith(Old: ResolverGV, New: GIF); |
4777 | return GIF; |
4778 | } |
4779 | |
4780 | llvm::Constant *Resolver = GetOrCreateLLVMFunction( |
4781 | MangledName: ResolverName, Ty: DeclTy, D: GlobalDecl{}, /*ForVTable=*/false); |
4782 | assert(isa<llvm::GlobalValue>(Resolver) && !ResolverGV && |
4783 | "Resolver should be created for the first time" ); |
4784 | SetCommonAttributes(GD: FD, GV: cast<llvm::GlobalValue>(Val: Resolver)); |
4785 | return Resolver; |
4786 | } |
4787 | |
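// A DLL storage class that was previously attached to a GlobalValue is dropped
// when the most recent redeclaration no longer carries the corresponding
// dllimport/dllexport attribute (and visibility is not being mapped to
// dllexport).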
4788 | bool CodeGenModule::shouldDropDLLAttribute(const Decl *D, |
4789 | const llvm::GlobalValue *GV) const { |
4790 | auto SC = GV->getDLLStorageClass(); |
4791 | if (SC == llvm::GlobalValue::DefaultStorageClass) |
4792 | return false; |
4793 | const Decl *MRD = D->getMostRecentDecl(); |
4794 | return (((SC == llvm::GlobalValue::DLLImportStorageClass && |
4795 | !MRD->hasAttr<DLLImportAttr>()) || |
4796 | (SC == llvm::GlobalValue::DLLExportStorageClass && |
4797 | !MRD->hasAttr<DLLExportAttr>())) && |
4798 | !shouldMapVisibilityToDLLExport(D: cast<NamedDecl>(Val: MRD))); |
4799 | } |
4800 | |
4801 | /// GetOrCreateLLVMFunction - If the specified mangled name is not in the
4802 | /// module, create and return an llvm::Function with the specified type. If
4803 | /// there is something in the module with the specified name, return it
4804 | /// potentially bitcasted to the right type.
4805 | ///
4806 | /// If D is non-null, it specifies a decl that corresponds to this. This is
4807 | /// used to set the attributes on the function when it is first created.
4808 | llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction( |
4809 | StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable, |
4810 | bool DontDefer, bool IsThunk, llvm::AttributeList ExtraAttrs,
4811 | ForDefinition_t IsForDefinition) { |
4812 | const Decl *D = GD.getDecl(); |
4813 | |
4814 | std::string NameWithoutMultiVersionMangling; |
4815 | if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(Val: D)) { |
4816 | // For the device, mark the function as one that should be emitted.
4817 | if (getLangOpts().OpenMPIsTargetDevice && OpenMPRuntime && |
4818 | !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() && |
4819 | !DontDefer && !IsForDefinition) { |
4820 | if (const FunctionDecl *FDDef = FD->getDefinition()) { |
4821 | GlobalDecl GDDef; |
4822 | if (const auto *CD = dyn_cast<CXXConstructorDecl>(Val: FDDef)) |
4823 | GDDef = GlobalDecl(CD, GD.getCtorType()); |
4824 | else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: FDDef)) |
4825 | GDDef = GlobalDecl(DD, GD.getDtorType()); |
4826 | else |
4827 | GDDef = GlobalDecl(FDDef); |
4828 | EmitGlobal(GD: GDDef); |
4829 | } |
4830 | } |
4831 | |
4832 | // Any attempt to use a multiversion function should result in retrieving the
4833 | // ifunc instead. Name mangling will handle the rest of the changes.
4834 | if (FD->isMultiVersion()) { |
4835 | UpdateMultiVersionNames(GD, FD, CurName&: MangledName); |
4836 | if (!IsForDefinition) { |
4837 | // On AArch64 we do not immediately emit an ifunc resolver when a
4838 | // function is used. Instead we defer the emission until we see a |
4839 | // default definition. In the meantime we just reference the symbol |
4840 | // without FMV mangling (it may or may not be replaced later). |
4841 | if (getTarget().getTriple().isAArch64()) { |
4842 | AddDeferredMultiVersionResolverToEmit(GD); |
4843 | NameWithoutMultiVersionMangling = getMangledNameImpl( |
4844 | CGM&: *this, GD, ND: FD, /*OmitMultiVersionMangling=*/true); |
4845 | } else |
4846 | return GetOrCreateMultiVersionResolver(GD); |
4847 | } |
4848 | } |
4849 | } |
4850 | |
4851 | if (!NameWithoutMultiVersionMangling.empty()) |
4852 | MangledName = NameWithoutMultiVersionMangling; |
4853 | |
4854 | // Lookup the entry, lazily creating it if necessary. |
4855 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
4856 | if (Entry) { |
4857 | if (WeakRefReferences.erase(Ptr: Entry)) { |
4858 | const FunctionDecl *FD = cast_or_null<FunctionDecl>(Val: D); |
4859 | if (FD && !FD->hasAttr<WeakAttr>()) |
4860 | Entry->setLinkage(llvm::Function::ExternalLinkage); |
4861 | } |
4862 | |
4863 | // Handle dropped DLL attributes. |
4864 | if (D && shouldDropDLLAttribute(D, GV: Entry)) { |
4865 | Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
4866 | setDSOLocal(Entry); |
4867 | } |
4868 | |
4869 | // If there are two attempts to define the same mangled name, issue an |
4870 | // error. |
4871 | if (IsForDefinition && !Entry->isDeclaration()) { |
4872 | GlobalDecl OtherGD; |
4873 | // Checking that GD is not yet in DiagnosedConflictingDefinitions is required
4874 | // to make sure that we issue an error only once.
4875 | if (lookupRepresentativeDecl(MangledName, Result&: OtherGD) && |
4876 | (GD.getCanonicalDecl().getDecl() != |
4877 | OtherGD.getCanonicalDecl().getDecl()) && |
4878 | DiagnosedConflictingDefinitions.insert(V: GD).second) { |
4879 | getDiags().Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name) |
4880 | << MangledName; |
4881 | getDiags().Report(Loc: OtherGD.getDecl()->getLocation(), |
4882 | DiagID: diag::note_previous_definition); |
4883 | } |
4884 | } |
4885 | |
4886 | if ((isa<llvm::Function>(Val: Entry) || isa<llvm::GlobalAlias>(Val: Entry)) && |
4887 | (Entry->getValueType() == Ty)) { |
4888 | return Entry; |
4889 | } |
4890 | |
4891 | // Make sure the result is of the correct type.
4892 | // (If the function is requested for a definition, we always need to create a
4893 | // new function, not just return a bitcast.)
4894 | if (!IsForDefinition) |
4895 | return Entry; |
4896 | } |
4897 | |
4898 | // This function doesn't have a complete type (for example, the return |
4899 | // type is an incomplete struct). Use a fake type instead, and make |
4900 | // sure not to try to set attributes. |
4901 | bool IsIncompleteFunction = false; |
4902 | |
4903 | llvm::FunctionType *FTy; |
4904 | if (isa<llvm::FunctionType>(Val: Ty)) { |
4905 | FTy = cast<llvm::FunctionType>(Val: Ty); |
4906 | } else { |
4907 | FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false); |
4908 | IsIncompleteFunction = true; |
4909 | } |
4910 | |
4911 | llvm::Function *F = |
4912 | llvm::Function::Create(Ty: FTy, Linkage: llvm::Function::ExternalLinkage, |
4913 | N: Entry ? StringRef() : MangledName, M: &getModule()); |
4914 | |
4915 | // Store the declaration associated with this function so it is potentially |
4916 | // updated by further declarations or definitions and emitted at the end. |
4917 | if (D && D->hasAttr<AnnotateAttr>()) |
4918 | DeferredAnnotations[MangledName] = cast<ValueDecl>(Val: D); |
4919 | |
4920 | // If we already created a function with the same mangled name (but different |
4921 | // type) before, take its name and add it to the list of functions to be |
4922 | // replaced with F at the end of CodeGen. |
4923 | // |
4924 | // This happens if there is a prototype for a function (e.g. "int f()") and |
4925 | // then a definition of a different type (e.g. "int f(int x)"). |
4926 | if (Entry) { |
4927 | F->takeName(V: Entry); |
4928 | |
4929 | // This might be an implementation of a function without a prototype, in
4930 | // which case try to do special replacement of calls which match the new
4931 | // prototype. The key thing here is that we also potentially drop
4932 | // arguments from the call site so as to make a direct call, which makes the |
4933 | // inliner happier and suppresses a number of optimizer warnings (!) about |
4934 | // dropping arguments. |
4935 | if (!Entry->use_empty()) { |
4936 | ReplaceUsesOfNonProtoTypeWithRealFunction(Old: Entry, NewFn: F); |
4937 | Entry->removeDeadConstantUsers(); |
4938 | } |
4939 | |
4940 | addGlobalValReplacement(GV: Entry, C: F); |
4941 | } |
4942 | |
4943 | assert(F->getName() == MangledName && "name was uniqued!" ); |
4944 | if (D) |
4945 | SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk); |
4946 | if (ExtraAttrs.hasFnAttrs()) { |
4947 | llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs()); |
4948 | F->addFnAttrs(Attrs: B); |
4949 | } |
4950 | |
4951 | if (!DontDefer) { |
4952 | // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
4953 | // each other, bottoming out at the base dtor. Therefore we emit non-base
4954 | // dtors on usage, even if there is no dtor definition in the TU. |
4955 | if (isa_and_nonnull<CXXDestructorDecl>(Val: D) && |
4956 | getCXXABI().useThunkForDtorVariant(Dtor: cast<CXXDestructorDecl>(Val: D), |
4957 | DT: GD.getDtorType())) |
4958 | addDeferredDeclToEmit(GD); |
4959 | |
4960 | // This is the first use or definition of a mangled name. If there is a |
4961 | // deferred decl with this name, remember that we need to emit it at the end |
4962 | // of the file. |
4963 | auto DDI = DeferredDecls.find(Val: MangledName); |
4964 | if (DDI != DeferredDecls.end()) { |
4965 | // Move the potentially referenced deferred decl to the |
4966 | // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we |
4967 | // don't need it anymore). |
4968 | addDeferredDeclToEmit(GD: DDI->second); |
4969 | DeferredDecls.erase(I: DDI); |
4970 | |
4971 | // Otherwise, there are cases we have to worry about where we're |
4972 | // using a declaration for which we must emit a definition but where |
4973 | // we might not find a top-level definition: |
4974 | // - member functions defined inline in their classes |
4975 | // - friend functions defined inline in some class |
4976 | // - special member functions with implicit definitions |
4977 | // If we ever change our AST traversal to walk into class methods, |
4978 | // this will be unnecessary. |
4979 | // |
4980 | // We also don't emit a definition for a function if it's going to be an |
4981 | // entry in a vtable, unless it's already marked as used. |
4982 | } else if (getLangOpts().CPlusPlus && D) { |
4983 | // Look for a declaration that's lexically in a record. |
4984 | for (const auto *FD = cast<FunctionDecl>(Val: D)->getMostRecentDecl(); FD; |
4985 | FD = FD->getPreviousDecl()) { |
4986 | if (isa<CXXRecordDecl>(Val: FD->getLexicalDeclContext())) { |
4987 | if (FD->doesThisDeclarationHaveABody()) { |
4988 | addDeferredDeclToEmit(GD: GD.getWithDecl(D: FD)); |
4989 | break; |
4990 | } |
4991 | } |
4992 | } |
4993 | } |
4994 | } |
4995 | |
4996 | // Make sure the result is of the requested type. |
4997 | if (!IsIncompleteFunction) { |
4998 | assert(F->getFunctionType() == Ty); |
4999 | return F; |
5000 | } |
5001 | |
5002 | return F; |
5003 | } |
5004 | |
5005 | /// GetAddrOfFunction - Return the address of the given function. If Ty is |
5006 | /// non-null, then this function will use the specified type if it has to |
5007 | /// create it (this occurs when we see a definition of the function). |
5008 | llvm::Constant * |
5009 | CodeGenModule::GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty, bool ForVTable, |
5010 | bool DontDefer, |
5011 | ForDefinition_t IsForDefinition) { |
5012 | // If there was no specific requested type, just convert it now. |
5013 | if (!Ty) { |
5014 | const auto *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
5015 | Ty = getTypes().ConvertType(T: FD->getType()); |
5016 | if (DeviceKernelAttr::isOpenCLSpelling(A: FD->getAttr<DeviceKernelAttr>()) && |
5017 | GD.getKernelReferenceKind() == KernelReferenceKind::Stub) { |
5018 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
5019 | Ty = getTypes().GetFunctionType(Info: FI); |
5020 | } |
5021 | } |
5022 | |
5023 | // Devirtualized destructor calls may come through here instead of via |
5024 | // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead |
5025 | // of the complete destructor when necessary. |
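// (When the class has no virtual bases, the complete destructor is equivalent
// to the base destructor, and the MS ABI only emits the base variant.)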
5026 | if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: GD.getDecl())) { |
5027 | if (getTarget().getCXXABI().isMicrosoft() && |
5028 | GD.getDtorType() == Dtor_Complete && |
5029 | DD->getParent()->getNumVBases() == 0) |
5030 | GD = GlobalDecl(DD, Dtor_Base); |
5031 | } |
5032 | |
5033 | StringRef MangledName = getMangledName(GD); |
5034 | auto *F = GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer, |
5035 | /*IsThunk=*/false, ExtraAttrs: llvm::AttributeList(), |
5036 | IsForDefinition); |
5037 | // Returns kernel handle for HIP kernel stub function. |
5038 | if (LangOpts.CUDA && !LangOpts.CUDAIsDevice && |
5039 | cast<FunctionDecl>(Val: GD.getDecl())->hasAttr<CUDAGlobalAttr>()) { |
5040 | auto *Handle = getCUDARuntime().getKernelHandle( |
5041 | Stub: cast<llvm::Function>(Val: F->stripPointerCasts()), GD); |
5042 | if (IsForDefinition) |
5043 | return F; |
5044 | return Handle; |
5045 | } |
5046 | return F; |
5047 | } |
5048 | |
5049 | llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) { |
5050 | llvm::GlobalValue *F = |
5051 | cast<llvm::GlobalValue>(Val: GetAddrOfFunction(GD: Decl)->stripPointerCasts()); |
5052 | |
5053 | return llvm::NoCFIValue::get(GV: F); |
5054 | } |
5055 | |
5056 | static const FunctionDecl * |
5057 | GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) { |
5058 | TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl(); |
5059 | DeclContext *DC = TranslationUnitDecl::castToDeclContext(D: TUDecl); |
5060 | |
5061 | IdentifierInfo &CII = C.Idents.get(Name); |
5062 | for (const auto *Result : DC->lookup(Name: &CII)) |
5063 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: Result)) |
5064 | return FD; |
5065 | |
5066 | if (!C.getLangOpts().CPlusPlus) |
5067 | return nullptr; |
5068 | |
5069 | // Demangle the premangled name from getTerminateFn() |
5070 | IdentifierInfo &CXXII = |
5071 | (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ" ) |
5072 | ? C.Idents.get(Name: "terminate" ) |
5073 | : C.Idents.get(Name); |
5074 | |
5075 | for (const auto &N : {"__cxxabiv1" , "std" }) { |
5076 | IdentifierInfo &NS = C.Idents.get(Name: N); |
5077 | for (const auto *Result : DC->lookup(Name: &NS)) { |
5078 | const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Val: Result); |
5079 | if (auto *LSD = dyn_cast<LinkageSpecDecl>(Val: Result)) |
5080 | for (const auto *Result : LSD->lookup(Name: &NS)) |
5081 | if ((ND = dyn_cast<NamespaceDecl>(Val: Result))) |
5082 | break; |
5083 | |
5084 | if (ND) |
5085 | for (const auto *Result : ND->lookup(Name: &CXXII)) |
5086 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: Result)) |
5087 | return FD; |
5088 | } |
5089 | } |
5090 | |
5091 | return nullptr; |
5092 | } |
5093 | |
5094 | static void setWindowsItaniumDLLImport(CodeGenModule &CGM, bool Local, |
5095 | llvm::Function *F, StringRef Name) { |
5096 | // In Windows Itanium environments, try to mark runtime functions |
5097 | // dllimport. For MinGW and MSVC, don't. We don't really know whether the user |
5098 | // will link their standard library statically or dynamically. Marking |
5099 | // functions imported when they are not imported can cause linker errors |
5100 | // and warnings. |
5101 | if (!Local && CGM.getTriple().isWindowsItaniumEnvironment() && |
5102 | !CGM.getCodeGenOpts().LTOVisibilityPublicStd) { |
5103 | const FunctionDecl *FD = GetRuntimeFunctionDecl(C&: CGM.getContext(), Name); |
5104 | if (!FD || FD->hasAttr<DLLImportAttr>()) { |
5105 | F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); |
5106 | F->setLinkage(llvm::GlobalValue::ExternalLinkage); |
5107 | } |
5108 | } |
5109 | } |
5110 | |
5111 | llvm::FunctionCallee CodeGenModule::CreateRuntimeFunction( |
5112 | QualType ReturnTy, ArrayRef<QualType> ArgTys, StringRef Name, |
5113 | llvm::AttributeList ExtraAttrs, bool Local, bool AssumeConvergent) { |
5114 | if (AssumeConvergent) { |
5115 | ExtraAttrs = |
5116 | ExtraAttrs.addFnAttribute(C&: VMContext, Kind: llvm::Attribute::Convergent); |
5117 | } |
5118 | |
5119 | QualType FTy = Context.getFunctionType(ResultTy: ReturnTy, Args: ArgTys, |
5120 | EPI: FunctionProtoType::ExtProtoInfo()); |
5121 | const CGFunctionInfo &Info = getTypes().arrangeFreeFunctionType( |
5122 | Ty: Context.getCanonicalType(T: FTy).castAs<FunctionProtoType>()); |
5123 | auto *ConvTy = getTypes().GetFunctionType(Info); |
5124 | llvm::Constant *C = GetOrCreateLLVMFunction( |
5125 | MangledName: Name, Ty: ConvTy, GD: GlobalDecl(), /*ForVTable=*/false, |
5126 | /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs); |
5127 | |
5128 | if (auto *F = dyn_cast<llvm::Function>(Val: C)) { |
5129 | if (F->empty()) { |
5130 | SetLLVMFunctionAttributes(GD: GlobalDecl(), Info, F, /*IsThunk*/ false); |
5131 | // FIXME: Set calling-conv properly in ExtProtoInfo |
5132 | F->setCallingConv(getRuntimeCC()); |
5133 | setWindowsItaniumDLLImport(CGM&: *this, Local, F, Name); |
5134 | setDSOLocal(F); |
5135 | } |
5136 | } |
5137 | return {ConvTy, C}; |
5138 | } |
5139 | |
5140 | /// CreateRuntimeFunction - Create a new runtime function with the specified |
5141 | /// type and name. |
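/// A minimal illustrative use (hypothetical runtime helper name):
/// \code
///   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(
///       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false),
///       "__example_runtime_helper");
/// \endcode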
5142 | llvm::FunctionCallee |
5143 | CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name, |
5144 | llvm::AttributeList ExtraAttrs, bool Local, |
5145 | bool AssumeConvergent) { |
5146 | if (AssumeConvergent) { |
5147 | ExtraAttrs = |
5148 | ExtraAttrs.addFnAttribute(C&: VMContext, Kind: llvm::Attribute::Convergent); |
5149 | } |
5150 | |
5151 | llvm::Constant *C = |
5152 | GetOrCreateLLVMFunction(MangledName: Name, Ty: FTy, GD: GlobalDecl(), /*ForVTable=*/false, |
5153 | /*DontDefer=*/false, /*IsThunk=*/false, |
5154 | ExtraAttrs); |
5155 | |
5156 | if (auto *F = dyn_cast<llvm::Function>(Val: C)) { |
5157 | if (F->empty()) { |
5158 | F->setCallingConv(getRuntimeCC()); |
5159 | setWindowsItaniumDLLImport(CGM&: *this, Local, F, Name); |
5160 | setDSOLocal(F); |
5161 | // FIXME: We should use CodeGenModule::SetLLVMFunctionAttributes() instead |
5162 | // of trying to approximate the attributes using the LLVM function |
5163 | // signature. The other overload of CreateRuntimeFunction does this; it |
5164 | // should be used for new code. |
5165 | markRegisterParameterAttributes(F); |
5166 | } |
5167 | } |
5168 | |
5169 | return {FTy, C}; |
5170 | } |
5171 | |
5172 | /// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module, |
5173 | /// create and return an llvm GlobalVariable with the specified type and address |
5174 | /// space. If there is something in the module with the specified name, return |
5175 | /// it potentially bitcasted to the right type. |
5176 | /// |
5177 | /// If D is non-null, it specifies a decl that corresponds to this. This is used |
5178 | /// to set the attributes on the global when it is first created. |
5179 | /// |
5180 | /// If IsForDefinition is true, it is guaranteed that an actual global with |
5181 | /// type Ty will be returned, not a conversion of a variable with the same |
5182 | /// mangled name but some other type. |
5183 | llvm::Constant * |
5184 | CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty, |
5185 | LangAS AddrSpace, const VarDecl *D, |
5186 | ForDefinition_t IsForDefinition) { |
5187 | // Lookup the entry, lazily creating it if necessary. |
5188 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
5189 | unsigned TargetAS = getContext().getTargetAddressSpace(AS: AddrSpace); |
5190 | if (Entry) { |
5191 | if (WeakRefReferences.erase(Ptr: Entry)) { |
5192 | if (D && !D->hasAttr<WeakAttr>()) |
5193 | Entry->setLinkage(llvm::Function::ExternalLinkage); |
5194 | } |
5195 | |
5196 | // Handle dropped DLL attributes. |
5197 | if (D && shouldDropDLLAttribute(D, GV: Entry)) |
5198 | Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
5199 | |
5200 | if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D) |
5201 | getOpenMPRuntime().registerTargetGlobalVariable(VD: D, Addr: Entry); |
5202 | |
5203 | if (Entry->getValueType() == Ty && Entry->getAddressSpace() == TargetAS) |
5204 | return Entry; |
5205 | |
5206 | // If there are two attempts to define the same mangled name, issue an |
5207 | // error. |
5208 | if (IsForDefinition && !Entry->isDeclaration()) { |
5209 | GlobalDecl OtherGD; |
5210 | const VarDecl *OtherD; |
5211 | |
5212 | // Checking that D is not yet in DiagnosedConflictingDefinitions is required |
5213 | // to make sure that we issue the error only once. |
5214 | if (D && lookupRepresentativeDecl(MangledName, Result&: OtherGD) && |
5215 | (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) && |
5216 | (OtherD = dyn_cast<VarDecl>(Val: OtherGD.getDecl())) && |
5217 | OtherD->hasInit() && |
5218 | DiagnosedConflictingDefinitions.insert(V: D).second) { |
5219 | getDiags().Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name) |
5220 | << MangledName; |
5221 | getDiags().Report(Loc: OtherGD.getDecl()->getLocation(), |
5222 | DiagID: diag::note_previous_definition); |
5223 | } |
5224 | } |
5225 | |
5226 | // Make sure the result is of the correct type. |
5227 | if (Entry->getType()->getAddressSpace() != TargetAS) |
5228 | return llvm::ConstantExpr::getAddrSpaceCast( |
5229 | C: Entry, Ty: llvm::PointerType::get(C&: Ty->getContext(), AddressSpace: TargetAS)); |
5230 | |
5231 | // (If the global is requested for a definition, we always need to create a new |
5232 | // global, not just return a bitcast.) |
5233 | if (!IsForDefinition) |
5234 | return Entry; |
5235 | } |
5236 | |
5237 | auto DAddrSpace = GetGlobalVarAddressSpace(D); |
5238 | |
5239 | auto *GV = new llvm::GlobalVariable( |
5240 | getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr, |
5241 | MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal, |
5242 | getContext().getTargetAddressSpace(AS: DAddrSpace)); |
5243 | |
5244 | // If we already created a global with the same mangled name (but different |
5245 | // type) before, take its name and remove it from its parent. |
5246 | if (Entry) { |
5247 | GV->takeName(V: Entry); |
5248 | |
5249 | if (!Entry->use_empty()) { |
5250 | Entry->replaceAllUsesWith(V: GV); |
5251 | } |
5252 | |
5253 | Entry->eraseFromParent(); |
5254 | } |
5255 | |
5256 | // This is the first use or definition of a mangled name. If there is a |
5257 | // deferred decl with this name, remember that we need to emit it at the end |
5258 | // of the file. |
5259 | auto DDI = DeferredDecls.find(Val: MangledName); |
5260 | if (DDI != DeferredDecls.end()) { |
5261 | // Move the potentially referenced deferred decl to the DeferredDeclsToEmit |
5262 | // list, and remove it from DeferredDecls (since we don't need it anymore). |
5263 | addDeferredDeclToEmit(GD: DDI->second); |
5264 | DeferredDecls.erase(I: DDI); |
5265 | } |
5266 | |
5267 | // Handle things which are present even on external declarations. |
5268 | if (D) { |
5269 | if (LangOpts.OpenMP && !LangOpts.OpenMPSimd) |
5270 | getOpenMPRuntime().registerTargetGlobalVariable(VD: D, Addr: GV); |
5271 | |
5272 | // FIXME: This code is overly simple and should be merged with other global |
5273 | // handling. |
5274 | GV->setConstant(D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: false, ExcludeDtor: false)); |
5275 | |
5276 | GV->setAlignment(getContext().getDeclAlign(D).getAsAlign()); |
5277 | |
5278 | setLinkageForGV(GV, ND: D); |
5279 | |
5280 | if (D->getTLSKind()) { |
5281 | if (D->getTLSKind() == VarDecl::TLS_Dynamic) |
5282 | CXXThreadLocals.push_back(x: D); |
5283 | setTLSMode(GV, D: *D); |
5284 | } |
5285 | |
5286 | setGVProperties(GV, D); |
5287 | |
5288 | // If required by the ABI, treat declarations of static data members with |
5289 | // inline initializers as definitions. |
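// For example, in the MSVC ABI a reference to 'S::x' where
//   struct S { static const int x = 42; };
// has no out-of-class definition is treated as the point of definition.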
5290 | if (getContext().isMSStaticDataMemberInlineDefinition(VD: D)) { |
5291 | EmitGlobalVarDefinition(D); |
5292 | } |
5293 | |
5294 | // Emit section information for extern variables. |
5295 | if (D->hasExternalStorage()) { |
5296 | if (const SectionAttr *SA = D->getAttr<SectionAttr>()) |
5297 | GV->setSection(SA->getName()); |
5298 | } |
5299 | |
5300 | // Handle XCore specific ABI requirements. |
5301 | if (getTriple().getArch() == llvm::Triple::xcore && |
5302 | D->getLanguageLinkage() == CLanguageLinkage && |
5303 | D->getType().isConstant(Ctx: Context) && |
5304 | isExternallyVisible(L: D->getLinkageAndVisibility().getLinkage())) |
5305 | GV->setSection(".cp.rodata" ); |
5306 | |
5307 | // Handle code model attribute |
5308 | if (const auto *CMA = D->getAttr<CodeModelAttr>()) |
5309 | GV->setCodeModel(CMA->getModel()); |
5310 | |
5311 | // If we have a const declaration with an initializer, we may be able to |
5312 | // emit it as available_externally to expose its value to the |
5313 | // optimizer. |
5314 | if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() && |
5315 | D->getType().isConstQualified() && !GV->hasInitializer() && |
5316 | !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) { |
5317 | const auto *Record = |
5318 | Context.getBaseElementType(QT: D->getType())->getAsCXXRecordDecl(); |
5319 | bool HasMutableFields = Record && Record->hasMutableFields(); |
5320 | if (!HasMutableFields) { |
5321 | const VarDecl *InitDecl; |
5322 | const Expr *InitExpr = D->getAnyInitializer(D&: InitDecl); |
5323 | if (InitExpr) { |
5324 | ConstantEmitter emitter(*this); |
5325 | llvm::Constant *Init = emitter.tryEmitForInitializer(D: *InitDecl); |
5326 | if (Init) { |
5327 | auto *InitType = Init->getType(); |
5328 | if (GV->getValueType() != InitType) { |
5329 | // The type of the initializer does not match the definition. |
5330 | // This happens when an initializer has a different type from |
5331 | // the type of the global (because of padding at the end of a |
5332 | // structure for instance). |
5333 | GV->setName(StringRef()); |
5334 | // Make a new global with the correct type, this is now guaranteed |
5335 | // to work. |
5336 | auto *NewGV = cast<llvm::GlobalVariable>( |
5337 | Val: GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition) |
5338 | ->stripPointerCasts()); |
5339 | |
5340 | // Erase the old global, since it is no longer used. |
5341 | GV->eraseFromParent(); |
5342 | GV = NewGV; |
5343 | } else { |
5344 | GV->setInitializer(Init); |
5345 | GV->setConstant(true); |
5346 | GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage); |
5347 | } |
5348 | emitter.finalize(global: GV); |
5349 | } |
5350 | } |
5351 | } |
5352 | } |
5353 | } |
5354 | |
5355 | if (D && |
5356 | D->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly) { |
5357 | getTargetCodeGenInfo().setTargetAttributes(D, GV, M&: *this); |
5358 | // External HIP managed variables need to be recorded for transformation |
5359 | // in both device and host compilations. |
5360 | if (getLangOpts().CUDA && D && D->hasAttr<HIPManagedAttr>() && |
5361 | D->hasExternalStorage()) |
5362 | getCUDARuntime().handleVarRegistration(VD: D, Var&: *GV); |
5363 | } |
5364 | |
5365 | if (D) |
5366 | SanitizerMD->reportGlobal(GV, D: *D); |
5367 | |
5368 | LangAS ExpectedAS = |
5369 | D ? D->getType().getAddressSpace() |
5370 | : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default); |
5371 | assert(getContext().getTargetAddressSpace(ExpectedAS) == TargetAS); |
5372 | if (DAddrSpace != ExpectedAS) { |
5373 | return getTargetCodeGenInfo().performAddrSpaceCast( |
5374 | CGM&: *this, V: GV, SrcAddr: DAddrSpace, |
5375 | DestTy: llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: TargetAS)); |
5376 | } |
5377 | |
5378 | return GV; |
5379 | } |
5380 | |
5381 | llvm::Constant * |
5382 | CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { |
5383 | const Decl *D = GD.getDecl(); |
5384 | |
5385 | if (isa<CXXConstructorDecl>(Val: D) || isa<CXXDestructorDecl>(Val: D)) |
5386 | return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr, |
5387 | /*DontDefer=*/false, IsForDefinition); |
5388 | |
5389 | if (isa<CXXMethodDecl>(Val: D)) { |
5390 | auto FInfo = |
5391 | &getTypes().arrangeCXXMethodDeclaration(MD: cast<CXXMethodDecl>(Val: D)); |
5392 | auto Ty = getTypes().GetFunctionType(Info: *FInfo); |
5393 | return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false, |
5394 | IsForDefinition); |
5395 | } |
5396 | |
5397 | if (isa<FunctionDecl>(Val: D)) { |
5398 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
5399 | llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI); |
5400 | return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false, |
5401 | IsForDefinition); |
5402 | } |
5403 | |
5404 | return GetAddrOfGlobalVar(D: cast<VarDecl>(Val: D), /*Ty=*/nullptr, IsForDefinition); |
5405 | } |
5406 | |
5407 | llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable( |
5408 | StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage, |
5409 | llvm::Align Alignment) { |
5410 | llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name); |
5411 | llvm::GlobalVariable *OldGV = nullptr; |
5412 | |
5413 | if (GV) { |
5414 | // Check if the variable has the right type. |
5415 | if (GV->getValueType() == Ty) |
5416 | return GV; |
5417 | |
5418 | // Because of C++ name mangling, the only way we can end up with an already |
5419 | // existing global with the same name is if it has been declared extern "C". |
5420 | assert(GV->isDeclaration() && "Declaration has wrong type!" ); |
5421 | OldGV = GV; |
5422 | } |
5423 | |
5424 | // Create a new variable. |
5425 | GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true, |
5426 | Linkage, nullptr, Name); |
5427 | |
5428 | if (OldGV) { |
5429 | // Replace occurrences of the old variable if needed. |
5430 | GV->takeName(V: OldGV); |
5431 | |
5432 | if (!OldGV->use_empty()) { |
5433 | OldGV->replaceAllUsesWith(V: GV); |
5434 | } |
5435 | |
5436 | OldGV->eraseFromParent(); |
5437 | } |
5438 | |
5439 | if (supportsCOMDAT() && GV->isWeakForLinker() && |
5440 | !GV->hasAvailableExternallyLinkage()) |
5441 | GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName())); |
5442 | |
5443 | GV->setAlignment(Alignment); |
5444 | |
5445 | return GV; |
5446 | } |
5447 | |
5448 | /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the |
5449 | /// given global variable. If Ty is non-null and if the global doesn't exist, |
5450 | /// then it will be created with the specified type instead of whatever the |
5451 | /// normal requested type would be. If IsForDefinition is true, it is guaranteed |
5452 | /// that an actual global with type Ty will be returned, not a conversion of a |
5453 | /// variable with the same mangled name but some other type. |
5454 | llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D, |
5455 | llvm::Type *Ty, |
5456 | ForDefinition_t IsForDefinition) { |
5457 | assert(D->hasGlobalStorage() && "Not a global variable" ); |
5458 | QualType ASTTy = D->getType(); |
5459 | if (!Ty) |
5460 | Ty = getTypes().ConvertTypeForMem(T: ASTTy); |
5461 | |
5462 | StringRef MangledName = getMangledName(GD: D); |
5463 | return GetOrCreateLLVMGlobal(MangledName, Ty, AddrSpace: ASTTy.getAddressSpace(), D, |
5464 | IsForDefinition); |
5465 | } |
5466 | |
5467 | /// CreateRuntimeVariable - Create a new runtime global variable with the |
5468 | /// specified type and name. |
5469 | llvm::Constant * |
5470 | CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty, |
5471 | StringRef Name) { |
5472 | LangAS AddrSpace = getContext().getLangOpts().OpenCL ? LangAS::opencl_global |
5473 | : LangAS::Default; |
5474 | auto *Ret = GetOrCreateLLVMGlobal(MangledName: Name, Ty, AddrSpace, D: nullptr); |
5475 | setDSOLocal(cast<llvm::GlobalValue>(Val: Ret->stripPointerCasts())); |
5476 | return Ret; |
5477 | } |
5478 | |
5479 | void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) { |
5480 | assert(!D->getInit() && "Cannot emit definite definitions here!" ); |
5481 | |
5482 | StringRef MangledName = getMangledName(GD: D); |
5483 | llvm::GlobalValue *GV = GetGlobalValue(Name: MangledName); |
5484 | |
5485 | // We already have a definition, not a declaration, with the same mangled |
5486 | // name. Emitting the declaration is not required (and would actually |
5487 | // overwrite the emitted definition). |
5488 | if (GV && !GV->isDeclaration()) |
5489 | return; |
5490 | |
5491 | // If we have not seen a reference to this variable yet, place it into the |
5492 | // deferred declarations table to be emitted if needed later. |
5493 | if (!MustBeEmitted(Global: D) && !GV) { |
5494 | DeferredDecls[MangledName] = D; |
5495 | return; |
5496 | } |
5497 | |
5498 | // The tentative definition is the only definition. |
5499 | EmitGlobalVarDefinition(D); |
5500 | } |
5501 | |
5502 | // Return a GlobalDecl. Use the base variants for destructors and constructors. |
5503 | static GlobalDecl getBaseVariantGlobalDecl(const NamedDecl *D) { |
5504 | if (auto const *CD = dyn_cast<const CXXConstructorDecl>(Val: D)) |
5505 | return GlobalDecl(CD, CXXCtorType::Ctor_Base); |
5506 | else if (auto const *DD = dyn_cast<const CXXDestructorDecl>(Val: D)) |
5507 | return GlobalDecl(DD, CXXDtorType::Dtor_Base); |
5508 | return GlobalDecl(D); |
5509 | } |
5510 | |
5511 | void CodeGenModule::EmitExternalDeclaration(const DeclaratorDecl *D) { |
5512 | CGDebugInfo *DI = getModuleDebugInfo(); |
5513 | if (!DI || !getCodeGenOpts().hasReducedDebugInfo()) |
5514 | return; |
5515 | |
5516 | GlobalDecl GD = getBaseVariantGlobalDecl(D); |
5517 | if (!GD) |
5518 | return; |
5519 | |
5520 | llvm::Constant *Addr = GetAddrOfGlobal(GD)->stripPointerCasts(); |
5521 | if (const auto *VD = dyn_cast<VarDecl>(Val: D)) { |
5522 | DI->EmitExternalVariable( |
5523 | GV: cast<llvm::GlobalVariable>(Val: Addr->stripPointerCasts()), Decl: VD); |
5524 | } else if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) { |
5525 | llvm::Function *Fn = cast<llvm::Function>(Val: Addr); |
5526 | if (!Fn->getSubprogram()) |
5527 | DI->EmitFunctionDecl(GD, Loc: FD->getLocation(), FnType: FD->getType(), Fn); |
5528 | } |
5529 | } |
5530 | |
5531 | CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const { |
5532 | return Context.toCharUnitsFromBits( |
5533 | BitSize: getDataLayout().getTypeStoreSizeInBits(Ty)); |
5534 | } |
5535 | |
5536 | LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) { |
5537 | if (LangOpts.OpenCL) { |
5538 | LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global; |
5539 | assert(AS == LangAS::opencl_global || |
5540 | AS == LangAS::opencl_global_device || |
5541 | AS == LangAS::opencl_global_host || |
5542 | AS == LangAS::opencl_constant || |
5543 | AS == LangAS::opencl_local || |
5544 | AS >= LangAS::FirstTargetAddressSpace); |
5545 | return AS; |
5546 | } |
5547 | |
5548 | if (LangOpts.SYCLIsDevice && |
5549 | (!D || D->getType().getAddressSpace() == LangAS::Default)) |
5550 | return LangAS::sycl_global; |
5551 | |
5552 | if (LangOpts.CUDA && LangOpts.CUDAIsDevice) { |
5553 | if (D) { |
5554 | if (D->hasAttr<CUDAConstantAttr>()) |
5555 | return LangAS::cuda_constant; |
5556 | if (D->hasAttr<CUDASharedAttr>()) |
5557 | return LangAS::cuda_shared; |
5558 | if (D->hasAttr<CUDADeviceAttr>()) |
5559 | return LangAS::cuda_device; |
5560 | if (D->getType().isConstQualified()) |
5561 | return LangAS::cuda_constant; |
5562 | } |
5563 | return LangAS::cuda_device; |
5564 | } |
5565 | |
5566 | if (LangOpts.OpenMP) { |
5567 | LangAS AS; |
5568 | if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(VD: D, AS)) |
5569 | return AS; |
5570 | } |
5571 | return getTargetCodeGenInfo().getGlobalVarAddressSpace(CGM&: *this, D); |
5572 | } |
5573 | |
5574 | LangAS CodeGenModule::GetGlobalConstantAddressSpace() const { |
5575 | // OpenCL v1.2 s6.5.3: a string literal is in the constant address space. |
5576 | if (LangOpts.OpenCL) |
5577 | return LangAS::opencl_constant; |
5578 | if (LangOpts.SYCLIsDevice) |
5579 | return LangAS::sycl_global; |
5580 | if (LangOpts.HIP && LangOpts.CUDAIsDevice && getTriple().isSPIRV()) |
5581 | // For HIPSPV map literals to cuda_device (maps to CrossWorkGroup in SPIR-V) |
5582 | // instead of default AS (maps to Generic in SPIR-V). Otherwise, we end up |
5583 | // with OpVariable instructions with Generic storage class which is not |
5584 | // allowed (SPIR-V V1.6 s3.42.8). Also, mapping literals to SPIR-V |
5585 | // UniformConstant storage class is not viable as pointers to it may not be |
5586 | // casted to Generic pointers which are used to model HIP's "flat" pointers. |
5587 | return LangAS::cuda_device; |
5588 | if (auto AS = getTarget().getConstantAddressSpace()) |
5589 | return *AS; |
5590 | return LangAS::Default; |
5591 | } |
5592 | |
5593 | // In address space agnostic languages, string literals are in default address |
5594 | // space in AST. However, certain targets (e.g. amdgcn) request them to be |
5595 | // emitted in constant address space in LLVM IR. To be consistent with other |
5596 | // parts of the AST, string literal global variables in the constant address |
5597 | // space need to be cast to the default address space before being put into |
5598 | // the address map and referenced by other parts of CodeGen. |
5599 | // In OpenCL, string literals are already in the constant address space in the |
5600 | // AST, so they should not be cast to the default address space. |
5601 | static llvm::Constant * |
5602 | castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM, |
5603 | llvm::GlobalVariable *GV) { |
5604 | llvm::Constant *Cast = GV; |
5605 | if (!CGM.getLangOpts().OpenCL) { |
5606 | auto AS = CGM.GetGlobalConstantAddressSpace(); |
5607 | if (AS != LangAS::Default) |
5608 | Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast( |
5609 | CGM, V: GV, SrcAddr: AS, |
5610 | DestTy: llvm::PointerType::get( |
5611 | C&: CGM.getLLVMContext(), |
5612 | AddressSpace: CGM.getContext().getTargetAddressSpace(AS: LangAS::Default))); |
5613 | } |
5614 | return Cast; |
5615 | } |
5616 | |
5617 | template<typename SomeDecl> |
5618 | void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D, |
5619 | llvm::GlobalValue *GV) { |
5620 | if (!getLangOpts().CPlusPlus) |
5621 | return; |
5622 | |
5623 | // Must have 'used' attribute, or else inline assembly can't rely on |
5624 | // the name existing. |
5625 | if (!D->template hasAttr<UsedAttr>()) |
5626 | return; |
5627 | |
5628 | // Must have internal linkage and an ordinary name. |
5629 | if (!D->getIdentifier() || D->getFormalLinkage() != Linkage::Internal) |
5630 | return; |
5631 | |
5632 | // Must be in an extern "C" context. Entities declared directly within |
5633 | // a record are not extern "C" even if the record is in such a context. |
5634 | const SomeDecl *First = D->getFirstDecl(); |
5635 | if (First->getDeclContext()->isRecord() || !First->isInExternCContext()) |
5636 | return; |
5637 | |
5638 | // OK, this is an internal linkage entity inside an extern "C" linkage |
5639 | // specification. Make a note of that so we can give it the "expected" |
5640 | // mangled name if nothing else is using that name. |
5641 | std::pair<StaticExternCMap::iterator, bool> R = |
5642 | StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV)); |
5643 | |
5644 | // If we have multiple internal linkage entities with the same name |
5645 | // in extern "C" regions, none of them gets that name. |
5646 | if (!R.second) |
5647 | R.first->second = nullptr; |
5648 | } |
5649 | |
5650 | static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) { |
5651 | if (!CGM.supportsCOMDAT()) |
5652 | return false; |
5653 | |
5654 | if (D.hasAttr<SelectAnyAttr>()) |
5655 | return true; |
5656 | |
5657 | GVALinkage Linkage; |
5658 | if (auto *VD = dyn_cast<VarDecl>(Val: &D)) |
5659 | Linkage = CGM.getContext().GetGVALinkageForVariable(VD); |
5660 | else |
5661 | Linkage = CGM.getContext().GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: &D)); |
5662 | |
5663 | switch (Linkage) { |
5664 | case GVA_Internal: |
5665 | case GVA_AvailableExternally: |
5666 | case GVA_StrongExternal: |
5667 | return false; |
5668 | case GVA_DiscardableODR: |
5669 | case GVA_StrongODR: |
5670 | return true; |
5671 | } |
5672 | llvm_unreachable("No such linkage" ); |
5673 | } |
5674 | |
5675 | bool CodeGenModule::supportsCOMDAT() const { |
5676 | return getTriple().supportsCOMDAT(); |
5677 | } |
5678 | |
5679 | void CodeGenModule::maybeSetTrivialComdat(const Decl &D, |
5680 | llvm::GlobalObject &GO) { |
5681 | if (!shouldBeInCOMDAT(CGM&: *this, D)) |
5682 | return; |
5683 | GO.setComdat(TheModule.getOrInsertComdat(Name: GO.getName())); |
5684 | } |
5685 | |
5686 | const ABIInfo &CodeGenModule::getABIInfo() { |
5687 | return getTargetCodeGenInfo().getABIInfo(); |
5688 | } |
5689 | |
5690 | /// Pass IsTentative as true if you want to create a tentative definition. |
5691 | void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D, |
5692 | bool IsTentative) { |
5693 | // OpenCL global variables of sampler type are translated to function calls, |
5694 | // therefore they do not need to be emitted here. |
5695 | QualType ASTTy = D->getType(); |
5696 | if (getLangOpts().OpenCL && ASTTy->isSamplerT()) |
5697 | return; |
5698 | |
5699 | // HLSL default buffer constants will be emitted during HLSLBufferDecl codegen |
5700 | if (getLangOpts().HLSL && |
5701 | D->getType().getAddressSpace() == LangAS::hlsl_constant) |
5702 | return; |
5703 | |
5704 | // If this is OpenMP device, check if it is legal to emit this global |
5705 | // normally. |
5706 | if (LangOpts.OpenMPIsTargetDevice && OpenMPRuntime && |
5707 | OpenMPRuntime->emitTargetGlobalVariable(GD: D)) |
5708 | return; |
5709 | |
5710 | llvm::TrackingVH<llvm::Constant> Init; |
5711 | bool NeedsGlobalCtor = false; |
5712 | // Whether the definition of the variable is available externally. |
5713 | // If so, we shouldn't emit the GlobalCtor and GlobalDtor for the variable |
5714 | // since that is the job of the translation unit providing the definition. |
5715 | bool IsDefinitionAvailableExternally = |
5716 | getContext().GetGVALinkageForVariable(VD: D) == GVA_AvailableExternally; |
5717 | bool NeedsGlobalDtor = |
5718 | !IsDefinitionAvailableExternally && |
5719 | D->needsDestruction(Ctx: getContext()) == QualType::DK_cxx_destructor; |
5720 | |
5721 | // It is pointless to emit the definition for an available_externally variable |
5722 | // which can't be marked as const. |
5723 | // We don't need to check whether it needs a global ctor or dtor; see the |
5724 | // comment above for the reasoning. |
5725 | if (IsDefinitionAvailableExternally && |
5726 | (!D->hasConstantInitialization() || |
5727 | // TODO: Update this when we have interface to check constexpr |
5728 | // destructor. |
5729 | D->needsDestruction(Ctx: getContext()) || |
5730 | !D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: true))) |
5731 | return; |
5732 | |
5733 | const VarDecl *InitDecl; |
5734 | const Expr *InitExpr = D->getAnyInitializer(D&: InitDecl); |
5735 | |
5736 | std::optional<ConstantEmitter> emitter; |
5737 | |
5738 | // CUDA E.2.4.1 "__shared__ variables cannot have an initialization |
5739 | // as part of their declaration." Sema has already checked for |
5740 | // error cases, so we just need to set Init to UndefValue. |
5741 | bool IsCUDASharedVar = |
5742 | getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>(); |
5743 | // Shadows of initialized device-side global variables are also left |
5744 | // undefined. |
5745 | // Managed variables should be initialized on both the host and device sides. |
5746 | bool IsCUDAShadowVar = |
5747 | !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() && |
5748 | (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() || |
5749 | D->hasAttr<CUDASharedAttr>()); |
5750 | bool IsCUDADeviceShadowVar = |
5751 | getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() && |
5752 | (D->getType()->isCUDADeviceBuiltinSurfaceType() || |
5753 | D->getType()->isCUDADeviceBuiltinTextureType()); |
5754 | if (getLangOpts().CUDA && |
5755 | (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar)) |
5756 | Init = llvm::UndefValue::get(T: getTypes().ConvertTypeForMem(T: ASTTy)); |
5757 | else if (D->hasAttr<LoaderUninitializedAttr>()) |
5758 | Init = llvm::UndefValue::get(T: getTypes().ConvertTypeForMem(T: ASTTy)); |
5759 | else if (!InitExpr) { |
5760 | // This is a tentative definition; tentative definitions are |
5761 | // implicitly initialized with { 0 }. |
5762 | // |
5763 | // Note that tentative definitions are only emitted at the end of |
5764 | // a translation unit, so they should never have incomplete |
5765 | // type. In addition, EmitTentativeDefinition makes sure that we |
5766 | // never attempt to emit a tentative definition if a real one |
5767 | // exists. A use may still exist, however, so we may still need |
5768 | // to do a RAUW. |
5769 | assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type" ); |
5770 | Init = EmitNullConstant(T: D->getType()); |
5771 | } else { |
5772 | initializedGlobalDecl = GlobalDecl(D); |
5773 | emitter.emplace(args&: *this); |
5774 | llvm::Constant *Initializer = emitter->tryEmitForInitializer(D: *InitDecl); |
5775 | if (!Initializer) { |
5776 | QualType T = InitExpr->getType(); |
5777 | if (D->getType()->isReferenceType()) |
5778 | T = D->getType(); |
5779 | |
5780 | if (getLangOpts().HLSL && |
5781 | D->getType().getTypePtr()->isHLSLResourceRecord()) { |
5782 | Init = llvm::PoisonValue::get(T: getTypes().ConvertType(T: ASTTy)); |
5783 | NeedsGlobalCtor = true; |
5784 | } else if (getLangOpts().CPlusPlus) { |
5785 | Init = EmitNullConstant(T); |
5786 | if (!IsDefinitionAvailableExternally) |
5787 | NeedsGlobalCtor = true; |
5788 | if (InitDecl->hasFlexibleArrayInit(Ctx: getContext())) { |
5789 | ErrorUnsupported(D, Type: "flexible array initializer" ); |
5790 | // We cannot create a ctor for a flexible array initializer. |
5791 | NeedsGlobalCtor = false; |
5792 | } |
5793 | } else { |
5794 | ErrorUnsupported(D, Type: "static initializer" ); |
5795 | Init = llvm::PoisonValue::get(T: getTypes().ConvertType(T)); |
5796 | } |
5797 | } else { |
5798 | Init = Initializer; |
5799 | // We don't need an initializer, so remove the entry for the delayed |
5800 | // initializer position (just in case this entry was delayed) if we |
5801 | // also don't need to register a destructor. |
5802 | if (getLangOpts().CPlusPlus && !NeedsGlobalDtor) |
5803 | DelayedCXXInitPosition.erase(Val: D); |
5804 | |
5805 | #ifndef NDEBUG |
5806 | CharUnits VarSize = getContext().getTypeSizeInChars(ASTTy) + |
5807 | InitDecl->getFlexibleArrayInitChars(getContext()); |
5808 | CharUnits CstSize = CharUnits::fromQuantity( |
5809 | getDataLayout().getTypeAllocSize(Init->getType())); |
5810 | assert(VarSize == CstSize && "Emitted constant has unexpected size" ); |
5811 | #endif |
5812 | } |
5813 | } |
5814 | |
5815 | llvm::Type* InitType = Init->getType(); |
5816 | llvm::Constant *Entry = |
5817 | GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition: ForDefinition_t(!IsTentative)); |
5818 | |
5819 | // Strip off pointer casts if we got them. |
5820 | Entry = Entry->stripPointerCasts(); |
5821 | |
5822 | // Entry is now either a Function or GlobalVariable. |
5823 | auto *GV = dyn_cast<llvm::GlobalVariable>(Val: Entry); |
5824 | |
5825 | // We have a definition after a declaration with the wrong type. |
5826 | // We must make a new GlobalVariable* and update everything that used OldGV |
5827 | // (a declaration or tentative definition) with the new GlobalVariable* |
5828 | // (which will be a definition). |
5829 | // |
5830 | // This happens if there is a prototype for a global (e.g. |
5831 | // "extern int x[];") and then a definition of a different type (e.g. |
5832 | // "int x[10];"). This also happens when an initializer has a different type |
5833 | // from the type of the global (this happens with unions). |
5834 | if (!GV || GV->getValueType() != InitType || |
5835 | GV->getType()->getAddressSpace() != |
5836 | getContext().getTargetAddressSpace(AS: GetGlobalVarAddressSpace(D))) { |
5837 | |
5838 | // Move the old entry aside so that we'll create a new one. |
5839 | Entry->setName(StringRef()); |
5840 | |
5841 | // Make a new global with the correct type, this is now guaranteed to work. |
5842 | GV = cast<llvm::GlobalVariable>( |
5843 | Val: GetAddrOfGlobalVar(D, Ty: InitType, IsForDefinition: ForDefinition_t(!IsTentative)) |
5844 | ->stripPointerCasts()); |
5845 | |
5846 | // Replace all uses of the old global with the new global |
5847 | llvm::Constant *NewPtrForOldDecl = |
5848 | llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(C: GV, |
5849 | Ty: Entry->getType()); |
5850 | Entry->replaceAllUsesWith(V: NewPtrForOldDecl); |
5851 | |
5852 | // Erase the old global, since it is no longer used. |
5853 | cast<llvm::GlobalValue>(Val: Entry)->eraseFromParent(); |
5854 | } |
5855 | |
5856 | MaybeHandleStaticInExternC(D, GV); |
5857 | |
5858 | if (D->hasAttr<AnnotateAttr>()) |
5859 | AddGlobalAnnotations(D, GV); |
5860 | |
5861 | // Set the llvm linkage type as appropriate. |
5862 | llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD: D); |
5863 | |
5864 | // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on |
5865 | // the device. [...]" |
5866 | // CUDA B.2.2 "The __constant__ qualifier, optionally used together with |
5867 | // __device__, declares a variable that: [...] |
5868 | // Is accessible from all the threads within the grid and from the host |
5869 | // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize() |
5870 | // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())." |
5871 | if (LangOpts.CUDA) { |
5872 | if (LangOpts.CUDAIsDevice) { |
5873 | if (Linkage != llvm::GlobalValue::InternalLinkage && |
5874 | (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() || |
5875 | D->getType()->isCUDADeviceBuiltinSurfaceType() || |
5876 | D->getType()->isCUDADeviceBuiltinTextureType())) |
5877 | GV->setExternallyInitialized(true); |
5878 | } else { |
5879 | getCUDARuntime().internalizeDeviceSideVar(D, Linkage); |
5880 | } |
5881 | getCUDARuntime().handleVarRegistration(VD: D, Var&: *GV); |
5882 | } |
5883 | |
5884 | if (LangOpts.HLSL && GetGlobalVarAddressSpace(D) == LangAS::hlsl_input) { |
5885 | // HLSL Input variables are considered to be set by the driver/pipeline, but |
5886 | // only visible to a single thread/wave. |
5887 | GV->setExternallyInitialized(true); |
5888 | } else { |
5889 | GV->setInitializer(Init); |
5890 | } |
5891 | |
5892 | if (LangOpts.HLSL) |
5893 | getHLSLRuntime().handleGlobalVarDefinition(VD: D, Var: GV); |
5894 | |
5895 | if (emitter) |
5896 | emitter->finalize(global: GV); |
5897 | |
5898 | // If it is safe to mark the global 'constant', do so now. |
5899 | GV->setConstant((D->hasAttr<CUDAConstantAttr>() && LangOpts.CUDAIsDevice) || |
5900 | (!NeedsGlobalCtor && !NeedsGlobalDtor && |
5901 | D->getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: true))); |
5902 | |
5903 | // If it is in a read-only section, mark it 'constant'. |
5904 | if (const SectionAttr *SA = D->getAttr<SectionAttr>()) { |
5905 | const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()]; |
5906 | if ((SI.SectionFlags & ASTContext::PSF_Write) == 0) |
5907 | GV->setConstant(true); |
5908 | } |
5909 | |
5910 | CharUnits AlignVal = getContext().getDeclAlign(D); |
5911 | // Check for alignment specified in an 'omp allocate' directive. |
5912 | if (std::optional<CharUnits> AlignValFromAllocate = |
5913 | getOMPAllocateAlignment(VD: D)) |
5914 | AlignVal = *AlignValFromAllocate; |
5915 | GV->setAlignment(AlignVal.getAsAlign()); |
5916 | |
5917 | // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper |
5918 | // function is only defined alongside the variable, not also alongside |
5919 | // callers. Normally, all accesses to a thread_local go through the |
5920 | // thread-wrapper in order to ensure initialization has occurred, so the |
5921 | // underlying variable will never be used other than via the thread-wrapper |
5922 | // and can therefore be converted to internal linkage. |
5923 | // |
5924 | // However, if the variable has the 'constinit' attribute, it _can_ be |
5925 | // referenced directly, without calling the thread-wrapper, so the linkage |
5926 | // must not be changed. |
5927 | // |
5928 | // Additionally, if the variable isn't plain external linkage, e.g. if it's |
5929 | // weak or linkonce, the de-duplication semantics are important to preserve, |
5930 | // so we don't change the linkage. |
5931 | if (D->getTLSKind() == VarDecl::TLS_Dynamic && |
5932 | Linkage == llvm::GlobalValue::ExternalLinkage && |
5933 | Context.getTargetInfo().getTriple().isOSDarwin() && |
5934 | !D->hasAttr<ConstInitAttr>()) |
5935 | Linkage = llvm::GlobalValue::InternalLinkage; |
5936 | |
5937 | // HLSL variables in the input address space behave like memory-mapped |
5938 | // variables. Even if they are 'static', they are externally initialized and |
5939 | // read/written by the hardware/driver/pipeline. |
5940 | if (LangOpts.HLSL && GetGlobalVarAddressSpace(D) == LangAS::hlsl_input) |
5941 | Linkage = llvm::GlobalValue::ExternalLinkage; |
5942 | |
5943 | GV->setLinkage(Linkage); |
5944 | if (D->hasAttr<DLLImportAttr>()) |
5945 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); |
5946 | else if (D->hasAttr<DLLExportAttr>()) |
5947 | GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass); |
5948 | else |
5949 | GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass); |
5950 | |
5951 | if (Linkage == llvm::GlobalVariable::CommonLinkage) { |
5952 | // common vars aren't constant even if declared const. |
5953 | GV->setConstant(false); |
5954 | // Tentative definitions of global variables may be initialized with |
5955 | // non-zero null pointers. In this case they should have weak linkage, |
5956 | // since common linkage requires a zero initializer and disallows an |
5957 | // explicit section, and therefore cannot have a non-zero initial value. |
5958 | if (!GV->getInitializer()->isNullValue()) |
5959 | GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage); |
5960 | } |
5961 | |
5962 | setNonAliasAttributes(GD: D, GO: GV); |
5963 | |
5964 | if (D->getTLSKind() && !GV->isThreadLocal()) { |
5965 | if (D->getTLSKind() == VarDecl::TLS_Dynamic) |
5966 | CXXThreadLocals.push_back(x: D); |
5967 | setTLSMode(GV, D: *D); |
5968 | } |
5969 | |
5970 | maybeSetTrivialComdat(D: *D, GO&: *GV); |
5971 | |
5972 | // Emit the initializer function if necessary. |
5973 | if (NeedsGlobalCtor || NeedsGlobalDtor) |
5974 | EmitCXXGlobalVarDeclInitFunc(D, Addr: GV, PerformInit: NeedsGlobalCtor); |
5975 | |
5976 | SanitizerMD->reportGlobal(GV, D: *D, IsDynInit: NeedsGlobalCtor); |
5977 | |
5978 | // Emit global variable debug information. |
5979 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
5980 | if (getCodeGenOpts().hasReducedDebugInfo()) |
5981 | DI->EmitGlobalVariable(GV, Decl: D); |
5982 | } |
5983 | |
5984 | static bool isVarDeclStrongDefinition(const ASTContext &Context, |
5985 | CodeGenModule &CGM, const VarDecl *D, |
5986 | bool NoCommon) { |
5987 | // Don't give variables common linkage if -fno-common was specified unless it |
5988 | // was overridden by a NoCommon attribute. |
5989 | if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>()) |
5990 | return true; |
5991 | |
5992 | // C11 6.9.2/2: |
5993 | // A declaration of an identifier for an object that has file scope without |
5994 | // an initializer, and without a storage-class specifier or with the |
5995 | // storage-class specifier static, constitutes a tentative definition. |
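// For example, at file scope:
//   int a;          // tentative definition (may get common linkage)
//   int b = 0;      // has an initializer: a strong definition
//   extern int c;   // external storage: not a definition at all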
5996 | if (D->getInit() || D->hasExternalStorage()) |
5997 | return true; |
5998 | |
5999 | // A variable cannot be both common and exist in a section. |
6000 | if (D->hasAttr<SectionAttr>()) |
6001 | return true; |
6002 | |
6003 | // A variable cannot be both common and exist in a section. |
6004 | // We don't try to determine which is the right section in the front-end. |
6005 | // If no specialized section name is applicable, it will resort to default. |
6006 | if (D->hasAttr<PragmaClangBSSSectionAttr>() || |
6007 | D->hasAttr<PragmaClangDataSectionAttr>() || |
6008 | D->hasAttr<PragmaClangRelroSectionAttr>() || |
6009 | D->hasAttr<PragmaClangRodataSectionAttr>()) |
6010 | return true; |
6011 | |
6012 | // Thread local vars aren't considered common linkage. |
6013 | if (D->getTLSKind()) |
6014 | return true; |
6015 | |
6016 | // Tentative definitions marked with WeakImportAttr are true definitions. |
6017 | if (D->hasAttr<WeakImportAttr>()) |
6018 | return true; |
6019 | |
6020 | // A variable cannot be both common and exist in a comdat. |
6021 | if (shouldBeInCOMDAT(CGM, D: *D)) |
6022 | return true; |
6023 | |
6024 | // Declarations with a required alignment do not have common linkage in MSVC |
6025 | // mode. |
6026 | if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { |
6027 | if (D->hasAttr<AlignedAttr>()) |
6028 | return true; |
6029 | QualType VarType = D->getType(); |
6030 | if (Context.isAlignmentRequired(T: VarType)) |
6031 | return true; |
6032 | |
6033 | if (const auto *RT = VarType->getAs<RecordType>()) { |
6034 | const RecordDecl *RD = RT->getDecl(); |
6035 | for (const FieldDecl *FD : RD->fields()) { |
6036 | if (FD->isBitField()) |
6037 | continue; |
6038 | if (FD->hasAttr<AlignedAttr>()) |
6039 | return true; |
6040 | if (Context.isAlignmentRequired(T: FD->getType())) |
6041 | return true; |
6042 | } |
6043 | } |
6044 | } |
6045 | |
6046 | // Microsoft's link.exe doesn't support alignments greater than 32 bytes for |
6047 | // common symbols, so symbols with greater alignment requirements cannot be |
6048 | // common. |
6049 | // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two |
6050 | // alignments for common symbols via the aligncomm directive, so this |
6051 | // restriction only applies to MSVC environments. |
6052 | if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() && |
6053 | Context.getTypeAlignIfKnown(T: D->getType()) > |
6054 | Context.toBits(CharSize: CharUnits::fromQuantity(Quantity: 32))) |
6055 | return true; |
6056 | |
6057 | return false; |
6058 | } |
6059 | |
6060 | llvm::GlobalValue::LinkageTypes |
6061 | CodeGenModule::getLLVMLinkageForDeclarator(const DeclaratorDecl *D, |
6062 | GVALinkage Linkage) { |
6063 | if (Linkage == GVA_Internal) |
6064 | return llvm::Function::InternalLinkage; |
6065 | |
6066 | if (D->hasAttr<WeakAttr>()) |
6067 | return llvm::GlobalVariable::WeakAnyLinkage; |
6068 | |
6069 | if (const auto *FD = D->getAsFunction()) |
6070 | if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally) |
6071 | return llvm::GlobalVariable::LinkOnceAnyLinkage; |
6072 | |
6073 | // We are guaranteed to have a strong definition somewhere else, |
6074 | // so we can use available_externally linkage. |
6075 | if (Linkage == GVA_AvailableExternally) |
6076 | return llvm::GlobalValue::AvailableExternallyLinkage; |
6077 | |
6078 | // Note that Apple's kernel linker doesn't support symbol |
6079 | // coalescing, so we need to avoid linkonce and weak linkages there. |
6080 | // Normally, this means we just map to internal, but for explicit |
6081 | // instantiations we'll map to external. |
6082 | |
6083 | // In C++, the compiler has to emit a definition in every translation unit |
6084 | // that references the function. We should use linkonce_odr because |
6085 | // a) if all references in this translation unit are optimized away, we |
6086 | // don't need to codegen it. b) if the function persists, it needs to be |
6087 | // merged with other definitions. c) C++ has the ODR, so we know the |
6088 | // definition is dependable. |
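// Inline functions and implicit instantiations of function templates are
// typical examples of GVA_DiscardableODR.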
6089 | if (Linkage == GVA_DiscardableODR) |
6090 | return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage |
6091 | : llvm::Function::InternalLinkage; |
6092 | |
6093 | // An explicit instantiation of a template has weak linkage, since |
6094 | // explicit instantiations can occur in multiple translation units |
6095 | // and must all be equivalent. However, we are not allowed to |
6096 | // throw away these explicit instantiations. |
6097 | // |
6098 | // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU, |
6099 | // so say that CUDA templates are either external (for kernels) or internal. |
6100 | // This lets llvm perform aggressive inter-procedural optimizations. For |
6101 | // -fgpu-rdc case, device function calls across multiple TU's are allowed, |
6102 | // therefore we need to follow the normal linkage paradigm. |
6103 | if (Linkage == GVA_StrongODR) { |
6104 | if (getLangOpts().AppleKext) |
6105 | return llvm::Function::ExternalLinkage; |
6106 | if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice && |
6107 | !getLangOpts().GPURelocatableDeviceCode) |
6108 | return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage |
6109 | : llvm::Function::InternalLinkage; |
6110 | return llvm::Function::WeakODRLinkage; |
6111 | } |
6112 | |
6113 | // C++ doesn't have tentative definitions and thus cannot have common |
6114 | // linkage. |
6115 | if (!getLangOpts().CPlusPlus && isa<VarDecl>(Val: D) && |
6116 | !isVarDeclStrongDefinition(Context, CGM&: *this, D: cast<VarDecl>(Val: D), |
6117 | NoCommon: CodeGenOpts.NoCommon)) |
6118 | return llvm::GlobalVariable::CommonLinkage; |
6119 | |
6120 | // selectany symbols are externally visible, so use weak instead of |
6121 | // linkonce. MSVC optimizes away references to const selectany globals, so |
6122 | // all definitions should be the same and ODR linkage should be used. |
6123 | // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx |
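// For example, '__declspec(selectany) int x = 1;' may appear identically in
// several translation units, and the linker keeps any one of the definitions.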
6124 | if (D->hasAttr<SelectAnyAttr>()) |
6125 | return llvm::GlobalVariable::WeakODRLinkage; |
6126 | |
6127 | // Otherwise, we have strong external linkage. |
6128 | assert(Linkage == GVA_StrongExternal); |
6129 | return llvm::GlobalVariable::ExternalLinkage; |
6130 | } |
6131 | |
6132 | llvm::GlobalValue::LinkageTypes |
6133 | CodeGenModule::getLLVMLinkageVarDefinition(const VarDecl *VD) { |
6134 | GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD); |
6135 | return getLLVMLinkageForDeclarator(D: VD, Linkage); |
6136 | } |
6137 | |
6138 | /// Replace the uses of a function that was declared with a non-proto type. |
6139 | /// We want to silently drop extra arguments from such call sites. |
6140 | static void replaceUsesOfNonProtoConstant(llvm::Constant *old, |
6141 | llvm::Function *newFn) { |
6142 | // Fast path. |
6143 | if (old->use_empty()) |
6144 | return; |
6145 | |
6146 | llvm::Type *newRetTy = newFn->getReturnType(); |
6147 | SmallVector<llvm::Value *, 4> newArgs; |
6148 | |
6149 | SmallVector<llvm::CallBase *> callSitesToBeRemovedFromParent; |
6150 | |
6151 | for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end(); |
6152 | ui != ue; ui++) { |
6153 | llvm::User *user = ui->getUser(); |
6154 | |
6155 | // Recognize and replace uses of bitcasts. Most calls to |
6156 | // unprototyped functions will use bitcasts. |
6157 | if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(Val: user)) { |
6158 | if (bitcast->getOpcode() == llvm::Instruction::BitCast) |
6159 | replaceUsesOfNonProtoConstant(old: bitcast, newFn); |
6160 | continue; |
6161 | } |
6162 | |
6163 | // Recognize calls to the function. |
6164 | llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(Val: user); |
6165 | if (!callSite) |
6166 | continue; |
6167 | if (!callSite->isCallee(U: &*ui)) |
6168 | continue; |
6169 | |
6170 | // If the return types don't match exactly, then we can't |
6171 | // transform this call unless it's dead. |
6172 | if (callSite->getType() != newRetTy && !callSite->use_empty()) |
6173 | continue; |
6174 | |
6175 | // Get the call site's attribute list. |
6176 | SmallVector<llvm::AttributeSet, 8> newArgAttrs; |
6177 | llvm::AttributeList oldAttrs = callSite->getAttributes(); |
6178 | |
6179 | // If the function was passed too few arguments, don't transform. |
6180 | unsigned newNumArgs = newFn->arg_size(); |
6181 | if (callSite->arg_size() < newNumArgs) |
6182 | continue; |
6183 | |
6184 | // If extra arguments were passed, we silently drop them. |
6185 | // If any of the types mismatch, we don't transform. |
6186 | unsigned argNo = 0; |
6187 | bool dontTransform = false; |
6188 | for (llvm::Argument &A : newFn->args()) { |
6189 | if (callSite->getArgOperand(i: argNo)->getType() != A.getType()) { |
6190 | dontTransform = true; |
6191 | break; |
6192 | } |
6193 | |
6194 | // Add any parameter attributes. |
6195 | newArgAttrs.push_back(Elt: oldAttrs.getParamAttrs(ArgNo: argNo)); |
6196 | argNo++; |
6197 | } |
6198 | if (dontTransform) |
6199 | continue; |
6200 | |
6201 | // Okay, we can transform this. Create the new call instruction and copy |
6202 | // over the required information. |
6203 | newArgs.append(in_start: callSite->arg_begin(), in_end: callSite->arg_begin() + argNo); |
6204 | |
6205 | // Copy over any operand bundles. |
6206 | SmallVector<llvm::OperandBundleDef, 1> newBundles; |
6207 | callSite->getOperandBundlesAsDefs(Defs&: newBundles); |
6208 | |
6209 | llvm::CallBase *newCall; |
6210 | if (isa<llvm::CallInst>(Val: callSite)) { |
6211 | newCall = llvm::CallInst::Create(Func: newFn, Args: newArgs, Bundles: newBundles, NameStr: "" , |
6212 | InsertBefore: callSite->getIterator()); |
6213 | } else { |
6214 | auto *oldInvoke = cast<llvm::InvokeInst>(Val: callSite); |
6215 | newCall = llvm::InvokeInst::Create( |
6216 | Func: newFn, IfNormal: oldInvoke->getNormalDest(), IfException: oldInvoke->getUnwindDest(), |
6217 | Args: newArgs, Bundles: newBundles, NameStr: "" , InsertBefore: callSite->getIterator()); |
6218 | } |
6219 | newArgs.clear(); // for the next iteration |
6220 | |
6221 | if (!newCall->getType()->isVoidTy()) |
6222 | newCall->takeName(V: callSite); |
6223 | newCall->setAttributes( |
6224 | llvm::AttributeList::get(C&: newFn->getContext(), FnAttrs: oldAttrs.getFnAttrs(), |
6225 | RetAttrs: oldAttrs.getRetAttrs(), ArgAttrs: newArgAttrs)); |
6226 | newCall->setCallingConv(callSite->getCallingConv()); |
6227 | |
6228 | // Finally, remove the old call, replacing any uses with the new one. |
6229 | if (!callSite->use_empty()) |
6230 | callSite->replaceAllUsesWith(V: newCall); |
6231 | |
6232 | // Copy debug location attached to CI. |
6233 | if (callSite->getDebugLoc()) |
6234 | newCall->setDebugLoc(callSite->getDebugLoc()); |
6235 | |
6236 | callSitesToBeRemovedFromParent.push_back(Elt: callSite); |
6237 | } |
6238 | |
6239 | for (auto *callSite : callSitesToBeRemovedFromParent) { |
6240 | callSite->eraseFromParent(); |
6241 | } |
6242 | } |
6243 | |
6244 | /// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we |
6245 | /// implement a function with no prototype, e.g. "int foo() {}". If there are |
6246 | /// existing call uses of the old function in the module, this adjusts them to |
6247 | /// call the new function directly. |
6248 | /// |
6249 | /// This is not just a cleanup: the always_inline pass requires direct calls to |
6250 | /// functions to be able to inline them. If there is a bitcast in the way, it |
6251 | /// won't inline them. Instcombine normally deletes these calls, but it isn't |
6252 | /// run at -O0. |
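///
/// For example (C):
///   int foo();                          // no prototype
///   int bar(void) { return foo(42); }   // call through the unprototyped decl
///   int foo() { return 0; }             // definition takes no arguments
/// The call in 'bar' is rewritten to call the new 'foo' directly, and the
/// extra argument '42' is silently dropped.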
6253 | static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old, |
6254 | llvm::Function *NewFn) { |
6255 | // If we're redefining a global as a function, don't transform it. |
6256 | if (!isa<llvm::Function>(Val: Old)) return; |
6257 | |
6258 | replaceUsesOfNonProtoConstant(old: Old, newFn: NewFn); |
6259 | } |
6260 | |
6261 | void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { |
6262 | auto DK = VD->isThisDeclarationADefinition(); |
6263 | if ((DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>()) || |
6264 | (LangOpts.CUDA && !shouldEmitCUDAGlobalVar(Global: VD))) |
6265 | return; |
6266 | |
6267 | TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind(); |
6268 | // If we have a definition, this might be a deferred decl. If the |
6269 | // instantiation is explicit, make sure we emit it at the end. |
6270 | if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition) |
6271 | GetAddrOfGlobalVar(D: VD); |
6272 | |
6273 | EmitTopLevelDecl(D: VD); |
6274 | } |
6275 | |
6276 | void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD, |
6277 | llvm::GlobalValue *GV) { |
6278 | const auto *D = cast<FunctionDecl>(Val: GD.getDecl()); |
6279 | |
6280 | // Compute the function info and LLVM type. |
6281 | const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); |
6282 | llvm::FunctionType *Ty = getTypes().GetFunctionType(Info: FI); |
6283 | |
6284 | // Get or create the prototype for the function. |
6285 | if (!GV || (GV->getValueType() != Ty)) |
6286 | GV = cast<llvm::GlobalValue>(Val: GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, |
6287 | /*DontDefer=*/true, |
6288 | IsForDefinition: ForDefinition)); |
6289 | |
6290 | // Already emitted. |
6291 | if (!GV->isDeclaration()) |
6292 | return; |
6293 | |
6294 | // We need to set linkage and visibility on the function before |
6295 | // generating code for it because various parts of IR generation |
6296 | // want to propagate this information down (e.g. to local static |
6297 | // declarations). |
6298 | auto *Fn = cast<llvm::Function>(Val: GV); |
6299 | setFunctionLinkage(GD, F: Fn); |
6300 | |
6301 | // FIXME: this is redundant with part of setFunctionDefinitionAttributes |
6302 | setGVProperties(GV: Fn, GD); |
6303 | |
6304 | MaybeHandleStaticInExternC(D, GV: Fn); |
6305 | |
6306 | maybeSetTrivialComdat(D: *D, GO&: *Fn); |
6307 | |
6308 | CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo: FI); |
6309 | |
6310 | setNonAliasAttributes(GD, GO: Fn); |
6311 | |
6312 | bool ShouldAddOptNone = !CodeGenOpts.DisableO0ImplyOptNone && |
6313 | (CodeGenOpts.OptimizationLevel == 0) && |
6314 | !D->hasAttr<MinSizeAttr>(); |
6315 | |
6316 | if (DeviceKernelAttr::isOpenCLSpelling(A: D->getAttr<DeviceKernelAttr>())) { |
6317 | if (GD.getKernelReferenceKind() == KernelReferenceKind::Stub && |
6318 | !D->hasAttr<NoInlineAttr>() && |
6319 | !Fn->hasFnAttribute(Kind: llvm::Attribute::NoInline) && |
6320 | !D->hasAttr<OptimizeNoneAttr>() && |
6321 | !Fn->hasFnAttribute(Kind: llvm::Attribute::OptimizeNone) && |
6322 | !ShouldAddOptNone) { |
6323 | Fn->addFnAttr(Kind: llvm::Attribute::AlwaysInline); |
6324 | } |
6325 | } |
6326 | |
6327 | SetLLVMFunctionAttributesForDefinition(D, F: Fn); |
6328 | |
6329 | if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>()) |
6330 | AddGlobalCtor(Ctor: Fn, Priority: CA->getPriority()); |
6331 | if (const DestructorAttr *DA = D->getAttr<DestructorAttr>()) |
6332 | AddGlobalDtor(Dtor: Fn, Priority: DA->getPriority(), IsDtorAttrFunc: true); |
6333 | if (getLangOpts().OpenMP && D->hasAttr<OMPDeclareTargetDeclAttr>()) |
6334 | getOpenMPRuntime().emitDeclareTargetFunction(FD: D, GV); |
6335 | } |
6336 | |
6337 | void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) { |
6338 | const auto *D = cast<ValueDecl>(Val: GD.getDecl()); |
6339 | const AliasAttr *AA = D->getAttr<AliasAttr>(); |
6340 | assert(AA && "Not an alias?" ); |
6341 | |
6342 | StringRef MangledName = getMangledName(GD); |
6343 | |
6344 | if (AA->getAliasee() == MangledName) { |
6345 | Diags.Report(Loc: AA->getLocation(), DiagID: diag::err_cyclic_alias) << 0; |
6346 | return; |
6347 | } |
6348 | |
// If there is a definition in the module, then it wins over the alias.
// This is dubious, but allow it, to be safe. Just ignore the alias.
6351 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
6352 | if (Entry && !Entry->isDeclaration()) |
6353 | return; |
6354 | |
6355 | Aliases.push_back(x: GD); |
6356 | |
6357 | llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: D->getType()); |
6358 | |
// Create a reference to the named value. This ensures that it is emitted
// if it is a deferred decl.
6361 | llvm::Constant *Aliasee; |
6362 | llvm::GlobalValue::LinkageTypes LT; |
6363 | if (isa<llvm::FunctionType>(Val: DeclTy)) { |
6364 | Aliasee = GetOrCreateLLVMFunction(MangledName: AA->getAliasee(), Ty: DeclTy, GD, |
6365 | /*ForVTable=*/false); |
6366 | LT = getFunctionLinkage(GD); |
6367 | } else { |
6368 | Aliasee = GetOrCreateLLVMGlobal(MangledName: AA->getAliasee(), Ty: DeclTy, AddrSpace: LangAS::Default, |
6369 | /*D=*/nullptr); |
6370 | if (const auto *VD = dyn_cast<VarDecl>(Val: GD.getDecl())) |
6371 | LT = getLLVMLinkageVarDefinition(VD); |
6372 | else |
6373 | LT = getFunctionLinkage(GD); |
6374 | } |
6375 | |
6376 | // Create the new alias itself, but don't set a name yet. |
6377 | unsigned AS = Aliasee->getType()->getPointerAddressSpace(); |
6378 | auto *GA = |
6379 | llvm::GlobalAlias::create(Ty: DeclTy, AddressSpace: AS, Linkage: LT, Name: "" , Aliasee, Parent: &getModule()); |
6380 | |
6381 | if (Entry) { |
6382 | if (GA->getAliasee() == Entry) { |
6383 | Diags.Report(Loc: AA->getLocation(), DiagID: diag::err_cyclic_alias) << 0; |
6384 | return; |
6385 | } |
6386 | |
6387 | assert(Entry->isDeclaration()); |
6388 | |
6389 | // If there is a declaration in the module, then we had an extern followed |
6390 | // by the alias, as in: |
6391 | // extern int test6(); |
6392 | // ... |
6393 | // int test6() __attribute__((alias("test7"))); |
6394 | // |
6395 | // Remove it and replace uses of it with the alias. |
6396 | GA->takeName(V: Entry); |
6397 | |
6398 | Entry->replaceAllUsesWith(V: GA); |
6399 | Entry->eraseFromParent(); |
6400 | } else { |
6401 | GA->setName(MangledName); |
6402 | } |
6403 | |
6404 | // Set attributes which are particular to an alias; this is a |
6405 | // specialization of the attributes which may be set on a global |
6406 | // variable/function. |
6407 | if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() || |
6408 | D->isWeakImported()) { |
6409 | GA->setLinkage(llvm::Function::WeakAnyLinkage); |
6410 | } |
6411 | |
6412 | if (const auto *VD = dyn_cast<VarDecl>(Val: D)) |
6413 | if (VD->getTLSKind()) |
6414 | setTLSMode(GV: GA, D: *VD); |
6415 | |
6416 | SetCommonAttributes(GD, GV: GA); |
6417 | |
6418 | // Emit global alias debug information. |
6419 | if (isa<VarDecl>(Val: D)) |
6420 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
6421 | DI->EmitGlobalAlias(GV: cast<llvm::GlobalValue>(Val: GA->getAliasee()->stripPointerCasts()), Decl: GD); |
6422 | } |
6423 | |
6424 | void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) { |
6425 | const auto *D = cast<ValueDecl>(Val: GD.getDecl()); |
6426 | const IFuncAttr *IFA = D->getAttr<IFuncAttr>(); |
6427 | assert(IFA && "Not an ifunc?" ); |
6428 | |
6429 | StringRef MangledName = getMangledName(GD); |
6430 | |
6431 | if (IFA->getResolver() == MangledName) { |
6432 | Diags.Report(Loc: IFA->getLocation(), DiagID: diag::err_cyclic_alias) << 1; |
6433 | return; |
6434 | } |
6435 | |
6436 | // Report an error if some definition overrides ifunc. |
6437 | llvm::GlobalValue *Entry = GetGlobalValue(Name: MangledName); |
6438 | if (Entry && !Entry->isDeclaration()) { |
6439 | GlobalDecl OtherGD; |
6440 | if (lookupRepresentativeDecl(MangledName, Result&: OtherGD) && |
6441 | DiagnosedConflictingDefinitions.insert(V: GD).second) { |
6442 | Diags.Report(Loc: D->getLocation(), DiagID: diag::err_duplicate_mangled_name) |
6443 | << MangledName; |
6444 | Diags.Report(Loc: OtherGD.getDecl()->getLocation(), |
6445 | DiagID: diag::note_previous_definition); |
6446 | } |
6447 | return; |
6448 | } |
6449 | |
6450 | Aliases.push_back(x: GD); |
6451 | |
6452 | // The resolver might not be visited yet. Specify a dummy non-function type to |
6453 | // indicate IsIncompleteFunction. Either the type is ignored (if the resolver |
6454 | // was emitted) or the whole function will be replaced (if the resolver has |
6455 | // not been emitted). |
6456 | llvm::Constant *Resolver = |
6457 | GetOrCreateLLVMFunction(MangledName: IFA->getResolver(), Ty: VoidTy, GD: {}, |
6458 | /*ForVTable=*/false); |
6459 | llvm::Type *DeclTy = getTypes().ConvertTypeForMem(T: D->getType()); |
6460 | unsigned AS = getTypes().getTargetAddressSpace(T: D->getType()); |
6461 | llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create( |
6462 | Ty: DeclTy, AddressSpace: AS, Linkage: llvm::Function::ExternalLinkage, Name: "" , Resolver, Parent: &getModule()); |
6463 | if (Entry) { |
6464 | if (GIF->getResolver() == Entry) { |
6465 | Diags.Report(Loc: IFA->getLocation(), DiagID: diag::err_cyclic_alias) << 1; |
6466 | return; |
6467 | } |
6468 | assert(Entry->isDeclaration()); |
6469 | |
6470 | // If there is a declaration in the module, then we had an extern followed |
6471 | // by the ifunc, as in: |
6472 | // extern int test(); |
6473 | // ... |
6474 | // int test() __attribute__((ifunc("resolver"))); |
6475 | // |
6476 | // Remove it and replace uses of it with the ifunc. |
6477 | GIF->takeName(V: Entry); |
6478 | |
6479 | Entry->replaceAllUsesWith(V: GIF); |
6480 | Entry->eraseFromParent(); |
6481 | } else |
6482 | GIF->setName(MangledName); |
6483 | SetCommonAttributes(GD, GV: GIF); |
6484 | } |
6485 | |
6486 | llvm::Function *CodeGenModule::getIntrinsic(unsigned IID, |
6487 | ArrayRef<llvm::Type*> Tys) { |
6488 | return llvm::Intrinsic::getOrInsertDeclaration(M: &getModule(), |
6489 | id: (llvm::Intrinsic::ID)IID, Tys); |
6490 | } |
6491 | |
6492 | static llvm::StringMapEntry<llvm::GlobalVariable *> & |
6493 | GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map, |
6494 | const StringLiteral *Literal, bool TargetIsLSB, |
6495 | bool &IsUTF16, unsigned &StringLength) { |
6496 | StringRef String = Literal->getString(); |
6497 | unsigned NumBytes = String.size(); |
6498 | |
6499 | // Check for simple case. |
6500 | if (!Literal->containsNonAsciiOrNull()) { |
6501 | StringLength = NumBytes; |
6502 | return *Map.insert(KV: std::make_pair(x&: String, y: nullptr)).first; |
6503 | } |
6504 | |
6505 | // Otherwise, convert the UTF8 literals into a string of shorts. |
6506 | IsUTF16 = true; |
6507 | |
6508 | SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls. |
6509 | const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); |
6510 | llvm::UTF16 *ToPtr = &ToBuf[0]; |
6511 | |
6512 | (void)llvm::ConvertUTF8toUTF16(sourceStart: &FromPtr, sourceEnd: FromPtr + NumBytes, targetStart: &ToPtr, |
6513 | targetEnd: ToPtr + NumBytes, flags: llvm::strictConversion); |
6514 | |
6515 | // ConvertUTF8toUTF16 returns the length in ToPtr. |
6516 | StringLength = ToPtr - &ToBuf[0]; |
6517 | |
6518 | // Add an explicit null. |
6519 | *ToPtr = 0; |
6520 | return *Map.insert(KV: std::make_pair( |
6521 | x: StringRef(reinterpret_cast<const char *>(ToBuf.data()), |
6522 | (StringLength + 1) * 2), |
6523 | y: nullptr)).first; |
6524 | } |
6525 | |
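// GetAddrOfConstantCFString - Emit a CFString/NSString literal as a constant.
// The result is a private "_unnamed_cfstring_" global whose fields are,
// roughly (an illustrative sketch; exact field types depend on the CF runtime
// ABI selected below):
//   { class reference, flags, pointer to the ".str" character data, length }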
6526 | ConstantAddress |
6527 | CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) { |
6528 | unsigned StringLength = 0; |
6529 | bool isUTF16 = false; |
6530 | llvm::StringMapEntry<llvm::GlobalVariable *> &Entry = |
6531 | GetConstantCFStringEntry(Map&: CFConstantStringMap, Literal, |
6532 | TargetIsLSB: getDataLayout().isLittleEndian(), IsUTF16&: isUTF16, |
6533 | StringLength); |
6534 | |
6535 | if (auto *C = Entry.second) |
6536 | return ConstantAddress( |
6537 | C, C->getValueType(), CharUnits::fromQuantity(Quantity: C->getAlignment())); |
6538 | |
6539 | const ASTContext &Context = getContext(); |
6540 | const llvm::Triple &Triple = getTriple(); |
6541 | |
6542 | const auto CFRuntime = getLangOpts().CFRuntime; |
6543 | const bool IsSwiftABI = |
6544 | static_cast<unsigned>(CFRuntime) >= |
6545 | static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift); |
6546 | const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1; |
6547 | |
6548 | // If we don't already have it, get __CFConstantStringClassReference. |
6549 | if (!CFConstantStringClassRef) { |
6550 | const char *CFConstantStringClassName = "__CFConstantStringClassReference" ; |
6551 | llvm::Type *Ty = getTypes().ConvertType(T: getContext().IntTy); |
6552 | Ty = llvm::ArrayType::get(ElementType: Ty, NumElements: 0); |
6553 | |
6554 | switch (CFRuntime) { |
6555 | default: break; |
6556 | case LangOptions::CoreFoundationABI::Swift: [[fallthrough]]; |
6557 | case LangOptions::CoreFoundationABI::Swift5_0: |
6558 | CFConstantStringClassName = |
6559 | Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN" |
6560 | : "$s10Foundation19_NSCFConstantStringCN" ; |
6561 | Ty = IntPtrTy; |
6562 | break; |
6563 | case LangOptions::CoreFoundationABI::Swift4_2: |
6564 | CFConstantStringClassName = |
6565 | Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN" |
6566 | : "$S10Foundation19_NSCFConstantStringCN" ; |
6567 | Ty = IntPtrTy; |
6568 | break; |
6569 | case LangOptions::CoreFoundationABI::Swift4_1: |
6570 | CFConstantStringClassName = |
6571 | Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN" |
6572 | : "__T010Foundation19_NSCFConstantStringCN" ; |
6573 | Ty = IntPtrTy; |
6574 | break; |
6575 | } |
6576 | |
6577 | llvm::Constant *C = CreateRuntimeVariable(Ty, Name: CFConstantStringClassName); |
6578 | |
6579 | if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) { |
6580 | llvm::GlobalValue *GV = nullptr; |
6581 | |
6582 | if ((GV = dyn_cast<llvm::GlobalValue>(Val: C))) { |
6583 | IdentifierInfo &II = Context.Idents.get(Name: GV->getName()); |
6584 | TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl(); |
6585 | DeclContext *DC = TranslationUnitDecl::castToDeclContext(D: TUDecl); |
6586 | |
6587 | const VarDecl *VD = nullptr; |
6588 | for (const auto *Result : DC->lookup(Name: &II)) |
6589 | if ((VD = dyn_cast<VarDecl>(Val: Result))) |
6590 | break; |
6591 | |
6592 | if (Triple.isOSBinFormatELF()) { |
6593 | if (!VD) |
6594 | GV->setLinkage(llvm::GlobalValue::ExternalLinkage); |
6595 | } else { |
6596 | GV->setLinkage(llvm::GlobalValue::ExternalLinkage); |
6597 | if (!VD || !VD->hasAttr<DLLExportAttr>()) |
6598 | GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); |
6599 | else |
6600 | GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); |
6601 | } |
6602 | |
6603 | setDSOLocal(GV); |
6604 | } |
6605 | } |
6606 | |
6607 | // Decay array -> ptr |
6608 | CFConstantStringClassRef = |
6609 | IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty) : C; |
6610 | } |
6611 | |
6612 | QualType CFTy = Context.getCFConstantStringType(); |
6613 | |
6614 | auto *STy = cast<llvm::StructType>(Val: getTypes().ConvertType(T: CFTy)); |
6615 | |
6616 | ConstantInitBuilder Builder(*this); |
6617 | auto Fields = Builder.beginStruct(structTy: STy); |
6618 | |
6619 | // Class pointer. |
6620 | Fields.add(value: cast<llvm::Constant>(Val&: CFConstantStringClassRef)); |
6621 | |
6622 | // Flags. |
6623 | if (IsSwiftABI) { |
6624 | Fields.addInt(intTy: IntPtrTy, value: IsSwift4_1 ? 0x05 : 0x01); |
6625 | Fields.addInt(intTy: Int64Ty, value: isUTF16 ? 0x07d0 : 0x07c8); |
6626 | } else { |
6627 | Fields.addInt(intTy: IntTy, value: isUTF16 ? 0x07d0 : 0x07C8); |
6628 | } |
6629 | |
6630 | // String pointer. |
6631 | llvm::Constant *C = nullptr; |
6632 | if (isUTF16) { |
6633 | auto Arr = llvm::ArrayRef( |
6634 | reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())), |
6635 | Entry.first().size() / 2); |
6636 | C = llvm::ConstantDataArray::get(Context&: VMContext, Elts: Arr); |
6637 | } else { |
6638 | C = llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Entry.first()); |
6639 | } |
6640 | |
6641 | // Note: -fwritable-strings doesn't make the backing store strings of |
6642 | // CFStrings writable. |
6643 | auto *GV = |
6644 | new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true, |
6645 | llvm::GlobalValue::PrivateLinkage, C, ".str" ); |
6646 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
6647 | // Don't enforce the target's minimum global alignment, since the only use |
6648 | // of the string is via this class initializer. |
6649 | CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(T: Context.ShortTy) |
6650 | : Context.getTypeAlignInChars(T: Context.CharTy); |
6651 | GV->setAlignment(Align.getAsAlign()); |
6652 | |
6653 | // FIXME: We set the section explicitly to avoid a bug in ld64 224.1. |
6654 | // Without it LLVM can merge the string with a non unnamed_addr one during |
6655 | // LTO. Doing that changes the section it ends in, which surprises ld64. |
6656 | if (Triple.isOSBinFormatMachO()) |
6657 | GV->setSection(isUTF16 ? "__TEXT,__ustring" |
6658 | : "__TEXT,__cstring,cstring_literals" ); |
6659 | // Make sure the literal ends up in .rodata to allow for safe ICF and for |
6660 | // the static linker to adjust permissions to read-only later on. |
6661 | else if (Triple.isOSBinFormatELF()) |
6662 | GV->setSection(".rodata" ); |
6663 | |
6664 | // String. |
6665 | Fields.add(value: GV); |
6666 | |
6667 | // String length. |
6668 | llvm::IntegerType *LengthTy = |
6669 | llvm::IntegerType::get(C&: getModule().getContext(), |
6670 | NumBits: Context.getTargetInfo().getLongWidth()); |
6671 | if (IsSwiftABI) { |
6672 | if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || |
6673 | CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) |
6674 | LengthTy = Int32Ty; |
6675 | else |
6676 | LengthTy = IntPtrTy; |
6677 | } |
6678 | Fields.addInt(intTy: LengthTy, value: StringLength); |
6679 | |
6680 | // Swift ABI requires 8-byte alignment to ensure that the _Atomic(uint64_t) is |
6681 | // properly aligned on 32-bit platforms. |
6682 | CharUnits Alignment = |
6683 | IsSwiftABI ? Context.toCharUnitsFromBits(BitSize: 64) : getPointerAlign(); |
6684 | |
6685 | // The struct. |
6686 | GV = Fields.finishAndCreateGlobal(args: "_unnamed_cfstring_" , args&: Alignment, |
6687 | /*isConstant=*/args: false, |
6688 | args: llvm::GlobalVariable::PrivateLinkage); |
6689 | GV->addAttribute(Kind: "objc_arc_inert" ); |
6690 | switch (Triple.getObjectFormat()) { |
6691 | case llvm::Triple::UnknownObjectFormat: |
6692 | llvm_unreachable("unknown file format" ); |
6693 | case llvm::Triple::DXContainer: |
6694 | case llvm::Triple::GOFF: |
6695 | case llvm::Triple::SPIRV: |
6696 | case llvm::Triple::XCOFF: |
6697 | llvm_unreachable("unimplemented" ); |
6698 | case llvm::Triple::COFF: |
6699 | case llvm::Triple::ELF: |
6700 | case llvm::Triple::Wasm: |
6701 | GV->setSection("cfstring" ); |
6702 | break; |
6703 | case llvm::Triple::MachO: |
6704 | GV->setSection("__DATA,__cfstring" ); |
6705 | break; |
6706 | } |
6707 | Entry.second = GV; |
6708 | |
6709 | return ConstantAddress(GV, GV->getValueType(), Alignment); |
6710 | } |
6711 | |
6712 | bool CodeGenModule::getExpressionLocationsEnabled() const { |
6713 | return !CodeGenOpts.EmitCodeView || CodeGenOpts.DebugColumnInfo; |
6714 | } |
6715 | |
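// The implicit record built below mirrors the Objective-C fast enumeration
// state structure; as a sketch (the field names are illustrative, only the
// types matter for IR generation):
//   struct __objcFastEnumerationState {
//     unsigned long state;
//     id *itemsPtr;
//     unsigned long *mutationsPtr;
//     unsigned long extra[5];
//   };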
6716 | QualType CodeGenModule::getObjCFastEnumerationStateType() { |
6717 | if (ObjCFastEnumerationStateType.isNull()) { |
6718 | RecordDecl *D = Context.buildImplicitRecord(Name: "__objcFastEnumerationState" ); |
6719 | D->startDefinition(); |
6720 | |
6721 | QualType FieldTypes[] = { |
6722 | Context.UnsignedLongTy, Context.getPointerType(T: Context.getObjCIdType()), |
6723 | Context.getPointerType(T: Context.UnsignedLongTy), |
6724 | Context.getConstantArrayType(EltTy: Context.UnsignedLongTy, ArySize: llvm::APInt(32, 5), |
6725 | SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0)}; |
6726 | |
6727 | for (size_t i = 0; i < 4; ++i) { |
6728 | FieldDecl *Field = FieldDecl::Create(C: Context, |
6729 | DC: D, |
6730 | StartLoc: SourceLocation(), |
6731 | IdLoc: SourceLocation(), Id: nullptr, |
6732 | T: FieldTypes[i], /*TInfo=*/nullptr, |
6733 | /*BitWidth=*/BW: nullptr, |
6734 | /*Mutable=*/false, |
6735 | InitStyle: ICIS_NoInit); |
6736 | Field->setAccess(AS_public); |
6737 | D->addDecl(D: Field); |
6738 | } |
6739 | |
6740 | D->completeDefinition(); |
6741 | ObjCFastEnumerationStateType = Context.getTagDeclType(Decl: D); |
6742 | } |
6743 | |
6744 | return ObjCFastEnumerationStateType; |
6745 | } |
6746 | |
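// For example (illustrative): the literal "ab" of type char[3] becomes the
// inline array c"ab\00", and a wide literal such as L"ab" becomes an array of
// 2- or 4-byte code units zero-padded up to the array type's element count.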
6747 | llvm::Constant * |
6748 | CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) { |
6749 | assert(!E->getType()->isPointerType() && "Strings are always arrays" ); |
6750 | |
6751 | // Don't emit it as the address of the string, emit the string data itself |
6752 | // as an inline array. |
6753 | if (E->getCharByteWidth() == 1) { |
6754 | SmallString<64> Str(E->getString()); |
6755 | |
6756 | // Resize the string to the right size, which is indicated by its type. |
6757 | const ConstantArrayType *CAT = Context.getAsConstantArrayType(T: E->getType()); |
6758 | assert(CAT && "String literal not of constant array type!" ); |
6759 | Str.resize(N: CAT->getZExtSize()); |
6760 | return llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Str, AddNull: false); |
6761 | } |
6762 | |
6763 | auto *AType = cast<llvm::ArrayType>(Val: getTypes().ConvertType(T: E->getType())); |
6764 | llvm::Type *ElemTy = AType->getElementType(); |
6765 | unsigned NumElements = AType->getNumElements(); |
6766 | |
6767 | // Wide strings have either 2-byte or 4-byte elements. |
6768 | if (ElemTy->getPrimitiveSizeInBits() == 16) { |
6769 | SmallVector<uint16_t, 32> Elements; |
6770 | Elements.reserve(N: NumElements); |
6771 | |
6772 | for(unsigned i = 0, e = E->getLength(); i != e; ++i) |
6773 | Elements.push_back(Elt: E->getCodeUnit(i)); |
6774 | Elements.resize(N: NumElements); |
6775 | return llvm::ConstantDataArray::get(Context&: VMContext, Elts&: Elements); |
6776 | } |
6777 | |
6778 | assert(ElemTy->getPrimitiveSizeInBits() == 32); |
6779 | SmallVector<uint32_t, 32> Elements; |
6780 | Elements.reserve(N: NumElements); |
6781 | |
6782 | for(unsigned i = 0, e = E->getLength(); i != e; ++i) |
6783 | Elements.push_back(Elt: E->getCodeUnit(i)); |
6784 | Elements.resize(N: NumElements); |
6785 | return llvm::ConstantDataArray::get(Context&: VMContext, Elts&: Elements); |
6786 | } |
6787 | |
6788 | static llvm::GlobalVariable * |
6789 | GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT, |
6790 | CodeGenModule &CGM, StringRef GlobalName, |
6791 | CharUnits Alignment) { |
6792 | unsigned AddrSpace = CGM.getContext().getTargetAddressSpace( |
6793 | AS: CGM.GetGlobalConstantAddressSpace()); |
6794 | |
6795 | llvm::Module &M = CGM.getModule(); |
6796 | // Create a global variable for this string |
6797 | auto *GV = new llvm::GlobalVariable( |
6798 | M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName, |
6799 | nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace); |
6800 | GV->setAlignment(Alignment.getAsAlign()); |
6801 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
6802 | if (GV->isWeakForLinker()) { |
6803 | assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals" ); |
6804 | GV->setComdat(M.getOrInsertComdat(Name: GV->getName())); |
6805 | } |
6806 | CGM.setDSOLocal(GV); |
6807 | |
6808 | return GV; |
6809 | } |
6810 | |
6811 | /// GetAddrOfConstantStringFromLiteral - Return a pointer to a |
6812 | /// constant array for the given string literal. |
6813 | ConstantAddress |
6814 | CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S, |
6815 | StringRef Name) { |
6816 | CharUnits Alignment = |
6817 | getContext().getAlignOfGlobalVarInChars(T: S->getType(), /*VD=*/nullptr); |
6818 | |
6819 | llvm::Constant *C = GetConstantArrayFromStringLiteral(E: S); |
6820 | llvm::GlobalVariable **Entry = nullptr; |
6821 | if (!LangOpts.WritableStrings) { |
6822 | Entry = &ConstantStringMap[C]; |
6823 | if (auto GV = *Entry) { |
6824 | if (uint64_t(Alignment.getQuantity()) > GV->getAlignment()) |
6825 | GV->setAlignment(Alignment.getAsAlign()); |
6826 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV), |
6827 | GV->getValueType(), Alignment); |
6828 | } |
6829 | } |
6830 | |
6831 | SmallString<256> MangledNameBuffer; |
6832 | StringRef GlobalVariableName; |
6833 | llvm::GlobalValue::LinkageTypes LT; |
6834 | |
6835 | // Mangle the string literal if that's how the ABI merges duplicate strings. |
6836 | // Don't do it if they are writable, since we don't want writes in one TU to |
6837 | // affect strings in another. |
6838 | if (getCXXABI().getMangleContext().shouldMangleStringLiteral(SL: S) && |
6839 | !LangOpts.WritableStrings) { |
6840 | llvm::raw_svector_ostream Out(MangledNameBuffer); |
6841 | getCXXABI().getMangleContext().mangleStringLiteral(SL: S, Out); |
6842 | LT = llvm::GlobalValue::LinkOnceODRLinkage; |
6843 | GlobalVariableName = MangledNameBuffer; |
6844 | } else { |
6845 | LT = llvm::GlobalValue::PrivateLinkage; |
6846 | GlobalVariableName = Name; |
6847 | } |
6848 | |
6849 | auto GV = GenerateStringLiteral(C, LT, CGM&: *this, GlobalName: GlobalVariableName, Alignment); |
6850 | |
6851 | CGDebugInfo *DI = getModuleDebugInfo(); |
6852 | if (DI && getCodeGenOpts().hasReducedDebugInfo()) |
6853 | DI->AddStringLiteralDebugInfo(GV, S); |
6854 | |
6855 | if (Entry) |
6856 | *Entry = GV; |
6857 | |
6858 | SanitizerMD->reportGlobal(GV, Loc: S->getStrTokenLoc(TokNum: 0), Name: "<string literal>" ); |
6859 | |
6860 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV), |
6861 | GV->getValueType(), Alignment); |
6862 | } |
6863 | |
6864 | /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant |
6865 | /// array for the given ObjCEncodeExpr node. |
6866 | ConstantAddress |
6867 | CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) { |
6868 | std::string Str; |
6869 | getContext().getObjCEncodingForType(T: E->getEncodedType(), S&: Str); |
6870 | |
6871 | return GetAddrOfConstantCString(Str); |
6872 | } |
6873 | |
6874 | /// GetAddrOfConstantCString - Returns a pointer to a character array containing |
6875 | /// the literal and a terminating '\0' character. |
/// The result has pointer-to-array type.
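///
/// For example (illustrative usage, where CGM is a CodeGenModule&):
///   ConstantAddress Addr = CGM.GetAddrOfConstantCString("hi", nullptr);
/// creates (or reuses) a private global named ".str" holding "hi\0" and
/// returns its address together with the chosen alignment.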
6877 | ConstantAddress CodeGenModule::GetAddrOfConstantCString( |
6878 | const std::string &Str, const char *GlobalName) { |
6879 | StringRef StrWithNull(Str.c_str(), Str.size() + 1); |
6880 | CharUnits Alignment = getContext().getAlignOfGlobalVarInChars( |
6881 | T: getContext().CharTy, /*VD=*/nullptr); |
6882 | |
6883 | llvm::Constant *C = |
6884 | llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: StrWithNull, AddNull: false); |
6885 | |
6886 | // Don't share any string literals if strings aren't constant. |
6887 | llvm::GlobalVariable **Entry = nullptr; |
6888 | if (!LangOpts.WritableStrings) { |
6889 | Entry = &ConstantStringMap[C]; |
6890 | if (auto GV = *Entry) { |
6891 | if (uint64_t(Alignment.getQuantity()) > GV->getAlignment()) |
6892 | GV->setAlignment(Alignment.getAsAlign()); |
6893 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV), |
6894 | GV->getValueType(), Alignment); |
6895 | } |
6896 | } |
6897 | |
6898 | // Get the default prefix if a name wasn't specified. |
6899 | if (!GlobalName) |
6900 | GlobalName = ".str" ; |
6901 | // Create a global variable for this. |
6902 | auto GV = GenerateStringLiteral(C, LT: llvm::GlobalValue::PrivateLinkage, CGM&: *this, |
6903 | GlobalName, Alignment); |
6904 | if (Entry) |
6905 | *Entry = GV; |
6906 | |
6907 | return ConstantAddress(castStringLiteralToDefaultAddressSpace(CGM&: *this, GV), |
6908 | GV->getValueType(), Alignment); |
6909 | } |
6910 | |
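// For example (illustrative): at namespace scope,
//   const int &r = 42;
// lifetime-extends the temporary holding 42; that temporary is emitted below
// as its own global, named via mangleReferenceTemporary.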
6911 | ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary( |
6912 | const MaterializeTemporaryExpr *E, const Expr *Init) { |
6913 | assert((E->getStorageDuration() == SD_Static || |
6914 | E->getStorageDuration() == SD_Thread) && "not a global temporary" ); |
6915 | const auto *VD = cast<VarDecl>(Val: E->getExtendingDecl()); |
6916 | |
6917 | // If we're not materializing a subobject of the temporary, keep the |
6918 | // cv-qualifiers from the type of the MaterializeTemporaryExpr. |
6919 | QualType MaterializedType = Init->getType(); |
6920 | if (Init == E->getSubExpr()) |
6921 | MaterializedType = E->getType(); |
6922 | |
6923 | CharUnits Align = getContext().getTypeAlignInChars(T: MaterializedType); |
6924 | |
6925 | auto InsertResult = MaterializedGlobalTemporaryMap.insert(KV: {E, nullptr}); |
6926 | if (!InsertResult.second) { |
6927 | // We've seen this before: either we already created it or we're in the |
6928 | // process of doing so. |
6929 | if (!InsertResult.first->second) { |
6930 | // We recursively re-entered this function, probably during emission of |
6931 | // the initializer. Create a placeholder. We'll clean this up in the |
6932 | // outer call, at the end of this function. |
6933 | llvm::Type *Type = getTypes().ConvertTypeForMem(T: MaterializedType); |
6934 | InsertResult.first->second = new llvm::GlobalVariable( |
6935 | getModule(), Type, false, llvm::GlobalVariable::InternalLinkage, |
6936 | nullptr); |
6937 | } |
6938 | return ConstantAddress(InsertResult.first->second, |
6939 | llvm::cast<llvm::GlobalVariable>( |
6940 | Val: InsertResult.first->second->stripPointerCasts()) |
6941 | ->getValueType(), |
6942 | Align); |
6943 | } |
6944 | |
6945 | // FIXME: If an externally-visible declaration extends multiple temporaries, |
6946 | // we need to give each temporary the same name in every translation unit (and |
6947 | // we also need to make the temporaries externally-visible). |
6948 | SmallString<256> Name; |
6949 | llvm::raw_svector_ostream Out(Name); |
6950 | getCXXABI().getMangleContext().mangleReferenceTemporary( |
6951 | D: VD, ManglingNumber: E->getManglingNumber(), Out); |
6952 | |
6953 | APValue *Value = nullptr; |
6954 | if (E->getStorageDuration() == SD_Static && VD->evaluateValue()) { |
6955 | // If the initializer of the extending declaration is a constant |
6956 | // initializer, we should have a cached constant initializer for this |
6957 | // temporary. Note that this might have a different value from the value |
6958 | // computed by evaluating the initializer if the surrounding constant |
6959 | // expression modifies the temporary. |
6960 | Value = E->getOrCreateValue(MayCreate: false); |
6961 | } |
6962 | |
6963 | // Try evaluating it now, it might have a constant initializer. |
6964 | Expr::EvalResult EvalResult; |
6965 | if (!Value && Init->EvaluateAsRValue(Result&: EvalResult, Ctx: getContext()) && |
6966 | !EvalResult.hasSideEffects()) |
6967 | Value = &EvalResult.Val; |
6968 | |
6969 | LangAS AddrSpace = GetGlobalVarAddressSpace(D: VD); |
6970 | |
6971 | std::optional<ConstantEmitter> emitter; |
6972 | llvm::Constant *InitialValue = nullptr; |
6973 | bool Constant = false; |
6974 | llvm::Type *Type; |
6975 | if (Value) { |
6976 | // The temporary has a constant initializer, use it. |
6977 | emitter.emplace(args&: *this); |
6978 | InitialValue = emitter->emitForInitializer(value: *Value, destAddrSpace: AddrSpace, |
6979 | destType: MaterializedType); |
6980 | Constant = |
6981 | MaterializedType.isConstantStorage(Ctx: getContext(), /*ExcludeCtor*/ Value, |
6982 | /*ExcludeDtor*/ false); |
6983 | Type = InitialValue->getType(); |
6984 | } else { |
// No initializer; the initialization will be provided when we
// initialize the declaration that performed the lifetime extension.
6987 | Type = getTypes().ConvertTypeForMem(T: MaterializedType); |
6988 | } |
6989 | |
6990 | // Create a global variable for this lifetime-extended temporary. |
6991 | llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD); |
6992 | if (Linkage == llvm::GlobalVariable::ExternalLinkage) { |
6993 | const VarDecl *InitVD; |
6994 | if (VD->isStaticDataMember() && VD->getAnyInitializer(D&: InitVD) && |
6995 | isa<CXXRecordDecl>(Val: InitVD->getLexicalDeclContext())) { |
6996 | // Temporaries defined inside a class get linkonce_odr linkage because the |
6997 | // class can be defined in multiple translation units. |
6998 | Linkage = llvm::GlobalVariable::LinkOnceODRLinkage; |
6999 | } else { |
7000 | // There is no need for this temporary to have external linkage if the |
7001 | // VarDecl has external linkage. |
7002 | Linkage = llvm::GlobalVariable::InternalLinkage; |
7003 | } |
7004 | } |
7005 | auto TargetAS = getContext().getTargetAddressSpace(AS: AddrSpace); |
7006 | auto *GV = new llvm::GlobalVariable( |
7007 | getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(), |
7008 | /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS); |
7009 | if (emitter) emitter->finalize(global: GV); |
7010 | // Don't assign dllimport or dllexport to local linkage globals. |
7011 | if (!llvm::GlobalValue::isLocalLinkage(Linkage)) { |
7012 | setGVProperties(GV, D: VD); |
7013 | if (GV->getDLLStorageClass() == llvm::GlobalVariable::DLLExportStorageClass) |
7014 | // The reference temporary should never be dllexport. |
7015 | GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass); |
7016 | } |
7017 | GV->setAlignment(Align.getAsAlign()); |
7018 | if (supportsCOMDAT() && GV->isWeakForLinker()) |
7019 | GV->setComdat(TheModule.getOrInsertComdat(Name: GV->getName())); |
7020 | if (VD->getTLSKind()) |
7021 | setTLSMode(GV, D: *VD); |
7022 | llvm::Constant *CV = GV; |
7023 | if (AddrSpace != LangAS::Default) |
7024 | CV = getTargetCodeGenInfo().performAddrSpaceCast( |
7025 | CGM&: *this, V: GV, SrcAddr: AddrSpace, |
7026 | DestTy: llvm::PointerType::get( |
7027 | C&: getLLVMContext(), |
7028 | AddressSpace: getContext().getTargetAddressSpace(AS: LangAS::Default))); |
7029 | |
7030 | // Update the map with the new temporary. If we created a placeholder above, |
7031 | // replace it with the new global now. |
7032 | llvm::Constant *&Entry = MaterializedGlobalTemporaryMap[E]; |
7033 | if (Entry) { |
7034 | Entry->replaceAllUsesWith(V: CV); |
7035 | llvm::cast<llvm::GlobalVariable>(Val: Entry)->eraseFromParent(); |
7036 | } |
7037 | Entry = CV; |
7038 | |
7039 | return ConstantAddress(CV, Type, Align); |
7040 | } |
7041 | |
7042 | /// EmitObjCPropertyImplementations - Emit information for synthesized |
7043 | /// properties for an implementation. |
7044 | void CodeGenModule::EmitObjCPropertyImplementations(const |
7045 | ObjCImplementationDecl *D) { |
7046 | for (const auto *PID : D->property_impls()) { |
7047 | // Dynamic is just for type-checking. |
7048 | if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) { |
7049 | ObjCPropertyDecl *PD = PID->getPropertyDecl(); |
7050 | |
// Determine which methods need to be implemented; some may have
// been overridden. Note that ::isPropertyAccessor is not the check
// we want: that just indicates whether the decl came from a
// property. What we want to know is whether the method is defined in
// this implementation.
7056 | auto *Getter = PID->getGetterMethodDecl(); |
7057 | if (!Getter || Getter->isSynthesizedAccessorStub()) |
7058 | CodeGenFunction(*this).GenerateObjCGetter( |
7059 | IMP: const_cast<ObjCImplementationDecl *>(D), PID); |
7060 | auto *Setter = PID->getSetterMethodDecl(); |
7061 | if (!PD->isReadOnly() && (!Setter || Setter->isSynthesizedAccessorStub())) |
7062 | CodeGenFunction(*this).GenerateObjCSetter( |
7063 | IMP: const_cast<ObjCImplementationDecl *>(D), PID); |
7064 | } |
7065 | } |
7066 | } |
7067 | |
7068 | static bool needsDestructMethod(ObjCImplementationDecl *impl) { |
7069 | const ObjCInterfaceDecl *iface = impl->getClassInterface(); |
7070 | for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin(); |
7071 | ivar; ivar = ivar->getNextIvar()) |
7072 | if (ivar->getType().isDestructedType()) |
7073 | return true; |
7074 | |
7075 | return false; |
7076 | } |
7077 | |
7078 | static bool AllTrivialInitializers(CodeGenModule &CGM, |
7079 | ObjCImplementationDecl *D) { |
7080 | CodeGenFunction CGF(CGM); |
7081 | for (ObjCImplementationDecl::init_iterator B = D->init_begin(), |
7082 | E = D->init_end(); B != E; ++B) { |
7083 | CXXCtorInitializer *CtorInitExp = *B; |
7084 | Expr *Init = CtorInitExp->getInit(); |
7085 | if (!CGF.isTrivialInitializer(Init)) |
7086 | return false; |
7087 | } |
7088 | return true; |
7089 | } |
7090 | |
7091 | /// EmitObjCIvarInitializations - Emit information for ivar initialization |
7092 | /// for an implementation. |
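///
/// For example (illustrative): an implementation whose interface declares an
/// ivar of non-trivial C++ type, such as
///   @interface I : NSObject { std::string S; } @end
/// gets a synthesized .cxx_destruct method here (and a .cxx_construct method
/// if any ivar initializer is non-trivial).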
7093 | void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) { |
7094 | // We might need a .cxx_destruct even if we don't have any ivar initializers. |
7095 | if (needsDestructMethod(impl: D)) { |
7096 | const IdentifierInfo *II = &getContext().Idents.get(Name: ".cxx_destruct" ); |
7097 | Selector cxxSelector = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II); |
7098 | ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create( |
7099 | C&: getContext(), beginLoc: D->getLocation(), endLoc: D->getLocation(), SelInfo: cxxSelector, |
7100 | T: getContext().VoidTy, ReturnTInfo: nullptr, contextDecl: D, |
7101 | /*isInstance=*/true, /*isVariadic=*/false, |
7102 | /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false, |
7103 | /*isImplicitlyDeclared=*/true, |
7104 | /*isDefined=*/false, impControl: ObjCImplementationControl::Required); |
7105 | D->addInstanceMethod(method: DTORMethod); |
7106 | CodeGenFunction(*this).GenerateObjCCtorDtorMethod(IMP: D, MD: DTORMethod, ctor: false); |
7107 | D->setHasDestructors(true); |
7108 | } |
7109 | |
7110 | // If the implementation doesn't have any ivar initializers, we don't need |
7111 | // a .cxx_construct. |
7112 | if (D->getNumIvarInitializers() == 0 || |
7113 | AllTrivialInitializers(CGM&: *this, D)) |
7114 | return; |
7115 | |
7116 | const IdentifierInfo *II = &getContext().Idents.get(Name: ".cxx_construct" ); |
7117 | Selector cxxSelector = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II); |
7118 | // The constructor returns 'self'. |
7119 | ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create( |
7120 | C&: getContext(), beginLoc: D->getLocation(), endLoc: D->getLocation(), SelInfo: cxxSelector, |
7121 | T: getContext().getObjCIdType(), ReturnTInfo: nullptr, contextDecl: D, /*isInstance=*/true, |
7122 | /*isVariadic=*/false, |
7123 | /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false, |
7124 | /*isImplicitlyDeclared=*/true, |
7125 | /*isDefined=*/false, impControl: ObjCImplementationControl::Required); |
7126 | D->addInstanceMethod(method: CTORMethod); |
7127 | CodeGenFunction(*this).GenerateObjCCtorDtorMethod(IMP: D, MD: CTORMethod, ctor: true); |
7128 | D->setHasNonZeroConstructors(true); |
7129 | } |
7130 | |
7131 | // EmitLinkageSpec - Emit all declarations in a linkage spec. |
7132 | void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) { |
7133 | if (LSD->getLanguage() != LinkageSpecLanguageIDs::C && |
7134 | LSD->getLanguage() != LinkageSpecLanguageIDs::CXX) { |
7135 | ErrorUnsupported(D: LSD, Type: "linkage spec" ); |
7136 | return; |
7137 | } |
7138 | |
7139 | EmitDeclContext(DC: LSD); |
7140 | } |
7141 | |
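/// EmitTopLevelStmt - Emit a TopLevelStmtDecl (used, for example, by
/// incremental compilation).  Consecutive top-level statements are squashed
/// into a single synthetic "__stmts__N" function that is registered as a
/// global initializer.  An illustrative incremental input:
///   int i = 0;   // ordinary global with an initializer
///   i++;         // top-level statement, emitted into a __stmts__N body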
7142 | void CodeGenModule::EmitTopLevelStmt(const TopLevelStmtDecl *D) { |
7143 | // Device code should not be at top level. |
7144 | if (LangOpts.CUDA && LangOpts.CUDAIsDevice) |
7145 | return; |
7146 | |
7147 | std::unique_ptr<CodeGenFunction> &CurCGF = |
7148 | GlobalTopLevelStmtBlockInFlight.first; |
7149 | |
// We emitted a top-level stmt, but initialization code follows it.
// Stop squashing the top-level stmts into a single function.
7152 | if (CurCGF && CXXGlobalInits.back() != CurCGF->CurFn) { |
7153 | CurCGF->FinishFunction(EndLoc: D->getEndLoc()); |
7154 | CurCGF = nullptr; |
7155 | } |
7156 | |
7157 | if (!CurCGF) { |
7158 | // void __stmts__N(void) |
7159 | // FIXME: Ask the ABI name mangler to pick a name. |
7160 | std::string Name = "__stmts__" + llvm::utostr(X: CXXGlobalInits.size()); |
7161 | FunctionArgList Args; |
7162 | QualType RetTy = getContext().VoidTy; |
7163 | const CGFunctionInfo &FnInfo = |
7164 | getTypes().arrangeBuiltinFunctionDeclaration(resultType: RetTy, args: Args); |
7165 | llvm::FunctionType *FnTy = getTypes().GetFunctionType(Info: FnInfo); |
7166 | llvm::Function *Fn = llvm::Function::Create( |
7167 | Ty: FnTy, Linkage: llvm::GlobalValue::InternalLinkage, N: Name, M: &getModule()); |
7168 | |
7169 | CurCGF.reset(p: new CodeGenFunction(*this)); |
7170 | GlobalTopLevelStmtBlockInFlight.second = D; |
7171 | CurCGF->StartFunction(GD: GlobalDecl(), RetTy, Fn, FnInfo, Args, |
7172 | Loc: D->getBeginLoc(), StartLoc: D->getBeginLoc()); |
7173 | CXXGlobalInits.push_back(x: Fn); |
7174 | } |
7175 | |
7176 | CurCGF->EmitStmt(S: D->getStmt()); |
7177 | } |
7178 | |
7179 | void CodeGenModule::EmitDeclContext(const DeclContext *DC) { |
7180 | for (auto *I : DC->decls()) { |
7181 | // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope |
7182 | // are themselves considered "top-level", so EmitTopLevelDecl on an |
7183 | // ObjCImplDecl does not recursively visit them. We need to do that in |
7184 | // case they're nested inside another construct (LinkageSpecDecl / |
7185 | // ExportDecl) that does stop them from being considered "top-level". |
7186 | if (auto *OID = dyn_cast<ObjCImplDecl>(Val: I)) { |
7187 | for (auto *M : OID->methods()) |
7188 | EmitTopLevelDecl(D: M); |
7189 | } |
7190 | |
7191 | EmitTopLevelDecl(D: I); |
7192 | } |
7193 | } |
7194 | |
7195 | /// EmitTopLevelDecl - Emit code for a single top level declaration. |
7196 | void CodeGenModule::EmitTopLevelDecl(Decl *D) { |
7197 | // Ignore dependent declarations. |
7198 | if (D->isTemplated()) |
7199 | return; |
7200 | |
// Consteval functions shouldn't be emitted.
7202 | if (auto *FD = dyn_cast<FunctionDecl>(Val: D); FD && FD->isImmediateFunction()) |
7203 | return; |
7204 | |
7205 | switch (D->getKind()) { |
7206 | case Decl::CXXConversion: |
7207 | case Decl::CXXMethod: |
7208 | case Decl::Function: |
7209 | EmitGlobal(GD: cast<FunctionDecl>(Val: D)); |
7210 | // Always provide some coverage mapping |
7211 | // even for the functions that aren't emitted. |
7212 | AddDeferredUnusedCoverageMapping(D); |
7213 | break; |
7214 | |
7215 | case Decl::CXXDeductionGuide: |
7216 | // Function-like, but does not result in code emission. |
7217 | break; |
7218 | |
7219 | case Decl::Var: |
7220 | case Decl::Decomposition: |
7221 | case Decl::VarTemplateSpecialization: |
7222 | EmitGlobal(GD: cast<VarDecl>(Val: D)); |
7223 | if (auto *DD = dyn_cast<DecompositionDecl>(Val: D)) |
7224 | for (auto *B : DD->flat_bindings()) |
7225 | if (auto *HD = B->getHoldingVar()) |
7226 | EmitGlobal(GD: HD); |
7227 | |
7228 | break; |
7229 | |
7230 | // Indirect fields from global anonymous structs and unions can be |
7231 | // ignored; only the actual variable requires IR gen support. |
7232 | case Decl::IndirectField: |
7233 | break; |
7234 | |
7235 | // C++ Decls |
7236 | case Decl::Namespace: |
7237 | EmitDeclContext(DC: cast<NamespaceDecl>(Val: D)); |
7238 | break; |
7239 | case Decl::ClassTemplateSpecialization: { |
7240 | const auto *Spec = cast<ClassTemplateSpecializationDecl>(Val: D); |
7241 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7242 | if (Spec->getSpecializationKind() == |
7243 | TSK_ExplicitInstantiationDefinition && |
7244 | Spec->hasDefinition()) |
7245 | DI->completeTemplateDefinition(SD: *Spec); |
7246 | } [[fallthrough]]; |
7247 | case Decl::CXXRecord: { |
7248 | CXXRecordDecl *CRD = cast<CXXRecordDecl>(Val: D); |
7249 | if (CGDebugInfo *DI = getModuleDebugInfo()) { |
7250 | if (CRD->hasDefinition()) |
7251 | DI->EmitAndRetainType(Ty: getContext().getRecordType(Decl: cast<RecordDecl>(Val: D))); |
7252 | if (auto *ES = D->getASTContext().getExternalSource()) |
7253 | if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never) |
7254 | DI->completeUnusedClass(D: *CRD); |
7255 | } |
7256 | // Emit any static data members, they may be definitions. |
7257 | for (auto *I : CRD->decls()) |
7258 | if (isa<VarDecl>(Val: I) || isa<CXXRecordDecl>(Val: I) || isa<EnumDecl>(Val: I)) |
7259 | EmitTopLevelDecl(D: I); |
7260 | break; |
7261 | } |
7262 | // No code generation needed. |
7263 | case Decl::UsingShadow: |
7264 | case Decl::ClassTemplate: |
7265 | case Decl::VarTemplate: |
7266 | case Decl::Concept: |
7267 | case Decl::VarTemplatePartialSpecialization: |
7268 | case Decl::FunctionTemplate: |
7269 | case Decl::TypeAliasTemplate: |
7270 | case Decl::Block: |
7271 | case Decl::Empty: |
7272 | case Decl::Binding: |
7273 | break; |
7274 | case Decl::Using: // using X; [C++] |
7275 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7276 | DI->EmitUsingDecl(UD: cast<UsingDecl>(Val&: *D)); |
7277 | break; |
7278 | case Decl::UsingEnum: // using enum X; [C++] |
7279 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7280 | DI->EmitUsingEnumDecl(UD: cast<UsingEnumDecl>(Val&: *D)); |
7281 | break; |
7282 | case Decl::NamespaceAlias: |
7283 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7284 | DI->EmitNamespaceAlias(NA: cast<NamespaceAliasDecl>(Val&: *D)); |
7285 | break; |
7286 | case Decl::UsingDirective: // using namespace X; [C++] |
7287 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7288 | DI->EmitUsingDirective(UD: cast<UsingDirectiveDecl>(Val&: *D)); |
7289 | break; |
7290 | case Decl::CXXConstructor: |
7291 | getCXXABI().EmitCXXConstructors(D: cast<CXXConstructorDecl>(Val: D)); |
7292 | break; |
7293 | case Decl::CXXDestructor: |
7294 | getCXXABI().EmitCXXDestructors(D: cast<CXXDestructorDecl>(Val: D)); |
7295 | break; |
7296 | |
7297 | case Decl::StaticAssert: |
7298 | // Nothing to do. |
7299 | break; |
7300 | |
7301 | // Objective-C Decls |
7302 | |
7303 | // Forward declarations, no (immediate) code generation. |
7304 | case Decl::ObjCInterface: |
7305 | case Decl::ObjCCategory: |
7306 | break; |
7307 | |
7308 | case Decl::ObjCProtocol: { |
7309 | auto *Proto = cast<ObjCProtocolDecl>(Val: D); |
7310 | if (Proto->isThisDeclarationADefinition()) |
7311 | ObjCRuntime->GenerateProtocol(OPD: Proto); |
7312 | break; |
7313 | } |
7314 | |
7315 | case Decl::ObjCCategoryImpl: |
// Categories have properties but don't support @synthesize, so we
// can ignore them here.
7318 | ObjCRuntime->GenerateCategory(OCD: cast<ObjCCategoryImplDecl>(Val: D)); |
7319 | break; |
7320 | |
7321 | case Decl::ObjCImplementation: { |
7322 | auto *OMD = cast<ObjCImplementationDecl>(Val: D); |
7323 | EmitObjCPropertyImplementations(D: OMD); |
7324 | EmitObjCIvarInitializations(D: OMD); |
7325 | ObjCRuntime->GenerateClass(OID: OMD); |
7326 | // Emit global variable debug information. |
7327 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7328 | if (getCodeGenOpts().hasReducedDebugInfo()) |
7329 | DI->getOrCreateInterfaceType(Ty: getContext().getObjCInterfaceType( |
7330 | Decl: OMD->getClassInterface()), Loc: OMD->getLocation()); |
7331 | break; |
7332 | } |
7333 | case Decl::ObjCMethod: { |
7334 | auto *OMD = cast<ObjCMethodDecl>(Val: D); |
7335 | // If this is not a prototype, emit the body. |
7336 | if (OMD->getBody()) |
7337 | CodeGenFunction(*this).GenerateObjCMethod(OMD); |
7338 | break; |
7339 | } |
7340 | case Decl::ObjCCompatibleAlias: |
7341 | ObjCRuntime->RegisterAlias(OAD: cast<ObjCCompatibleAliasDecl>(Val: D)); |
7342 | break; |
7343 | |
7344 | case Decl::PragmaComment: { |
7345 | const auto *PCD = cast<PragmaCommentDecl>(Val: D); |
7346 | switch (PCD->getCommentKind()) { |
7347 | case PCK_Unknown: |
7348 | llvm_unreachable("unexpected pragma comment kind" ); |
7349 | case PCK_Linker: |
7350 | AppendLinkerOptions(Opts: PCD->getArg()); |
7351 | break; |
7352 | case PCK_Lib: |
7353 | AddDependentLib(Lib: PCD->getArg()); |
7354 | break; |
7355 | case PCK_Compiler: |
7356 | case PCK_ExeStr: |
7357 | case PCK_User: |
7358 | break; // We ignore all of these. |
7359 | } |
7360 | break; |
7361 | } |
7362 | |
7363 | case Decl::PragmaDetectMismatch: { |
7364 | const auto *PDMD = cast<PragmaDetectMismatchDecl>(Val: D); |
7365 | AddDetectMismatch(Name: PDMD->getName(), Value: PDMD->getValue()); |
7366 | break; |
7367 | } |
7368 | |
7369 | case Decl::LinkageSpec: |
7370 | EmitLinkageSpec(LSD: cast<LinkageSpecDecl>(Val: D)); |
7371 | break; |
7372 | |
7373 | case Decl::FileScopeAsm: { |
7374 | // File-scope asm is ignored during device-side CUDA compilation. |
7375 | if (LangOpts.CUDA && LangOpts.CUDAIsDevice) |
7376 | break; |
7377 | // File-scope asm is ignored during device-side OpenMP compilation. |
7378 | if (LangOpts.OpenMPIsTargetDevice) |
7379 | break; |
7380 | // File-scope asm is ignored during device-side SYCL compilation. |
7381 | if (LangOpts.SYCLIsDevice) |
7382 | break; |
7383 | auto *AD = cast<FileScopeAsmDecl>(Val: D); |
7384 | getModule().appendModuleInlineAsm(Asm: AD->getAsmString()); |
7385 | break; |
7386 | } |
7387 | |
7388 | case Decl::TopLevelStmt: |
7389 | EmitTopLevelStmt(D: cast<TopLevelStmtDecl>(Val: D)); |
7390 | break; |
7391 | |
7392 | case Decl::Import: { |
7393 | auto *Import = cast<ImportDecl>(Val: D); |
7394 | |
7395 | // If we've already imported this module, we're done. |
7396 | if (!ImportedModules.insert(X: Import->getImportedModule())) |
7397 | break; |
7398 | |
7399 | // Emit debug information for direct imports. |
7400 | if (!Import->getImportedOwningModule()) { |
7401 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7402 | DI->EmitImportDecl(ID: *Import); |
7403 | } |
7404 | |
// For C++ standard modules we are done; we will call the module
// initializer for imported modules, and that will likewise call those for
// any imports it has.
7408 | if (CXX20ModuleInits && Import->getImportedModule() && |
7409 | Import->getImportedModule()->isNamedModule()) |
7410 | break; |
7411 | |
7412 | // For clang C++ module map modules the initializers for sub-modules are |
7413 | // emitted here. |
7414 | |
7415 | // Find all of the submodules and emit the module initializers. |
7416 | llvm::SmallPtrSet<clang::Module *, 16> Visited; |
7417 | SmallVector<clang::Module *, 16> Stack; |
7418 | Visited.insert(Ptr: Import->getImportedModule()); |
7419 | Stack.push_back(Elt: Import->getImportedModule()); |
7420 | |
7421 | while (!Stack.empty()) { |
7422 | clang::Module *Mod = Stack.pop_back_val(); |
7423 | if (!EmittedModuleInitializers.insert(Ptr: Mod).second) |
7424 | continue; |
7425 | |
7426 | for (auto *D : Context.getModuleInitializers(M: Mod)) |
7427 | EmitTopLevelDecl(D); |
7428 | |
7429 | // Visit the submodules of this module. |
7430 | for (auto *Submodule : Mod->submodules()) { |
7431 | // Skip explicit children; they need to be explicitly imported to emit |
7432 | // the initializers. |
7433 | if (Submodule->IsExplicit) |
7434 | continue; |
7435 | |
7436 | if (Visited.insert(Ptr: Submodule).second) |
7437 | Stack.push_back(Elt: Submodule); |
7438 | } |
7439 | } |
7440 | break; |
7441 | } |
7442 | |
7443 | case Decl::Export: |
7444 | EmitDeclContext(DC: cast<ExportDecl>(Val: D)); |
7445 | break; |
7446 | |
7447 | case Decl::OMPThreadPrivate: |
7448 | EmitOMPThreadPrivateDecl(D: cast<OMPThreadPrivateDecl>(Val: D)); |
7449 | break; |
7450 | |
7451 | case Decl::OMPAllocate: |
7452 | EmitOMPAllocateDecl(D: cast<OMPAllocateDecl>(Val: D)); |
7453 | break; |
7454 | |
7455 | case Decl::OMPDeclareReduction: |
7456 | EmitOMPDeclareReduction(D: cast<OMPDeclareReductionDecl>(Val: D)); |
7457 | break; |
7458 | |
7459 | case Decl::OMPDeclareMapper: |
7460 | EmitOMPDeclareMapper(D: cast<OMPDeclareMapperDecl>(Val: D)); |
7461 | break; |
7462 | |
7463 | case Decl::OMPRequires: |
7464 | EmitOMPRequiresDecl(D: cast<OMPRequiresDecl>(Val: D)); |
7465 | break; |
7466 | |
7467 | case Decl::Typedef: |
7468 | case Decl::TypeAlias: // using foo = bar; [C++11] |
7469 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7470 | DI->EmitAndRetainType( |
7471 | Ty: getContext().getTypedefType(Decl: cast<TypedefNameDecl>(Val: D))); |
7472 | break; |
7473 | |
7474 | case Decl::Record: |
7475 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7476 | if (cast<RecordDecl>(Val: D)->getDefinition()) |
7477 | DI->EmitAndRetainType(Ty: getContext().getRecordType(Decl: cast<RecordDecl>(Val: D))); |
7478 | break; |
7479 | |
7480 | case Decl::Enum: |
7481 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
7482 | if (cast<EnumDecl>(Val: D)->getDefinition()) |
7483 | DI->EmitAndRetainType(Ty: getContext().getEnumType(Decl: cast<EnumDecl>(Val: D))); |
7484 | break; |
7485 | |
7486 | case Decl::HLSLBuffer: |
7487 | getHLSLRuntime().addBuffer(D: cast<HLSLBufferDecl>(Val: D)); |
7488 | break; |
7489 | |
7490 | case Decl::OpenACCDeclare: |
7491 | EmitOpenACCDeclare(D: cast<OpenACCDeclareDecl>(Val: D)); |
7492 | break; |
7493 | case Decl::OpenACCRoutine: |
7494 | EmitOpenACCRoutine(D: cast<OpenACCRoutineDecl>(Val: D)); |
7495 | break; |
7496 | |
7497 | default: |
// Make sure we handled everything we should; every other kind is a
// non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
// function. Need to recode Decl::Kind to do that easily.
7501 | assert(isa<TypeDecl>(D) && "Unsupported decl kind" ); |
7502 | break; |
7503 | } |
7504 | } |
7505 | |
7506 | void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) { |
7507 | // Do we need to generate coverage mapping? |
7508 | if (!CodeGenOpts.CoverageMapping) |
7509 | return; |
7510 | switch (D->getKind()) { |
7511 | case Decl::CXXConversion: |
7512 | case Decl::CXXMethod: |
7513 | case Decl::Function: |
7514 | case Decl::ObjCMethod: |
7515 | case Decl::CXXConstructor: |
7516 | case Decl::CXXDestructor: { |
7517 | if (!cast<FunctionDecl>(Val: D)->doesThisDeclarationHaveABody()) |
7518 | break; |
7519 | SourceManager &SM = getContext().getSourceManager(); |
7520 | if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(SpellingLoc: D->getBeginLoc())) |
7521 | break; |
7522 | if (!llvm::coverage::SystemHeadersCoverage && |
7523 | SM.isInSystemHeader(Loc: D->getBeginLoc())) |
7524 | break; |
7525 | DeferredEmptyCoverageMappingDecls.try_emplace(Key: D, Args: true); |
7526 | break; |
7527 | } |
7528 | default: |
7529 | break; |
7530 | }; |
7531 | } |
7532 | |
7533 | void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) { |
7534 | // Do we need to generate coverage mapping? |
7535 | if (!CodeGenOpts.CoverageMapping) |
7536 | return; |
7537 | if (const auto *Fn = dyn_cast<FunctionDecl>(Val: D)) { |
7538 | if (Fn->isTemplateInstantiation()) |
7539 | ClearUnusedCoverageMapping(D: Fn->getTemplateInstantiationPattern()); |
7540 | } |
7541 | DeferredEmptyCoverageMappingDecls.insert_or_assign(Key: D, Val: false); |
7542 | } |
7543 | |
7544 | void CodeGenModule::EmitDeferredUnusedCoverageMappings() { |
7545 | // We call takeVector() here to avoid use-after-free. |
7546 | // FIXME: DeferredEmptyCoverageMappingDecls is getting mutated because |
7547 | // we deserialize function bodies to emit coverage info for them, and that |
7548 | // deserializes more declarations. How should we handle that case? |
7549 | for (const auto &Entry : DeferredEmptyCoverageMappingDecls.takeVector()) { |
7550 | if (!Entry.second) |
7551 | continue; |
7552 | const Decl *D = Entry.first; |
7553 | switch (D->getKind()) { |
7554 | case Decl::CXXConversion: |
7555 | case Decl::CXXMethod: |
7556 | case Decl::Function: |
7557 | case Decl::ObjCMethod: { |
7558 | CodeGenPGO PGO(*this); |
7559 | GlobalDecl GD(cast<FunctionDecl>(Val: D)); |
7560 | PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD), |
7561 | Linkage: getFunctionLinkage(GD)); |
7562 | break; |
7563 | } |
7564 | case Decl::CXXConstructor: { |
7565 | CodeGenPGO PGO(*this); |
7566 | GlobalDecl GD(cast<CXXConstructorDecl>(Val: D), Ctor_Base); |
7567 | PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD), |
7568 | Linkage: getFunctionLinkage(GD)); |
7569 | break; |
7570 | } |
7571 | case Decl::CXXDestructor: { |
7572 | CodeGenPGO PGO(*this); |
7573 | GlobalDecl GD(cast<CXXDestructorDecl>(Val: D), Dtor_Base); |
7574 | PGO.emitEmptyCounterMapping(D, FuncName: getMangledName(GD), |
7575 | Linkage: getFunctionLinkage(GD)); |
7576 | break; |
7577 | } |
7578 | default: |
7579 | break; |
7580 | }; |
7581 | } |
7582 | } |
7583 | |
7584 | void CodeGenModule::EmitMainVoidAlias() { |
// In order to transition away from "__original_main" gracefully, emit an
// alias for "main" in the no-argument case so that libc can detect when
// new-style no-argument main is in use.
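// For a no-argument `int main(void)`, the alias emitted below is roughly
// (illustrative IR):
//   @__main_void = hidden alias i32 (), ptr @main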
7588 | if (llvm::Function *F = getModule().getFunction(Name: "main" )) { |
7589 | if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() && |
7590 | F->getReturnType()->isIntegerTy(Bitwidth: Context.getTargetInfo().getIntWidth())) { |
7591 | auto *GA = llvm::GlobalAlias::create(Name: "__main_void" , Aliasee: F); |
7592 | GA->setVisibility(llvm::GlobalValue::HiddenVisibility); |
7593 | } |
7594 | } |
7595 | } |
7596 | |
/// Turns the given pointer into a constant.
static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
                                          const void *Ptr) {
  uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
  llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
  return llvm::ConstantInt::get(i64, PtrInt);
}

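/// Appends one entry to the 'clang.global.decl.ptrs' named metadata node,
/// pairing the given global value with the address of the clang Decl it was
/// generated from. The resulting IR looks roughly like:
///   !clang.global.decl.ptrs = !{..., !N}
///   !N = !{ptr @global, i64 <address of the Decl>}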
static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
                                   llvm::NamedMDNode *&GlobalMetadata,
                                   GlobalDecl D,
                                   llvm::GlobalValue *Addr) {
  if (!GlobalMetadata)
    GlobalMetadata =
        CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");

  // TODO: should we report variant information for ctors/dtors?
  llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(Addr),
                           llvm::ConstantAsMetadata::get(GetPointerConstant(
                               CGM.getLLVMContext(), D.getDecl()))};
  GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}

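/// Given the global value currently bound to an unmangled extern "C" name
/// (Elem) and the C++ function it should refer to (CppFunc), redirect every
/// ifunc that uses Elem as its resolver (directly or through a bitcast) to
/// resolve to CppFunc instead, and erase Elem. Returns false, leaving
/// everything untouched, if Elem has any user this function does not know how
/// to rewrite.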
bool CodeGenModule::CheckAndReplaceExternCIFuncs(llvm::GlobalValue *Elem,
                                                 llvm::GlobalValue *CppFunc) {
  // Store the list of ifuncs we need to replace uses in.
  llvm::SmallVector<llvm::GlobalIFunc *> IFuncs;
  // List of ConstantExprs that we should be able to delete when we're done
  // here.
  llvm::SmallVector<llvm::ConstantExpr *> CEs;

  // There is nothing to replace if the only thing we found is Elem itself.
  if (Elem == CppFunc)
    return false;

  // First make sure that all users of this are ifuncs (or ifuncs via a
  // bitcast), and collect the list of ifuncs and CEs so we can work on them
  // later.
  for (llvm::User *User : Elem->users()) {
    // Users can either be a bitcast ConstExpr that is used by the ifuncs, OR an
    // ifunc directly. In any other case, just give up, as we don't know what we
    // could break by changing those.
    if (auto *ConstExpr = dyn_cast<llvm::ConstantExpr>(User)) {
      if (ConstExpr->getOpcode() != llvm::Instruction::BitCast)
        return false;

      for (llvm::User *CEUser : ConstExpr->users()) {
        if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(CEUser)) {
          IFuncs.push_back(IFunc);
        } else {
          return false;
        }
      }
      CEs.push_back(ConstExpr);
    } else if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(User)) {
      IFuncs.push_back(IFunc);
    } else {
      // This user is one we don't know how to handle, so fail redirection. This
      // will result in an ifunc retaining a resolver name that will ultimately
      // fail to be resolved to a defined function.
      return false;
    }
  }

  // Now that we know this is a valid case where we can do this alias
  // replacement, we need to remove all of the references to Elem (and the
  // bitcasts!) so we can delete it.
  for (llvm::GlobalIFunc *IFunc : IFuncs)
    IFunc->setResolver(nullptr);
  for (llvm::ConstantExpr *ConstExpr : CEs)
    ConstExpr->destroyConstant();

  // We should now be out of uses for the 'old' version of this function, so we
  // can erase it as well.
  Elem->eraseFromParent();

  for (llvm::GlobalIFunc *IFunc : IFuncs) {
    // The type of the resolver is always just a function-type that returns the
    // type of the IFunc, so create that here. If the type of the actual
    // resolver doesn't match, it just gets bitcast to the right thing.
    auto *ResolverTy =
        llvm::FunctionType::get(IFunc->getType(), /*isVarArg*/ false);
    llvm::Constant *Resolver = GetOrCreateLLVMFunction(
        CppFunc->getName(), ResolverTy, {}, /*ForVTable*/ false);
    IFunc->setResolver(Resolver);
  }
  return true;
}

/// For each function which is declared within an extern "C" region and marked
/// as 'used', but has internal linkage, create an alias from the unmangled
/// name to the mangled name if possible. People expect to be able to refer
/// to such functions with an unmangled name from inline assembly within the
/// same translation unit.
void CodeGenModule::EmitStaticExternCAliases() {
  if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases())
    return;
  for (auto &I : StaticExternCValues) {
    const IdentifierInfo *Name = I.first;
    llvm::GlobalValue *Val = I.second;

    // If Val is null, that implies there were multiple declarations that each
    // had a claim to the unmangled name. In this case, generation of the alias
    // is suppressed. See CodeGenModule::MaybeHandleStaticInExternC.
    if (!Val)
      break;

    llvm::GlobalValue *ExistingElem =
        getModule().getNamedValue(Name->getName());

    // If nothing already exists with this name, or if we were able to redirect
    // all uses from ifuncs, create the alias.
    if (!ExistingElem || CheckAndReplaceExternCIFuncs(ExistingElem, Val))
      addCompilerUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
  }
}

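/// Looks up the GlobalDecl this module recorded for the given mangled name.
/// Returns true and sets Result if such a decl exists, false otherwise.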
bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName,
                                             GlobalDecl &Result) const {
  auto Res = Manglings.find(MangledName);
  if (Res == Manglings.end())
    return false;
  Result = Res->getValue();
  return true;
}

/// Emits metadata nodes associating all the global values in the
/// current module with the Decls they came from. This is useful for
/// projects using IR gen as a subroutine.
///
/// Since there's currently no way to associate an MDNode directly
/// with an llvm::GlobalValue, we create a global named metadata
/// with the name 'clang.global.decl.ptrs'.
void CodeGenModule::EmitDeclMetadata() {
  llvm::NamedMDNode *GlobalMetadata = nullptr;

  for (auto &I : MangledDeclNames) {
    llvm::GlobalValue *Addr = getModule().getNamedValue(I.second);
    // Some mangled names don't necessarily have an associated GlobalValue
    // in this module, e.g. if we mangled it for DebugInfo.
    if (Addr)
      EmitGlobalDeclMetadata(*this, GlobalMetadata, I.first, Addr);
  }
}

/// Emits metadata nodes for all the local variables in the current
/// function.
void CodeGenFunction::EmitDeclMetadata() {
  if (LocalDeclMap.empty()) return;

  llvm::LLVMContext &Context = getLLVMContext();

  // Find the unique metadata ID for this name.
  unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");

  llvm::NamedMDNode *GlobalMetadata = nullptr;

  for (auto &I : LocalDeclMap) {
    const Decl *D = I.first;
    llvm::Value *Addr = I.second.emitRawPointer(*this);
    if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
      llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
      Alloca->setMetadata(
          DeclPtrKind, llvm::MDNode::get(
                           Context, llvm::ValueAsMetadata::getConstant(DAddr)));
    } else if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
      GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
      EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
    }
  }
}

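/// Records the full clang version string in the 'llvm.ident' named metadata,
/// which shows up in the emitted IR roughly as:
///   !llvm.ident = !{!0}
///   !0 = !{!"clang version ..."}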
void CodeGenModule::EmitVersionIdentMetadata() {
  llvm::NamedMDNode *IdentMetadata =
      TheModule.getOrInsertNamedMetadata("llvm.ident");
  std::string Version = getClangFullVersion();
  llvm::LLVMContext &Ctx = TheModule.getContext();

  llvm::Metadata *IdentNode[] = {llvm::MDString::get(Ctx, Version)};
  IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
}

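/// Records the command line captured in CodeGenOptions::RecordCommandLine
/// (populated e.g. by -frecord-command-line) in the 'llvm.commandline' named
/// metadata.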
void CodeGenModule::EmitCommandLineMetadata() {
  llvm::NamedMDNode *CommandLineMetadata =
      TheModule.getOrInsertNamedMetadata("llvm.commandline");
  std::string CommandLine = getCodeGenOpts().RecordCommandLine;
  llvm::LLVMContext &Ctx = TheModule.getContext();

  llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Ctx, CommandLine)};
  CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
}

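/// Attaches the gcov notes (.gcno) and data (.gcda) file names to every debug
/// compile unit through the 'llvm.gcov' named metadata, which the GCOV
/// instrumentation uses to pick its output file names. Does nothing if no
/// 'llvm.dbg.cu' node exists.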
void CodeGenModule::EmitCoverageFile() {
  llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu");
  if (!CUNode)
    return;

  llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
  llvm::LLVMContext &Ctx = TheModule.getContext();
  auto *CoverageDataFile =
      llvm::MDString::get(Ctx, getCodeGenOpts().CoverageDataFile);
  auto *CoverageNotesFile =
      llvm::MDString::get(Ctx, getCodeGenOpts().CoverageNotesFile);
  for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
    llvm::MDNode *CU = CUNode->getOperand(i);
    llvm::Metadata *Elts[] = {CoverageNotesFile, CoverageDataFile, CU};
    GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
  }
}

llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
                                                       bool ForEH) {
  // Return a bogus pointer if RTTI is disabled, unless it's for EH.
  // FIXME: should we even be calling this method if RTTI is disabled
  // and it's not for EH?
  if (!shouldEmitRTTI(ForEH))
    return llvm::Constant::getNullValue(GlobalsInt8PtrTy);

  if (ForEH && Ty->isObjCObjectPointerType() &&
      LangOpts.ObjCRuntime.isGNUFamily())
    return ObjCRuntime->GetEHType(Ty);

  return getCXXABI().getAddrOfRTTIDescriptor(Ty);
}

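/// Emits the runtime registration for each variable listed in a
/// '#pragma omp threadprivate' directive. If a variable needs a non-constant
/// initializer, the initialization function returned by the OpenMP runtime is
/// queued together with the other global initializers.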
void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
  // Do not emit threadprivates in simd-only mode.
  if (LangOpts.OpenMP && LangOpts.OpenMPSimd)
    return;
  for (auto RefExpr : D->varlist()) {
    auto *VD = cast<VarDecl>(cast<DeclRefExpr>(RefExpr)->getDecl());
    bool PerformInit =
        VD->getAnyInitializer() &&
        !VD->getAnyInitializer()->isConstantInitializer(getContext(),
                                                        /*ForRef=*/false);

    Address Addr(GetAddrOfGlobalVar(VD),
                 getTypes().ConvertTypeForMem(VD->getType()),
                 getContext().getDeclAlign(VD));
    if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
            VD, Addr, RefExpr->getBeginLoc(), PerformInit))
      CXXGlobalInits.push_back(InitFunction);
  }
}

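/// Common implementation for the CFI type-identifier helpers below. Function
/// types are first stripped of their exception specification, which is not
/// part of a function's CFI identity. Externally visible types are identified
/// by an MDString holding the mangled type name (plus Suffix), so identifiers
/// agree across translation units; types with internal linkage get a
/// distinct, unnamed node instead.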
llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
                                            StringRef Suffix) {
  if (auto *FnType = T->getAs<FunctionProtoType>())
    T = getContext().getFunctionType(
        FnType->getReturnType(), FnType->getParamTypes(),
        FnType->getExtProtoInfo().withExceptionSpec(EST_None));

  llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
  if (InternalId)
    return InternalId;

  if (isExternallyVisible(T->getLinkage())) {
    std::string OutName;
    llvm::raw_string_ostream Out(OutName);
    getCXXABI().getMangleContext().mangleCanonicalTypeName(
        T, Out, getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);

    if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
      Out << ".normalized";

    Out << Suffix;

    InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
  } else {
    InternalId = llvm::MDNode::getDistinct(getLLVMContext(),
                                           llvm::ArrayRef<llvm::Metadata *>());
  }

  return InternalId;
}

llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
  return CreateMetadataIdentifierImpl(T, MetadataIdMap, "");
}

llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) {
  return CreateMetadataIdentifierImpl(T, VirtualMetadataIdMap, ".virtual");
}

// Generalize pointer types to a void pointer with the qualifiers of the
// originally pointed-to type, e.g. 'const char *' and 'char * const *'
// generalize to 'const void *' while 'char *' and 'const char **' generalize to
// 'void *'.
static QualType GeneralizeType(ASTContext &Ctx, QualType Ty) {
  if (!Ty->isPointerType())
    return Ty;

  return Ctx.getPointerType(
      QualType(Ctx.VoidTy).withCVRQualifiers(
          Ty->getPointeeType().getCVRQualifiers()));
}

// Apply type generalization to a FunctionType's return and argument types
static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty) {
  if (auto *FnType = Ty->getAs<FunctionProtoType>()) {
    SmallVector<QualType, 8> GeneralizedParams;
    for (auto &Param : FnType->param_types())
      GeneralizedParams.push_back(GeneralizeType(Ctx, Param));

    return Ctx.getFunctionType(
        GeneralizeType(Ctx, FnType->getReturnType()),
        GeneralizedParams, FnType->getExtProtoInfo());
  }

  if (auto *FnType = Ty->getAs<FunctionNoProtoType>())
    return Ctx.getFunctionNoProtoType(
        GeneralizeType(Ctx, FnType->getReturnType()));

  llvm_unreachable("Encountered unknown FunctionType");
}

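/// Like CreateMetadataIdentifierForType, but identifies the function type
/// after its pointer parameters and return value have been generalized to
/// 'void *' (see GeneralizeType above), as used by CFI indirect-call checking
/// with generalized pointer types.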
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) {
  return CreateMetadataIdentifierImpl(GeneralizeFunctionType(getContext(), T),
                                      GeneralizedMetadataIdMap, ".generalized");
}

/// Returns whether this module needs the "all-vtables" type identifier.
bool CodeGenModule::NeedAllVtablesTypeId() const {
  // Returns true if at least one of the vtable-based CFI checkers is enabled
  // and is not in trapping mode.
  return ((LangOpts.Sanitize.has(SanitizerKind::CFIVCall) &&
           !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIVCall)) ||
          (LangOpts.Sanitize.has(SanitizerKind::CFINVCall) &&
           !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFINVCall)) ||
          (LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) &&
           !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIDerivedCast)) ||
          (LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast) &&
           !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIUnrelatedCast)));
}

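/// Attaches !type metadata for the class RD to the given vtable global at the
/// given offset, plus the cross-DSO CFI type id and the "all-vtables"
/// identifier when those are needed. Vtable-based CFI checks key off this
/// metadata.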
void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
                                          CharUnits Offset,
                                          const CXXRecordDecl *RD) {
  llvm::Metadata *MD =
      CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
  VTable->addTypeMetadata(Offset.getQuantity(), MD);

  if (CodeGenOpts.SanitizeCfiCrossDso)
    if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
      VTable->addTypeMetadata(Offset.getQuantity(),
                              llvm::ConstantAsMetadata::get(CrossDsoTypeId));

  if (NeedAllVtablesTypeId()) {
    llvm::Metadata *MD = llvm::MDString::get(getLLVMContext(), "all-vtables");
    VTable->addTypeMetadata(Offset.getQuantity(), MD);
  }
}

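/// Lazily creates the llvm::SanitizerStatReport used to collect sanitizer
/// statistics (-fsanitize-stats) for this module.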
llvm::SanitizerStatReport &CodeGenModule::getSanStats() {
  if (!SanStats)
    SanStats = std::make_unique<llvm::SanitizerStatReport>(&getModule());

  return *SanStats;
}

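/// Implements the OpenCL conversion from an integer sampler initializer to a
/// sampler value: the constant initializer is passed to the
/// __translate_sampler_initializer runtime function and the call result is
/// used as the sampler.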
llvm::Value *
CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
                                                  CodeGenFunction &CGF) {
  llvm::Constant *C = ConstantEmitter(CGF).emitAbstract(E, E->getType());
  auto *SamplerT = getOpenCLRuntime().getSamplerType(E->getType().getTypePtr());
  auto *FTy = llvm::FunctionType::get(SamplerT, {C->getType()}, false);
  auto *Call = CGF.EmitRuntimeCall(
      CreateRuntimeFunction(FTy, "__translate_sampler_initializer"), {C});
  return Call;
}

CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
    QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}

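/// Returns the alignment that can be assumed for a memory access to a value
/// of type T, also filling in the lvalue base info and TBAA info when
/// requested. When forPointeeType is true, T is the pointee of some pointer,
/// so only guarantees that hold for an arbitrary object of that type (e.g.
/// the non-virtual alignment for C++ classes) are used.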
CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo,
                                                 bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = getTBAAAccessInfo(T);

  // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
  // that doesn't return the information we need to compute BaseInfo.

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  bool AlignForArray = T->isArrayType();

  // Analyze the base element type, so we don't get confused by incomplete
  // array types.
  T = getContext().getBaseElementType(T);

  if (T->isIncompleteType()) {
    // We could try to replicate the logic from
    // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
    // type is incomplete, so it's impossible to test. We could try to reuse
    // getTypeAlignIfKnown, but that doesn't return the information we need
    // to set BaseInfo. So just ignore the possibility that the alignment is
    // greater than one.
    if (BaseInfo)
      *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
    return CharUnits::One();
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  const CXXRecordDecl *RD;
  if (T.getQualifiers().hasUnaligned()) {
    Alignment = CharUnits::One();
  } else if (forPointeeType && !AlignForArray &&
             (RD = T->getAsCXXRecordDecl())) {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    Alignment = getClassPointerAlignment(RD);
  } else {
    Alignment = getContext().getTypeAlignInChars(T);
  }

  // Cap to the global maximum type alignment unless the alignment
  // was somehow explicit on the type.
  if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
    if (Alignment.getQuantity() > MaxAlign &&
        !getContext().isAlignmentRequired(T))
      Alignment = CharUnits::fromQuantity(MaxAlign);
  }
  return Alignment;
}

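/// Implements -ftrivial-auto-var-init-stop-after=N: returns true (suppressing
/// the trivial auto-variable initialization) once N initializations have been
/// emitted. A warning noting that the cap is in effect is issued when the
/// first variable is initialized.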
bool CodeGenModule::stopAutoInit() {
  unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter;
  if (StopAfter) {
    // This number is positive only when -ftrivial-auto-var-init-stop-after=*
    // is used.
    if (NumAutoVarInit >= StopAfter) {
      return true;
    }
    if (!NumAutoVarInit) {
      unsigned DiagID = getDiags().getCustomDiagID(
          DiagnosticsEngine::Warning,
          "-ftrivial-auto-var-init-stop-after=%0 has been enabled to limit the "
          "number of times -ftrivial-auto-var-init=%1 gets applied.");
      getDiags().Report(DiagID)
          << StopAfter
          << (getContext().getLangOpts().getTrivialAutoVarInit() ==
                      LangOptions::TrivialAutoVarInitKind::Zero
                  ? "zero"
                  : "pattern");
    }
    ++NumAutoVarInit;
  }
  return false;
}

void CodeGenModule::printPostfixForExternalizedDecl(llvm::raw_ostream &OS,
                                                    const Decl *D) const {
  // ptxas does not allow '.' in symbol names. On the other hand, HIP prefers a
  // postfix beginning with '.' since the symbol name can then be demangled.
  if (LangOpts.HIP)
    OS << (isa<VarDecl>(D) ? ".static." : ".intern.");
  else
    OS << (isa<VarDecl>(D) ? "__static__" : "__intern__");

  // If the CUID is not specified we try to generate a unique postfix.
  if (getLangOpts().CUID.empty()) {
    SourceManager &SM = getContext().getSourceManager();
    PresumedLoc PLoc = SM.getPresumedLoc(D->getLocation());
    assert(PLoc.isValid() && "Source location is expected to be valid.");

    // Get the hash of the user-defined macros.
    llvm::MD5 Hash;
    llvm::MD5::MD5Result Result;
    for (const auto &Arg : PreprocessorOpts.Macros)
      Hash.update(Arg.first);
    Hash.final(Result);

    // Get the UniqueID for the file containing the decl.
    llvm::sys::fs::UniqueID ID;
    if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
      PLoc = SM.getPresumedLoc(D->getLocation(), /*UseLineDirectives=*/false);
      assert(PLoc.isValid() && "Source location is expected to be valid.");
      if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
        SM.getDiagnostics().Report(diag::err_cannot_open_file)
            << PLoc.getFilename() << EC.message();
    }
    OS << llvm::format("%x", ID.getFile()) << llvm::format("%x", ID.getDevice())
       << "_" << llvm::utohexstr(Result.low(), /*LowerCase=*/true, /*Width=*/8);
  } else {
    OS << getContext().getCUIDHash();
  }
}

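/// Hands the lazy-emission state that this CodeGenModule has not consumed
/// (deferred decls, deferred vtables, manglings, weak references, the mangle
/// context) over to the CodeGenModule that will continue emitting into a
/// fresh llvm::Module, e.g. when declarations are processed incrementally.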
void CodeGenModule::moveLazyEmissionStates(CodeGenModule *NewBuilder) {
  assert(DeferredDeclsToEmit.empty() &&
         "Should have emitted all decls deferred to emit.");
  assert(NewBuilder->DeferredDecls.empty() &&
         "Newly created module should not have deferred decls");
  NewBuilder->DeferredDecls = std::move(DeferredDecls);
  assert(EmittedDeferredDecls.empty() &&
         "Still have (unmerged) EmittedDeferredDecls deferred decls");

  assert(NewBuilder->DeferredVTables.empty() &&
         "Newly created module should not have deferred vtables");
  NewBuilder->DeferredVTables = std::move(DeferredVTables);

  assert(NewBuilder->MangledDeclNames.empty() &&
         "Newly created module should not have mangled decl names");
  assert(NewBuilder->Manglings.empty() &&
         "Newly created module should not have manglings");
  NewBuilder->Manglings = std::move(Manglings);

  NewBuilder->WeakRefReferences = std::move(WeakRefReferences);

  NewBuilder->ABI->MangleCtx = std::move(ABI->MangleCtx);
}
