1//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code dealing with code generation of C++ declarations
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGHLSLRuntime.h"
15#include "CGObjCRuntime.h"
16#include "CGOpenMPRuntime.h"
17#include "CodeGenFunction.h"
18#include "TargetInfo.h"
19#include "clang/AST/Attr.h"
20#include "clang/Basic/LangOptions.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/IR/Intrinsics.h"
23#include "llvm/IR/MDBuilder.h"
24#include "llvm/Support/Path.h"
25
26using namespace clang;
27using namespace CodeGen;
28
/// Emit the initializer for a variable with static storage duration (or an
/// OpenCL C++ local), dispatching on the evaluation kind of the variable's
/// type (scalar, complex, or aggregate).
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(Addr: DeclPtr, T: type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(T: type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    // ObjC GC-qualified globals must go through the ObjC runtime's assign
    // entry points (write barriers); ordinary scalars use the normal
    // scalar-initialization path.
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, src: CGF.EmitScalarExpr(E: Init),
                                                dest: DeclPtr, threadlocal: D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, src: CGF.EmitScalarExpr(E: Init),
                                              dest: DeclPtr);
    else
      CGF.EmitScalarInit(init: Init, D: &D, lvalue: lv, capturedByInit: false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E: Init, dest: lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    // A global being initialized cannot alias its own initializer and needs
    // no GC barriers; mark the slot destructed so cleanups are arranged.
    CGF.EmitAggExpr(E: Init,
                    AS: AggValueSlot::forLValue(LV: lv, isDestructed: AggValueSlot::IsDestructed,
                                            needsGC: AggValueSlot::DoesNotNeedGCBarriers,
                                            isAliased: AggValueSlot::IsNotAliased,
                                            mayOverlap: AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
68
69/// Emit code to cause the destruction of the given variable with
70/// static storage duration.
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  QualType::DestructionKind DtorKind = D.needsDestruction(Ctx: CGF.getContext());

  // FIXME: __attribute__((cleanup)) ?

  switch (DtorKind) {
  case QualType::DK_none:
    // Trivially destructible: nothing to register.
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  // The function to register with the runtime, and the single pointer
  // argument it will be called with.
  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  CodeGenModule &CGM = CGF.CGM;
  QualType Type = D.getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GD: GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    // Register the complete-object destructor directly, with the variable's
    // address as its argument.
    Func = CGM.getAddrAndTypeOfCXXStructor(GD: GlobalDecl(Dtor, Dtor_Complete));
    if (CGF.getContext().getLangOpts().OpenCL) {
      // OpenCL: __cxa_atexit's pointer parameter lives in a target-chosen
      // address space; only pass the address through when the spaces match.
      auto DestAS =
          CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
      auto DestTy = llvm::PointerType::get(
          C&: CGM.getLLVMContext(), AddressSpace: CGM.getContext().getTargetAddressSpace(AS: DestAS));
      auto SrcAS = D.getType().getQualifiers().getAddressSpace();
      if (DestAS == SrcAS)
        Argument = Addr.getPointer();
      else
        // FIXME: On addr space mismatch we are passing NULL. The generation
        // of the global destructor function should be adjusted accordingly.
        Argument = llvm::ConstantPointerNull::get(T: DestTy);
    } else {
      Argument = Addr.getPointer();
    }
    // Otherwise, the standard logic requires a helper function.
  } else {
    // Synthesize a void(void*) helper that performs the destruction and
    // register that instead; the argument slot is unused (null).
    Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: Type));
    Func = CodeGenFunction(CGM)
               .generateDestroyHelper(addr: Addr, type: Type, destroyer: CGF.getDestroyer(destructionKind: DtorKind),
                                      useEHCleanupForArray: CGF.needsEHCleanup(kind: DtorKind), VD: &D);
    Argument = llvm::Constant::getNullValue(Ty: CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Dtor: Func, Addr: Argument);
}
148
149/// Emit code to cause the variable at the given address to be considered as
150/// constant from this point onwards.
151static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
152 llvm::Constant *Addr) {
153 return CGF.EmitInvariantStart(
154 Addr, Size: CGF.getContext().getTypeSizeInChars(T: D.getType()));
155}
156
157void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
158 // Do not emit the intrinsic if we're not optimizing.
159 if (!CGM.getCodeGenOpts().OptimizationLevel)
160 return;
161
162 // Grab the llvm.invariant.start intrinsic.
163 llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
164 // Overloaded address space type.
165 assert(Addr->getType()->isPointerTy() && "Address must be a pointer");
166 llvm::Type *ObjectPtr[1] = {Addr->getType()};
167 llvm::Function *InvariantStart = CGM.getIntrinsic(IID: InvStartID, Tys: ObjectPtr);
168
169 // Emit a call with the size in bytes of the object.
170 uint64_t Width = Size.getQuantity();
171 llvm::Value *Args[2] = {llvm::ConstantInt::getSigned(Ty: Int64Ty, V: Width), Addr};
172 Builder.CreateCall(Callee: InvariantStart, Args);
173}
174
/// Emit the initializer for a C++ global variable definition and, when
/// appropriate, either mark its storage invariant or register its
/// destructor.  References are handled separately: the referent is bound
/// and its address stored.
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::GlobalVariable *GV,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  // struct StructWithCtor {
  //   __device__ StructWithCtor() {...}
  // };
  // __device__ void foo() {
  //   __shared__ StructWithCtor s;
  //   ...
  // }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getTypes().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = GV->getAddressSpace();
  llvm::Constant *DeclPtr = GV;
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::PointerType *PTy =
        llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(C: DeclPtr, Ty: PTy);
  }

  ConstantAddress DeclAddr(
      DeclPtr, GV->getValueType(), getContext().getDeclAlign(D: &D));

  if (!T->isReferenceType()) {
    // OpenMP threadprivate variables additionally get their definition
    // routed through the OpenMP runtime.
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          VD: &D, VDAddr: DeclAddr, Loc: D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, CGF: this);
    }
    bool NeedsDtor =
        D.needsDestruction(Ctx: getContext()) == QualType::DK_cxx_destructor;
    if (PerformInit)
      EmitDeclInit(CGF&: *this, D, DeclPtr: DeclAddr);
    // Storage that stays constant after construction can be marked
    // invariant for the optimizer; otherwise arrange for destruction.
    if (D.getType().isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: !NeedsDtor))
      EmitDeclInvariant(CGF&: *this, D, Addr: DeclPtr);
    else
      EmitDeclDestroy(CGF&: *this, D, Addr: DeclAddr);
    return;
  }

  // Reference initialization: evaluate the referent and store its address.
  assert(PerformInit && "cannot have constant initializer which needs "
                        "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(E: Init);
  EmitStoreOfScalar(Value: RV.getScalarVal(), Addr: DeclAddr, Volatile: false, Ty: T);
}
232
233/// Create a stub function, suitable for being passed to atexit,
234/// which passes the given address to the given destructor function.
/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::FunctionCallee dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(Result: CGM.VoidTy, isVarArg: false);
  // Derive a unique name for the stub from the variable it destroys.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(D: &VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      ty, name: FnName.str(), FI, Loc: VD.getLocation());

  // Emit the stub body in a fresh CodeGenFunction so the current function's
  // emission state is left untouched.
  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GD: GlobalDecl(&VD, DynamicInitKind::AtExit),
                    RetTy: CGM.getContext().VoidTy, Fn: fn, FnInfo: FI, Args: FunctionArgList(),
                    Loc: VD.getLocation(), StartLoc: VD.getInit()->getExprLoc());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  // The stub's entire body: call the destructor on the variable's address.
  llvm::CallInst *call = CGF.Builder.CreateCall(Callee: dtor, Args: addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *dtorFn = dyn_cast<llvm::Function>(
          Val: dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  // Get a proper function pointer.
  FunctionProtoType::ExtProtoInfo EPI(getContext().getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/false));
  QualType fnType = getContext().getFunctionType(ResultTy: getContext().VoidTy,
                                                 Args: {getContext().VoidPtrTy}, EPI);
  return CGM.getFunctionPointer(Pointer: fn, FunctionType: fnType);
}
274
275/// Create a stub function, suitable for being passed to __pt_atexit_np,
276/// which passes the given address to the given destructor function.
/// Create a stub function, suitable for being passed to __pt_atexit_np,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createTLSAtExitStub(
    const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr,
    llvm::FunctionCallee &AtExit) {
  // NOTE(review): AtExit is not referenced in this body; presumably the
  // caller populates or consumes it — confirm against call sites.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(D: &D, Out);
  }

  // Arrange the stub as int(int) at the Clang level.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeLLVMFunctionInfo(
      returnType: getContext().IntTy, opts: FnInfoOpts::None, argTypes: {getContext().IntTy},
      info: FunctionType::ExtInfo(), paramInfos: {}, args: RequiredArgs::All);

  // Get the stub function type, int(*)(int,...).
  llvm::FunctionType *StubTy =
      llvm::FunctionType::get(Result: CGM.IntTy, Params: {CGM.IntTy}, isVarArg: true);

  llvm::Function *DtorStub = CGM.CreateGlobalInitOrCleanUpFunction(
      ty: StubTy, name: FnName.str(), FI, Loc: D.getLocation());

  CodeGenFunction CGF(CGM);

  // Declare the single (unused) int parameter the signature requires.
  FunctionArgList Args;
  ImplicitParamDecl IPD(CGM.getContext(), CGM.getContext().IntTy,
                        ImplicitParamKind::Other);
  Args.push_back(Elt: &IPD);
  QualType ResTy = CGM.getContext().IntTy;

  CGF.StartFunction(GD: GlobalDecl(&D, DynamicInitKind::AtExit), RetTy: ResTy, Fn: DtorStub,
                    FnInfo: FI, Args, Loc: D.getLocation(), StartLoc: D.getInit()->getExprLoc());

  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  // The stub's body: call the destructor on the variable's address.
  llvm::CallInst *call = CGF.Builder.CreateCall(Callee: Dtor, Args: Addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *DtorFn = dyn_cast<llvm::Function>(
          Val: Dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(DtorFn->getCallingConv());

  // Return 0 from function
  CGF.Builder.CreateStore(Val: llvm::Constant::getNullValue(Ty: CGM.IntTy),
                          Addr: CGF.ReturnValue);

  CGF.FinishFunction();

  return DtorStub;
}
326
327/// Register a global destructor using the C atexit runtime function.
328void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
329 llvm::FunctionCallee dtor,
330 llvm::Constant *addr) {
331 // Create a function which calls the destructor.
332 llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
333 registerGlobalDtorWithAtExit(dtorStub);
334}
335
336/// Register a global destructor using the LLVM 'llvm.global_dtors' global.
337void CodeGenFunction::registerGlobalDtorWithLLVM(const VarDecl &VD,
338 llvm::FunctionCallee Dtor,
339 llvm::Constant *Addr) {
340 // Create a function which calls the destructor.
341 llvm::Function *dtorStub =
342 cast<llvm::Function>(Val: createAtExitStub(VD, dtor: Dtor, addr: Addr));
343 CGM.AddGlobalDtor(Dtor: dtorStub);
344}
345
346void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
347 // extern "C" int atexit(void (*f)(void));
348 assert(dtorStub->getType() ==
349 llvm::PointerType::get(
350 llvm::FunctionType::get(CGM.VoidTy, false),
351 dtorStub->getType()->getPointerAddressSpace()) &&
352 "Argument to atexit has a wrong type.");
353
354 llvm::FunctionType *atexitTy =
355 llvm::FunctionType::get(Result: IntTy, Params: dtorStub->getType(), isVarArg: false);
356
357 llvm::FunctionCallee atexit =
358 CGM.CreateRuntimeFunction(Ty: atexitTy, Name: "atexit", ExtraAttrs: llvm::AttributeList(),
359 /*Local=*/true);
360 if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(Val: atexit.getCallee()))
361 atexitFn->setDoesNotThrow();
362
363 EmitNounwindRuntimeCall(callee: atexit, args: dtorStub);
364}
365
366llvm::Value *
367CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub) {
368 // The unatexit subroutine unregisters __dtor functions that were previously
369 // registered by the atexit subroutine. If the referenced function is found,
370 // it is removed from the list of functions that are called at normal program
371 // termination and the unatexit returns a value of 0, otherwise a non-zero
372 // value is returned.
373 //
374 // extern "C" int unatexit(void (*f)(void));
375 assert(dtorStub->getType() ==
376 llvm::PointerType::get(
377 llvm::FunctionType::get(CGM.VoidTy, false),
378 dtorStub->getType()->getPointerAddressSpace()) &&
379 "Argument to unatexit has a wrong type.");
380
381 llvm::FunctionType *unatexitTy =
382 llvm::FunctionType::get(Result: IntTy, Params: {dtorStub->getType()}, /*isVarArg=*/false);
383
384 llvm::FunctionCallee unatexit =
385 CGM.CreateRuntimeFunction(Ty: unatexitTy, Name: "unatexit", ExtraAttrs: llvm::AttributeList());
386
387 cast<llvm::Function>(Val: unatexit.getCallee())->setDoesNotThrow();
388
389 return EmitNounwindRuntimeCall(callee: unatexit, args: dtorStub);
390}
391
392void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
393 llvm::GlobalVariable *DeclPtr,
394 bool PerformInit) {
395 // If we've been asked to forbid guard variables, emit an error now.
396 // This diagnostic is hard-coded for Darwin's use case; we can find
397 // better phrasing if someone else needs it.
398 if (CGM.getCodeGenOpts().ForbidGuardVariables)
399 CGM.Error(loc: D.getLocation(),
400 error: "this initialization requires a guard variable, which "
401 "the kernel does not support");
402
403 CGM.getCXXABI().EmitGuardedInit(CGF&: *this, D, DeclPtr, PerformInit);
404}
405
406void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
407 llvm::BasicBlock *InitBlock,
408 llvm::BasicBlock *NoInitBlock,
409 GuardKind Kind,
410 const VarDecl *D) {
411 assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");
412
413 // A guess at how many times we will enter the initialization of a
414 // variable, depending on the kind of variable.
415 static const uint64_t InitsPerTLSVar = 1024;
416 static const uint64_t InitsPerLocalVar = 1024 * 1024;
417
418 llvm::MDNode *Weights;
419 if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
420 // For non-local variables, don't apply any weighting for now. Due to our
421 // use of COMDATs, we expect there to be at most one initialization of the
422 // variable per DSO, but we have no way to know how many DSOs will try to
423 // initialize the variable.
424 Weights = nullptr;
425 } else {
426 uint64_t NumInits;
427 // FIXME: For the TLS case, collect and use profiling information to
428 // determine a more accurate brach weight.
429 if (Kind == GuardKind::TlsGuard || D->getTLSKind())
430 NumInits = InitsPerTLSVar;
431 else
432 NumInits = InitsPerLocalVar;
433
434 // The probability of us entering the initializer is
435 // 1 / (total number of times we attempt to initialize the variable).
436 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
437 Weights = MDHelper.createBranchWeights(TrueWeight: 1, FalseWeight: NumInits - 1);
438 }
439
440 Builder.CreateCondBr(Cond: NeedsInit, True: InitBlock, False: NoInitBlock, BranchWeights: Weights);
441}
442
443llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
444 llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
445 SourceLocation Loc, bool TLS, llvm::GlobalVariable::LinkageTypes Linkage) {
446 llvm::Function *Fn = llvm::Function::Create(Ty: FTy, Linkage, N: Name, M: &getModule());
447
448 if (!getLangOpts().AppleKext && !TLS) {
449 // Set the section if needed.
450 if (const char *Section = getTarget().getStaticInitSectionSpecifier())
451 Fn->setSection(Section);
452 }
453
454 if (Linkage == llvm::GlobalVariable::InternalLinkage)
455 SetInternalFunctionAttributes(GD: GlobalDecl(), F: Fn, FI);
456
457 Fn->setCallingConv(getRuntimeCC());
458
459 if (!getLangOpts().Exceptions)
460 Fn->setDoesNotThrow();
461
462 if (getLangOpts().Sanitize.has(K: SanitizerKind::Address) &&
463 !isInNoSanitizeList(Kind: SanitizerKind::Address, Fn, Loc))
464 Fn->addFnAttr(Kind: llvm::Attribute::SanitizeAddress);
465
466 if (getLangOpts().Sanitize.has(K: SanitizerKind::KernelAddress) &&
467 !isInNoSanitizeList(Kind: SanitizerKind::KernelAddress, Fn, Loc))
468 Fn->addFnAttr(Kind: llvm::Attribute::SanitizeAddress);
469
470 if (getLangOpts().Sanitize.has(K: SanitizerKind::HWAddress) &&
471 !isInNoSanitizeList(Kind: SanitizerKind::HWAddress, Fn, Loc))
472 Fn->addFnAttr(Kind: llvm::Attribute::SanitizeHWAddress);
473
474 if (getLangOpts().Sanitize.has(K: SanitizerKind::KernelHWAddress) &&
475 !isInNoSanitizeList(Kind: SanitizerKind::KernelHWAddress, Fn, Loc))
476 Fn->addFnAttr(Kind: llvm::Attribute::SanitizeHWAddress);
477
478 if (getLangOpts().Sanitize.has(K: SanitizerKind::MemtagStack) &&
479 !isInNoSanitizeList(Kind: SanitizerKind::MemtagStack, Fn, Loc))
480 Fn->addFnAttr(Kind: llvm::Attribute::SanitizeMemTag);
481
482 if (getLangOpts().Sanitize.has(K: SanitizerKind::Thread) &&
483 !isInNoSanitizeList(Kind: SanitizerKind::Thread, Fn, Loc))
484 Fn->addFnAttr(Kind: llvm::Attribute::SanitizeThread);
485
486 if (getLangOpts().Sanitize.has(K: SanitizerKind::NumericalStability) &&
487 !isInNoSanitizeList(Kind: SanitizerKind::NumericalStability, Fn, Loc))
488 Fn->addFnAttr(Kind: llvm::Attribute::SanitizeNumericalStability);
489
490 if (getLangOpts().Sanitize.has(K: SanitizerKind::Memory) &&
491 !isInNoSanitizeList(Kind: SanitizerKind::Memory, Fn, Loc))
492 Fn->addFnAttr(Kind: llvm::Attribute::SanitizeMemory);
493
494 if (getLangOpts().Sanitize.has(K: SanitizerKind::KernelMemory) &&
495 !isInNoSanitizeList(Kind: SanitizerKind::KernelMemory, Fn, Loc))
496 Fn->addFnAttr(Kind: llvm::Attribute::SanitizeMemory);
497
498 if (getLangOpts().Sanitize.has(K: SanitizerKind::SafeStack) &&
499 !isInNoSanitizeList(Kind: SanitizerKind::SafeStack, Fn, Loc))
500 Fn->addFnAttr(Kind: llvm::Attribute::SafeStack);
501
502 if (getLangOpts().Sanitize.has(K: SanitizerKind::ShadowCallStack) &&
503 !isInNoSanitizeList(Kind: SanitizerKind::ShadowCallStack, Fn, Loc))
504 Fn->addFnAttr(Kind: llvm::Attribute::ShadowCallStack);
505
506 return Fn;
507}
508
509/// Create a global pointer to a function that will initialize a global
510/// variable. The user has requested that this pointer be emitted in a specific
511/// section.
512void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
513 llvm::GlobalVariable *GV,
514 llvm::Function *InitFunc,
515 InitSegAttr *ISA) {
516 llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
517 TheModule, InitFunc->getType(), /*isConstant=*/true,
518 llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
519 PtrArray->setSection(ISA->getSection());
520 addUsedGlobal(GV: PtrArray);
521
522 // If the GV is already in a comdat group, then we have to join it.
523 if (llvm::Comdat *C = GV->getComdat())
524 PtrArray->setComdat(C);
525}
526
/// Create (and schedule) the per-variable dynamic-initialization function
/// for a global, choosing between thread-local init lists, init_seg
/// sections, priority ctors, unordered (COMDAT) ctors, and the default
/// lexically-ordered CXXGlobalInits list.
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(Val: D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, Name: FnName.str(), FI: getTypes().arrangeNullaryFunction(), Loc: D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  // Externally visible globals may share their initializer across DSOs via
  // a COMDAT group keyed on the variable itself.
  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(x: Fn);
    CXXThreadLocalInitVars.push_back(x: D);
  } else if (PerformInit && ISA) {
    // Contract with backend that "init_seg(compiler)" corresponds to priority
    // 200 and "init_seg(lib)" corresponds to priority 400.
    int Priority = -1;
    if (ISA->getSection() == ".CRT$XCC")
      Priority = 200;
    else if (ISA->getSection() == ".CRT$XCL")
      Priority = 400;

    if (Priority != -1)
      AddGlobalCtor(Ctor: Fn, Priority, LexOrder: ~0U, AssociatedData: COMDATKey);
    else
      // A custom init_seg section: emit a raw pointer to the initializer
      // into that section instead of using llvm.global_ctors.
      EmitPointerToInitFunc(D, GV: Addr, InitFunc: Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    // init_priority initializers are collected and ordered later.
    OrderGlobalInitsOrStermFinalizers Key(IPA->getPriority(),
                                          PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(Elt: std::make_pair(x&: Key, y&: Fn));
  } else if (isTemplateInstantiation(Kind: D->getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(VD: D) == GVA_DiscardableODR ||
             D->hasAttr<SelectAnyAttr>()) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized. On most platforms, this is a
    // minor startup time optimization. In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    //
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    I = DelayedCXXInitPosition.find(Val: D);
    // CXXGlobalInits.size() is the lex order number for the next deferred
    // VarDecl. Use it when the current VarDecl is non-deferred. Although this
    // lex order number is shared between current VarDecl and some following
    // VarDecls, their order of insertion into `llvm.global_ctors` is the same
    // as the lexing order and the following stable sort would preserve such
    // order.
    unsigned LexOrder =
        I == DelayedCXXInitPosition.end() ? CXXGlobalInits.size() : I->second;
    AddGlobalCtor(Ctor: Fn, Priority: 65535, LexOrder, AssociatedData: COMDATKey);
    if (COMDATKey && (getTriple().isOSBinFormatELF() ||
                      getTarget().getCXXABI().isMicrosoft())) {
      // When COMDAT is used on ELF or in the MS C++ ABI, the key must be in
      // llvm.used to prevent linker GC.
      addUsedGlobal(GV: COMDATKey);
    }

    // If we used a COMDAT key for the global ctor, the init function can be
    // discarded if the global ctor entry is discarded.
    // FIXME: Do we need to restrict this to ELF and Wasm?
    llvm::Comdat *C = Addr->getComdat();
    if (COMDATKey && C &&
        (getTarget().getTriple().isOSBinFormatELF() ||
         getTarget().getTriple().isOSBinFormatWasm())) {
      Fn->setComdat(C);
    }
  } else {
    I = DelayedCXXInitPosition.find(Val: D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      // Non-deferred: append in lexical order.
      CXXGlobalInits.push_back(x: Fn);
    } else if (I->second != ~0U) {
      // Deferred: fill in the slot reserved for this decl earlier.
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}
647
648void CodeGenModule::EmitCXXThreadLocalInitFunc() {
649 getCXXABI().EmitThreadLocalInitFuncs(
650 CGM&: *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);
651
652 CXXThreadLocalInits.clear();
653 CXXThreadLocalInitVars.clear();
654 CXXThreadLocals.clear();
655}
656
/* Build the initializer for a C++20 module:
   This is arranged to be run only once regardless of how many times the module
   might be included transitively. This is arranged by using a guard variable.

   If there are no initializers at all (and also no imported modules) we reduce
   this to an empty function (since the Itanium ABI requires that this function
   be available to a caller, which might be produced by a different
   implementation).

   First we call any initializers for imported modules.
   We then call initializers for the Global Module Fragment (if present).
   We then call initializers for the current module.
   We then call initializers for the Private Module Fragment (if present).
*/
671
672void CodeGenModule::EmitCXXModuleInitFunc(Module *Primary) {
673 assert(Primary->isInterfaceOrPartition() &&
674 "The function should only be called for C++20 named module interface"
675 " or partition.");
676
677 while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
678 CXXGlobalInits.pop_back();
679
680 // As noted above, we create the function, even if it is empty.
681 // Module initializers for imported modules are emitted first.
682
683 // Collect all the modules that we import
684 llvm::SmallSetVector<Module *, 8> AllImports;
685 // Ones that we export
686 for (auto I : Primary->Exports)
687 AllImports.insert(X: I.getPointer());
688 // Ones that we only import.
689 for (Module *M : Primary->Imports)
690 AllImports.insert(X: M);
691 // Ones that we import in the global module fragment or the private module
692 // fragment.
693 for (Module *SubM : Primary->submodules()) {
694 assert((SubM->isGlobalModule() || SubM->isPrivateModule()) &&
695 "The sub modules of C++20 module unit should only be global module "
696 "fragments or private module framents.");
697 assert(SubM->Exports.empty() &&
698 "The global mdoule fragments and the private module fragments are "
699 "not allowed to export import modules.");
700 for (Module *M : SubM->Imports)
701 AllImports.insert(X: M);
702 }
703
704 SmallVector<llvm::Function *, 8> ModuleInits;
705 for (Module *M : AllImports) {
706 // No Itanium initializer in header like modules.
707 if (M->isHeaderLikeModule())
708 continue; // TODO: warn of mixed use of module map modules and C++20?
709 // We're allowed to skip the initialization if we are sure it doesn't
710 // do any thing.
711 if (!M->isNamedModuleInterfaceHasInit())
712 continue;
713 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false);
714 SmallString<256> FnName;
715 {
716 llvm::raw_svector_ostream Out(FnName);
717 cast<ItaniumMangleContext>(Val&: getCXXABI().getMangleContext())
718 .mangleModuleInitializer(Module: M, Out);
719 }
720 assert(!GetGlobalValue(FnName.str()) &&
721 "We should only have one use of the initializer call");
722 llvm::Function *Fn = llvm::Function::Create(
723 Ty: FTy, Linkage: llvm::Function::ExternalLinkage, N: FnName.str(), M: &getModule());
724 ModuleInits.push_back(Elt: Fn);
725 }
726
727 // Add any initializers with specified priority; this uses the same approach
728 // as EmitCXXGlobalInitFunc().
729 if (!PrioritizedCXXGlobalInits.empty()) {
730 SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
731 llvm::array_pod_sort(Start: PrioritizedCXXGlobalInits.begin(),
732 End: PrioritizedCXXGlobalInits.end());
733 for (SmallVectorImpl<GlobalInitData>::iterator
734 I = PrioritizedCXXGlobalInits.begin(),
735 E = PrioritizedCXXGlobalInits.end();
736 I != E;) {
737 SmallVectorImpl<GlobalInitData>::iterator PrioE =
738 std::upper_bound(first: I + 1, last: E, val: *I, comp: GlobalInitPriorityCmp());
739
740 for (; I < PrioE; ++I)
741 ModuleInits.push_back(Elt: I->second);
742 }
743 }
744
745 // Now append the ones without specified priority.
746 for (auto *F : CXXGlobalInits)
747 ModuleInits.push_back(Elt: F);
748
749 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false);
750 const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
751
752 // We now build the initializer for this module, which has a mangled name
753 // as per the Itanium ABI . The action of the initializer is guarded so that
754 // each init is run just once (even though a module might be imported
755 // multiple times via nested use).
756 llvm::Function *Fn;
757 {
758 SmallString<256> InitFnName;
759 llvm::raw_svector_ostream Out(InitFnName);
760 cast<ItaniumMangleContext>(Val&: getCXXABI().getMangleContext())
761 .mangleModuleInitializer(Module: Primary, Out);
762 Fn = CreateGlobalInitOrCleanUpFunction(
763 FTy, Name: llvm::Twine(InitFnName), FI, Loc: SourceLocation(), TLS: false,
764 Linkage: llvm::GlobalVariable::ExternalLinkage);
765
766 // If we have a completely empty initializer then we do not want to create
767 // the guard variable.
768 ConstantAddress GuardAddr = ConstantAddress::invalid();
769 if (!ModuleInits.empty()) {
770 // Create the guard var.
771 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
772 getModule(), Int8Ty, /*isConstant=*/false,
773 llvm::GlobalVariable::InternalLinkage,
774 llvm::ConstantInt::get(Ty: Int8Ty, V: 0), InitFnName.str() + "__in_chrg");
775 CharUnits GuardAlign = CharUnits::One();
776 Guard->setAlignment(GuardAlign.getAsAlign());
777 GuardAddr = ConstantAddress(Guard, Int8Ty, GuardAlign);
778 }
779 CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXThreadLocals: ModuleInits,
780 Guard: GuardAddr);
781 }
782
783 // We allow for the case that a module object is added to a linked binary
  // without a specific call to the initializer. This also ensures that
785 // implementation partition initializers are called when the partition
786 // is not imported as an interface.
787 AddGlobalCtor(Ctor: Fn);
788
789 // See the comment in EmitCXXGlobalInitFunc about OpenCL global init
790 // functions.
791 if (getLangOpts().OpenCL) {
792 GenKernelArgMetadata(FN: Fn);
793 Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
794 }
795
796 assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
797 getLangOpts().GPUAllowDeviceInit);
798 if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
799 Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
800 Fn->addFnAttr(Kind: "device-init");
801 }
802
803 // We are done with the inits.
804 AllImports.clear();
805 PrioritizedCXXGlobalInits.clear();
806 CXXGlobalInits.clear();
807 ModuleInits.clear();
808}
809
810static SmallString<128> getTransformedFileName(llvm::Module &M) {
811 SmallString<128> FileName = llvm::sys::path::filename(path: M.getName());
812
813 if (FileName.empty())
814 FileName = "<null>";
815
816 for (size_t i = 0; i < FileName.size(); ++i) {
817 // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
818 // to be the set of C preprocessing numbers.
819 if (!isPreprocessingNumberBody(c: FileName[i]))
820 FileName[i] = '_';
821 }
822
823 return FileName;
824}
825
/// Map an init/fini priority (asserted to be <= 65535, i.e. at most five
/// decimal digits) to a fixed-width, zero-padded six-digit suffix so that
/// generated function names sort lexicographically in priority order.
static std::string getPrioritySuffix(unsigned int Priority) {
  assert(Priority <= 65535 && "Priority should always be <= 65535.");

  // Compute the function suffix from priority. Prepend with zeroes to make
  // sure the function names are also ordered as priorities.
  std::string PrioritySuffix = std::to_string(Priority);
  // Pad in place to exactly six digits; std::to_string matches llvm::utostr
  // for unsigned values, and insert avoids the temporary from concatenation.
  PrioritySuffix.insert(0, 6 - PrioritySuffix.size(), '0');

  return PrioritySuffix;
}
836
/// Emit this translation unit's dynamic-initialization function(s) and
/// register them as global constructors. Initializers with an explicit
/// priority are grouped into per-priority "_GLOBAL__I_<prio>" functions;
/// the remaining initializers (prefixed by imported C++20 module inits, if
/// any are left over) go into a single function named either via the
/// Itanium module-initializer mangling (named module interface units) or
/// "_GLOBAL__sub_I_<filename>".
void
CodeGenModule::EmitCXXGlobalInitFunc() {
  // Entries in CXXGlobalInits may be null (GenerateCXXGlobalInitFunc skips
  // them); trim trailing nulls so an effectively-empty list reads as empty.
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  // When we import C++20 modules, we must run their initializers first.
  SmallVector<llvm::Function *, 8> ModuleInits;
  if (CXX20ModuleInits)
    for (Module *M : ImportedModules) {
      // No Itanium initializer in header like modules.
      if (M->isHeaderLikeModule())
        continue;
      // We're allowed to skip the initialization if we are sure it doesn't
      // do anything.
      if (!M->isNamedModuleInterfaceHasInit())
        continue;
      // Create an external declaration for the imported module's mangled
      // initializer; its definition is emitted in that module's own TU.
      llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false);
      SmallString<256> FnName;
      {
        llvm::raw_svector_ostream Out(FnName);
        cast<ItaniumMangleContext>(Val&: getCXXABI().getMangleContext())
            .mangleModuleInitializer(Module: M, Out);
      }
      assert(!GetGlobalValue(FnName.str()) &&
             "We should only have one use of the initializer call");
      llvm::Function *Fn = llvm::Function::Create(
          Ty: FTy, Linkage: llvm::Function::ExternalLinkage, N: FnName.str(), M: &getModule());
      ModuleInits.push_back(Elt: Fn);
    }

  // Nothing at all to initialize? Then emit no function.
  if (ModuleInits.empty() && CXXGlobalInits.empty() &&
      PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(Start: PrioritizedCXXGlobalInits.begin(),
                         End: PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with same priority and emit each chunk
    // into separate function. Note - everything is sorted first by priority,
    // second - by lex order, so we emit ctor functions in proper order.
    for (SmallVectorImpl<GlobalInitData >::iterator
           I = PrioritizedCXXGlobalInits.begin(),
           E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      // [I, PrioE) is the maximal run of entries sharing I's priority.
      SmallVectorImpl<GlobalInitData >::iterator
        PrioE = std::upper_bound(first: I + 1, last: E, val: *I, comp: GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, Name: "_GLOBAL__I_" + getPrioritySuffix(Priority), FI);

      // Prepend the module inits to the highest priority set.
      if (!ModuleInits.empty()) {
        for (auto *F : ModuleInits)
          LocalCXXGlobalInits.push_back(Elt: F);
        // Consumed here; the unprioritized function below must not call
        // them a second time.
        ModuleInits.clear();
      }

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(Elt: I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXThreadLocals: LocalCXXGlobalInits);
      AddGlobalCtor(Ctor: Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  // When the C++ ABI collects initializers via sinit/sterm and nothing
  // unprioritized remains, there is no further function to emit.
  if (getCXXABI().useSinitAndSterm() && ModuleInits.empty() &&
      CXXGlobalInits.empty())
    return;

  for (auto *F : CXXGlobalInits)
    ModuleInits.push_back(Elt: F);
  CXXGlobalInits.clear();

  // Include the filename in the symbol name. Including "sub_" matches gcc
  // and makes sure these symbols appear lexicographically behind the symbols
  // with priority emitted above. Module implementation units behave the same
  // way as a non-modular TU with imports.
  llvm::Function *Fn;
  if (CXX20ModuleInits && getContext().getCurrentNamedModule() &&
      !getContext().getCurrentNamedModule()->isModuleImplementation()) {
    // Named module interface unit: use the mangled Itanium module-initializer
    // name with external linkage, matching the declarations that importing
    // TUs create in the loop above.
    SmallString<256> InitFnName;
    llvm::raw_svector_ostream Out(InitFnName);
    cast<ItaniumMangleContext>(Val&: getCXXABI().getMangleContext())
        .mangleModuleInitializer(Module: getContext().getCurrentNamedModule(), Out);
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy, Name: llvm::Twine(InitFnName), FI, Loc: SourceLocation(), TLS: false,
        Linkage: llvm::GlobalVariable::ExternalLinkage);
  } else
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy,
        Name: llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(M&: getModule())),
        FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXThreadLocals: ModuleInits);
  AddGlobalCtor(Ctor: Fn);

  // In OpenCL global init functions must be converted to kernels in order to
  // be able to launch them from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // Current initialization function makes use of function pointers callbacks.
  // We can't support function pointers especially between host and device.
  // However it seems global destruction has little meaning without any
  // dynamic resource allocation on the device and program scope variables are
  // destroyed by the runtime when program is released.
  if (getLangOpts().OpenCL) {
    GenKernelArgMetadata(FN: Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
         getLangOpts().GPUAllowDeviceInit);
  if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
    // HIP device-side initializers are emitted as an AMDGPU kernel tagged
    // "device-init" so the runtime can find and launch them.
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr(Kind: "device-init");
  }

  ModuleInits.clear();
}
963
/// Emit this translation unit's global cleanup function(s) and register
/// them as global destructors. Prioritized sterm finalizers are grouped
/// into per-priority "_GLOBAL__a_<prio>" functions; all remaining
/// destructors/finalizers go into a single "_GLOBAL__D_a" function.
void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
  if (CXXGlobalDtorsOrStermFinalizers.empty() &&
      PrioritizedCXXStermFinalizers.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized cleanup function.
  if (!PrioritizedCXXStermFinalizers.empty()) {
    SmallVector<CXXGlobalDtorsOrStermFinalizer_t, 8> LocalCXXStermFinalizers;
    llvm::array_pod_sort(Start: PrioritizedCXXStermFinalizers.begin(),
                         End: PrioritizedCXXStermFinalizers.end());
    // Iterate over "chunks" of dtors with same priority and emit each chunk
    // into separate function. Note - everything is sorted first by priority,
    // second - by lex order, so we emit dtor functions in proper order.
    for (SmallVectorImpl<StermFinalizerData>::iterator
             I = PrioritizedCXXStermFinalizers.begin(),
             E = PrioritizedCXXStermFinalizers.end();
         I != E;) {
      // [I, PrioE) is the maximal run of finalizers sharing I's priority.
      SmallVectorImpl<StermFinalizerData>::iterator PrioE =
          std::upper_bound(first: I + 1, last: E, val: *I, comp: StermFinalizerPriorityCmp());

      LocalCXXStermFinalizers.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, Name: "_GLOBAL__a_" + getPrioritySuffix(Priority), FI);

      // The null third tuple element marks these entries as sterm
      // finalizers (see the assertion in GenerateCXXGlobalCleanUpFunc).
      for (; I < PrioE; ++I) {
        llvm::FunctionCallee DtorFn = I->second;
        LocalCXXStermFinalizers.emplace_back(Args: DtorFn.getFunctionType(),
                                             Args: DtorFn.getCallee(), Args: nullptr);
      }

      CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
          Fn, DtorsOrStermFinalizers: LocalCXXStermFinalizers);
      AddGlobalDtor(Dtor: Fn, Priority);
    }
    PrioritizedCXXStermFinalizers.clear();
  }

  if (CXXGlobalDtorsOrStermFinalizers.empty())
    return;

  // Create our global cleanup function.
  llvm::Function *Fn =
      CreateGlobalInitOrCleanUpFunction(FTy, Name: "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
      Fn, DtorsOrStermFinalizers: CXXGlobalDtorsOrStermFinalizers);
  AddGlobalDtor(Dtor: Fn);
  CXXGlobalDtorsOrStermFinalizers.clear();
}
1018
/// Emit the code necessary to initialize the given global variable.
///
/// Fills in \p Fn, choosing between guarded (once-only) and plain
/// initialization based on the variable's linkage and TLS kind.
///
/// \param Fn          the (already created) initializer function to emit into.
/// \param D           the variable declaration being initialized.
/// \param Addr        the emitted global variable for \p D.
/// \param PerformInit whether the initializer itself must run; forwarded to
///                    the guarded/unguarded emission paths.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                       llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  // Exceptions thrown during initialization are attributed to the
  // variable's location.
  CurEHLocation = D->getBeginLoc();

  StartFunction(GD: GlobalDecl(D, DynamicInitKind::Initializer),
                RetTy: getContext().VoidTy, Fn, FnInfo: getTypes().arrangeNullaryFunction(),
                Args: FunctionArgList());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF&: *this);

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  //
  // Also use guarded initialization for a variable with dynamic TLS and
  // unordered initialization. (If the initialization is ordered, the ABI
  // layer will guard the whole-TU initialization for us.)
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
      (D->getTLSKind() == VarDecl::TLS_Dynamic &&
       isTemplateInstantiation(Kind: D->getTemplateSpecializationKind()))) {
    EmitCXXGuardedInit(D: *D, DeclPtr: Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(D: *D, GV: Addr, PerformInit);
  }

  // Give the HLSL runtime a chance to annotate resource variables.
  if (getLangOpts().HLSL)
    CGM.getHLSLRuntime().annotateHLSLResource(D, GV: Addr);

  FinishFunction();
}
1056
/// Generate the body of a global-initialization function: a sequence of
/// calls to the initializer functions in \p Decls, optionally protected by
/// a run-at-most-once guard.
///
/// \param Fn    the (already created) function to emit into.
/// \param Decls initializer functions to call in order; null entries are
///              skipped.
/// \param Guard if valid, a guard variable: when it reads non-zero the
///              initializers are skipped, and it is stored 1 before any
///              initializer runs. Used for TLS initialization functions.
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    // The prologue gets no debug location.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF&: *this);
    StartFunction(GD: GlobalDecl(), RetTy: getContext().VoidTy, Fn,
                  FnInfo: getTypes().arrangeNullaryFunction(), Args: FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(CGF&: *this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Addr: Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(Arg: GuardVal,
                                                 Name: "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock(name: "init");
      ExitBlock = createBasicBlock(name: "exit");
      EmitCXXGuardedInitBranch(NeedsInit: Uninit, InitBlock, NoInitBlock: ExitBlock,
                               Kind: GuardKind::TlsGuard, D: nullptr);
      EmitBlock(BB: InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(Val: llvm::ConstantInt::get(Ty: GuardVal->getType(),V: 1), Addr: Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Addr: Guard.getPointer(),
          Size: CharUnits::fromQuantity(
              Quantity: CGM.getDataLayout().getTypeAllocSize(Ty: GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(Ptr: token);
    }

    // Call each initializer in order, skipping null entries.
    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(callee: Decls[i]);

    // Force cleanups (e.g. the autorelease-pool pop) to run here, inside
    // the guarded region and before the branch to the exit block.
    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(Dest: ExitBlock);
      EmitBlock(BB: ExitBlock);
    }
  }

  FinishFunction();
}
1115
/// Generate the body of a global cleanup function: calls each recorded
/// destructor or sterm finalizer in reverse order of registration.
///
/// Each tuple in \p DtorsOrStermFinalizers is (callee type, callee,
/// argument); a null argument means the callee is invoked with no
/// arguments, which is only valid on ABIs using sinit/sterm finalizers.
void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
    llvm::Function *Fn,
    ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                        llvm::Constant *>>
        DtorsOrStermFinalizers) {
  {
    // The prologue gets no debug location.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF&: *this);
    StartFunction(GD: GlobalDecl(), RetTy: getContext().VoidTy, Fn,
                  FnInfo: getTypes().arrangeNullaryFunction(), Args: FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(CGF&: *this);

    // Emit the cleanups, in reverse order from construction.
    for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
      llvm::FunctionType *CalleeTy;
      llvm::Value *Callee;
      llvm::Constant *Arg;
      std::tie(args&: CalleeTy, args&: Callee, args&: Arg) = DtorsOrStermFinalizers[e - i - 1];

      llvm::CallInst *CI = nullptr;
      if (Arg == nullptr) {
        assert(
            CGM.getCXXABI().useSinitAndSterm() &&
            "Arg could not be nullptr unless using sinit and sterm functions.");
        CI = Builder.CreateCall(FTy: CalleeTy, Callee);
      } else
        CI = Builder.CreateCall(FTy: CalleeTy, Callee, Args: Arg);

      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Val: Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}
1152
/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object. The address of the object
/// should be in global memory.
///
/// The emitted function ("__cxx_global_array_dtor") takes a single opaque
/// pointer parameter that the body does not use; the address to destroy is
/// captured in \p addr instead.
///
/// \param addr                 address of the object to destroy.
/// \param type                 type of the object at \p addr.
/// \param destroyer            destruction callback applied via emitDestroy.
/// \param useEHCleanupForArray forwarded to emitDestroy for array elements.
/// \param VD                   the variable declaration; supplies the source
///                             location and the GlobalDecl for the helper.
llvm::Function *CodeGenFunction::generateDestroyHelper(
    Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray, const VarDecl *VD) {
  // The helper's signature is void(void*).
  FunctionArgList args;
  ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
                        ImplicitParamKind::Other);
  args.push_back(Elt: &Dst);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: getContext().VoidTy, args);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(Info: FI);
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, Name: "__cxx_global_array_dtor", FI, Loc: VD->getLocation());

  // Exceptions thrown during destruction are attributed to the variable.
  CurEHLocation = VD->getBeginLoc();

  StartFunction(GD: GlobalDecl(VD, DynamicInitKind::GlobalArrayDestructor),
                RetTy: getContext().VoidTy, Fn: fn, FnInfo: FI, Args: args);
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF&: *this);

  emitDestroy(addr, type, destroyer, useEHCleanupForArray);

  FinishFunction();

  return fn;
}
1183