//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "ConstantEmitter.h"
#include "EHScopeStack.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenACC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
              "Clang max alignment greater than what LLVM supports?");

void CodeGenFunction::EmitDecl(const Decl &D, bool EvaluateConditionDecl) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::TopLevelStmt:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::OutlinedFunction:
  case Decl::Captured:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
  case Decl::UnresolvedUsingIfExists:
  case Decl::HLSLBuffer:
  case Decl::HLSLRootSignature:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Record:    // struct/union/class X;
  case Decl::CXXRecord: // struct/union/class X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<RecordDecl>(D).getDefinition())
        DI->EmitAndRetainType(
            getContext().getCanonicalTagType(cast<RecordDecl>(&D)));
    return;
  case Decl::Enum: // enum X;
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<EnumDecl>(D).getDefinition())
        DI->EmitAndRetainType(
            getContext().getCanonicalTagType(cast<EnumDecl>(&D)));
    return;
  case Decl::Function:     // void X();
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::MSGuid: // __declspec(uuid("..."))
  case Decl::UnnamedGlobalConstant:
  case Decl::TemplateParamObject:
  case Decl::OMPThreadPrivate:
  case Decl::OMPGroupPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::ImplicitConceptSpecialization:
  case Decl::LifetimeExtendedTemporary:
  case Decl::RequiresExprBody:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using: // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingEnum: // using enum X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using, /*EvaluateConditionDecl=*/EvaluateConditionDecl);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (EvaluateConditionDecl)
      MaybeEmitDeferredVarDeclInit(&VD);

    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::OpenACCDeclare:
    return CGM.EmitOpenACCDeclare(cast<OpenACCDeclareDecl>(&D), this);
  case Decl::OpenACCRoutine:
    return CGM.EmitOpenACCRoutine(cast<OpenACCRoutineDecl>(&D), this);

  case Decl::Typedef:     // typedef int X;
  case Decl::TypeAlias: { // using X = int; [C++0x]
    QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitAndRetainType(Ty);
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
    return;
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still need
  // to be emitted like a static variable, e.g. a function-scope variable in
  // the constant address space in OpenCL.
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}

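/// Compute the name to give a function-local static variable: the mangled
/// name in C++, otherwise a "<context>.<variable>" pretty name.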
static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = std::string(CGM.getMangledName(FD));
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = std::string(getMangledName(&D));
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);
  getTargetCodeGenInfo().setTargetAttributes(cast<Decl>(&D), GV, *this);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS,
        llvm::PointerType::get(getLLVMContext(),
                               getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (D.hasFlexibleArrayInit(getContext()))
      CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

  PGO->markStmtMaybeUsed(D.getInit()); // FIXME: Too lazy

#ifndef NDEBUG
  CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
                      D.getFlexibleArrayInitChars(getContext());
  CharUnits CstSize = CharUnits::fromQuantity(
      CGM.getDataLayout().getTypeAllocSize(Init->getType()));
  assert(VarSize == CstSize && "Emitted constant has unexpected size");
#endif

  bool NeedsDtor =
      D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;

  GV->setConstant(
      D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
  GV->replaceInitializer(Init);

  emitter.finalize(GV);

  if (NeedsDtor && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                        llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  llvm::Type *elemTy = ConvertTypeForMem(D.getType());
  setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
      cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar) {
    ApplyAtomGroup Grp(getDebugInfo());
    var = AddInitializerToStaticVarDecl(D, var);
  }

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<RetainAttr>())
    CGM.addUsedGlobal(var);
  else if (D.hasAttr<UsedAttr>())
    CGM.addUsedOrCompilerUsedGlobal(var);

  if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
    CGM.addUsedOrCompilerUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV, uses of this constant will be invalid.
  llvm::Constant *castedAddr =
      llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobal(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}

namespace {
  struct DestroyObject final : EHScopeStack::Cleanup {
    DestroyObject(Address addr, QualType type,
                  CodeGenFunction::Destroyer *destroyer,
                  bool useEHCleanupForArray)
        : addr(addr), type(type), destroyer(destroyer),
          useEHCleanupForArray(useEHCleanupForArray) {}

    Address addr;
    QualType type;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Don't use an EH cleanup recursively from an EH cleanup.
      bool useEHCleanupForArray =
          flags.isForNormalCleanup() && this->useEHCleanupForArray;

      CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
    }
  };

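  // CRTP base for cleanups that destroy a variable that may have been
  // NRVO'd: on the exception path the destructor always runs, but on the
  // normal path it is skipped when the NRVO flag indicates the object has
  // already been used as the return value.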
  template <class Derived>
  struct DestroyNRVOVariable : EHScopeStack::Cleanup {
    DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
        : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

    llvm::Value *NRVOFlag;
    Address Loc;
    QualType Ty;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = nullptr;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO =
            CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      static_cast<Derived *>(this)->emitDestructorCall(CGF);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }

    virtual ~DestroyNRVOVariable() = default;
  };

  struct DestroyNRVOVariableCXX final
      : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
    DestroyNRVOVariableCXX(Address addr, QualType type,
                           const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
        : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
          Dtor(Dtor) {}

    const CXXDestructorDecl *Dtor;

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Loc, Ty);
    }
  };

  struct DestroyNRVOVariableC final
      : DestroyNRVOVariable<DestroyNRVOVariableC> {
    DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
        : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
    }
  };

  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    bool isRedundantBeforeReturn() override { return true; }
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      CGF.Builder.CreateStackRestore(V);
    }
  };

  struct KmpcAllocFree final : EHScopeStack::Cleanup {
    std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
    KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
        : AddrSizePair(AddrSizePair) {}
    void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
      auto &RT = CGF.CGM.getOpenMPRuntime();
      RT.getKmpcFreeShared(CGF, AddrSizePair);
    }
  };

  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;
    const CleanupAttr *Attribute;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var, const CleanupAttr *Attr)
        : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var), Attribute(Attr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

      // In some cases, the type of the function argument will be different
      // from the type of the pointer. An example of this is
      //   void f(void* arg);
      //   __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
          CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      GlobalDecl GD = GlobalDecl(Attribute->getFunctionDecl());
      auto Callee = CGCallee::forDirect(CleanupFn, CGCalleeInfo(GD));
      CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args,
                   /*callOrInvoke*/ nullptr, /*IsMustTail*/ false,
                   Attribute->getLoc());
    }
  };
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
        (var.hasAttr<ObjCPreciseLifetimeAttr>()
             ? CodeGenFunction::destroyARCStrongPrecise
             : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}

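/// Return true if the given statement (or any statement within it) refers to
/// the variable, either directly or through a block capture.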
static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null, e.g. a missing decl or the conditional of an
    // if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

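/// If the initializer is simply a load from a __weak lvalue (possibly behind
/// no-op casts), initialize the destination with objc_copyWeak or
/// objc_moveWeak instead of a load and a store. Returns true on success.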
static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress();
      if (needsCast) {
        srcAddr =
            srcAddr.withElementType(destLV.getAddress().getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->isLValue()) {
        CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
      } else {
        assert(srcExpr->isXValue());
        CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}

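/// Adjust the lvalue for a __block variable so that it addresses the object
/// stored inside the variable's byref structure.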
static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}

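/// Emit a -fsanitize=nullability-assign check that the stored value is
/// non-null when the destination lvalue has a _Nonnull type.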
void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability();
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // If the left-hand side must be nonnull, check that the right-hand side of
  // the assignment is nonnull.
  auto CheckOrdinal = SanitizerKind::SO_NullabilityAssign;
  auto CheckHandler = SanitizerHandler::TypeMismatch;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, CheckOrdinal}}, CheckHandler, StaticData, RHS);
}

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *Value;
    if (PointerAuthQualifier PtrAuth = lvalue.getQuals().getPointerAuth()) {
      Value = EmitPointerAuthQualify(PtrAuth, init, lvalue.getAddress());
      lvalue.getQuals().removePointerAuth();
    } else {
      Value = EmitScalarExpr(init);
    }
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, Value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(Value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
    CodeGenFunction::RunCleanupsScope Scope(*this);
    return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
  }

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means that we omit the retain, which causes non-autoreleased return
    // values to be released immediately.
    [[fallthrough]];
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}

/// Decide whether we can emit the non-zero parts of the specified initializer
/// with no more than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}

/// For initializers for which canEmitInitWithFewStoresAfterBZero returned
/// true, emit the scalar stores that would be required.
void CodeGenFunction::emitStoresForInitAfterBZero(llvm::Constant *Init,
                                                  Address Loc, bool isVolatile,
                                                  bool IsAutoInit) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    auto *I = Builder.CreateStore(Init, Loc, isVolatile);
    addInstToCurrentSourceAtom(I, nullptr);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            IsAutoInit);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, IsAutoInit);
  }
}

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global. It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is larger,
  // use bzero plus stores if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided to
/// not use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
///       memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}

enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
                                              IsPattern isPattern,
                                              llvm::StructType *STy,
                                              llvm::Constant *constant) {
  const llvm::DataLayout &DL = CGM.getDataLayout();
  const llvm::StructLayout *Layout = DL.getStructLayout(STy);
  llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  unsigned SizeSoFar = 0;
  SmallVector<llvm::Constant *, 8> Values;
  bool NestedIntact = true;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
    unsigned CurOff = Layout->getElementOffset(i);
    if (SizeSoFar < CurOff) {
      assert(!STy->isPacked());
      auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
      Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
    }
    llvm::Constant *CurOp;
    if (constant->isZeroValue())
      CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
    else
      CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
    auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
    if (CurOp != NewOp)
      NestedIntact = false;
    Values.push_back(NewOp);
    SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
  }
  unsigned TotalSize = Layout->getSizeInBytes();
  if (SizeSoFar < TotalSize) {
    auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
    Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
  }
  if (NestedIntact && Values.size() == STy->getNumElements())
    return constant;
  return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}

/// Replace all padding bytes in a given constant with either a pattern byte or
/// 0x00.
static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant) {
  llvm::Type *OrigTy = constant->getType();
  if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
    return constStructWithPadding(CGM, isPattern, STy, constant);
  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
    llvm::SmallVector<llvm::Constant *, 8> Values;
    uint64_t Size = ArrayTy->getNumElements();
    if (!Size)
      return constant;
    llvm::Type *ElemTy = ArrayTy->getElementType();
    bool ZeroInitializer = constant->isNullValue();
    llvm::Constant *OpValue, *PaddedOp;
    if (ZeroInitializer) {
      OpValue = llvm::Constant::getNullValue(ElemTy);
      PaddedOp = constWithPadding(CGM, isPattern, OpValue);
    }
    for (unsigned Op = 0; Op != Size; ++Op) {
      if (!ZeroInitializer) {
        OpValue = constant->getAggregateElement(Op);
        PaddedOp = constWithPadding(CGM, isPattern, OpValue);
      }
      Values.push_back(PaddedOp);
    }
    auto *NewElemTy = Values[0]->getType();
    if (NewElemTy == ElemTy)
      return constant;
    auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
    return llvm::ConstantArray::get(NewArrayTy, Values);
  }
  // FIXME: Add handling for tail padding in vectors. Vectors don't
  // have padding between or inside elements, but the total amount of
  // data can be less than the allocated size.
  return constant;
}

Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
                                               llvm::Constant *Constant,
                                               CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return std::string(getMangledName(FD));
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else {
      llvm_unreachable("expected a function or method");
    }
  };

  // Form a simple per-variable cache of these values in case we find we
  // want to reuse them.
  llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
  if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
    auto *Ty = Constant->getType();
    bool isConstant = true;
    llvm::GlobalVariable *InsertBefore = nullptr;
    unsigned AS =
        getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
    std::string Name;
    if (D.hasGlobalStorage())
      Name = getMangledName(&D).str() + ".const";
    else if (const DeclContext *DC = D.getParentFunctionOrMethod())
      Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
    else
      llvm_unreachable("local variable has no parent function or method");
    llvm::GlobalVariable *GV = new llvm::GlobalVariable(
        getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
        Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Align.getAsAlign());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CacheEntry = GV;
  } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
    CacheEntry->setAlignment(Align.getAsAlign());
  }

  return Address(CacheEntry, CacheEntry->getValueType(), Align);
}

static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
                                                const VarDecl &D,
                                                CGBuilderTy &Builder,
                                                llvm::Constant *Constant,
                                                CharUnits Align) {
  Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
  return SrcPtr.withElementType(CGM.Int8Ty);
}

void CodeGenFunction::emitStoresForConstant(const VarDecl &D, Address Loc,
                                            bool isVolatile,
                                            llvm::Constant *constant,
                                            bool IsAutoInit) {
  auto *Ty = constant->getType();
  uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
  if (!ConstantSize)
    return;

  bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
                          Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
  if (canDoSingleStore) {
    auto *I = Builder.CreateStore(constant, Loc, isVolatile);
    addInstToCurrentSourceAtom(I, nullptr);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);

  // If the initializer is all or mostly the same, codegen with bzero / memset
  // then do a few stores afterward.
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
                                   SizeVal, isVolatile);
    addInstToCurrentSourceAtom(I, nullptr);

    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Loc.withElementType(Ty);
      emitStoresForInitAfterBZero(constant, Loc, isVolatile, IsAutoInit);
    }
    return;
  }

  // If the initializer is a repeated byte pattern, use memset.
  llvm::Value *Pattern =
      shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
  if (Pattern) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    auto *I = Builder.CreateMemSet(
        Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
    addInstToCurrentSourceAtom(I, nullptr);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  // If the initializer is small or trivialAutoVarInit is set, use a handful of
  // stores.
  bool IsTrivialAutoVarInitPattern =
      CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
      LangOptions::TrivialAutoVarInitKind::Pattern;
  if (shouldSplitConstantStore(CGM, ConstantSize)) {
    if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
      if (STy == Loc.getElementType() || IsTrivialAutoVarInitPattern) {
        const llvm::StructLayout *Layout =
            CGM.getDataLayout().getStructLayout(STy);
        for (unsigned i = 0; i != constant->getNumOperands(); i++) {
          CharUnits CurOff =
              CharUnits::fromQuantity(Layout->getElementOffset(i));
          Address EltPtr = Builder.CreateConstInBoundsByteGEP(
              Loc.withElementType(CGM.Int8Ty), CurOff);
          emitStoresForConstant(D, EltPtr, isVolatile,
                                constant->getAggregateElement(i), IsAutoInit);
        }
        return;
      }
    } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      if (ATy == Loc.getElementType() || IsTrivialAutoVarInitPattern) {
        for (unsigned i = 0; i != ATy->getNumElements(); i++) {
          Address EltPtr = Builder.CreateConstGEP(
              Loc.withElementType(ATy->getElementType()), i);
          emitStoresForConstant(D, EltPtr, isVolatile,
                                constant->getAggregateElement(i), IsAutoInit);
        }
        return;
      }
    }
  }

  // Copy from a global.
  auto *I =
      Builder.CreateMemCpy(Loc,
                           createUnnamedGlobalForMemcpyFrom(
                               CGM, D, Builder, constant, Loc.getAlignment()),
                           SizeVal, isVolatile);
  addInstToCurrentSourceAtom(I, nullptr);

  if (IsAutoInit)
    I->addAnnotationMetadata("auto-init");
}

void CodeGenFunction::emitStoresForZeroInit(const VarDecl &D, Address Loc,
                                            bool isVolatile) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant =
      constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
  emitStoresForConstant(D, Loc, isVolatile, constant,
                        /*IsAutoInit=*/true);
}

void CodeGenFunction::emitStoresForPatternInit(const VarDecl &D, Address Loc,
                                               bool isVolatile) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = constWithPadding(
      CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
  assert(!isa<llvm::UndefValue>(constant));
  emitStoresForConstant(D, Loc, isVolatile, constant,
                        /*IsAutoInit=*/true);
}

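/// Check whether a constant contains an undef value, recursing into
/// aggregate elements.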
static bool containsUndef(llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return true;
  if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
    for (llvm::Use &Op : constant->operands())
      if (containsUndef(cast<llvm::Constant>(Op)))
        return true;
  return false;
}

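/// Replace any undef values in a constant with either zeroes or the
/// initialization pattern, recursing into aggregate elements.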
static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
                                    llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return patternOrZeroFor(CGM, isPattern, Ty);
  if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
    return constant;
  if (!containsUndef(constant))
    return constant;
  llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
  for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
    auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
    Values[Op] = replaceUndef(CGM, isPattern, OpValue);
  }
  if (Ty->isStructTy())
    return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
  if (Ty->isArrayTy())
    return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
  assert(Ty->isVectorTy());
  return llvm::ConstantVector::get(Values);
}

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return whether the marker was emitted.
bool CodeGenFunction::EmitLifetimeStart(llvm::Value *Addr) {
  if (!ShouldEmitLifetimeMarkers)
    return false;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {Addr});
  C->setDoesNotThrow();
  return true;
}

void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Addr) {
  if (!ShouldEmitLifetimeMarkers)
    return;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Addr});
  C->setDoesNotThrow();
}

void CodeGenFunction::EmitFakeUse(Address Addr) {
  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  llvm::Value *V = Builder.CreateLoad(Addr, "fake.use");
  llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMFakeUseFn(), {V});
  C->setDoesNotThrow();
  C->setTailCallKind(llvm::CallInst::TCK_NoTail);
}

void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
    CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
  // For each dimension, store its QualType and the corresponding
  // size-expression Value.
  SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
  SmallVector<const IdentifierInfo *, 4> VLAExprNames;

  // Break down the array into individual dimensions.
  QualType Type1D = D.getType();
  while (getContext().getAsVariableArrayType(Type1D)) {
    auto VlaSize = getVLAElements1D(Type1D);
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
    else {
      // Generate a locally unique name for the size expression.
      Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
      SmallString<12> Buffer;
      StringRef NameRef = Name.toStringRef(Buffer);
      auto &Ident = getContext().Idents.getOwn(NameRef);
      VLAExprNames.push_back(&Ident);
      auto SizeExprAddr =
          CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
      Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
      Dimensions.emplace_back(SizeExprAddr.getPointer(),
                              Type1D.getUnqualifiedType());
    }
    Type1D = VlaSize.Type;
  }

  if (!EmitDebugInfo)
    return;

  // Register each dimension's size-expression with a DILocalVariable,
  // so that it can be used by CGDebugInfo when instantiating a DISubrange
  // to describe this array.
  unsigned NameIdx = 0;
  for (auto &VlaSize : Dimensions) {
    llvm::Metadata *MD;
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      MD = llvm::ConstantAsMetadata::get(C);
    else {
      // Create an artificial VarDecl to generate debug info for.
      const IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
      auto QT = getContext().getIntTypeForBitwidth(
          SizeTy->getScalarSizeInBits(), false);
      auto *ArtificialDecl = VarDecl::Create(
          getContext(), const_cast<DeclContext *>(D.getDeclContext()),
          D.getLocation(), D.getLocation(), NameIdent, QT,
          getContext().CreateTypeSourceInfo(QT), SC_Auto);
      ArtificialDecl->setImplicit();

      MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
                                         Builder);
    }
    assert(MD && "No Size expression debug node created");
    DI->registerVLASizeExpression(VlaSize.Type, MD);
  }
}
1445
1446/// Return the maximum size of an aggregate for which we generate a fake use
1447/// intrinsic when -fextend-variable-liveness is in effect.
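/// getTypeSize is in bits, so with a typical 32-bit 'unsigned int' this caps
/// extension at 128 bits (16 bytes) per variable.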
1448static uint64_t maxFakeUseAggregateSize(const ASTContext &C) {
1449 return 4 * C.getTypeSize(T: C.UnsignedIntTy);
1450}
1451
1452// Helper function to determine whether a variable's or parameter's lifetime
1453// should be extended.
1454static bool shouldExtendLifetime(const ASTContext &Context,
1455 const Decl *FuncDecl, const VarDecl &D,
1456 ImplicitParamDecl *CXXABIThisDecl) {
1457 // When we're not inside a valid function it is unlikely that any
1458 // lifetime extension is useful.
1459 if (!FuncDecl)
1460 return false;
1461 if (FuncDecl->isImplicit())
1462 return false;
1463 // Do not extend compiler-created variables except for the this pointer.
1464 if (D.isImplicit() && &D != CXXABIThisDecl)
1465 return false;
1466 QualType Ty = D.getType();
1467  // No need to extend volatiles; they already have a memory location.
1468 if (Ty.isVolatileQualified())
1469 return false;
1470 // Don't extend variables that exceed a certain size.
1471 if (Context.getTypeSize(T: Ty) > maxFakeUseAggregateSize(C: Context))
1472 return false;
1473 // Do not extend variables in nodebug or optnone functions.
1474 if (FuncDecl->hasAttr<NoDebugAttr>() || FuncDecl->hasAttr<OptimizeNoneAttr>())
1475 return false;
1476 return true;
1477}
1478
1479/// EmitAutoVarAlloca - Emit the alloca and debug information for a
1480/// local variable. Does not emit initialization or destruction.
1481CodeGenFunction::AutoVarEmission
1482CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1483 QualType Ty = D.getType();
1484 assert(
1485 Ty.getAddressSpace() == LangAS::Default ||
1486 (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1487
1488 AutoVarEmission emission(D);
1489
1490 bool isEscapingByRef = D.isEscapingByref();
1491 emission.IsEscapingByRef = isEscapingByRef;
1492
1493 CharUnits alignment = getContext().getDeclAlign(D: &D);
1494
1495 // If the type is variably-modified, emit all the VLA sizes for it.
1496 if (Ty->isVariablyModifiedType())
1497 EmitVariablyModifiedType(Ty);
1498
1499 auto *DI = getDebugInfo();
1500 bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1501
1502 Address address = Address::invalid();
1503 RawAddress AllocaAddr = RawAddress::invalid();
1504 Address OpenMPLocalAddr = Address::invalid();
1505 if (CGM.getLangOpts().OpenMPIRBuilder)
1506 OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(CGF&: *this, VD: &D);
1507 else
1508 OpenMPLocalAddr =
1509 getLangOpts().OpenMP
1510 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(CGF&: *this, VD: &D)
1511 : Address::invalid();
1512
1513 bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1514
1515 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1516 address = OpenMPLocalAddr;
1517 AllocaAddr = OpenMPLocalAddr;
1518 } else if (Ty->isConstantSizeType()) {
1519 // If this value is an array or struct with a statically determinable
1520 // constant initializer, there are optimizations we can do.
1521 //
1522 // TODO: We should constant-evaluate the initializer of any variable,
1523 // as long as it is initialized by a constant expression. Currently,
1524 // isConstantInitializer produces wrong answers for structs with
1525 // reference or bitfield members, and a few other cases, and checking
1526 // for POD-ness protects us from some of these.
1527 if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1528 (D.isConstexpr() ||
1529 ((Ty.isPODType(Context: getContext()) ||
1530 getContext().getBaseElementType(QT: Ty)->isObjCObjectPointerType()) &&
1531 D.getInit()->isConstantInitializer(Ctx&: getContext(), ForRef: false)))) {
1532
1533 // If the variable's a const type, and it's neither an NRVO
1534 // candidate nor a __block variable and has no mutable members,
1535 // emit it as a global instead.
1536      // The exception is a variable located in a non-constant address space
1537      // in OpenCL.
1538 bool NeedsDtor =
1539 D.needsDestruction(Ctx: getContext()) == QualType::DK_cxx_destructor;
1540 if ((!getLangOpts().OpenCL ||
1541 Ty.getAddressSpace() == LangAS::opencl_constant) &&
1542 (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1543 !isEscapingByRef &&
1544 Ty.isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: !NeedsDtor))) {
1545 EmitStaticVarDecl(D, Linkage: llvm::GlobalValue::InternalLinkage);
1546
1547 // Signal this condition to later callbacks.
1548 emission.Addr = Address::invalid();
1549 assert(emission.wasEmittedAsGlobal());
1550 return emission;
1551 }
1552
1553 // Otherwise, tell the initialization code that we're in this case.
1554 emission.IsConstantAggregate = true;
1555 }
1556
1557 // A normal fixed sized variable becomes an alloca in the entry block,
1558 // unless:
1559 // - it's an NRVO variable.
1560 // - we are compiling OpenMP and it's an OpenMP local variable.
1561 if (NRVO) {
1562 // The named return value optimization: allocate this variable in the
1563 // return slot, so that we can elide the copy when returning this
1564 // variable (C++0x [class.copy]p34).
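        // For example, in:
        //   S make() { S s; s.fill(); return s; }
        // 's' is constructed directly in the caller-provided return slot, so
        // returning it needs no copy or move.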
1565 AllocaAddr =
1566 RawAddress(ReturnValue.emitRawPointer(CGF&: *this),
1567 ReturnValue.getElementType(), ReturnValue.getAlignment());
1568 address = MaybeCastStackAddressSpace(Alloca: AllocaAddr, DestLangAS: Ty.getAddressSpace());
1569
1570 if (const auto *RD = Ty->getAsRecordDecl()) {
1571 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD);
1572 (CXXRD && !CXXRD->hasTrivialDestructor()) ||
1573 RD->isNonTrivialToPrimitiveDestroy()) {
1574 // Create a flag that is used to indicate when the NRVO was applied
1575 // to this variable. Set it to zero to indicate that NRVO was not
1576 // applied.
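          // The return statement that actually performs NRVO later stores
          // 'true' into this flag, and the destructor cleanup tests it so the
          // object living in the return slot is not destroyed on that path.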
1577 llvm::Value *Zero = Builder.getFalse();
1578 RawAddress NRVOFlag =
1579 CreateTempAlloca(Ty: Zero->getType(), align: CharUnits::One(), Name: "nrvo");
1580 EnsureInsertPoint();
1581 Builder.CreateStore(Val: Zero, Addr: NRVOFlag);
1582
1583 // Record the NRVO flag for this variable.
1584 NRVOFlags[&D] = NRVOFlag.getPointer();
1585 emission.NRVOFlag = NRVOFlag.getPointer();
1586 }
1587 }
1588 } else {
1589 CharUnits allocaAlignment;
1590 llvm::Type *allocaTy;
1591 if (isEscapingByRef) {
1592 auto &byrefInfo = getBlockByrefInfo(var: &D);
1593 allocaTy = byrefInfo.Type;
1594 allocaAlignment = byrefInfo.ByrefAlignment;
1595 } else {
1596 allocaTy = ConvertTypeForMem(T: Ty);
1597 allocaAlignment = alignment;
1598 }
1599
1600 // Create the alloca. Note that we set the name separately from
1601 // building the instruction so that it's there even in no-asserts
1602 // builds.
1603 address = CreateTempAlloca(Ty: allocaTy, UseAddrSpace: Ty.getAddressSpace(),
1604 align: allocaAlignment, Name: D.getName(),
1605 /*ArraySize=*/nullptr, Alloca: &AllocaAddr);
1606
1607 // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1608 // the catch parameter starts in the catchpad instruction, and we can't
1609 // insert code in those basic blocks.
1610 bool IsMSCatchParam =
1611 D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1612
1613 // Emit a lifetime intrinsic if meaningful. There's no point in doing this
1614 // if we don't have a valid insertion point (?).
1615 if (HaveInsertPoint() && !IsMSCatchParam) {
1616 // If there's a jump into the lifetime of this variable, its lifetime
1617 // gets broken up into several regions in IR, which requires more work
1618 // to handle correctly. For now, just omit the intrinsics; this is a
1619 // rare case, and it's better to just be conservatively correct.
1620 // PR28267.
1621 //
1622 // We have to do this in all language modes if there's a jump past the
1623 // declaration. We also have to do it in C if there's a jump to an
1624 // earlier point in the current block because non-VLA lifetimes begin as
1625 // soon as the containing block is entered, not when its variables
1626 // actually come into scope; suppressing the lifetime annotations
1627 // completely in this case is unnecessarily pessimistic, but again, this
1628 // is rare.
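      //
      // A minimal example of the problematic pattern:
      //   goto skip;     // jumps over the declaration below
      //   int x = f();
      //  skip:
      //   use(&x);       // x's lifetime would have to be split around the jump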
1629 if (!Bypasses.IsBypassed(D: &D) &&
1630 !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1631 emission.UseLifetimeMarkers =
1632 EmitLifetimeStart(Addr: AllocaAddr.getPointer());
1633 }
1634 } else {
1635 assert(!emission.useLifetimeMarkers());
1636 }
1637 }
1638
1639 if (D.hasAttr<StackProtectorIgnoreAttr>()) {
1640 if (auto *AI = dyn_cast<llvm::AllocaInst>(Val: address.getBasePointer())) {
1641 llvm::LLVMContext &Ctx = Builder.getContext();
1642 auto *Operand = llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 0));
1643 AI->setMetadata(Kind: "stack-protector", Node: llvm::MDNode::get(Context&: Ctx, MDs: {Operand}));
1644 }
1645
1646 std::optional<llvm::Attribute::AttrKind> Attr =
1647 CGM.StackProtectorAttribute(D: &D);
1648 if (Attr && (*Attr == llvm::Attribute::StackProtectReq)) {
1649 CGM.getDiags().Report(Loc: D.getLocation(),
1650 DiagID: diag::warn_stack_protection_ignore_attribute);
1651 }
1652 }
1653 } else {
1654 EnsureInsertPoint();
1655
1656 // Delayed globalization for variable length declarations. This ensures that
1657 // the expression representing the length has been emitted and can be used
1658 // by the definition of the VLA. Since this is an escaped declaration, in
1659 // OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
1660 // deallocation call to __kmpc_free_shared() is emitted later.
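    // Schematically (assuming the usual device runtime entry points), the
    // alloca is replaced by something like:
    //   %ptr = call ptr @__kmpc_alloc_shared(i64 %size)
    //   ...
    //   call void @__kmpc_free_shared(ptr %ptr, i64 %size)  ; at scope exit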
1661 bool VarAllocated = false;
1662 if (getLangOpts().OpenMPIsTargetDevice) {
1663 auto &RT = CGM.getOpenMPRuntime();
1664 if (RT.isDelayedVariableLengthDecl(CGF&: *this, VD: &D)) {
1665 // Emit call to __kmpc_alloc_shared() instead of the alloca.
1666 std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
1667 RT.getKmpcAllocShared(CGF&: *this, VD: &D);
1668
1669 // Save the address of the allocation:
1670 LValue Base = MakeAddrLValue(V: AddrSizePair.first, T: D.getType(),
1671 Alignment: CGM.getContext().getDeclAlign(D: &D),
1672 Source: AlignmentSource::Decl);
1673 address = Base.getAddress();
1674
1675 // Push a cleanup block to emit the call to __kmpc_free_shared in the
1676 // appropriate location at the end of the scope of the
1677 // __kmpc_alloc_shared functions:
1678 pushKmpcAllocFree(Kind: NormalCleanup, AddrSizePair);
1679
1680 // Mark variable as allocated:
1681 VarAllocated = true;
1682 }
1683 }
1684
1685 if (!VarAllocated) {
1686 if (!DidCallStackSave) {
1687 // Save the stack.
1688 Address Stack =
1689 CreateDefaultAlignTempAlloca(Ty: AllocaInt8PtrTy, Name: "saved_stack");
1690
1691 llvm::Value *V = Builder.CreateStackSave();
1692 assert(V->getType() == AllocaInt8PtrTy);
1693 Builder.CreateStore(Val: V, Addr: Stack);
1694
1695 DidCallStackSave = true;
1696
1697 // Push a cleanup block and restore the stack there.
1698 // FIXME: in general circumstances, this should be an EH cleanup.
1699 pushStackRestore(kind: NormalCleanup, SPMem: Stack);
1700 }
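      // Roughly, for 'void f(int n) { int vla[n]; }' this yields a sketch
      // like:
      //   %saved_stack = alloca ptr
      //   %sp = call ptr @llvm.stacksave()
      //   store ptr %sp, ptr %saved_stack
      //   %vla = alloca i32, i64 %n
      //   ...
      //   call void @llvm.stackrestore(ptr %sp.reload)  ; from the cleanup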
1701
1702 auto VlaSize = getVLASize(vla: Ty);
1703 llvm::Type *llvmTy = ConvertTypeForMem(T: VlaSize.Type);
1704
1705 // Allocate memory for the array.
1706 address = CreateTempAlloca(Ty: llvmTy, align: alignment, Name: "vla", ArraySize: VlaSize.NumElts,
1707 Alloca: &AllocaAddr);
1708 }
1709
1710 // If we have debug info enabled, properly describe the VLA dimensions for
1711 // this type by registering the vla size expression for each of the
1712 // dimensions.
1713 EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1714 }
1715
1716 setAddrOfLocalVar(VD: &D, Addr: address);
1717 emission.Addr = address;
1718 emission.AllocaAddr = AllocaAddr;
1719
1720 // Emit debug info for local var declaration.
1721 if (EmitDebugInfo && HaveInsertPoint()) {
1722 Address DebugAddr = address;
1723 bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1724 DI->setLocation(D.getLocation());
1725
1726 // If NRVO, use a pointer to the return address.
1727 if (UsePointerValue) {
1728 DebugAddr = ReturnValuePointer;
1729 AllocaAddr = ReturnValuePointer;
1730 }
1731 (void)DI->EmitDeclareOfAutoVariable(Decl: &D, AI: AllocaAddr.getPointer(), Builder,
1732 UsePointerValue);
1733 }
1734
1735 if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1736 EmitVarAnnotations(D: &D, V: address.emitRawPointer(CGF&: *this));
1737
1738 // Make sure we call @llvm.lifetime.end.
1739 if (emission.useLifetimeMarkers())
1740 EHStack.pushCleanup<CallLifetimeEnd>(
1741 Kind: NormalEHLifetimeMarker, A: emission.getOriginalAllocatedAddress());
1742
1743 // Analogous to lifetime markers, we use a 'cleanup' to emit fake.use
1744 // calls for local variables. We are exempting volatile variables and
1745 // non-scalars larger than 4 times the size of an unsigned int. Larger
1746 // non-scalars are often allocated in memory and may create unnecessary
1747 // overhead.
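  // When the cleanup fires it loads the variable and emits, roughly,
  //   call void (...) @llvm.fake.use(i32 %x)
  // at the end of the scope, keeping the value observable for debugging.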
1748 if (CGM.getCodeGenOpts().getExtendVariableLiveness() ==
1749 CodeGenOptions::ExtendVariableLivenessKind::All) {
1750 if (shouldExtendLifetime(Context: getContext(), FuncDecl: CurCodeDecl, D, CXXABIThisDecl))
1751 EHStack.pushCleanup<FakeUse>(Kind: NormalFakeUse,
1752 A: emission.getAllocatedAddress());
1753 }
1754
1755 return emission;
1756}
1757
1758static bool isCapturedBy(const VarDecl &, const Expr *);
1759
1760/// Determines whether the given __block variable is potentially
1761/// captured by the given statement.
1762static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1763 if (const Expr *E = dyn_cast<Expr>(Val: S))
1764 return isCapturedBy(Var, E);
1765 for (const Stmt *SubStmt : S->children())
1766 if (isCapturedBy(Var, S: SubStmt))
1767 return true;
1768 return false;
1769}
1770
1771/// Determines whether the given __block variable is potentially
1772/// captured by the given expression.
1773static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1774 // Skip the most common kinds of expressions that make
1775 // hierarchy-walking expensive.
1776 E = E->IgnoreParenCasts();
1777
1778 if (const BlockExpr *BE = dyn_cast<BlockExpr>(Val: E)) {
1779 const BlockDecl *Block = BE->getBlockDecl();
1780 for (const auto &I : Block->captures()) {
1781 if (I.getVariable() == &Var)
1782 return true;
1783 }
1784
1785 // No need to walk into the subexpressions.
1786 return false;
1787 }
1788
1789 if (const StmtExpr *SE = dyn_cast<StmtExpr>(Val: E)) {
1790 const CompoundStmt *CS = SE->getSubStmt();
1791 for (const auto *BI : CS->body())
1792 if (const auto *BIE = dyn_cast<Expr>(Val: BI)) {
1793 if (isCapturedBy(Var, E: BIE))
1794 return true;
1795 }
1796 else if (const auto *DS = dyn_cast<DeclStmt>(Val: BI)) {
1797 // special case declarations
1798 for (const auto *I : DS->decls()) {
1799 if (const auto *VD = dyn_cast<VarDecl>(Val: (I))) {
1800 const Expr *Init = VD->getInit();
1801 if (Init && isCapturedBy(Var, E: Init))
1802 return true;
1803 }
1804 }
1805 }
1806 else
1807        // FIXME: Conservatively assume that arbitrary statements cause capturing.
1808 // Later, provide code to poke into statements for capture analysis.
1809 return true;
1810 return false;
1811 }
1812
1813 for (const Stmt *SubStmt : E->children())
1814 if (isCapturedBy(Var, S: SubStmt))
1815 return true;
1816
1817 return false;
1818}
1819
1820/// Determine whether the given initializer is trivial in the sense
1821/// that it requires no code to be generated.
1822bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1823 if (!Init)
1824 return true;
1825
1826 if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Val: Init))
1827 if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1828 if (Constructor->isTrivial() &&
1829 Constructor->isDefaultConstructor() &&
1830 !Construct->requiresZeroInitialization())
1831 return true;
1832
1833 return false;
1834}
1835
1836void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1837 const VarDecl &D,
1838 Address Loc) {
1839 auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1840 auto trivialAutoVarInitMaxSize =
1841 getContext().getLangOpts().TrivialAutoVarInitMaxSize;
1842 CharUnits Size = getContext().getTypeSizeInChars(T: type);
1843 bool isVolatile = type.isVolatileQualified();
1844 if (!Size.isZero()) {
1845    // We skip auto-init for variables based on their alloc size. Take this as
1846    // an example: "struct Foo {int x; char buff[1024];}" and assume the
1847    // max-size flag is 1023. All variables of type Foo will be skipped.
1848    // Ideally, we would only skip the buff array and still auto-init x here.
1849    // TODO: Improve the size filtering to filter by member size.
1850 auto allocSize = CGM.getDataLayout().getTypeAllocSize(Ty: Loc.getElementType());
1851 switch (trivialAutoVarInit) {
1852 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1853 llvm_unreachable("Uninitialized handled by caller");
1854 case LangOptions::TrivialAutoVarInitKind::Zero:
1855 if (CGM.stopAutoInit())
1856 return;
1857 if (trivialAutoVarInitMaxSize > 0 &&
1858 allocSize > trivialAutoVarInitMaxSize)
1859 return;
1860 emitStoresForZeroInit(D, Loc, isVolatile);
1861 break;
1862 case LangOptions::TrivialAutoVarInitKind::Pattern:
1863 if (CGM.stopAutoInit())
1864 return;
1865 if (trivialAutoVarInitMaxSize > 0 &&
1866 allocSize > trivialAutoVarInitMaxSize)
1867 return;
1868 emitStoresForPatternInit(D, Loc, isVolatile);
1869 break;
1870 }
1871 return;
1872 }
1873
1874 // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1875 // them, so emit a memcpy with the VLA size to initialize each element.
1876 // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1877 // will catch that code, but there exists code which generates zero-sized
1878 // VLAs. Be nice and initialize whatever they requested.
1879 const auto *VlaType = getContext().getAsVariableArrayType(T: type);
1880 if (!VlaType)
1881 return;
1882 auto VlaSize = getVLASize(vla: VlaType);
1883 auto SizeVal = VlaSize.NumElts;
1884 CharUnits EltSize = getContext().getTypeSizeInChars(T: VlaSize.Type);
1885 switch (trivialAutoVarInit) {
1886 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1887 llvm_unreachable("Uninitialized handled by caller");
1888
1889 case LangOptions::TrivialAutoVarInitKind::Zero: {
1890 if (CGM.stopAutoInit())
1891 return;
1892 if (!EltSize.isOne())
1893 SizeVal = Builder.CreateNUWMul(LHS: SizeVal, RHS: CGM.getSize(numChars: EltSize));
1894 auto *I = Builder.CreateMemSet(Dest: Loc, Value: llvm::ConstantInt::get(Ty: Int8Ty, V: 0),
1895 Size: SizeVal, IsVolatile: isVolatile);
1896 I->addAnnotationMetadata(Annotation: "auto-init");
1897 break;
1898 }
1899
1900 case LangOptions::TrivialAutoVarInitKind::Pattern: {
1901 if (CGM.stopAutoInit())
1902 return;
1903 llvm::Type *ElTy = Loc.getElementType();
1904 llvm::Constant *Constant = constWithPadding(
1905 CGM, isPattern: IsPattern::Yes, constant: initializationPatternFor(CGM, ElTy));
1906 CharUnits ConstantAlign = getContext().getTypeAlignInChars(T: VlaSize.Type);
1907 llvm::BasicBlock *SetupBB = createBasicBlock(name: "vla-setup.loop");
1908 llvm::BasicBlock *LoopBB = createBasicBlock(name: "vla-init.loop");
1909 llvm::BasicBlock *ContBB = createBasicBlock(name: "vla-init.cont");
1910 llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1911 LHS: SizeVal, RHS: llvm::ConstantInt::get(Ty: SizeVal->getType(), V: 0),
1912 Name: "vla.iszerosized");
1913 Builder.CreateCondBr(Cond: IsZeroSizedVLA, True: ContBB, False: SetupBB);
1914 EmitBlock(BB: SetupBB);
1915 if (!EltSize.isOne())
1916 SizeVal = Builder.CreateNUWMul(LHS: SizeVal, RHS: CGM.getSize(numChars: EltSize));
1917 llvm::Value *BaseSizeInChars =
1918 llvm::ConstantInt::get(Ty: IntPtrTy, V: EltSize.getQuantity());
1919 Address Begin = Loc.withElementType(ElemTy: Int8Ty);
1920 llvm::Value *End = Builder.CreateInBoundsGEP(Ty: Begin.getElementType(),
1921 Ptr: Begin.emitRawPointer(CGF&: *this),
1922 IdxList: SizeVal, Name: "vla.end");
1923 llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1924 EmitBlock(BB: LoopBB);
1925 llvm::PHINode *Cur = Builder.CreatePHI(Ty: Begin.getType(), NumReservedValues: 2, Name: "vla.cur");
1926 Cur->addIncoming(V: Begin.emitRawPointer(CGF&: *this), BB: OriginBB);
1927 CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(elementSize: EltSize);
1928 auto *I =
1929 Builder.CreateMemCpy(Dest: Address(Cur, Int8Ty, CurAlign),
1930 Src: createUnnamedGlobalForMemcpyFrom(
1931 CGM, D, Builder, Constant, Align: ConstantAlign),
1932 Size: BaseSizeInChars, IsVolatile: isVolatile);
1933 I->addAnnotationMetadata(Annotation: "auto-init");
1934 llvm::Value *Next =
1935 Builder.CreateInBoundsGEP(Ty: Int8Ty, Ptr: Cur, IdxList: BaseSizeInChars, Name: "vla.next");
1936 llvm::Value *Done = Builder.CreateICmpEQ(LHS: Next, RHS: End, Name: "vla-init.isdone");
1937 Builder.CreateCondBr(Cond: Done, True: ContBB, False: LoopBB);
1938 Cur->addIncoming(V: Next, BB: LoopBB);
1939 EmitBlock(BB: ContBB);
1940 } break;
1941 }
1942}
1943
1944void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1945 assert(emission.Variable && "emission was not valid!");
1946
1947 // If this was emitted as a global constant, we're done.
1948 if (emission.wasEmittedAsGlobal()) return;
1949
1950 const VarDecl &D = *emission.Variable;
1951 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF&: *this, TemporaryLocation: D.getLocation());
1952 ApplyAtomGroup Grp(getDebugInfo());
1953 QualType type = D.getType();
1954
1955 // If this local has an initializer, emit it now.
1956 const Expr *Init = D.getInit();
1957
1958 // If we are at an unreachable point, we don't need to emit the initializer
1959 // unless it contains a label.
1960 if (!HaveInsertPoint()) {
1961 if (!Init || !ContainsLabel(S: Init)) {
1962 PGO->markStmtMaybeUsed(S: Init);
1963 return;
1964 }
1965 EnsureInsertPoint();
1966 }
1967
1968 // Initialize the structure of a __block variable.
1969 if (emission.IsEscapingByRef)
1970 emitByrefStructureInit(emission);
1971
1972  // Initialize the variable here if it doesn't have an initializer and it is a
1973 // C struct that is non-trivial to initialize or an array containing such a
1974 // struct.
1975 if (!Init &&
1976 type.isNonTrivialToPrimitiveDefaultInitialize() ==
1977 QualType::PDIK_Struct) {
1978 LValue Dst = MakeAddrLValue(Addr: emission.getAllocatedAddress(), T: type);
1979 if (emission.IsEscapingByRef)
1980 drillIntoBlockVariable(CGF&: *this, lvalue&: Dst, var: &D);
1981 defaultInitNonTrivialCStructVar(Dst);
1982 return;
1983 }
1984
1985 // Check whether this is a byref variable that's potentially
1986 // captured and moved by its own initializer. If so, we'll need to
1987 // emit the initializer first, then copy into the variable.
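  // A typical case is a __block variable initialized by a block that captures
  // it, e.g. '__block int x = ^{ return x; }();'. The block may move x to the
  // heap while the initializer runs, so the initializer is emitted first and
  // the result is then stored through the byref forwarding pointer.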
1988 bool capturedByInit =
1989 Init && emission.IsEscapingByRef && isCapturedBy(Var: D, E: Init);
1990
1991 bool locIsByrefHeader = !capturedByInit;
1992 const Address Loc =
1993 locIsByrefHeader ? emission.getObjectAddress(CGF&: *this) : emission.Addr;
1994
1995 auto hasNoTrivialAutoVarInitAttr = [&](const Decl *D) {
1996 return D && D->hasAttr<NoTrivialAutoVarInitAttr>();
1997 };
1998 // Note: constexpr already initializes everything correctly.
1999 LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
2000 ((D.isConstexpr() || D.getAttr<UninitializedAttr>() ||
2001 hasNoTrivialAutoVarInitAttr(type->getAsTagDecl()) ||
2002 hasNoTrivialAutoVarInitAttr(CurFuncDecl))
2003 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
2004 : getContext().getLangOpts().getTrivialAutoVarInit());
2005
2006 auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
2007 if (trivialAutoVarInit ==
2008 LangOptions::TrivialAutoVarInitKind::Uninitialized)
2009 return;
2010
2011 // Only initialize a __block's storage: we always initialize the header.
2012 if (emission.IsEscapingByRef && !locIsByrefHeader)
2013 Loc = emitBlockByrefAddress(baseAddr: Loc, V: &D, /*follow=*/followForward: false);
2014
2015 return emitZeroOrPatternForAutoVarInit(type, D, Loc);
2016 };
2017
2018 if (isTrivialInitializer(Init))
2019 return initializeWhatIsTechnicallyUninitialized(Loc);
2020
2021 llvm::Constant *constant = nullptr;
2022 if (emission.IsConstantAggregate ||
2023 D.mightBeUsableInConstantExpressions(C: getContext())) {
2024 assert(!capturedByInit && "constant init contains a capturing block?");
2025 constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
2026 if (constant && !constant->isZeroValue() &&
2027 (trivialAutoVarInit !=
2028 LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
2029 IsPattern isPattern =
2030 (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
2031 ? IsPattern::Yes
2032 : IsPattern::No;
2033 // C guarantees that brace-init with fewer initializers than members in
2034 // the aggregate will initialize the rest of the aggregate as-if it were
2035 // static initialization. In turn static initialization guarantees that
2036 // padding is initialized to zero bits. We could instead pattern-init if D
2037 // has any ImplicitValueInitExpr, but that seems to be unintuitive
2038 // behavior.
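      // For example, given 'struct S { int a; char c; int b; };' and
      // 'S s = {1};', members 'c' and 'b' and the padding after 'c' end up
      // zero-initialized, just as they would for a static S.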
2039 constant = constWithPadding(CGM, isPattern: IsPattern::No,
2040 constant: replaceUndef(CGM, isPattern, constant));
2041 }
2042
2043 if (constant && type->isBitIntType() &&
2044 CGM.getTypes().typeRequiresSplitIntoByteArray(ASTTy: type)) {
2045 // Constants for long _BitInt types are split into individual bytes.
2046 // Try to fold these back into an integer constant so it can be stored
2047 // properly.
2048 llvm::Type *LoadType =
2049 CGM.getTypes().convertTypeForLoadStore(T: type, LLVMTy: constant->getType());
2050 constant = llvm::ConstantFoldLoadFromConst(
2051 C: constant, Ty: LoadType, Offset: llvm::APInt::getZero(numBits: 32), DL: CGM.getDataLayout());
2052 }
2053 }
2054
2055 if (!constant) {
2056 if (trivialAutoVarInit !=
2057 LangOptions::TrivialAutoVarInitKind::Uninitialized) {
2058 // At this point, we know D has an Init expression, but isn't a constant.
2059 // - If D is not a scalar, auto-var-init conservatively (members may be
2060 // left uninitialized by constructor Init expressions for example).
2061 // - If D is a scalar, we only need to auto-var-init if there is a
2062 // self-reference. Otherwise, the Init expression should be sufficient.
2063 // It may be that the Init expression uses other uninitialized memory,
2064 // but auto-var-init here would not help, as auto-init would get
2065 // overwritten by Init.
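      // For example, 'int x = x + 1;' reads 'x' before the value computed by
      // Init is stored, so auto-init is still emitted for it.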
2066 if (!type->isScalarType() || capturedByInit || isAccessedBy(var: D, s: Init)) {
2067 initializeWhatIsTechnicallyUninitialized(Loc);
2068 }
2069 }
2070 LValue lv = MakeAddrLValue(Addr: Loc, T: type);
2071 lv.setNonGC(true);
2072 return EmitExprAsInit(init: Init, D: &D, lvalue: lv, capturedByInit);
2073 }
2074
2075 PGO->markStmtMaybeUsed(S: Init);
2076
2077 if (!emission.IsConstantAggregate) {
2078 // For simple scalar/complex initialization, store the value directly.
2079 LValue lv = MakeAddrLValue(Addr: Loc, T: type);
2080 lv.setNonGC(true);
2081 return EmitStoreThroughLValue(Src: RValue::get(V: constant), Dst: lv, isInit: true);
2082 }
2083
2084 emitStoresForConstant(D, Loc: Loc.withElementType(ElemTy: CGM.Int8Ty),
2085 isVolatile: type.isVolatileQualified(), constant,
2086 /*IsAutoInit=*/false);
2087}
2088
2089void CodeGenFunction::MaybeEmitDeferredVarDeclInit(const VarDecl *VD) {
2090 if (auto *DD = dyn_cast_if_present<DecompositionDecl>(Val: VD)) {
2091 for (auto *B : DD->flat_bindings())
2092 if (auto *HD = B->getHoldingVar())
2093 EmitVarDecl(D: *HD);
2094 }
2095}
2096
2097/// Emit an expression as an initializer for an object (variable, field, etc.)
2098/// at the given location. The expression is not necessarily the normal
2099/// initializer for the object, and the address is not necessarily
2100/// its normal location.
2101///
2102/// \param init the initializing expression
2103/// \param D the object to act as if we're initializing
2104/// \param lvalue the lvalue to initialize
2105/// \param capturedByInit true if \p D is a __block variable
2106/// whose address is potentially changed by the initializer
2107void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
2108 LValue lvalue, bool capturedByInit) {
2109 QualType type = D->getType();
2110
2111 if (type->isReferenceType()) {
2112 RValue rvalue = EmitReferenceBindingToExpr(E: init);
2113 if (capturedByInit)
2114 drillIntoBlockVariable(CGF&: *this, lvalue, var: cast<VarDecl>(Val: D));
2115 EmitStoreThroughLValue(Src: rvalue, Dst: lvalue, isInit: true);
2116 return;
2117 }
2118 switch (getEvaluationKind(T: type)) {
2119 case TEK_Scalar:
2120 EmitScalarInit(init, D, lvalue, capturedByInit);
2121 return;
2122 case TEK_Complex: {
2123 ComplexPairTy complex = EmitComplexExpr(E: init);
2124 if (capturedByInit)
2125 drillIntoBlockVariable(CGF&: *this, lvalue, var: cast<VarDecl>(Val: D));
2126 EmitStoreOfComplex(V: complex, dest: lvalue, /*init*/ isInit: true);
2127 return;
2128 }
2129 case TEK_Aggregate:
2130 if (type->isAtomicType()) {
2131 EmitAtomicInit(E: const_cast<Expr*>(init), lvalue);
2132 } else {
2133 AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
2134 if (isa<VarDecl>(Val: D))
2135 Overlap = AggValueSlot::DoesNotOverlap;
2136 else if (auto *FD = dyn_cast<FieldDecl>(Val: D))
2137 Overlap = getOverlapForFieldInit(FD);
2138 // TODO: how can we delay here if D is captured by its initializer?
2139 EmitAggExpr(E: init,
2140 AS: AggValueSlot::forLValue(LV: lvalue, isDestructed: AggValueSlot::IsDestructed,
2141 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
2142 isAliased: AggValueSlot::IsNotAliased, mayOverlap: Overlap));
2143 }
2144 return;
2145 }
2146 llvm_unreachable("bad evaluation kind");
2147}
2148
2149/// Enter a destroy cleanup for the given local variable.
2150void CodeGenFunction::emitAutoVarTypeCleanup(
2151 const CodeGenFunction::AutoVarEmission &emission,
2152 QualType::DestructionKind dtorKind) {
2153 assert(dtorKind != QualType::DK_none);
2154
2155 // Note that for __block variables, we want to destroy the
2156 // original stack object, not the possibly forwarded object.
2157 Address addr = emission.getObjectAddress(CGF&: *this);
2158
2159 const VarDecl *var = emission.Variable;
2160 QualType type = var->getType();
2161
2162 CleanupKind cleanupKind = NormalAndEHCleanup;
2163 CodeGenFunction::Destroyer *destroyer = nullptr;
2164
2165 switch (dtorKind) {
2166 case QualType::DK_none:
2167 llvm_unreachable("no cleanup for trivially-destructible variable");
2168
2169 case QualType::DK_cxx_destructor:
2170 // If there's an NRVO flag on the emission, we need a different
2171 // cleanup.
2172 if (emission.NRVOFlag) {
2173 assert(!type->isArrayType());
2174 CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
2175 EHStack.pushCleanup<DestroyNRVOVariableCXX>(Kind: cleanupKind, A: addr, A: type, A: dtor,
2176 A: emission.NRVOFlag);
2177 return;
2178 }
2179 break;
2180
2181 case QualType::DK_objc_strong_lifetime:
2182 // Suppress cleanups for pseudo-strong variables.
2183 if (var->isARCPseudoStrong()) return;
2184
2185 // Otherwise, consider whether to use an EH cleanup or not.
2186 cleanupKind = getARCCleanupKind();
2187
2188 // Use the imprecise destroyer by default.
2189 if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
2190 destroyer = CodeGenFunction::destroyARCStrongImprecise;
2191 break;
2192
2193 case QualType::DK_objc_weak_lifetime:
2194 break;
2195
2196 case QualType::DK_nontrivial_c_struct:
2197 destroyer = CodeGenFunction::destroyNonTrivialCStruct;
2198 if (emission.NRVOFlag) {
2199 assert(!type->isArrayType());
2200 EHStack.pushCleanup<DestroyNRVOVariableC>(Kind: cleanupKind, A: addr,
2201 A: emission.NRVOFlag, A: type);
2202 return;
2203 }
2204 break;
2205 }
2206
2207 // If we haven't chosen a more specific destroyer, use the default.
2208 if (!destroyer) destroyer = getDestroyer(destructionKind: dtorKind);
2209
2210 // Use an EH cleanup in array destructors iff the destructor itself
2211 // is being pushed as an EH cleanup.
2212 bool useEHCleanup = (cleanupKind & EHCleanup);
2213 EHStack.pushCleanup<DestroyObject>(Kind: cleanupKind, A: addr, A: type, A: destroyer,
2214 A: useEHCleanup);
2215}
2216
2217void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
2218 assert(emission.Variable && "emission was not valid!");
2219
2220 // If this was emitted as a global constant, we're done.
2221 if (emission.wasEmittedAsGlobal()) return;
2222
2223 // If we don't have an insertion point, we're done. Sema prevents
2224 // us from jumping into any of these scopes anyway.
2225 if (!HaveInsertPoint()) return;
2226
2227 const VarDecl &D = *emission.Variable;
2228
2229 // Check the type for a cleanup.
2230 if (QualType::DestructionKind dtorKind = D.needsDestruction(Ctx: getContext()))
2231 emitAutoVarTypeCleanup(emission, dtorKind);
2232
2233 // In GC mode, honor objc_precise_lifetime.
2234 if (getLangOpts().getGC() != LangOptions::NonGC &&
2235 D.hasAttr<ObjCPreciseLifetimeAttr>()) {
2236 EHStack.pushCleanup<ExtendGCLifetime>(Kind: NormalCleanup, A: &D);
2237 }
2238
2239 // Handle the cleanup attribute.
2240 if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2241 const FunctionDecl *FD = CA->getFunctionDecl();
2242
2243 llvm::Constant *F = CGM.GetAddrOfFunction(GD: FD);
2244 assert(F && "Could not find function!");
2245
2246 const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(GD: FD);
2247 EHStack.pushCleanup<CallCleanupFunction>(Kind: NormalAndEHCleanup, A: F, A: &Info, A: &D,
2248 A: CA);
2249 }
2250
2251 // If this is a block variable, call _Block_object_destroy
2252 // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2253 // mode.
2254 if (emission.IsEscapingByRef &&
2255 CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2256 BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
2257 if (emission.Variable->getType().isObjCGCWeak())
2258 Flags |= BLOCK_FIELD_IS_WEAK;
2259 enterByrefCleanup(Kind: NormalAndEHCleanup, Addr: emission.Addr, Flags,
2260 /*LoadBlockVarAddr*/ false,
2261 CanThrow: cxxDestructorCanThrow(T: emission.Variable->getType()));
2262 }
2263}
2264
2265CodeGenFunction::Destroyer *
2266CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
2267 switch (kind) {
2268 case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2269 case QualType::DK_cxx_destructor:
2270 return destroyCXXObject;
2271 case QualType::DK_objc_strong_lifetime:
2272 return destroyARCStrongPrecise;
2273 case QualType::DK_objc_weak_lifetime:
2274 return destroyARCWeak;
2275 case QualType::DK_nontrivial_c_struct:
2276 return destroyNonTrivialCStruct;
2277 }
2278 llvm_unreachable("Unknown DestructionKind");
2279}
2280
2281/// pushEHDestroy - Push the standard destructor for the given type as
2282/// an EH-only cleanup.
2283void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
2284 Address addr, QualType type) {
2285 assert(dtorKind && "cannot push destructor for trivial type");
2286 assert(needsEHCleanup(dtorKind));
2287
2288 pushDestroy(kind: EHCleanup, addr, type, destroyer: getDestroyer(kind: dtorKind), useEHCleanupForArray: true);
2289}
2290
2291/// pushDestroy - Push the standard destructor for the given type as
2292/// at least a normal cleanup.
2293void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
2294 Address addr, QualType type) {
2295 assert(dtorKind && "cannot push destructor for trivial type");
2296
2297 CleanupKind cleanupKind = getCleanupKind(kind: dtorKind);
2298 pushDestroy(kind: cleanupKind, addr, type, destroyer: getDestroyer(kind: dtorKind),
2299 useEHCleanupForArray: cleanupKind & EHCleanup);
2300}
2301
2302void CodeGenFunction::pushLifetimeExtendedDestroy(
2303 QualType::DestructionKind dtorKind, Address addr, QualType type) {
2304 CleanupKind cleanupKind = getCleanupKind(kind: dtorKind);
2305 pushLifetimeExtendedDestroy(kind: cleanupKind, addr, type, destroyer: getDestroyer(kind: dtorKind),
2306 useEHCleanupForArray: cleanupKind & EHCleanup);
2307}
2308
2309void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
2310 QualType type, Destroyer *destroyer,
2311 bool useEHCleanupForArray) {
2312 pushFullExprCleanup<DestroyObject>(kind: cleanupKind, A: addr, A: type, A: destroyer,
2313 A: useEHCleanupForArray);
2314}
2315
2316// Pushes a destroy and defers its deactivation until its
2317// CleanupDeactivationScope is exited.
2318void CodeGenFunction::pushDestroyAndDeferDeactivation(
2319 QualType::DestructionKind dtorKind, Address addr, QualType type) {
2320 assert(dtorKind && "cannot push destructor for trivial type");
2321
2322 CleanupKind cleanupKind = getCleanupKind(kind: dtorKind);
2323 pushDestroyAndDeferDeactivation(
2324 cleanupKind, addr, type, destroyer: getDestroyer(kind: dtorKind), useEHCleanupForArray: cleanupKind & EHCleanup);
2325}
2326
2327void CodeGenFunction::pushDestroyAndDeferDeactivation(
2328 CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
2329 bool useEHCleanupForArray) {
2330 llvm::Instruction *DominatingIP =
2331 Builder.CreateFlagLoad(Addr: llvm::Constant::getNullValue(Ty: Int8PtrTy));
2332 pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray);
2333 DeferredDeactivationCleanupStack.push_back(
2334 Elt: {.Cleanup: EHStack.stable_begin(), .DominatingIP: DominatingIP});
2335}
2336
2337void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
2338 EHStack.pushCleanup<CallStackRestore>(Kind, A: SPMem);
2339}
2340
2341void CodeGenFunction::pushKmpcAllocFree(
2342 CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
2343 EHStack.pushCleanup<KmpcAllocFree>(Kind, A: AddrSizePair);
2344}
2345
2346void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
2347 Address addr, QualType type,
2348 Destroyer *destroyer,
2349 bool useEHCleanupForArray) {
2350 // If we're not in a conditional branch, we don't need to bother generating a
2351 // conditional cleanup.
2352 if (!isInConditionalBranch()) {
2353 // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2354 // around in case a temporary's destructor throws an exception.
2355
2356 // Add the cleanup to the EHStack. After the full-expr, this would be
2357 // deactivated before being popped from the stack.
2358 pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer,
2359 useEHCleanupForArray);
2360
2361 // Since this is lifetime-extended, push it once again to the EHStack after
2362 // the full expression.
2363 return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
2364 Kind: cleanupKind, ActiveFlag: Address::invalid(), A: addr, A: type, A: destroyer,
2365 A: useEHCleanupForArray);
2366 }
2367
2368 // Otherwise, we should only destroy the object if it's been initialized.
2369
2370 using ConditionalCleanupType =
2371 EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
2372 Destroyer *, bool>;
2373 DominatingValue<Address>::saved_type SavedAddr = saveValueInCond(value: addr);
2374
2375 // Remember to emit cleanup if we branch-out before end of full-expression
2376 // (eg: through stmt-expr or coro suspensions).
2377 AllocaTrackerRAII DeactivationAllocas(*this);
2378 Address ActiveFlagForDeactivation = createCleanupActiveFlag();
2379
2380 pushCleanupAndDeferDeactivation<ConditionalCleanupType>(
2381 Kind: cleanupKind, A: SavedAddr, A: type, A: destroyer, A: useEHCleanupForArray);
2382 initFullExprCleanupWithFlag(ActiveFlag: ActiveFlagForDeactivation);
2383 EHCleanupScope &cleanup = cast<EHCleanupScope>(Val&: *EHStack.begin());
2384 // Erase the active flag if the cleanup was not emitted.
2385 cleanup.AddAuxAllocas(Allocas: std::move(DeactivationAllocas).Take());
2386
2387 // Since this is lifetime-extended, push it once again to the EHStack after
2388 // the full expression.
2389 // The previous active flag would always be 'false' due to forced deferred
2390 // deactivation. Use a separate flag for lifetime-extension to correctly
2391 // remember if this branch was taken and the object was initialized.
2392 Address ActiveFlagForLifetimeExt = createCleanupActiveFlag();
2393 pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
2394 Kind: cleanupKind, ActiveFlag: ActiveFlagForLifetimeExt, A: SavedAddr, A: type, A: destroyer,
2395 A: useEHCleanupForArray);
2396}
2397
2398/// emitDestroy - Immediately perform the destruction of the given
2399/// object.
2400///
2401/// \param addr - the address of the object; a type*
2402/// \param type - the type of the object; if an array type, all
2403/// objects are destroyed in reverse order
2404/// \param destroyer - the function to call to destroy individual
2405/// elements
2406/// \param useEHCleanupForArray - whether an EH cleanup should be
2407/// used when destroying array elements, in case one of the
2408/// destructions throws an exception
2409void CodeGenFunction::emitDestroy(Address addr, QualType type,
2410 Destroyer *destroyer,
2411 bool useEHCleanupForArray) {
2412 const ArrayType *arrayType = getContext().getAsArrayType(T: type);
2413 if (!arrayType)
2414 return destroyer(*this, addr, type);
2415
2416 llvm::Value *length = emitArrayLength(arrayType, baseType&: type, addr);
2417
2418 CharUnits elementAlign =
2419 addr.getAlignment()
2420 .alignmentOfArrayElement(elementSize: getContext().getTypeSizeInChars(T: type));
2421
2422 // Normally we have to check whether the array is zero-length.
2423 bool checkZeroLength = true;
2424
2425 // But if the array length is constant, we can suppress that.
2426 if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(Val: length)) {
2427 // ...and if it's constant zero, we can just skip the entire thing.
2428 if (constLength->isZero()) return;
2429 checkZeroLength = false;
2430 }
2431
2432 llvm::Value *begin = addr.emitRawPointer(CGF&: *this);
2433 llvm::Value *end =
2434 Builder.CreateInBoundsGEP(Ty: addr.getElementType(), Ptr: begin, IdxList: length);
2435 emitArrayDestroy(begin, end, elementType: type, elementAlign, destroyer,
2436 checkZeroLength, useEHCleanup: useEHCleanupForArray);
2437}
2438
2439/// emitArrayDestroy - Destroys all the elements of the given array,
2440/// beginning from last to first. The array cannot be zero-length.
2441///
2442/// \param begin - a type* denoting the first element of the array
2443/// \param end - a type* denoting one past the end of the array
2444/// \param elementType - the element type of the array
2445/// \param destroyer - the function to call to destroy elements
2446/// \param useEHCleanup - whether to push an EH cleanup to destroy
2447/// the remaining elements in case the destruction of a single
2448/// element throws
2449void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
2450 llvm::Value *end,
2451 QualType elementType,
2452 CharUnits elementAlign,
2453 Destroyer *destroyer,
2454 bool checkZeroLength,
2455 bool useEHCleanup) {
2456 assert(!elementType->isArrayType());
2457
2458 // The basic structure here is a do-while loop, because we don't
2459 // need to check for the zero-element case.
2460 llvm::BasicBlock *bodyBB = createBasicBlock(name: "arraydestroy.body");
2461 llvm::BasicBlock *doneBB = createBasicBlock(name: "arraydestroy.done");
2462
2463 if (checkZeroLength) {
2464 llvm::Value *isEmpty = Builder.CreateICmpEQ(LHS: begin, RHS: end,
2465 Name: "arraydestroy.isempty");
2466 Builder.CreateCondBr(Cond: isEmpty, True: doneBB, False: bodyBB);
2467 }
2468
2469 // Enter the loop body, making that address the current address.
2470 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2471 EmitBlock(BB: bodyBB);
2472 llvm::PHINode *elementPast =
2473 Builder.CreatePHI(Ty: begin->getType(), NumReservedValues: 2, Name: "arraydestroy.elementPast");
2474 elementPast->addIncoming(V: end, BB: entryBB);
2475
2476 // Shift the address back by one element.
2477 llvm::Value *negativeOne = llvm::ConstantInt::get(Ty: SizeTy, V: -1, IsSigned: true);
2478 llvm::Type *llvmElementType = ConvertTypeForMem(T: elementType);
2479 llvm::Value *element = Builder.CreateInBoundsGEP(
2480 Ty: llvmElementType, Ptr: elementPast, IdxList: negativeOne, Name: "arraydestroy.element");
2481
2482 if (useEHCleanup)
2483 pushRegularPartialArrayCleanup(arrayBegin: begin, arrayEnd: element, elementType, elementAlignment: elementAlign,
2484 destroyer);
2485
2486 // Perform the actual destruction there.
2487 destroyer(*this, Address(element, llvmElementType, elementAlign),
2488 elementType);
2489
2490 if (useEHCleanup)
2491 PopCleanupBlock();
2492
2493 // Check whether we've reached the end.
2494 llvm::Value *done = Builder.CreateICmpEQ(LHS: element, RHS: begin, Name: "arraydestroy.done");
2495 Builder.CreateCondBr(Cond: done, True: doneBB, False: bodyBB);
2496 elementPast->addIncoming(V: element, BB: Builder.GetInsertBlock());
2497
2498 // Done.
2499 EmitBlock(BB: doneBB);
2500}
2501
2502/// Perform partial array destruction as if in an EH cleanup. Unlike
2503/// emitArrayDestroy, the element type here may still be an array type.
2504static void emitPartialArrayDestroy(CodeGenFunction &CGF,
2505 llvm::Value *begin, llvm::Value *end,
2506 QualType type, CharUnits elementAlign,
2507 CodeGenFunction::Destroyer *destroyer) {
2508 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: type);
2509
2510 // If the element type is itself an array, drill down.
2511 unsigned arrayDepth = 0;
2512 while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(T: type)) {
2513 // VLAs don't require a GEP index to walk into.
2514 if (!isa<VariableArrayType>(Val: arrayType))
2515 arrayDepth++;
2516 type = arrayType->getElementType();
2517 }
2518
2519 if (arrayDepth) {
2520 llvm::Value *zero = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: 0);
2521
2522 SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2523 begin = CGF.Builder.CreateInBoundsGEP(
2524 Ty: elemTy, Ptr: begin, IdxList: gepIndices, Name: "pad.arraybegin");
2525 end = CGF.Builder.CreateInBoundsGEP(
2526 Ty: elemTy, Ptr: end, IdxList: gepIndices, Name: "pad.arrayend");
2527 }
2528
2529 // Destroy the array. We don't ever need an EH cleanup because we
2530 // assume that we're in an EH cleanup ourselves, so a throwing
2531 // destructor causes an immediate terminate.
2532 CGF.emitArrayDestroy(begin, end, elementType: type, elementAlign, destroyer,
2533 /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2534}
2535
2536namespace {
2537 /// RegularPartialArrayDestroy - a cleanup which performs a partial
2538 /// array destroy where the end pointer is regularly determined and
2539 /// does not need to be loaded from a local.
2540 class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2541 llvm::Value *ArrayBegin;
2542 llvm::Value *ArrayEnd;
2543 QualType ElementType;
2544 CodeGenFunction::Destroyer *Destroyer;
2545 CharUnits ElementAlign;
2546 public:
2547 RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2548 QualType elementType, CharUnits elementAlign,
2549 CodeGenFunction::Destroyer *destroyer)
2550 : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2551 ElementType(elementType), Destroyer(destroyer),
2552 ElementAlign(elementAlign) {}
2553
2554 void Emit(CodeGenFunction &CGF, Flags flags) override {
2555 emitPartialArrayDestroy(CGF, begin: ArrayBegin, end: ArrayEnd,
2556 type: ElementType, elementAlign: ElementAlign, destroyer: Destroyer);
2557 }
2558 };
2559
2560 /// IrregularPartialArrayDestroy - a cleanup which performs a
2561 /// partial array destroy where the end pointer is irregularly
2562 /// determined and must be loaded from a local.
2563 class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2564 llvm::Value *ArrayBegin;
2565 Address ArrayEndPointer;
2566 QualType ElementType;
2567 CodeGenFunction::Destroyer *Destroyer;
2568 CharUnits ElementAlign;
2569 public:
2570 IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2571 Address arrayEndPointer,
2572 QualType elementType,
2573 CharUnits elementAlign,
2574 CodeGenFunction::Destroyer *destroyer)
2575 : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2576 ElementType(elementType), Destroyer(destroyer),
2577 ElementAlign(elementAlign) {}
2578
2579 void Emit(CodeGenFunction &CGF, Flags flags) override {
2580 llvm::Value *arrayEnd = CGF.Builder.CreateLoad(Addr: ArrayEndPointer);
2581 emitPartialArrayDestroy(CGF, begin: ArrayBegin, end: arrayEnd,
2582 type: ElementType, elementAlign: ElementAlign, destroyer: Destroyer);
2583 }
2584 };
2585} // end anonymous namespace
2586
2587/// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to
2588/// destroy already-constructed elements of the given array. The cleanup may be
2589/// popped with DeactivateCleanupBlock or PopCleanupBlock.
2590///
2591/// \param elementType - the immediate element type of the array;
2592/// possibly still an array type
2593void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2594 Address arrayEndPointer,
2595 QualType elementType,
2596 CharUnits elementAlign,
2597 Destroyer *destroyer) {
2598 pushFullExprCleanup<IrregularPartialArrayDestroy>(
2599 kind: NormalAndEHCleanup, A: arrayBegin, A: arrayEndPointer, A: elementType,
2600 A: elementAlign, A: destroyer);
2601}
2602
2603/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2604/// already-constructed elements of the given array. The cleanup
2605/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2606///
2607/// \param elementType - the immediate element type of the array;
2608/// possibly still an array type
2609void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2610 llvm::Value *arrayEnd,
2611 QualType elementType,
2612 CharUnits elementAlign,
2613 Destroyer *destroyer) {
2614 pushFullExprCleanup<RegularPartialArrayDestroy>(kind: EHCleanup,
2615 A: arrayBegin, A: arrayEnd,
2616 A: elementType, A: elementAlign,
2617 A: destroyer);
2618}
2619
2620/// Lazily declare the @llvm.lifetime.start intrinsic.
2621llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
2622 if (LifetimeStartFn)
2623 return LifetimeStartFn;
2624 LifetimeStartFn = llvm::Intrinsic::getOrInsertDeclaration(
2625 M: &getModule(), id: llvm::Intrinsic::lifetime_start, Tys: AllocaInt8PtrTy);
2626 return LifetimeStartFn;
2627}
2628
2629/// Lazily declare the @llvm.lifetime.end intrinsic.
2630llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
2631 if (LifetimeEndFn)
2632 return LifetimeEndFn;
2633 LifetimeEndFn = llvm::Intrinsic::getOrInsertDeclaration(
2634 M: &getModule(), id: llvm::Intrinsic::lifetime_end, Tys: AllocaInt8PtrTy);
2635 return LifetimeEndFn;
2636}
2637
2638/// Lazily declare the @llvm.fake.use intrinsic.
2639llvm::Function *CodeGenModule::getLLVMFakeUseFn() {
2640 if (FakeUseFn)
2641 return FakeUseFn;
2642 FakeUseFn = llvm::Intrinsic::getOrInsertDeclaration(
2643 M: &getModule(), id: llvm::Intrinsic::fake_use);
2644 return FakeUseFn;
2645}
2646
2647namespace {
2648 /// A cleanup to perform a release of an object at the end of a
2649 /// function. This is used to balance out the incoming +1 of a
2650 /// ns_consumed argument when we can't reasonably do that just by
2651 /// not doing the initial retain for a __block argument.
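  /// Roughly: for 'void take(__attribute__((ns_consumed)) id obj)', the
  /// caller passes obj at +1; when the initial retain was not simply skipped,
  /// this cleanup performs the balancing release when the function returns.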
2652 struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2653 ConsumeARCParameter(llvm::Value *param,
2654 ARCPreciseLifetime_t precise)
2655 : Param(param), Precise(precise) {}
2656
2657 llvm::Value *Param;
2658 ARCPreciseLifetime_t Precise;
2659
2660 void Emit(CodeGenFunction &CGF, Flags flags) override {
2661 CGF.EmitARCRelease(value: Param, precise: Precise);
2662 }
2663 };
2664} // end anonymous namespace
2665
2666/// Emit an alloca (or GlobalValue depending on target)
2667/// for the specified parameter and set up LocalDeclMap.
2668void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
2669 unsigned ArgNo) {
2670 bool NoDebugInfo = false;
2671 // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2672 assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2673 "Invalid argument to EmitParmDecl");
2674
2675 // Set the name of the parameter's initial value to make IR easier to
2676 // read. Don't modify the names of globals.
2677 if (!isa<llvm::GlobalValue>(Val: Arg.getAnyValue()))
2678 Arg.getAnyValue()->setName(D.getName());
2679
2680 QualType Ty = D.getType();
2681
2682 // Use better IR generation for certain implicit parameters.
2683 if (auto IPD = dyn_cast<ImplicitParamDecl>(Val: &D)) {
2684 // The only implicit argument a block has is its literal.
2685 // This may be passed as an inalloca'ed value on Windows x86.
2686 if (BlockInfo) {
2687 llvm::Value *V = Arg.isIndirect()
2688 ? Builder.CreateLoad(Addr: Arg.getIndirectAddress())
2689 : Arg.getDirectValue();
2690 setBlockContextParameter(D: IPD, argNum: ArgNo, ptr: V);
2691 return;
2692 }
2693    // Suppress debug info for ThreadPrivateVar parameters; otherwise it hides
2694    // the debug info of the TLS variables.
2695 NoDebugInfo =
2696 (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
2697 }
2698
2699 Address DeclPtr = Address::invalid();
2700 RawAddress AllocaPtr = Address::invalid();
2701 bool DoStore = false;
2702 bool IsScalar = hasScalarEvaluationKind(T: Ty);
2703 bool UseIndirectDebugAddress = false;
2704
2705 // If we already have a pointer to the argument, reuse the input pointer.
2706 if (Arg.isIndirect()) {
2707 DeclPtr = Arg.getIndirectAddress();
2708 DeclPtr = DeclPtr.withElementType(ElemTy: ConvertTypeForMem(T: Ty));
2709 // Indirect argument is in alloca address space, which may be different
2710 // from the default address space.
2711 auto AllocaAS = CGM.getASTAllocaAddressSpace();
2712 auto *V = DeclPtr.emitRawPointer(CGF&: *this);
2713 AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment());
2714
2715 // For truly ABI indirect arguments -- those that are not `byval` -- store
2716 // the address of the argument on the stack to preserve debug information.
2717 ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
2718 if (ArgInfo.isIndirect())
2719 UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
2720 if (UseIndirectDebugAddress) {
2721 auto PtrTy = getContext().getPointerType(T: Ty);
2722 AllocaPtr = CreateMemTemp(T: PtrTy, Align: getContext().getTypeAlignInChars(T: PtrTy),
2723 Name: D.getName() + ".indirect_addr");
2724 EmitStoreOfScalar(Value: V, Addr: AllocaPtr, /* Volatile */ false, Ty: PtrTy);
2725 }
2726
2727 auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2728 auto DestLangAS =
2729 getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
2730 if (SrcLangAS != DestLangAS) {
2731 assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2732 CGM.getDataLayout().getAllocaAddrSpace());
2733 auto DestAS = getContext().getTargetAddressSpace(AS: DestLangAS);
2734 auto *T = llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: DestAS);
2735 DeclPtr = DeclPtr.withPointer(
2736 NewPointer: getTargetHooks().performAddrSpaceCast(CGF&: *this, V, SrcAddr: SrcLangAS, DestTy: T, IsNonNull: true),
2737 IsKnownNonNull: DeclPtr.isKnownNonNull());
2738 }
2739
2740 // Push a destructor cleanup for this parameter if the ABI requires it.
2741 // Don't push a cleanup in a thunk for a method that will also emit a
2742 // cleanup.
2743 if (Ty->isRecordType() && !CurFuncIsThunk &&
2744 Ty->castAsRecordDecl()->isParamDestroyedInCallee()) {
2745 if (QualType::DestructionKind DtorKind =
2746 D.needsDestruction(Ctx: getContext())) {
2747 assert((DtorKind == QualType::DK_cxx_destructor ||
2748 DtorKind == QualType::DK_nontrivial_c_struct) &&
2749 "unexpected destructor type");
2750 pushDestroy(dtorKind: DtorKind, addr: DeclPtr, type: Ty);
2751 CalleeDestructedParamCleanups[cast<ParmVarDecl>(Val: &D)] =
2752 EHStack.stable_begin();
2753 }
2754 }
2755 } else {
2756    // Check if the parameter address is controlled by the OpenMP runtime.
2757 Address OpenMPLocalAddr =
2758 getLangOpts().OpenMP
2759 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(CGF&: *this, VD: &D)
2760 : Address::invalid();
2761 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2762 DeclPtr = OpenMPLocalAddr;
2763 AllocaPtr = DeclPtr;
2764 } else {
2765 // Otherwise, create a temporary to hold the value.
2766 DeclPtr = CreateMemTemp(T: Ty, Align: getContext().getDeclAlign(D: &D),
2767 Name: D.getName() + ".addr", Alloca: &AllocaPtr);
2768 }
2769 DoStore = true;
2770 }
2771
2772 llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2773
2774 LValue lv = MakeAddrLValue(Addr: DeclPtr, T: Ty);
2775 if (IsScalar) {
2776 Qualifiers qs = Ty.getQualifiers();
2777 if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
2778 // We honor __attribute__((ns_consumed)) for types with lifetime.
2779 // For __strong, it's handled by just skipping the initial retain;
2780 // otherwise we have to balance out the initial +1 with an extra
2781 // cleanup to do the release at the end of the function.
      bool isConsumed = D.hasAttr<NSConsumedAttr>();

      // If a parameter is pseudo-strong then we can omit the implicit retain.
      if (D.isARCPseudoStrong()) {
        assert(lt == Qualifiers::OCL_Strong &&
               "pseudo-strong variable isn't strong?");
        assert(qs.hasConst() && "pseudo-strong variable should be const!");
        lt = Qualifiers::OCL_ExplicitNone;
      }

      // Load objects passed indirectly.
      if (Arg.isIndirect() && !ArgVal)
        ArgVal = Builder.CreateLoad(DeclPtr);

      if (lt == Qualifiers::OCL_Strong) {
        if (!isConsumed) {
          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
            // Use objc_storeStrong(&dest, value) for retaining the
            // object. But first, store a null into 'dest' because
            // objc_storeStrong attempts to release its old value.
            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
            EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
            DoStore = false;
          } else
            // Don't use objc_retainBlock for block pointers, because we
            // don't want to Block_copy something just because we got it
            // as a parameter.
            ArgVal = EmitARCRetainNonBlock(ArgVal);
        }
      } else {
        // Push the cleanup for a consumed parameter.
        if (isConsumed) {
          ARCPreciseLifetime_t precise =
              (D.hasAttr<ObjCPreciseLifetimeAttr>() ? ARCPreciseLifetime
                                                    : ARCImpreciseLifetime);
          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
                                                   precise);
        }

        if (lt == Qualifiers::OCL_Weak) {
          EmitARCInitWeak(DeclPtr, ArgVal);
          DoStore = false; // The weak init is a store, no need to do two.
        }
      }

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);

  setAddrOfLocalVar(&D, DeclPtr);

  // Push a FakeUse 'cleanup' object onto the EHStack for the parameter,
  // which may be the 'this' pointer. This causes the emission of a fake.use
  // call with the parameter as argument at the end of the function.
  if (CGM.getCodeGenOpts().getExtendVariableLiveness() ==
          CodeGenOptions::ExtendVariableLivenessKind::All ||
      (CGM.getCodeGenOpts().getExtendVariableLiveness() ==
           CodeGenOptions::ExtendVariableLivenessKind::This &&
       &D == CXXABIThisDecl)) {
    if (shouldExtendLifetime(getContext(), CurCodeDecl, D, CXXABIThisDecl))
      EHStack.pushCleanup<FakeUse>(NormalFakeUse, DeclPtr);
  }

  // Emit debug info for param declarations in non-thunk functions.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
        !NoDebugInfo) {
      llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
          &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
      if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
        DI->getParamDbgMappings().insert({Var, DILocalVar});
    }
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this));

  // We can only check return value nullability if all arguments to the
  // function satisfy their nullability preconditions. This makes it necessary
  // to emit null checks for args in the function body itself.
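  // (For example, under -fsanitize=nullability-return, a _Nonnull return value
  // is only checked when every _Nonnull argument was in fact non-null on
  // entry.)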
  if (requiresReturnValueNullabilityCheck()) {
    auto Nullability = Ty->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      SanitizerScope SanScope(this);
      RetValNullabilityPrecondition =
          Builder.CreateAnd(RetValNullabilityPrecondition,
                            Builder.CreateIsNotNull(Arg.getAnyValue()));
    }
  }
}

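// Emit the combiner (and optional initializer) for a user-defined reduction,
// e.g.
//   #pragma omp declare reduction(maxop : int : omp_out = omp_out > omp_in \
//                                                              ? omp_out : omp_in)
// Nothing is emitted unless OpenMP is enabled and the declaration is used or
// -femit-all-decls is given.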
void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
                                            CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}

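// Emit the mapping routine for a user-defined mapper, e.g.
//   #pragma omp declare mapper(struct S s) map(tofrom : s.a, s.b)
// Skipped under -fopenmp-simd, and for unused declarations unless
// -femit-all-decls is given.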
void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
                                         CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
      (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
}

void CodeGenModule::EmitOpenACCDeclare(const OpenACCDeclareDecl *D,
                                       CodeGenFunction *CGF) {
  // This is a no-op; we can just ignore these declarations.
}

void CodeGenModule::EmitOpenACCRoutine(const OpenACCRoutineDecl *D,
                                       CodeGenFunction *CGF) {
  // This is a no-op; we can just ignore these declarations.
}

void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
  getOpenMPRuntime().processRequiresDirective(D);
}

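// Handle '#pragma omp allocate' for variables that have already been emitted
// as IR globals, e.g.
//   int G;
//   #pragma omp allocate(G) allocator(omp_const_mem_alloc)
// If the attached OMPAllocateDeclAttr implies a different address space than
// the existing global was created with, the global is rewritten in place and
// its uses are updated via an address-space cast.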
void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
  for (const Expr *E : D->varlist()) {
    const auto *DE = cast<DeclRefExpr>(E);
    const auto *VD = cast<VarDecl>(DE->getDecl());

    // Skip all but globals.
    if (!VD->hasGlobalStorage())
      continue;

    // Check if the global has been materialized yet or not. If not, we are
    // done, as any later emission will honor the OMPAllocateDeclAttr. However,
    // if we already emitted the global, we might have done so before the
    // OMPAllocateDeclAttr was attached, potentially leaving it in the wrong
    // address space. While not pretty, common practice is to remove the old IR
    // global and generate a new one, so we do that here too. Uses are replaced
    // properly.
    StringRef MangledName = getMangledName(VD);
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    if (!Entry)
      continue;

    // We can also keep the existing global if the address space is what we
    // expect it to be; if not, it is replaced.
    clang::LangAS GVAS = GetGlobalVarAddressSpace(VD);
    auto TargetAS = getContext().getTargetAddressSpace(GVAS);
    if (Entry->getType()->getAddressSpace() == TargetAS)
      continue;

    llvm::PointerType *PTy =
        llvm::PointerType::get(getLLVMContext(), TargetAS);

    // Replace all uses of the old global with a cast. Since we mutate the type
    // in place, we need an intermediate that takes the spot of the old entry
    // until we can create the cast.
    llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
        getModule(), Entry->getValueType(), false,
        llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
        llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
    Entry->replaceAllUsesWith(DummyGV);

    Entry->mutateType(PTy);
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getAddrSpaceCast(Entry, DummyGV->getType());

    // Now that we have a cast version of the changed global, the dummy can be
    // replaced and deleted.
    DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
    DummyGV->eraseFromParent();
  }
}

std::optional<CharUnits>
CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
  if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
    if (Expr *Alignment = AA->getAlignment()) {
      unsigned UserAlign =
          Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
      CharUnits NaturalAlign =
          getNaturalTypeAlignment(VD->getType().getNonReferenceType());

      // OpenMP 5.1, pg. 185, lines 7-10:
      // Each item in the align modifier list must be aligned to the maximum
      // of the specified alignment and the type's natural alignment.
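      // For example, with a natural alignment of 8, 'align(16)' yields 16
      // while 'align(4)' yields 8.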
      return CharUnits::fromQuantity(
          std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
    }
  }
  return std::nullopt;
}