//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "ConstantEmitter.h"
#include "EHScopeStack.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenACC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
              "Clang max alignment greater than what LLVM supports?");

void CodeGenFunction::EmitDecl(const Decl &D, bool EvaluateConditionDecl) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::TopLevelStmt:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::OutlinedFunction:
  case Decl::Captured:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
  case Decl::UnresolvedUsingIfExists:
  case Decl::HLSLBuffer:
  case Decl::HLSLRootSignature:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Record:    // struct/union/class X;
  case Decl::CXXRecord: // struct/union/class X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<RecordDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
    return;
  case Decl::Enum: // enum X;
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<EnumDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
    return;
  case Decl::Function:     // void X();
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::MSGuid: // __declspec(uuid("..."))
  case Decl::UnnamedGlobalConstant:
  case Decl::TemplateParamObject:
  case Decl::OMPThreadPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::ImplicitConceptSpecialization:
  case Decl::LifetimeExtendedTemporary:
  case Decl::RequiresExprBody:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using: // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingEnum: // using enum X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using, /*EvaluateConditionDecl=*/EvaluateConditionDecl);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (EvaluateConditionDecl)
      MaybeEmitDeferredVarDeclInit(&VD);

    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::OpenACCDeclare:
    return CGM.EmitOpenACCDeclare(cast<OpenACCDeclareDecl>(&D), this);
  case Decl::OpenACCRoutine:
    return CGM.EmitOpenACCRoutine(cast<OpenACCRoutineDecl>(&D), this);

  case Decl::Typedef:     // typedef int X;
  case Decl::TypeAlias: { // using X = int; [C++0x]
    QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitAndRetainType(Ty);
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
    return;
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still
  // need to be emitted like static variables, e.g. a function-scope
  // variable in the constant address space in OpenCL.
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}

static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = std::string(CGM.getMangledName(FD));
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = std::string(getMangledName(&D));
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);
  getTargetCodeGenInfo().setTargetAttributes(cast<Decl>(&D), GV, *this);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS,
        llvm::PointerType::get(getLLVMContext(),
                               getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (D.hasFlexibleArrayInit(getContext()))
      CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

  PGO->markStmtMaybeUsed(D.getInit()); // FIXME: Too lazy

#ifndef NDEBUG
  CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
                      D.getFlexibleArrayInitChars(getContext());
  CharUnits CstSize = CharUnits::fromQuantity(
      CGM.getDataLayout().getTypeAllocSize(Init->getType()));
  assert(VarSize == CstSize && "Emitted constant has unexpected size");
#endif

  bool NeedsDtor =
      D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;

  GV->setConstant(
      D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
  GV->replaceInitializer(Init);

  emitter.finalize(GV);

  if (NeedsDtor && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                        llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  llvm::Type *elemTy = ConvertTypeForMem(D.getType());
  setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
      cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar) {
    ApplyAtomGroup Grp(getDebugInfo());
    var = AddInitializerToStaticVarDecl(D, var);
  }

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<RetainAttr>())
    CGM.addUsedGlobal(var);
  else if (D.hasAttr<UsedAttr>())
    CGM.addUsedOrCompilerUsedGlobal(var);

  if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
    CGM.addUsedOrCompilerUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
      llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobal(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}

namespace {
  struct DestroyObject final : EHScopeStack::Cleanup {
    DestroyObject(Address addr, QualType type,
                  CodeGenFunction::Destroyer *destroyer,
                  bool useEHCleanupForArray)
        : addr(addr), type(type), destroyer(destroyer),
          useEHCleanupForArray(useEHCleanupForArray) {}

    Address addr;
    QualType type;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Don't use an EH cleanup recursively from an EH cleanup.
      bool useEHCleanupForArray =
          flags.isForNormalCleanup() && this->useEHCleanupForArray;

      CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
    }
  };

  template <class Derived>
  struct DestroyNRVOVariable : EHScopeStack::Cleanup {
    DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
        : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

    llvm::Value *NRVOFlag;
    Address Loc;
    QualType Ty;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = nullptr;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO =
            CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

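      // CRTP: the derived cleanup class supplies the actual destructor-call
      // emission.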
      static_cast<Derived *>(this)->emitDestructorCall(CGF);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }

    virtual ~DestroyNRVOVariable() = default;
  };

  struct DestroyNRVOVariableCXX final
      : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
    DestroyNRVOVariableCXX(Address addr, QualType type,
                           const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
        : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
          Dtor(Dtor) {}

    const CXXDestructorDecl *Dtor;

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Loc, Ty);
    }
  };

  struct DestroyNRVOVariableC final
      : DestroyNRVOVariable<DestroyNRVOVariableC> {
    DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
        : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
    }
  };

  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    bool isRedundantBeforeReturn() override { return true; }
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      CGF.Builder.CreateStackRestore(V);
    }
  };

  struct KmpcAllocFree final : EHScopeStack::Cleanup {
    std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
    KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
        : AddrSizePair(AddrSizePair) {}
    void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
      auto &RT = CGF.CGM.getOpenMPRuntime();
      RT.getKmpcFreeShared(CGF, AddrSizePair);
    }
  };

  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var)
        : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

      // In some cases, the type of the function argument will be different from
      // the type of the pointer. An example of this is
      //   void f(void* arg);
      //   __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
          CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      auto Callee = CGCallee::forDirect(CleanupFn);
      CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
    }
  };
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
        (var.hasAttr<ObjCPreciseLifetimeAttr>()
             ? CodeGenFunction::destroyARCStrongPrecise
             : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanupForArray*/ true);
    break;
  }
}

static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null, e.g. for a missing decl or the condition of an
    // if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress();
      if (needsCast) {
        srcAddr = srcAddr.withElementType(destLV.getAddress().getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->isLValue()) {
        CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
      } else {
        assert(srcExpr->isXValue());
        CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}

static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability();
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // Check if the right hand side of the assignment is nonnull, if the left
  // hand side must be nonnull.
  auto CheckOrdinal = SanitizerKind::SO_NullabilityAssign;
  auto CheckHandler = SanitizerHandler::TypeMismatch;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, CheckOrdinal}}, CheckHandler, StaticData, RHS);
}

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *Value;
    if (PointerAuthQualifier PtrAuth = lvalue.getQuals().getPointerAuth()) {
      Value = EmitPointerAuthQualify(PtrAuth, init, lvalue.getAddress());
      lvalue.getQuals().removePointerAuth();
    } else {
      Value = EmitScalarExpr(init);
    }
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, Value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(Value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
    CodeGenFunction::RunCleanupsScope Scope(*this);
    return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
  }

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means that we omit the retain, which causes non-autoreleased return
    // values to be immediately released.
    [[fallthrough]];
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}

/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
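    // A nonzero scalar costs one store; the post-decrement charges the budget
    // and fails the check once the allowance is exhausted.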
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}

/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
void CodeGenFunction::emitStoresForInitAfterBZero(llvm::Constant *Init,
                                                  Address Loc, bool isVolatile,
                                                  bool IsAutoInit) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
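    // Attribute the store to the current source atom for Key Instructions
    // debug info.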
    auto *I = Builder.CreateStore(Init, Loc, isVolatile);
    addInstToCurrentSourceAtom(I, nullptr);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            IsAutoInit);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, IsAutoInit);
  }
}

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global. It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
  // do it if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided to
/// not use bzero.
/// FIXME: We could be more clever, as we are for bzero above, and generate
/// memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}

enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
                                              IsPattern isPattern,
                                              llvm::StructType *STy,
                                              llvm::Constant *constant) {
  const llvm::DataLayout &DL = CGM.getDataLayout();
  const llvm::StructLayout *Layout = DL.getStructLayout(STy);
  llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  unsigned SizeSoFar = 0;
  SmallVector<llvm::Constant *, 8> Values;
  bool NestedIntact = true;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
    unsigned CurOff = Layout->getElementOffset(i);
    if (SizeSoFar < CurOff) {
      assert(!STy->isPacked());
      auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
      Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
    }
    llvm::Constant *CurOp;
    if (constant->isZeroValue())
      CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
    else
      CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
    auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
    if (CurOp != NewOp)
      NestedIntact = false;
    Values.push_back(NewOp);
    SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
  }
  unsigned TotalSize = Layout->getSizeInBytes();
  if (SizeSoFar < TotalSize) {
    auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
    Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
  }
  if (NestedIntact && Values.size() == STy->getNumElements())
    return constant;
  return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}

/// Replace all padding bytes in a given constant with either a pattern byte or
/// 0x00.
static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant) {
  llvm::Type *OrigTy = constant->getType();
  if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
    return constStructWithPadding(CGM, isPattern, STy, constant);
  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
    llvm::SmallVector<llvm::Constant *, 8> Values;
    uint64_t Size = ArrayTy->getNumElements();
    if (!Size)
      return constant;
    llvm::Type *ElemTy = ArrayTy->getElementType();
    bool ZeroInitializer = constant->isNullValue();
    llvm::Constant *OpValue, *PaddedOp;
    if (ZeroInitializer) {
      OpValue = llvm::Constant::getNullValue(ElemTy);
      PaddedOp = constWithPadding(CGM, isPattern, OpValue);
    }
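    // For an all-zero array the padded element was computed once above and is
    // reused for every index.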
    for (unsigned Op = 0; Op != Size; ++Op) {
      if (!ZeroInitializer) {
        OpValue = constant->getAggregateElement(Op);
        PaddedOp = constWithPadding(CGM, isPattern, OpValue);
      }
      Values.push_back(PaddedOp);
    }
    auto *NewElemTy = Values[0]->getType();
    if (NewElemTy == ElemTy)
      return constant;
    auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
    return llvm::ConstantArray::get(NewArrayTy, Values);
  }
  // FIXME: Add handling for tail padding in vectors. Vectors don't
  // have padding between or inside elements, but the total amount of
  // data can be less than the allocated size.
  return constant;
}

Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
                                               llvm::Constant *Constant,
                                               CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return std::string(getMangledName(FD));
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else {
      llvm_unreachable("expected a function or method");
    }
  };

  // Form a simple per-variable cache of these values in case we find we
  // want to reuse them.
  llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
  if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
    auto *Ty = Constant->getType();
    bool isConstant = true;
    llvm::GlobalVariable *InsertBefore = nullptr;
    unsigned AS =
        getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
    std::string Name;
    if (D.hasGlobalStorage())
      Name = getMangledName(&D).str() + ".const";
    else if (const DeclContext *DC = D.getParentFunctionOrMethod())
      Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
    else
      llvm_unreachable("local variable has no parent function or method");
    llvm::GlobalVariable *GV = new llvm::GlobalVariable(
        getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
        Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Align.getAsAlign());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CacheEntry = GV;
  } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
    CacheEntry->setAlignment(Align.getAsAlign());
  }

  return Address(CacheEntry, CacheEntry->getValueType(), Align);
}

static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
                                                const VarDecl &D,
                                                CGBuilderTy &Builder,
                                                llvm::Constant *Constant,
                                                CharUnits Align) {
  Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
  return SrcPtr.withElementType(CGM.Int8Ty);
}

void CodeGenFunction::emitStoresForConstant(const VarDecl &D, Address Loc,
                                            bool isVolatile,
                                            llvm::Constant *constant,
                                            bool IsAutoInit) {
  auto *Ty = constant->getType();
  uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
  if (!ConstantSize)
    return;

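  // Integers, pointers, and floats (and vectors thereof) can be written with
  // a single store; other types take the memset/memcpy or split-store paths
  // below.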
  bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
                          Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
  if (canDoSingleStore) {
    auto *I = Builder.CreateStore(constant, Loc, isVolatile);
    addInstToCurrentSourceAtom(I, nullptr);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);

  // If the initializer is all or mostly the same, codegen with bzero / memset
  // then do a few stores afterward.
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
                                   SizeVal, isVolatile);
    addInstToCurrentSourceAtom(I, nullptr);

    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Loc.withElementType(Ty);
      emitStoresForInitAfterBZero(constant, Loc, isVolatile, IsAutoInit);
    }
    return;
  }

  // If the initializer is a repeated byte pattern, use memset.
  llvm::Value *Pattern =
      shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
  if (Pattern) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    auto *I = Builder.CreateMemSet(
        Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
    addInstToCurrentSourceAtom(I, nullptr);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  // If the initializer is small or trivialAutoVarInit is set, use a handful of
  // stores.
  bool IsTrivialAutoVarInitPattern =
      CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
      LangOptions::TrivialAutoVarInitKind::Pattern;
  if (shouldSplitConstantStore(CGM, ConstantSize)) {
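    // Split the store when the constant's type matches the alloca's element
    // type, or when pattern auto-init deliberately writes a differently-typed
    // constant.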
    if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
      if (STy == Loc.getElementType() ||
          (STy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
        const llvm::StructLayout *Layout =
            CGM.getDataLayout().getStructLayout(STy);
        for (unsigned i = 0; i != constant->getNumOperands(); i++) {
          CharUnits CurOff =
              CharUnits::fromQuantity(Layout->getElementOffset(i));
          Address EltPtr = Builder.CreateConstInBoundsByteGEP(
              Loc.withElementType(CGM.Int8Ty), CurOff);
          emitStoresForConstant(D, EltPtr, isVolatile,
                                constant->getAggregateElement(i), IsAutoInit);
        }
        return;
      }
    } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      if (ATy == Loc.getElementType() ||
          (ATy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
        for (unsigned i = 0; i != ATy->getNumElements(); i++) {
          Address EltPtr = Builder.CreateConstGEP(
              Loc.withElementType(ATy->getElementType()), i);
          emitStoresForConstant(D, EltPtr, isVolatile,
                                constant->getAggregateElement(i), IsAutoInit);
        }
        return;
      }
    }
  }

  // Copy from a global.
  auto *I =
      Builder.CreateMemCpy(Loc,
                           createUnnamedGlobalForMemcpyFrom(
                               CGM, D, Builder, constant, Loc.getAlignment()),
                           SizeVal, isVolatile);
  addInstToCurrentSourceAtom(I, nullptr);

  if (IsAutoInit)
    I->addAnnotationMetadata("auto-init");
}

void CodeGenFunction::emitStoresForZeroInit(const VarDecl &D, Address Loc,
                                            bool isVolatile) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant =
      constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
  emitStoresForConstant(D, Loc, isVolatile, constant,
                        /*IsAutoInit=*/true);
}

void CodeGenFunction::emitStoresForPatternInit(const VarDecl &D, Address Loc,
                                               bool isVolatile) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = constWithPadding(
      CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
  assert(!isa<llvm::UndefValue>(constant));
  emitStoresForConstant(D, Loc, isVolatile, constant,
                        /*IsAutoInit=*/true);
}

static bool containsUndef(llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return true;
  if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
    for (llvm::Use &Op : constant->operands())
      if (containsUndef(cast<llvm::Constant>(Op)))
        return true;
  return false;
}

static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
                                    llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return patternOrZeroFor(CGM, isPattern, Ty);
  if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
    return constant;
  if (!containsUndef(constant))
    return constant;
  llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
  for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
    auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
    Values[Op] = replaceUndef(CGM, isPattern, OpValue);
  }
  if (Ty->isStructTy())
    return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
  if (Ty->isArrayTy())
    return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
  assert(Ty->isVectorTy());
  return llvm::ConstantVector::get(Values);
}

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
                                                llvm::Value *Addr) {
  if (!ShouldEmitLifetimeMarkers)
    return nullptr;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
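  // Scalable sizes are not known at compile time; -1 tells the lifetime
  // intrinsic that the size is unspecified.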
  llvm::Value *SizeV = llvm::ConstantInt::get(
      Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
  C->setDoesNotThrow();
  return SizeV;
}

void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
  C->setDoesNotThrow();
}

void CodeGenFunction::EmitFakeUse(Address Addr) {
  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  llvm::Value *V = Builder.CreateLoad(Addr, "fake.use");
  llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMFakeUseFn(), {V});
  C->setDoesNotThrow();
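  // Mark the call notail so the fake use is not subject to tail-call
  // optimization, which could otherwise end the value's apparent lifetime
  // early.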
1382 C->setTailCallKind(llvm::CallInst::TCK_NoTail);
1383}
1384
1385void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
1386 CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
1387 // For each dimension stores its QualType and corresponding
1388 // size-expression Value.
1389 SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
1390 SmallVector<const IdentifierInfo *, 4> VLAExprNames;
1391
1392 // Break down the array into individual dimensions.
1393 QualType Type1D = D.getType();
1394 while (getContext().getAsVariableArrayType(T: Type1D)) {
1395 auto VlaSize = getVLAElements1D(vla: Type1D);
1396 if (auto *C = dyn_cast<llvm::ConstantInt>(Val: VlaSize.NumElts))
1397 Dimensions.emplace_back(Args&: C, Args: Type1D.getUnqualifiedType());
1398 else {
1399 // Generate a locally unique name for the size expression.
1400 Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
1401 SmallString<12> Buffer;
1402 StringRef NameRef = Name.toStringRef(Out&: Buffer);
1403 auto &Ident = getContext().Idents.getOwn(Name: NameRef);
1404 VLAExprNames.push_back(Elt: &Ident);
1405 auto SizeExprAddr =
1406 CreateDefaultAlignTempAlloca(Ty: VlaSize.NumElts->getType(), Name: NameRef);
1407 Builder.CreateStore(Val: VlaSize.NumElts, Addr: SizeExprAddr);
1408 Dimensions.emplace_back(Args: SizeExprAddr.getPointer(),
1409 Args: Type1D.getUnqualifiedType());
1410 }
1411 Type1D = VlaSize.Type;
1412 }
1413
1414 if (!EmitDebugInfo)
1415 return;
1416
1417 // Register each dimension's size-expression with a DILocalVariable,
1418 // so that it can be used by CGDebugInfo when instantiating a DISubrange
1419 // to describe this array.
1420 unsigned NameIdx = 0;
1421 for (auto &VlaSize : Dimensions) {
1422 llvm::Metadata *MD;
1423 if (auto *C = dyn_cast<llvm::ConstantInt>(Val: VlaSize.NumElts))
1424 MD = llvm::ConstantAsMetadata::get(C);
1425 else {
1426 // Create an artificial VarDecl to generate debug info for.
1427 const IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
1428 auto QT = getContext().getIntTypeForBitwidth(
1429 DestWidth: SizeTy->getScalarSizeInBits(), Signed: false);
1430 auto *ArtificialDecl = VarDecl::Create(
1431 C&: getContext(), DC: const_cast<DeclContext *>(D.getDeclContext()),
1432 StartLoc: D.getLocation(), IdLoc: D.getLocation(), Id: NameIdent, T: QT,
1433 TInfo: getContext().CreateTypeSourceInfo(T: QT), S: SC_Auto);
1434 ArtificialDecl->setImplicit();
1435
1436 MD = DI->EmitDeclareOfAutoVariable(Decl: ArtificialDecl, AI: VlaSize.NumElts,
1437 Builder);
1438 }
1439 assert(MD && "No Size expression debug node created");
1440 DI->registerVLASizeExpression(Ty: VlaSize.Type, SizeExpr: MD);
1441 }
1442}

/// Return the maximum size of an aggregate for which we generate a fake use
/// intrinsic when -fextend-variable-liveness is in effect.
static uint64_t maxFakeUseAggregateSize(const ASTContext &C) {
  return 4 * C.getTypeSize(C.UnsignedIntTy);
}
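
// For example, on a target where unsigned int is 32 bits the cap computed
// above is 4 * 32 = 128 bits, so a 16-byte aggregate still receives a fake
// use while anything larger is skipped.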

// Helper function to determine whether a variable's or parameter's lifetime
// should be extended.
static bool shouldExtendLifetime(const ASTContext &Context,
                                 const Decl *FuncDecl, const VarDecl &D,
                                 ImplicitParamDecl *CXXABIThisDecl) {
  // When we're not inside a valid function, it is unlikely that any
  // lifetime extension is useful.
  if (!FuncDecl)
    return false;
  if (FuncDecl->isImplicit())
    return false;
  // Do not extend compiler-created variables, except for the this pointer.
  if (D.isImplicit() && &D != CXXABIThisDecl)
    return false;
  QualType Ty = D.getType();
  // No need to extend volatiles; they already have a memory location.
  if (Ty.isVolatileQualified())
    return false;
  // Don't extend variables that exceed a certain size.
  if (Context.getTypeSize(Ty) > maxFakeUseAggregateSize(Context))
    return false;
  // Do not extend variables in nodebug or optnone functions.
  if (FuncDecl->hasAttr<NoDebugAttr>() || FuncDecl->hasAttr<OptimizeNoneAttr>())
    return false;
  return true;
}

/// EmitAutoVarAlloca - Emit the alloca and debug information for a
/// local variable. Does not emit initialization or destruction.
CodeGenFunction::AutoVarEmission
CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
  QualType Ty = D.getType();
  assert(
      Ty.getAddressSpace() == LangAS::Default ||
      (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));

  AutoVarEmission emission(D);

  bool isEscapingByRef = D.isEscapingByref();
  emission.IsEscapingByRef = isEscapingByRef;

  CharUnits alignment = getContext().getDeclAlign(&D);

  // If the type is variably-modified, emit all the VLA sizes for it.
  if (Ty->isVariablyModifiedType())
    EmitVariablyModifiedType(Ty);

  auto *DI = getDebugInfo();
  bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();

  Address address = Address::invalid();
  RawAddress AllocaAddr = RawAddress::invalid();
  Address OpenMPLocalAddr = Address::invalid();
  if (CGM.getLangOpts().OpenMPIRBuilder)
    OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
  else
    OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();

  bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();

  if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
    address = OpenMPLocalAddr;
    AllocaAddr = OpenMPLocalAddr;
  } else if (Ty->isConstantSizeType()) {
    // If this value is an array or struct with a statically determinable
    // constant initializer, there are optimizations we can do.
    //
    // TODO: We should constant-evaluate the initializer of any variable,
    // as long as it is initialized by a constant expression. Currently,
    // isConstantInitializer produces wrong answers for structs with
    // reference or bitfield members, and a few other cases, and checking
    // for POD-ness protects us from some of these.
    if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
        (D.isConstexpr() ||
         ((Ty.isPODType(getContext()) ||
           getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
          D.getInit()->isConstantInitializer(getContext(), false)))) {

      // If the variable is a const type, and it's neither an NRVO
      // candidate nor a __block variable and has no mutable members,
      // emit it as a global instead.
      // The exception is a variable located in a non-constant address
      // space in OpenCL.
      bool NeedsDtor =
          D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
      if ((!getLangOpts().OpenCL ||
           Ty.getAddressSpace() == LangAS::opencl_constant) &&
          (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
           !isEscapingByRef &&
           Ty.isConstantStorage(getContext(), true, !NeedsDtor))) {
        EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);

        // Signal this condition to later callbacks.
        emission.Addr = Address::invalid();
        assert(emission.wasEmittedAsGlobal());
        return emission;
      }

      // Otherwise, tell the initialization code that we're in this case.
      emission.IsConstantAggregate = true;
    }

    // A normal fixed sized variable becomes an alloca in the entry block,
    // unless:
    // - it's an NRVO variable.
    // - we are compiling OpenMP and it's an OpenMP local variable.
    if (NRVO) {
      // The named return value optimization: allocate this variable in the
      // return slot, so that we can elide the copy when returning this
      // variable (C++0x [class.copy]p34).
      address = ReturnValue;
      AllocaAddr =
          RawAddress(ReturnValue.emitRawPointer(*this),
                     ReturnValue.getElementType(), ReturnValue.getAlignment());

      if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
        const auto *RD = RecordTy->getDecl();
        const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
        if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
            RD->isNonTrivialToPrimitiveDestroy()) {
          // Create a flag that is used to indicate when the NRVO was applied
          // to this variable. Set it to zero to indicate that NRVO was not
          // applied.
          llvm::Value *Zero = Builder.getFalse();
          RawAddress NRVOFlag =
              CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
          EnsureInsertPoint();
          Builder.CreateStore(Zero, NRVOFlag);

          // Record the NRVO flag for this variable.
          NRVOFlags[&D] = NRVOFlag.getPointer();
          emission.NRVOFlag = NRVOFlag.getPointer();
        }
      }
    } else {
      CharUnits allocaAlignment;
      llvm::Type *allocaTy;
      if (isEscapingByRef) {
        auto &byrefInfo = getBlockByrefInfo(&D);
        allocaTy = byrefInfo.Type;
        allocaAlignment = byrefInfo.ByrefAlignment;
      } else {
        allocaTy = ConvertTypeForMem(Ty);
        allocaAlignment = alignment;
      }

      // Create the alloca. Note that we set the name separately from
      // building the instruction so that it's there even in no-asserts
      // builds.
      address = CreateTempAlloca(allocaTy, Ty.getAddressSpace(),
                                 allocaAlignment, D.getName(),
                                 /*ArraySize=*/nullptr, &AllocaAddr);

      // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
      // the catch parameter starts in the catchpad instruction, and we can't
      // insert code in those basic blocks.
      bool IsMSCatchParam =
          D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();

      // Emit a lifetime intrinsic if meaningful. There's no point in doing
      // this if we don't have a valid insertion point.
      if (HaveInsertPoint() && !IsMSCatchParam) {
        // If there's a jump into the lifetime of this variable, its lifetime
        // gets broken up into several regions in IR, which requires more work
        // to handle correctly. For now, just omit the intrinsics; this is a
        // rare case, and it's better to just be conservatively correct.
        // PR28267.
        //
        // We have to do this in all language modes if there's a jump past the
        // declaration. We also have to do it in C if there's a jump to an
        // earlier point in the current block because non-VLA lifetimes begin
        // as soon as the containing block is entered, not when its variables
        // actually come into scope; suppressing the lifetime annotations
        // completely in this case is unnecessarily pessimistic, but again,
        // this is rare.
        if (!Bypasses.IsBypassed(&D) &&
            !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
          llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
          emission.SizeForLifetimeMarkers =
              EmitLifetimeStart(Size, AllocaAddr.getPointer());
        }
      } else {
        assert(!emission.useLifetimeMarkers());
      }
    }
  } else {
    EnsureInsertPoint();

    // Delayed globalization for variable length declarations. This ensures
    // that the expression representing the length has been emitted and can be
    // used by the definition of the VLA. Since this is an escaped declaration,
    // in OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
    // deallocation call to __kmpc_free_shared() is emitted later.
    bool VarAllocated = false;
    if (getLangOpts().OpenMPIsTargetDevice) {
      auto &RT = CGM.getOpenMPRuntime();
      if (RT.isDelayedVariableLengthDecl(*this, &D)) {
        // Emit a call to __kmpc_alloc_shared() instead of the alloca.
        std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
            RT.getKmpcAllocShared(*this, &D);

        // Save the address of the allocation:
        LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
                                     CGM.getContext().getDeclAlign(&D),
                                     AlignmentSource::Decl);
        address = Base.getAddress();

        // Push a cleanup block to emit the call to __kmpc_free_shared in the
        // appropriate location at the end of the scope of the
        // __kmpc_alloc_shared functions:
        pushKmpcAllocFree(NormalCleanup, AddrSizePair);

        // Mark the variable as allocated:
        VarAllocated = true;
      }
    }

    if (!VarAllocated) {
      if (!DidCallStackSave) {
        // Save the stack.
        Address Stack =
            CreateDefaultAlignTempAlloca(AllocaInt8PtrTy, "saved_stack");

        llvm::Value *V = Builder.CreateStackSave();
        assert(V->getType() == AllocaInt8PtrTy);
        Builder.CreateStore(V, Stack);

        DidCallStackSave = true;

        // Push a cleanup block and restore the stack there.
        // FIXME: in general circumstances, this should be an EH cleanup.
        pushStackRestore(NormalCleanup, Stack);
      }

      auto VlaSize = getVLASize(Ty);
      llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);

      // Allocate memory for the array.
      address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
                                 &AllocaAddr);
    }

    // If we have debug info enabled, properly describe the VLA dimensions for
    // this type by registering the vla size expression for each of the
    // dimensions.
    EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
  }

  setAddrOfLocalVar(&D, address);
  emission.Addr = address;
  emission.AllocaAddr = AllocaAddr;

  // Emit debug info for the local var declaration.
  if (EmitDebugInfo && HaveInsertPoint()) {
    Address DebugAddr = address;
    bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
    DI->setLocation(D.getLocation());

    // If NRVO, use a pointer to the return address.
    if (UsePointerValue) {
      DebugAddr = ReturnValuePointer;
      AllocaAddr = ReturnValuePointer;
    }
    (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
                                        UsePointerValue);
  }

  if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
    EmitVarAnnotations(&D, address.emitRawPointer(*this));

  // Make sure we call @llvm.lifetime.end.
  if (emission.useLifetimeMarkers())
    EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                         emission.getOriginalAllocatedAddress(),
                                         emission.getSizeForLifetimeMarkers());

  // Analogous to lifetime markers, we use a 'cleanup' to emit fake.use
  // calls for local variables. We are exempting volatile variables and
  // non-scalars larger than 4 times the size of an unsigned int. Larger
  // non-scalars are often allocated in memory and may create unnecessary
  // overhead.
  if (CGM.getCodeGenOpts().getExtendVariableLiveness() ==
      CodeGenOptions::ExtendVariableLivenessKind::All) {
    if (shouldExtendLifetime(getContext(), CurCodeDecl, D, CXXABIThisDecl))
      EHStack.pushCleanup<FakeUse>(NormalFakeUse,
                                   emission.getAllocatedAddress());
  }

  return emission;
}
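
// Illustrative sketch (hypothetical source): the constant-promotion path
// above means that in
//
//   void h() { const int k[3] = {1, 2, 3}; use(k); }
//
// with constant merging enabled (-fmerge-all-constants), 'k' can be emitted
// as an internal-linkage global via EmitStaticVarDecl instead of an alloca
// plus per-element stores, provided it is not an NRVO candidate, not
// __block, and has constant storage.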

static bool isCapturedBy(const VarDecl &, const Expr *);

/// Determines whether the given __block variable is potentially
/// captured by the given statement.
static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
  if (const Expr *E = dyn_cast<Expr>(S))
    return isCapturedBy(Var, E);
  for (const Stmt *SubStmt : S->children())
    if (isCapturedBy(Var, SubStmt))
      return true;
  return false;
}

/// Determines whether the given __block variable is potentially
/// captured by the given expression.
static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
  // Skip the most common kinds of expressions that make
  // hierarchy-walking expensive.
  E = E->IgnoreParenCasts();

  if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
    const BlockDecl *Block = BE->getBlockDecl();
    for (const auto &I : Block->captures()) {
      if (I.getVariable() == &Var)
        return true;
    }

    // No need to walk into the subexpressions.
    return false;
  }

  if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
    const CompoundStmt *CS = SE->getSubStmt();
    for (const auto *BI : CS->body())
      if (const auto *BIE = dyn_cast<Expr>(BI)) {
        if (isCapturedBy(Var, BIE))
          return true;
      }
      else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
        // Special-case declarations.
        for (const auto *I : DS->decls()) {
          if (const auto *VD = dyn_cast<VarDecl>(I)) {
            const Expr *Init = VD->getInit();
            if (Init && isCapturedBy(Var, Init))
              return true;
          }
        }
      }
      else
        // FIXME: Conservatively assume that arbitrary statements cause
        // capturing. Later, provide code to inspect statements for capture
        // analysis.
        return true;
    return false;
  }

  for (const Stmt *SubStmt : E->children())
    if (isCapturedBy(Var, SubStmt))
      return true;

  return false;
}
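
// Example of the capture this analysis detects (hypothetical Objective-C
// snippet): the __block variable is referenced by a block inside its own
// initializer, so the byref object may be moved before the store completes:
//
//   __block int x = ^{ return x; }();   // isCapturedBy(x, init) == true
//
// In that case EmitAutoVarInit evaluates the initializer first and only then
// copies the result into the (possibly forwarded) variable.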

/// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
  if (!Init)
    return true;

  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
    if (CXXConstructorDecl *Constructor = Construct->getConstructor())
      if (Constructor->isTrivial() &&
          Constructor->isDefaultConstructor() &&
          !Construct->requiresZeroInitialization())
        return true;

  return false;
}

void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
                                                      const VarDecl &D,
                                                      Address Loc) {
  auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
  auto trivialAutoVarInitMaxSize =
      getContext().getLangOpts().TrivialAutoVarInitMaxSize;
  CharUnits Size = getContext().getTypeSizeInChars(type);
  bool isVolatile = type.isVolatileQualified();
  if (!Size.isZero()) {
    // We skip auto-init variables by their alloc size. Take this as an
    // example: "struct Foo {int x; char buff[1024];}". Assume the max-size
    // flag is 1023. All variables of type Foo will be skipped. Ideally, we
    // would only skip the buff array and still auto-init x in this example.
    // TODO: Improve the size filtering to filter by member size.
    auto allocSize = CGM.getDataLayout().getTypeAllocSize(Loc.getElementType());
    switch (trivialAutoVarInit) {
    case LangOptions::TrivialAutoVarInitKind::Uninitialized:
      llvm_unreachable("Uninitialized handled by caller");
    case LangOptions::TrivialAutoVarInitKind::Zero:
      if (CGM.stopAutoInit())
        return;
      if (trivialAutoVarInitMaxSize > 0 &&
          allocSize > trivialAutoVarInitMaxSize)
        return;
      emitStoresForZeroInit(D, Loc, isVolatile);
      break;
    case LangOptions::TrivialAutoVarInitKind::Pattern:
      if (CGM.stopAutoInit())
        return;
      if (trivialAutoVarInitMaxSize > 0 &&
          allocSize > trivialAutoVarInitMaxSize)
        return;
      emitStoresForPatternInit(D, Loc, isVolatile);
      break;
    }
    return;
  }

  // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
  // them, so emit a memcpy with the VLA size to initialize each element.
  // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
  // will catch that code, but there exists code which generates zero-sized
  // VLAs. Be nice and initialize whatever they requested.
  const auto *VlaType = getContext().getAsVariableArrayType(type);
  if (!VlaType)
    return;
  auto VlaSize = getVLASize(VlaType);
  auto SizeVal = VlaSize.NumElts;
  CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
  switch (trivialAutoVarInit) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    llvm_unreachable("Uninitialized handled by caller");

  case LangOptions::TrivialAutoVarInitKind::Zero: {
    if (CGM.stopAutoInit())
      return;
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
                                   SizeVal, isVolatile);
    I->addAnnotationMetadata("auto-init");
    break;
  }

  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    if (CGM.stopAutoInit())
      return;
    llvm::Type *ElTy = Loc.getElementType();
    llvm::Constant *Constant = constWithPadding(
        CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
    CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
    llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
    llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
    llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
    llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
        SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
        "vla.iszerosized");
    Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
    EmitBlock(SetupBB);
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    llvm::Value *BaseSizeInChars =
        llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
    Address Begin = Loc.withElementType(Int8Ty);
    llvm::Value *End = Builder.CreateInBoundsGEP(Begin.getElementType(),
                                                 Begin.emitRawPointer(*this),
                                                 SizeVal, "vla.end");
    llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
    EmitBlock(LoopBB);
    llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
    Cur->addIncoming(Begin.emitRawPointer(*this), OriginBB);
    CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
    auto *I =
        Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
                             createUnnamedGlobalForMemcpyFrom(
                                 CGM, D, Builder, Constant, ConstantAlign),
                             BaseSizeInChars, isVolatile);
    I->addAnnotationMetadata("auto-init");
    llvm::Value *Next =
        Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
    llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
    Builder.CreateCondBr(Done, ContBB, LoopBB);
    Cur->addIncoming(Next, LoopBB);
    EmitBlock(ContBB);
  } break;
  }
}
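
// For illustration (hypothetical source): with -ftrivial-auto-var-init=zero,
//
//   void f(int n) { char buf[n]; use(buf); }
//
// takes the VLA path above and becomes a single memset of n bytes annotated
// with "auto-init"; with =pattern it becomes the vla-init loop that memcpys
// the padded pattern constant into each element, guarded by the
// vla.iszerosized check.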

void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  const VarDecl &D = *emission.Variable;
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
  ApplyAtomGroup Grp(getDebugInfo());
  QualType type = D.getType();

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!Init || !ContainsLabel(Init)) {
      PGO->markStmtMaybeUsed(Init);
      return;
    }
    EnsureInsertPoint();
  }

  // Initialize the structure of a __block variable.
  if (emission.IsEscapingByRef)
    emitByrefStructureInit(emission);

  // Initialize the variable here if it doesn't have an initializer and it is
  // a C struct that is non-trivial to initialize or an array containing such
  // a struct.
  if (!Init &&
      type.isNonTrivialToPrimitiveDefaultInitialize() ==
          QualType::PDIK_Struct) {
    LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
    if (emission.IsEscapingByRef)
      drillIntoBlockVariable(*this, Dst, &D);
    defaultInitNonTrivialCStructVar(Dst);
    return;
  }

  // Check whether this is a byref variable that's potentially
  // captured and moved by its own initializer. If so, we'll need to
  // emit the initializer first, then copy into the variable.
  bool capturedByInit =
      Init && emission.IsEscapingByRef && isCapturedBy(D, Init);

  bool locIsByrefHeader = !capturedByInit;
  const Address Loc =
      locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;

  auto hasNoTrivialAutoVarInitAttr = [&](const Decl *D) {
    return D && D->hasAttr<NoTrivialAutoVarInitAttr>();
  };
  // Note: constexpr already initializes everything correctly.
  LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
      ((D.isConstexpr() || D.getAttr<UninitializedAttr>() ||
        hasNoTrivialAutoVarInitAttr(type->getAsTagDecl()) ||
        hasNoTrivialAutoVarInitAttr(CurFuncDecl))
           ? LangOptions::TrivialAutoVarInitKind::Uninitialized
           : getContext().getLangOpts().getTrivialAutoVarInit());

  auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
    if (trivialAutoVarInit ==
        LangOptions::TrivialAutoVarInitKind::Uninitialized)
      return;

    // Only initialize a __block's storage: we always initialize the header.
    if (emission.IsEscapingByRef && !locIsByrefHeader)
      Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);

    return emitZeroOrPatternForAutoVarInit(type, D, Loc);
  };

  if (isTrivialInitializer(Init))
    return initializeWhatIsTechnicallyUninitialized(Loc);

  llvm::Constant *constant = nullptr;
  if (emission.IsConstantAggregate ||
      D.mightBeUsableInConstantExpressions(getContext())) {
    assert(!capturedByInit && "constant init contains a capturing block?");
    constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
    if (constant && !constant->isZeroValue() &&
        (trivialAutoVarInit !=
         LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
      IsPattern isPattern =
          (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
              ? IsPattern::Yes
              : IsPattern::No;
      // C guarantees that brace-init with fewer initializers than members in
      // the aggregate will initialize the rest of the aggregate as-if it were
      // static initialization. In turn, static initialization guarantees that
      // padding is initialized to zero bits. We could instead pattern-init if
      // D has any ImplicitValueInitExpr, but that seems to be unintuitive
      // behavior.
      constant = constWithPadding(CGM, IsPattern::No,
                                  replaceUndef(CGM, isPattern, constant));
    }

    if (constant && type->isBitIntType() &&
        CGM.getTypes().typeRequiresSplitIntoByteArray(type)) {
      // Constants for long _BitInt types are split into individual bytes.
      // Try to fold these back into an integer constant so it can be stored
      // properly.
      llvm::Type *LoadType =
          CGM.getTypes().convertTypeForLoadStore(type, constant->getType());
      constant = llvm::ConstantFoldLoadFromConst(
          constant, LoadType, llvm::APInt::getZero(32), CGM.getDataLayout());
    }
  }

  if (!constant) {
    if (trivialAutoVarInit !=
        LangOptions::TrivialAutoVarInitKind::Uninitialized) {
      // At this point, we know D has an Init expression but isn't a constant.
      // - If D is not a scalar, auto-var-init conservatively (members may be
      //   left uninitialized by constructor Init expressions, for example).
      // - If D is a scalar, we only need to auto-var-init if there is a
      //   self-reference. Otherwise, the Init expression should be sufficient.
      //   It may be that the Init expression uses other uninitialized memory,
      //   but auto-var-init here would not help, as auto-init would get
      //   overwritten by Init.
      if (!type->isScalarType() || capturedByInit || isAccessedBy(D, Init)) {
        initializeWhatIsTechnicallyUninitialized(Loc);
      }
    }
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitExprAsInit(Init, &D, lv, capturedByInit);
  }

  PGO->markStmtMaybeUsed(Init);

  if (!emission.IsConstantAggregate) {
    // For simple scalar/complex initialization, store the value directly.
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitStoreThroughLValue(RValue::get(constant), lv, true);
  }

  emitStoresForConstant(D, Loc.withElementType(CGM.Int8Ty),
                        type.isVolatileQualified(), constant,
                        /*IsAutoInit=*/false);
}

void CodeGenFunction::MaybeEmitDeferredVarDeclInit(const VarDecl *VD) {
  if (auto *DD = dyn_cast_if_present<DecompositionDecl>(VD)) {
    for (auto *B : DD->flat_bindings())
      if (auto *HD = B->getHoldingVar())
        EmitVarDecl(*HD);
  }
}
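
// Illustrative example (hypothetical source): for a tuple-like decomposition
//
//   auto [a, b] = std::tuple<int, int>(1, 2);
//
// each binding may own a holding variable that stores the result of the
// corresponding get<I>() call; those holding variables are what gets emitted
// here, deferred until the decomposed object itself has been initialized.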

/// Emit an expression as an initializer for an object (variable, field, etc.)
/// at the given location. The expression is not necessarily the normal
/// initializer for the object, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param D the object to act as if we're initializing
/// \param lvalue the lvalue to initialize
/// \param capturedByInit true if \p D is a __block variable
///   whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  QualType type = D->getType();

  if (type->isReferenceType()) {
    RValue rvalue = EmitReferenceBindingToExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreThroughLValue(rvalue, lvalue, true);
    return;
  }
  switch (getEvaluationKind(type)) {
  case TEK_Scalar:
    EmitScalarInit(init, D, lvalue, capturedByInit);
    return;
  case TEK_Complex: {
    ComplexPairTy complex = EmitComplexExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreOfComplex(complex, lvalue, /*init*/ true);
    return;
  }
  case TEK_Aggregate:
    if (type->isAtomicType()) {
      EmitAtomicInit(const_cast<Expr *>(init), lvalue);
    } else {
      AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
      if (isa<VarDecl>(D))
        Overlap = AggValueSlot::DoesNotOverlap;
      else if (auto *FD = dyn_cast<FieldDecl>(D))
        Overlap = getOverlapForFieldInit(FD);
      // TODO: how can we delay here if D is captured by its initializer?
      EmitAggExpr(init,
                  AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased, Overlap));
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Enter a destroy cleanup for the given local variable.
void CodeGenFunction::emitAutoVarTypeCleanup(
    const CodeGenFunction::AutoVarEmission &emission,
    QualType::DestructionKind dtorKind) {
  assert(dtorKind != QualType::DK_none);

  // Note that for __block variables, we want to destroy the
  // original stack object, not the possibly forwarded object.
  Address addr = emission.getObjectAddress(*this);

  const VarDecl *var = emission.Variable;
  QualType type = var->getType();

  CleanupKind cleanupKind = NormalAndEHCleanup;
  CodeGenFunction::Destroyer *destroyer = nullptr;

  switch (dtorKind) {
  case QualType::DK_none:
    llvm_unreachable("no cleanup for trivially-destructible variable");

  case QualType::DK_cxx_destructor:
    // If there's an NRVO flag on the emission, we need a different
    // cleanup.
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
      EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
                                                  emission.NRVOFlag);
      return;
    }
    break;

  case QualType::DK_objc_strong_lifetime:
    // Suppress cleanups for pseudo-strong variables.
    if (var->isARCPseudoStrong()) return;

    // Otherwise, consider whether to use an EH cleanup or not.
    cleanupKind = getARCCleanupKind();

    // Use the imprecise destroyer by default.
    if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
      destroyer = CodeGenFunction::destroyARCStrongImprecise;
    break;

  case QualType::DK_objc_weak_lifetime:
    break;

  case QualType::DK_nontrivial_c_struct:
    destroyer = CodeGenFunction::destroyNonTrivialCStruct;
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
                                                emission.NRVOFlag, type);
      return;
    }
    break;
  }

  // If we haven't chosen a more specific destroyer, use the default.
  if (!destroyer) destroyer = getDestroyer(dtorKind);

  // Use an EH cleanup in array destructors iff the destructor itself
  // is being pushed as an EH cleanup.
  bool useEHCleanup = (cleanupKind & EHCleanup);
  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
                                     useEHCleanup);
}

void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  // If we don't have an insertion point, we're done. Sema prevents
  // us from jumping into any of these scopes anyway.
  if (!HaveInsertPoint()) return;

  const VarDecl &D = *emission.Variable;

  // Check the type for a cleanup.
  if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
    emitAutoVarTypeCleanup(emission, dtorKind);

  // In GC mode, honor objc_precise_lifetime.
  if (getLangOpts().getGC() != LangOptions::NonGC &&
      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
  }

  // Handle the cleanup attribute.
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
  }

  // If this is a block variable, call _Block_object_destroy
  // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
  // mode.
  if (emission.IsEscapingByRef &&
      CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
    BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
    if (emission.Variable->getType().isObjCGCWeak())
      Flags |= BLOCK_FIELD_IS_WEAK;
    enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
                      /*LoadBlockVarAddr*/ false,
                      cxxDestructorCanThrow(emission.Variable->getType()));
  }
}
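
// Example of the cleanup attribute handled above (hypothetical C source):
//
//   void closep(FILE **fp) { if (*fp) fclose(*fp); }
//   void use(void) {
//     __attribute__((cleanup(closep))) FILE *f = fopen("x", "r");
//     ...
//   } // closep(&f) runs here, via the CallCleanupFunction cleanup.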

CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
  case QualType::DK_cxx_destructor:
    return destroyCXXObject;
  case QualType::DK_objc_strong_lifetime:
    return destroyARCStrongPrecise;
  case QualType::DK_objc_weak_lifetime:
    return destroyARCWeak;
  case QualType::DK_nontrivial_c_struct:
    return destroyNonTrivialCStruct;
  }
  llvm_unreachable("Unknown DestructionKind");
}

/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
                                    Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");
  assert(needsEHCleanup(dtorKind));

  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
}

/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
                                  Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");

  CleanupKind cleanupKind = getCleanupKind(dtorKind);
  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
              cleanupKind & EHCleanup);
}

void CodeGenFunction::pushLifetimeExtendedDestroy(
    QualType::DestructionKind dtorKind, Address addr, QualType type) {
  CleanupKind cleanupKind = getCleanupKind(dtorKind);
  pushLifetimeExtendedDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
                              cleanupKind & EHCleanup);
}

void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
                                  QualType type, Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
                                     useEHCleanupForArray);
}

// Pushes a destroy and defers its deactivation until its
// CleanupDeactivationScope is exited.
void CodeGenFunction::pushDestroyAndDeferDeactivation(
    QualType::DestructionKind dtorKind, Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");

  CleanupKind cleanupKind = getCleanupKind(dtorKind);
  pushDestroyAndDeferDeactivation(
      cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup);
}

void CodeGenFunction::pushDestroyAndDeferDeactivation(
    CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray) {
  llvm::Instruction *DominatingIP =
      Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
  pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray);
  DeferredDeactivationCleanupStack.push_back(
      {EHStack.stable_begin(), DominatingIP});
}

void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}

void CodeGenFunction::pushKmpcAllocFree(
    CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
  EHStack.pushCleanup<KmpcAllocFree>(Kind, AddrSizePair);
}

void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
                                                  Address addr, QualType type,
                                                  Destroyer *destroyer,
                                                  bool useEHCleanupForArray) {
  // If we're not in a conditional branch, we don't need to bother generating a
  // conditional cleanup.
  if (!isInConditionalBranch()) {
    // FIXME: When popping normal cleanups, we need to keep this EH cleanup
    // around in case a temporary's destructor throws an exception.

    // Add the cleanup to the EHStack. After the full-expr, this would be
    // deactivated before being popped from the stack.
    pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer,
                                    useEHCleanupForArray);

    // Since this is lifetime-extended, push it once again to the EHStack after
    // the full expression.
    return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
        cleanupKind, Address::invalid(), addr, type, destroyer,
        useEHCleanupForArray);
  }

  // Otherwise, we should only destroy the object if it's been initialized.

  using ConditionalCleanupType =
      EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
                                       Destroyer *, bool>;
  DominatingValue<Address>::saved_type SavedAddr = saveValueInCond(addr);

  // Remember to emit the cleanup if we branch out before the end of the
  // full-expression (e.g. through a stmt-expr or a coroutine suspension).
  AllocaTrackerRAII DeactivationAllocas(*this);
  Address ActiveFlagForDeactivation = createCleanupActiveFlag();

  pushCleanupAndDeferDeactivation<ConditionalCleanupType>(
      cleanupKind, SavedAddr, type, destroyer, useEHCleanupForArray);
  initFullExprCleanupWithFlag(ActiveFlagForDeactivation);
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  // Erase the active flag if the cleanup was not emitted.
  cleanup.AddAuxAllocas(std::move(DeactivationAllocas).Take());

  // Since this is lifetime-extended, push it once again to the EHStack after
  // the full expression.
  // The previous active flag would always be 'false' due to forced deferred
  // deactivation. Use a separate flag for lifetime-extension to correctly
  // remember if this branch was taken and the object was initialized.
  Address ActiveFlagForLifetimeExt = createCleanupActiveFlag();
  pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
      cleanupKind, ActiveFlagForLifetimeExt, SavedAddr, type, destroyer,
      useEHCleanupForArray);
}
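
// Illustrative case for the conditional path above (hypothetical C++ source):
//
//   const Foo &r = cond ? Foo(1) : Foo(2); // lifetime-extended temporaries
//
// Each arm's temporary is materialized only when that branch is taken, so the
// destroy must run only if that particular object was actually constructed;
// hence the separate active flag recorded for the lifetime-extended cleanup.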

/// emitDestroy - Immediately perform the destruction of the given
/// object.
///
/// \param addr - the address of the object; a type*
/// \param type - the type of the object; if an array type, all
///   objects are destroyed in reverse order
/// \param destroyer - the function to call to destroy individual
///   elements
/// \param useEHCleanupForArray - whether an EH cleanup should be
///   used when destroying array elements, in case one of the
///   destructions throws an exception
void CodeGenFunction::emitDestroy(Address addr, QualType type,
                                  Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  const ArrayType *arrayType = getContext().getAsArrayType(type);
  if (!arrayType)
    return destroyer(*this, addr, type);

  llvm::Value *length = emitArrayLength(arrayType, type, addr);

  CharUnits elementAlign =
      addr.getAlignment()
          .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));

  // Normally we have to check whether the array is zero-length.
  bool checkZeroLength = true;

  // But if the array length is constant, we can suppress that.
  if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
    // ...and if it's constant zero, we can just skip the entire thing.
    if (constLength->isZero()) return;
    checkZeroLength = false;
  }

  llvm::Value *begin = addr.emitRawPointer(*this);
  llvm::Value *end =
      Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
  emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                   checkZeroLength, useEHCleanupForArray);
}

/// emitArrayDestroy - Destroys all the elements of the given array,
/// beginning from last to first. The array cannot be zero-length.
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
/// \param elementType - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
///   the remaining elements in case the destruction of a single
///   element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
                                       llvm::Value *end,
                                       QualType elementType,
                                       CharUnits elementAlign,
                                       Destroyer *destroyer,
                                       bool checkZeroLength,
                                       bool useEHCleanup) {
  assert(!elementType->isArrayType());

  // The basic structure here is a do-while loop, because we don't
  // need to check for the zero-element case.
  llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
  llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");

  if (checkZeroLength) {
    llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
                                                "arraydestroy.isempty");
    Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
  }

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  EmitBlock(bodyBB);
  llvm::PHINode *elementPast =
      Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
  elementPast->addIncoming(end, entryBB);

  // Shift the address back by one element.
  llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
  llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
  llvm::Value *element = Builder.CreateInBoundsGEP(
      llvmElementType, elementPast, negativeOne, "arraydestroy.element");

  if (useEHCleanup)
    pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
                                   destroyer);

  // Perform the actual destruction there.
  destroyer(*this, Address(element, llvmElementType, elementAlign),
            elementType);

  if (useEHCleanup)
    PopCleanupBlock();

  // Check whether we've reached the end.
  llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
  Builder.CreateCondBr(done, doneBB, bodyBB);
  elementPast->addIncoming(element, Builder.GetInsertBlock());

  // Done.
  EmitBlock(doneBB);
}
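
// Sketch of the loop emitted above for a hypothetical 'T arr[3]' with a
// non-trivial destructor (the destroyer call shown is illustrative):
//
//   arraydestroy.body:
//     %elementPast = phi ptr [ %end, %entry ], [ %element, ... ]
//     %element = getelementptr inbounds %T, ptr %elementPast, i64 -1
//     call void @T_dtor(ptr %element)
//     %done = icmp eq ptr %element, %begin
//     br i1 %done, label %arraydestroy.done, label %arraydestroy.body
//
// i.e. a do-while loop that walks from one-past-the-end back to the first
// element, destroying in reverse construction order.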

/// Perform partial array destruction as if in an EH cleanup. Unlike
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
                                    llvm::Value *begin, llvm::Value *end,
                                    QualType type, CharUnits elementAlign,
                                    CodeGenFunction::Destroyer *destroyer) {
  llvm::Type *elemTy = CGF.ConvertTypeForMem(type);

  // If the element type is itself an array, drill down.
  unsigned arrayDepth = 0;
  while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
    // VLAs don't require a GEP index to walk into.
    if (!isa<VariableArrayType>(arrayType))
      arrayDepth++;
    type = arrayType->getElementType();
  }

  if (arrayDepth) {
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);

    SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
    begin = CGF.Builder.CreateInBoundsGEP(
        elemTy, begin, gepIndices, "pad.arraybegin");
    end = CGF.Builder.CreateInBoundsGEP(
        elemTy, end, gepIndices, "pad.arrayend");
  }

  // Destroy the array. We don't ever need an EH cleanup because we
  // assume that we're in an EH cleanup ourselves, so a throwing
  // destructor causes an immediate terminate.
  CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                       /*checkZeroLength*/ true, /*useEHCleanup*/ false);
}

namespace {
  /// RegularPartialArrayDestroy - a cleanup which performs a partial
  /// array destroy where the end pointer is regularly determined and
  /// does not need to be loaded from a local.
  class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    llvm::Value *ArrayEnd;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
                               QualType elementType, CharUnits elementAlign,
                               CodeGenFunction::Destroyer *destroyer)
        : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
          ElementType(elementType), Destroyer(destroyer),
          ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };

  /// IrregularPartialArrayDestroy - a cleanup which performs a
  /// partial array destroy where the end pointer is irregularly
  /// determined and must be loaded from a local.
  class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    Address ArrayEndPointer;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
                                 Address arrayEndPointer,
                                 QualType elementType,
                                 CharUnits elementAlign,
                                 CodeGenFunction::Destroyer *destroyer)
        : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
          ElementType(elementType), Destroyer(destroyer),
          ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
      emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };
} // end anonymous namespace

/// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to
/// destroy already-constructed elements of the given array. The cleanup may be
/// popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                       Address arrayEndPointer,
                                                       QualType elementType,
                                                       CharUnits elementAlign,
                                                       Destroyer *destroyer) {
  pushFullExprCleanup<IrregularPartialArrayDestroy>(
      NormalAndEHCleanup, arrayBegin, arrayEndPointer, elementType,
      elementAlign, destroyer);
}

/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                     llvm::Value *arrayEnd,
                                                     QualType elementType,
                                                     CharUnits elementAlign,
                                                     Destroyer *destroyer) {
  pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
                                                  arrayBegin, arrayEnd,
                                                  elementType, elementAlign,
                                                  destroyer);
}

/// Lazily declare the @llvm.lifetime.start intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
  if (LifetimeStartFn)
    return LifetimeStartFn;
  LifetimeStartFn = llvm::Intrinsic::getOrInsertDeclaration(
      &getModule(), llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
  return LifetimeStartFn;
}

/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
  if (LifetimeEndFn)
    return LifetimeEndFn;
  LifetimeEndFn = llvm::Intrinsic::getOrInsertDeclaration(
      &getModule(), llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
  return LifetimeEndFn;
}

/// Lazily declare the @llvm.fake.use intrinsic.
llvm::Function *CodeGenModule::getLLVMFakeUseFn() {
  if (FakeUseFn)
    return FakeUseFn;
  FakeUseFn = llvm::Intrinsic::getOrInsertDeclaration(
      &getModule(), llvm::Intrinsic::fake_use);
  return FakeUseFn;
}

namespace {
  /// A cleanup to perform a release of an object at the end of a
  /// function. This is used to balance out the incoming +1 of a
  /// ns_consumed argument when we can't reasonably do that just by
  /// not doing the initial retain for a __block argument.
  struct ConsumeARCParameter final : EHScopeStack::Cleanup {
    ConsumeARCParameter(llvm::Value *param,
                        ARCPreciseLifetime_t precise)
        : Param(param), Precise(precise) {}

    llvm::Value *Param;
    ARCPreciseLifetime_t Precise;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitARCRelease(Param, Precise);
    }
  };
} // end anonymous namespace
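
// Example of what this cleanup balances (hypothetical Objective-C source):
//
//   void take(__attribute__((ns_consumed)) id obj);
//
// The caller passes 'obj' at +1. When the callee cannot balance that simply
// by skipping its initial retain, a ConsumeARCParameter cleanup releases the
// value at function exit to restore the retain-count balance.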
2651
2652/// Emit an alloca (or GlobalValue depending on target)
2653/// for the specified parameter and set up LocalDeclMap.
2654void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
2655 unsigned ArgNo) {
2656 bool NoDebugInfo = false;
2657 // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2658 assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2659 "Invalid argument to EmitParmDecl");
2660
2661 // Set the name of the parameter's initial value to make IR easier to
2662 // read. Don't modify the names of globals.
2663 if (!isa<llvm::GlobalValue>(Val: Arg.getAnyValue()))
2664 Arg.getAnyValue()->setName(D.getName());
2665
2666 QualType Ty = D.getType();
2667
2668 // Use better IR generation for certain implicit parameters.
2669 if (auto IPD = dyn_cast<ImplicitParamDecl>(Val: &D)) {
2670 // The only implicit argument a block has is its literal.
2671 // This may be passed as an inalloca'ed value on Windows x86.
2672 if (BlockInfo) {
2673 llvm::Value *V = Arg.isIndirect()
2674 ? Builder.CreateLoad(Addr: Arg.getIndirectAddress())
2675 : Arg.getDirectValue();
2676 setBlockContextParameter(D: IPD, argNum: ArgNo, ptr: V);
2677 return;
2678 }
2679 // Suppressing debug info for ThreadPrivateVar parameters, else it hides
2680 // debug info of TLS variables.
2681 NoDebugInfo =
2682 (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
2683 }
2684
2685 Address DeclPtr = Address::invalid();
2686 RawAddress AllocaPtr = Address::invalid();
2687 bool DoStore = false;
2688 bool IsScalar = hasScalarEvaluationKind(T: Ty);
2689 bool UseIndirectDebugAddress = false;
2690
2691 // If we already have a pointer to the argument, reuse the input pointer.
2692 if (Arg.isIndirect()) {
2693 DeclPtr = Arg.getIndirectAddress();
2694 DeclPtr = DeclPtr.withElementType(ElemTy: ConvertTypeForMem(T: Ty));
2695 // Indirect argument is in alloca address space, which may be different
2696 // from the default address space.
2697 auto AllocaAS = CGM.getASTAllocaAddressSpace();
2698 auto *V = DeclPtr.emitRawPointer(CGF&: *this);
2699 AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment());
2700
2701 // For truly ABI indirect arguments -- those that are not `byval` -- store
2702 // the address of the argument on the stack to preserve debug information.
2703 ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
2704 if (ArgInfo.isIndirect())
2705 UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
2706 if (UseIndirectDebugAddress) {
2707 auto PtrTy = getContext().getPointerType(T: Ty);
2708 AllocaPtr = CreateMemTemp(T: PtrTy, Align: getContext().getTypeAlignInChars(T: PtrTy),
2709 Name: D.getName() + ".indirect_addr");
2710 EmitStoreOfScalar(Value: V, Addr: AllocaPtr, /* Volatile */ false, Ty: PtrTy);
2711 }
2712
2713 auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2714 auto DestLangAS =
2715 getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
2716 if (SrcLangAS != DestLangAS) {
2717 assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2718 CGM.getDataLayout().getAllocaAddrSpace());
2719 auto DestAS = getContext().getTargetAddressSpace(AS: DestLangAS);
2720 auto *T = llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: DestAS);
2721 DeclPtr = DeclPtr.withPointer(
2722 NewPointer: getTargetHooks().performAddrSpaceCast(CGF&: *this, V, SrcAddr: SrcLangAS, DestTy: T, IsNonNull: true),
2723 IsKnownNonNull: DeclPtr.isKnownNonNull());
2724 }
2725
2726 // Push a destructor cleanup for this parameter if the ABI requires it.
2727 // Don't push a cleanup in a thunk for a method that will also emit a
2728 // cleanup.
2729 if (Ty->isRecordType() && !CurFuncIsThunk &&
2730 Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
2731 if (QualType::DestructionKind DtorKind =
2732 D.needsDestruction(Ctx: getContext())) {
2733 assert((DtorKind == QualType::DK_cxx_destructor ||
2734 DtorKind == QualType::DK_nontrivial_c_struct) &&
2735 "unexpected destructor type");
2736 pushDestroy(dtorKind: DtorKind, addr: DeclPtr, type: Ty);
2737 CalleeDestructedParamCleanups[cast<ParmVarDecl>(Val: &D)] =
2738 EHStack.stable_begin();
2739 }
2740 }
2741 } else {
2742 // Check if the parameter address is controlled by OpenMP runtime.
2743 Address OpenMPLocalAddr =
2744 getLangOpts().OpenMP
2745 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(CGF&: *this, VD: &D)
2746 : Address::invalid();
2747 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2748 DeclPtr = OpenMPLocalAddr;
2749 AllocaPtr = DeclPtr;
2750 } else {
2751 // Otherwise, create a temporary to hold the value.
2752 DeclPtr = CreateMemTemp(T: Ty, Align: getContext().getDeclAlign(D: &D),
2753 Name: D.getName() + ".addr", Alloca: &AllocaPtr);
2754 }
2755 DoStore = true;
2756 }
2757
2758 llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2759
2760 LValue lv = MakeAddrLValue(Addr: DeclPtr, T: Ty);
2761 if (IsScalar) {
2762 Qualifiers qs = Ty.getQualifiers();
2763 if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
2764 // We honor __attribute__((ns_consumed)) for types with lifetime.
2765 // For __strong, it's handled by just skipping the initial retain;
2766 // otherwise we have to balance out the initial +1 with an extra
2767 // cleanup to do the release at the end of the function.
2768 bool isConsumed = D.hasAttr<NSConsumedAttr>();
2769
2770 // If a parameter is pseudo-strong then we can omit the implicit retain.
2771 if (D.isARCPseudoStrong()) {
2772 assert(lt == Qualifiers::OCL_Strong &&
2773 "pseudo-strong variable isn't strong?");
2774 assert(qs.hasConst() && "pseudo-strong variable should be const!");
2775 lt = Qualifiers::OCL_ExplicitNone;
2776 }
2777
2778 // Load objects passed indirectly.
2779 if (Arg.isIndirect() && !ArgVal)
2780 ArgVal = Builder.CreateLoad(Addr: DeclPtr);
2781
      if (lt == Qualifiers::OCL_Strong) {
        if (!isConsumed) {
          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
            // Use objc_storeStrong(&dest, value) for retaining the
            // object. But first, store a null into 'dest' because
            // objc_storeStrong attempts to release its old value.
            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
            EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
            DoStore = false;
          } else {
            // Don't use objc_retainBlock for block pointers, because we
            // don't want to Block_copy something just because we got it
            // as a parameter.
            ArgVal = EmitARCRetainNonBlock(ArgVal);
          }
        }
      } else {
        // Push the cleanup for a consumed parameter.
        if (isConsumed) {
          ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
                                              ? ARCPreciseLifetime
                                              : ARCImpreciseLifetime);
          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
                                                   precise);
        }

        if (lt == Qualifiers::OCL_Weak) {
          EmitARCInitWeak(DeclPtr, ArgVal);
          DoStore = false; // The weak init is a store, no need to do two.
        }
      }

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);

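  // Record the parameter's address so later references to it resolve to
  // this slot.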
  setAddrOfLocalVar(&D, DeclPtr);

  // Push a FakeUse 'cleanup' object onto the EHStack for the parameter,
  // which may be the 'this' pointer. This causes the emission of a fake.use
  // call with the parameter as argument at the end of the function.
  if (CGM.getCodeGenOpts().getExtendVariableLiveness() ==
          CodeGenOptions::ExtendVariableLivenessKind::All ||
      (CGM.getCodeGenOpts().getExtendVariableLiveness() ==
           CodeGenOptions::ExtendVariableLivenessKind::This &&
       &D == CXXABIThisDecl)) {
    if (shouldExtendLifetime(getContext(), CurCodeDecl, D, CXXABIThisDecl))
      EHStack.pushCleanup<FakeUse>(NormalFakeUse, DeclPtr);
  }

  // Emit debug info for param declarations in non-thunk functions.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
        !NoDebugInfo) {
      llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
          &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
      if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
        DI->getParamDbgMappings().insert({Var, DILocalVar});
    }
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this));

  // We can only check return value nullability if all arguments to the
  // function satisfy their nullability preconditions. This makes it necessary
  // to emit null checks for args in the function body itself.
  if (requiresReturnValueNullabilityCheck()) {
    auto Nullability = Ty->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      SanitizerScope SanScope(this);
      RetValNullabilityPrecondition =
          Builder.CreateAnd(RetValNullabilityPrecondition,
                            Builder.CreateIsNotNull(Arg.getAnyValue()));
    }
  }
}

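// Emit a user-defined OpenMP reduction. This is skipped entirely unless
// OpenMP is enabled and the declaration is referenced (or -femit-all-decls
// is in effect).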
void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
                                            CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}

void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
                                         CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
      (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
}

void CodeGenModule::EmitOpenACCDeclare(const OpenACCDeclareDecl *D,
                                       CodeGenFunction *CGF) {
  // This is a no-op; we can just ignore these declarations.
}

void CodeGenModule::EmitOpenACCRoutine(const OpenACCRoutineDecl *D,
                                       CodeGenFunction *CGF) {
  // This is a no-op; we can just ignore these declarations.
}

void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
  getOpenMPRuntime().processRequiresDirective(D);
}

void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
  for (const Expr *E : D->varlist()) {
    const auto *DE = cast<DeclRefExpr>(E);
    const auto *VD = cast<VarDecl>(DE->getDecl());

    // Skip all but globals.
    if (!VD->hasGlobalStorage())
      continue;

    // Check if the global has been materialized yet or not. If not, we are
    // done, as any later generation will utilize the OMPAllocateDeclAttr.
    // However, if we already emitted the global we might have done so before
    // the OMPAllocateDeclAttr was attached, leading to the wrong address
    // space (potentially). While not pretty, common practice is to remove the
    // old IR global and generate a new one, so we do that here too. Uses are
    // replaced properly.
    StringRef MangledName = getMangledName(VD);
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    if (!Entry)
      continue;

    // We can also keep the existing global if the address space is what we
    // expect it to be; if not, it is replaced.
    clang::LangAS GVAS = GetGlobalVarAddressSpace(VD);
    auto TargetAS = getContext().getTargetAddressSpace(GVAS);
    if (Entry->getType()->getAddressSpace() == TargetAS)
      continue;

    llvm::PointerType *PTy =
        llvm::PointerType::get(getLLVMContext(), TargetAS);

    // Replace all uses of the old global with a cast. Since we mutate the
    // type in place, we need an intermediate that takes the spot of the old
    // entry until we can create the cast.
    llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
        getModule(), Entry->getValueType(), false,
        llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
        llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
    Entry->replaceAllUsesWith(DummyGV);

    Entry->mutateType(PTy);
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getAddrSpaceCast(Entry, DummyGV->getType());

    // Now that we have a casted version of the changed global, the dummy can
    // be replaced and deleted.
    DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
    DummyGV->eraseFromParent();
  }
}


std::optional<CharUnits>
CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
  if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
    if (Expr *Alignment = AA->getAlignment()) {
      unsigned UserAlign =
          Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
      CharUnits NaturalAlign =
          getNaturalTypeAlignment(VD->getType().getNonReferenceType());

      // OpenMP 5.1, page 185, lines 7-10:
      // Each item in the align modifier list must be aligned to the maximum
      // of the specified alignment and the type's natural alignment.
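      // For example, align(2) applied to an 'int' with natural alignment 4
      // yields an alignment of 4, while align(16) yields 16.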
      return CharUnits::fromQuantity(
          std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
    }
  }
  return std::nullopt;
}