1//===--- CGClass.cpp - Emit LLVM Code for C++ classes -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code dealing with C++ code generation of classes
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGBlocks.h"
15#include "CGCXXABI.h"
16#include "CGDebugInfo.h"
17#include "CGRecordLayout.h"
18#include "CodeGenFunction.h"
19#include "TargetInfo.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/CXXInheritance.h"
22#include "clang/AST/CharUnits.h"
23#include "clang/AST/DeclTemplate.h"
24#include "clang/AST/EvaluatedExprVisitor.h"
25#include "clang/AST/RecordLayout.h"
26#include "clang/AST/StmtCXX.h"
27#include "clang/Basic/CodeGenOptions.h"
28#include "clang/CodeGen/CGFunctionInfo.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/Metadata.h"
31#include "llvm/Support/SaveAndRestore.h"
32#include "llvm/Transforms/Utils/SanitizerStats.h"
33#include <optional>
34
35using namespace clang;
36using namespace CodeGen;
37
38/// Return the best known alignment for an unknown pointer to a
39/// particular class.
40CharUnits CodeGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) {
41 if (!RD->hasDefinition())
42 return CharUnits::One(); // Hopefully won't be used anywhere.
43
44 auto &layout = getContext().getASTRecordLayout(D: RD);
45
46 // If the class is final, then we know that the pointer points to an
47 // object of that type and can use the full alignment.
48 if (RD->isEffectivelyFinal())
49 return layout.getAlignment();
50
51 // Otherwise, we have to assume it could be a subclass.
52 return layout.getNonVirtualAlignment();
53}
54
55/// Return the smallest possible amount of storage that might be allocated
56/// starting from the beginning of an object of a particular class.
57///
58/// This may be smaller than sizeof(RD) if RD has virtual base classes.
59CharUnits CodeGenModule::getMinimumClassObjectSize(const CXXRecordDecl *RD) {
60 if (!RD->hasDefinition())
61 return CharUnits::One();
62
63 auto &layout = getContext().getASTRecordLayout(D: RD);
64
65 // If the class is final, then we know that the pointer points to an
66 // object of that type and can use the full alignment.
67 if (RD->isEffectivelyFinal())
68 return layout.getSize();
69
70 // Otherwise, we have to assume it could be a subclass.
71 return std::max(a: layout.getNonVirtualSize(), b: CharUnits::One());
72}
73
74/// Return the best known alignment for a pointer to a virtual base,
75/// given the alignment of a pointer to the derived class.
76CharUnits CodeGenModule::getVBaseAlignment(CharUnits actualDerivedAlign,
77 const CXXRecordDecl *derivedClass,
78 const CXXRecordDecl *vbaseClass) {
79 // The basic idea here is that an underaligned derived pointer might
80 // indicate an underaligned base pointer.
81
82 assert(vbaseClass->isCompleteDefinition());
83 auto &baseLayout = getContext().getASTRecordLayout(D: vbaseClass);
84 CharUnits expectedVBaseAlign = baseLayout.getNonVirtualAlignment();
85
86 return getDynamicOffsetAlignment(ActualAlign: actualDerivedAlign, Class: derivedClass,
87 ExpectedTargetAlign: expectedVBaseAlign);
88}
89
90CharUnits
91CodeGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign,
92 const CXXRecordDecl *baseDecl,
93 CharUnits expectedTargetAlign) {
94 // If the base is an incomplete type (which is, alas, possible with
95 // member pointers), be pessimistic.
96 if (!baseDecl->isCompleteDefinition())
97 return std::min(a: actualBaseAlign, b: expectedTargetAlign);
98
99 auto &baseLayout = getContext().getASTRecordLayout(D: baseDecl);
100 CharUnits expectedBaseAlign = baseLayout.getNonVirtualAlignment();
101
102 // If the class is properly aligned, assume the target offset is, too.
103 //
104 // This actually isn't necessarily the right thing to do --- if the
105 // class is a complete object, but it's only properly aligned for a
106 // base subobject, then the alignments of things relative to it are
107 // probably off as well. (Note that this requires the alignment of
108 // the target to be greater than the NV alignment of the derived
109 // class.)
110 //
111 // However, our approach to this kind of under-alignment can only
112 // ever be best effort; after all, we're never going to propagate
113 // alignments through variables or parameters. Note, in particular,
114 // that constructing a polymorphic type in an address that's less
115 // than pointer-aligned will generally trap in the constructor,
116 // unless we someday add some sort of attribute to change the
117 // assumed alignment of 'this'. So our goal here is pretty much
118 // just to allow the user to explicitly say that a pointer is
119 // under-aligned and then safely access its fields and vtables.
120 if (actualBaseAlign >= expectedBaseAlign) {
121 return expectedTargetAlign;
122 }
123
124 // Otherwise, we might be offset by an arbitrary multiple of the
125 // actual alignment. The correct adjustment is to take the min of
126 // the two alignments.
127 return std::min(a: actualBaseAlign, b: expectedTargetAlign);
128}
129
/// Load the address of 'this' for the current C++ member function,
/// lazily computing and caching its alignment on first use.
Address CodeGenFunction::LoadCXXThisAddress() {
  assert(CurFuncDecl && "loading 'this' without a func declaration?");
  auto *MD = cast<CXXMethodDecl>(Val: CurFuncDecl);

  // Lazily compute CXXThisAlignment.
  if (CXXThisAlignment.isZero()) {
    // Just use the best known alignment for the parent.
    // TODO: if we're currently emitting a complete-object ctor/dtor,
    // we can always use the complete-object alignment.
    CXXThisAlignment = CGM.getClassPointerAlignment(RD: MD->getParent());
  }

  // 'this' is non-null inside a member function, so mark it KnownNonNull.
  return makeNaturalAddressForPointer(
      Ptr: LoadCXXThis(), T: MD->getFunctionObjectParameterType(), Alignment: CXXThisAlignment,
      ForPointeeType: false, BaseInfo: nullptr, TBAAInfo: nullptr, IsKnownNonNull: KnownNonNull);
}
146
147/// Emit the address of a field using a member data pointer.
148///
149/// \param E Only used for emergency diagnostics
/// Emit the address of a field using a member data pointer.
///
/// \param E Only used for emergency diagnostics
/// \param base the address of the containing object
/// \param memberPtr the member data pointer value to apply
/// \param memberPtrType the type of the member pointer
/// \param IsInBounds whether the resulting GEP may be marked inbounds
/// \param BaseInfo/TBAAInfo out-parameters filled in while computing the
///        natural alignment of the pointee type
Address CodeGenFunction::EmitCXXMemberDataPointerAddress(
    const Expr *E, Address base, llvm::Value *memberPtr,
    const MemberPointerType *memberPtrType, bool IsInBounds,
    LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
  // Ask the ABI to compute the actual address.
  llvm::Value *ptr = CGM.getCXXABI().EmitMemberDataPointerAddress(
      CGF&: *this, E, Base: base, MemPtr: memberPtr, MPT: memberPtrType, IsInBounds);

  QualType memberType = memberPtrType->getPointeeType();
  // Start from the natural alignment of the member's type, then degrade it
  // if the base pointer itself was under-aligned for its class.
  CharUnits memberAlign =
      CGM.getNaturalTypeAlignment(T: memberType, BaseInfo, TBAAInfo);
  memberAlign = CGM.getDynamicOffsetAlignment(
      actualBaseAlign: base.getAlignment(), baseDecl: memberPtrType->getMostRecentCXXRecordDecl(),
      expectedTargetAlign: memberAlign);
  return Address(ptr, ConvertTypeForMem(T: memberPtrType->getPointeeType()),
                 memberAlign);
}
167
168CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(
169 const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start,
170 CastExpr::path_const_iterator End) {
171 CharUnits Offset = CharUnits::Zero();
172
173 const ASTContext &Context = getContext();
174 const CXXRecordDecl *RD = DerivedClass;
175
176 for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
177 const CXXBaseSpecifier *Base = *I;
178 assert(!Base->isVirtual() && "Should not see virtual bases here!");
179
180 // Get the layout.
181 const ASTRecordLayout &Layout = Context.getASTRecordLayout(D: RD);
182
183 const auto *BaseDecl = Base->getType()->castAsCXXRecordDecl();
184 // Add the offset.
185 Offset += Layout.getBaseClassOffset(Base: BaseDecl);
186
187 RD = BaseDecl;
188 }
189
190 return Offset;
191}
192
193llvm::Constant *CodeGenModule::GetNonVirtualBaseClassOffset(
194 const CXXRecordDecl *ClassDecl, CastExpr::path_const_iterator PathBegin,
195 CastExpr::path_const_iterator PathEnd) {
196 assert(PathBegin != PathEnd && "Base path should not be empty!");
197
198 CharUnits Offset =
199 computeNonVirtualBaseClassOffset(DerivedClass: ClassDecl, Start: PathBegin, End: PathEnd);
200 if (Offset.isZero())
201 return nullptr;
202
203 llvm::Type *PtrDiffTy =
204 getTypes().ConvertType(T: getContext().getPointerDiffType());
205
206 return llvm::ConstantInt::get(Ty: PtrDiffTy, V: Offset.getQuantity());
207}
208
209/// Gets the address of a direct base class within a complete object.
210/// This should only be used for (1) non-virtual bases or (2) virtual bases
211/// when the type is known to be complete (e.g. in complete destructors).
212///
213/// The object pointed to by 'This' is assumed to be non-null.
214Address CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(
215 Address This, const CXXRecordDecl *Derived, const CXXRecordDecl *Base,
216 bool BaseIsVirtual) {
217 // 'this' must be a pointer (in some address space) to Derived.
218 assert(This.getElementType() == ConvertType(Derived));
219
220 // Compute the offset of the virtual base.
221 CharUnits Offset;
222 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D: Derived);
223 if (BaseIsVirtual)
224 Offset = Layout.getVBaseClassOffset(VBase: Base);
225 else
226 Offset = Layout.getBaseClassOffset(Base);
227
228 // Shift and cast down to the base type.
229 // TODO: for complete types, this should be possible with a GEP.
230 Address V = This;
231 if (!Offset.isZero()) {
232 V = V.withElementType(ElemTy: Int8Ty);
233 V = Builder.CreateConstInBoundsByteGEP(Addr: V, Offset);
234 }
235 return V.withElementType(ElemTy: ConvertType(T: Base));
236}
237
/// Apply a statically-known non-virtual offset and/or a dynamically
/// computed virtual-base offset to \p addr, returning the adjusted address
/// together with the best alignment that can be inferred for it.
///
/// \param nonVirtualOffset statically-known byte offset (may be zero)
/// \param virtualOffset    dynamic byte offset, or null if there is none;
///                         at least one of the two must be nonzero/non-null
/// \param nearestVBase     the virtual base the dynamic offset reaches;
///                         must be non-null when virtualOffset is set
static Address ApplyNonVirtualAndVirtualOffset(
    CodeGenFunction &CGF, Address addr, CharUnits nonVirtualOffset,
    llvm::Value *virtualOffset, const CXXRecordDecl *derivedClass,
    const CXXRecordDecl *nearestVBase) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    // Under the relative Itanium vtable layout, vbase offsets are i32, so
    // the static component must match that width to be addable below.
    llvm::Type *OffsetType =
        (CGF.CGM.getTarget().getCXXABI().isItaniumFamily() &&
         CGF.CGM.getItaniumVTableContext().isRelativeLayout())
            ? CGF.Int32Ty
            : CGF.PtrDiffTy;
    baseOffset =
        llvm::ConstantInt::get(Ty: OffsetType, V: nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(LHS: virtualOffset, RHS: baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  llvm::Value *ptr = addr.emitRawPointer(CGF);
  ptr = CGF.Builder.CreateInBoundsGEP(Ty: CGF.Int8Ty, Ptr: ptr, IdxList: baseOffset, Name: "add.ptr");

  // If we have a virtual component, the alignment of the result will
  // be relative only to the known alignment of that vbase.
  CharUnits alignment;
  if (virtualOffset) {
    assert(nearestVBase && "virtual offset without vbase?");
    alignment = CGF.CGM.getVBaseAlignment(actualDerivedAlign: addr.getAlignment(), derivedClass,
                                          vbaseClass: nearestVBase);
  } else {
    alignment = addr.getAlignment();
  }
  // Account for the static offset applied on top of that starting point.
  alignment = alignment.alignmentAtOffset(offset: nonVirtualOffset);

  return Address(ptr, CGF.Int8Ty, alignment);
}
280
/// Compute the address of the base-class subobject of \p Derived reached by
/// following the cast path [PathBegin, PathEnd).  Handles an optional
/// leading virtual step, optional null-checking of the source pointer, and
/// emits sanitizer type checks when enabled.
Address CodeGenFunction::GetAddressOfBaseClass(
    Address Value, const CXXRecordDecl *Derived,
    CastExpr::path_const_iterator PathBegin,
    CastExpr::path_const_iterator PathEnd, bool NullCheckValue,
    SourceLocation Loc) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = nullptr;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase = (*Start)->getType()->castAsCXXRecordDecl();
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset = CGM.computeNonVirtualBaseClassOffset(
      DerivedClass: VBase ? VBase : Derived, Start, End: PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(D: Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = nullptr; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BaseValueTy = ConvertType(T: (PathEnd[-1])->getType());
  llvm::Type *PtrTy = llvm::PointerType::get(
      C&: CGM.getLLVMContext(), AddressSpace: Value.getType()->getPointerAddressSpace());

  CanQualType DerivedTy = getContext().getCanonicalTagType(TD: Derived);
  CharUnits DerivedAlign = CGM.getClassPointerAlignment(RD: Derived);

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    if (sanitizePerformTypeCheck()) {
      // Skip the null check if the caller didn't request one; the pointer
      // may legitimately be null in that case.
      SanitizerSet SkippedChecks;
      SkippedChecks.set(K: SanitizerKind::Null, Value: !NullCheckValue);
      EmitTypeCheck(TCK: TCK_Upcast, Loc, V: Value.emitRawPointer(CGF&: *this), Type: DerivedTy,
                    Alignment: DerivedAlign, SkippedChecks);
    }
    return Value.withElementType(ElemTy: BaseValueTy);
  }

  llvm::BasicBlock *origBB = nullptr;
  llvm::BasicBlock *endBB = nullptr;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock(name: "cast.notnull");
    endBB = createBasicBlock(name: "cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Addr: Value);
    Builder.CreateCondBr(Cond: isNull, True: endBB, False: notNullBB);
    EmitBlock(BB: notNullBB);
  }

  if (sanitizePerformTypeCheck()) {
    // The pointer is known non-null on this path, so always skip the null
    // check inside the sanitizer.
    SanitizerSet SkippedChecks;
    SkippedChecks.set(K: SanitizerKind::Null, Value: true);
    EmitTypeCheck(TCK: VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc,
                  V: Value.emitRawPointer(CGF&: *this), Type: DerivedTy, Alignment: DerivedAlign,
                  SkippedChecks);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = nullptr;
  if (VBase) {
    VirtualOffset =
        CGM.getCXXABI().GetVirtualBaseClassOffset(CGF&: *this, This: Value, ClassDecl: Derived, BaseClassDecl: VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(CGF&: *this, addr: Value, nonVirtualOffset: NonVirtualOffset,
                                          virtualOffset: VirtualOffset, derivedClass: Derived, nearestVBase: VBase);

  // Cast to the destination type.
  Value = Value.withElementType(ElemTy: BaseValueTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(Dest: endBB);
    EmitBlock(BB: endBB);

    // Merge the adjusted pointer with null from the skipped path; the
    // result is therefore no longer known non-null.
    llvm::PHINode *PHI = Builder.CreatePHI(Ty: PtrTy, NumReservedValues: 2, Name: "cast.result");
    PHI->addIncoming(V: Value.emitRawPointer(CGF&: *this), BB: notNullBB);
    PHI->addIncoming(V: llvm::Constant::getNullValue(Ty: PtrTy), BB: origBB);
    Value = Value.withPointer(NewPointer: PHI, IsKnownNonNull: NotKnownNonNull);
  }

  return Value;
}
387
/// Compute the address of the derived-class object containing the base
/// subobject at \p BaseAddr, by subtracting the non-virtual offset along
/// the path [PathBegin, PathEnd).  Optionally null-checks the input.
/// Note: only non-virtual derivation paths are supported here.
Address CodeGenFunction::GetAddressOfDerivedClass(
    Address BaseAddr, const CXXRecordDecl *Derived,
    CastExpr::path_const_iterator PathBegin,
    CastExpr::path_const_iterator PathEnd, bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CanQualType DerivedTy = getContext().getCanonicalTagType(TD: Derived);
  llvm::Type *DerivedValueTy = ConvertType(T: DerivedTy);

  llvm::Value *NonVirtualOffset =
      CGM.GetNonVirtualBaseClassOffset(ClassDecl: Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return BaseAddr.withElementType(ElemTy: DerivedValueTy);
  }

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = nullptr;

  if (NullCheckValue) {
    CastNull = createBasicBlock(name: "cast.null");
    CastNotNull = createBasicBlock(name: "cast.notnull");
    CastEnd = createBasicBlock(name: "cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Addr: BaseAddr);
    Builder.CreateCondBr(Cond: IsNull, True: CastNull, False: CastNotNull);
    EmitBlock(BB: CastNotNull);
  }

  // Apply the offset.  A derived-to-base adjustment adds the offset, so
  // going the other way we subtract (negate) it.
  Address Addr = BaseAddr.withElementType(ElemTy: Int8Ty);
  Addr = Builder.CreateInBoundsGEP(
      Addr, IdxList: Builder.CreateNeg(V: NonVirtualOffset), ElementType: Int8Ty,
      Align: CGM.getClassPointerAlignment(RD: Derived), Name: "sub.ptr");

  // Just cast.
  Addr = Addr.withElementType(ElemTy: DerivedValueTy);

  // Produce a PHI if we had a null-check.
  if (NullCheckValue) {
    Builder.CreateBr(Dest: CastEnd);
    EmitBlock(BB: CastNull);
    Builder.CreateBr(Dest: CastEnd);
    EmitBlock(BB: CastEnd);

    // A null input maps to a null result.
    llvm::Value *Value = Addr.emitRawPointer(CGF&: *this);
    llvm::PHINode *PHI = Builder.CreatePHI(Ty: Value->getType(), NumReservedValues: 2);
    PHI->addIncoming(V: Value, BB: CastNotNull);
    PHI->addIncoming(V: llvm::Constant::getNullValue(Ty: Value->getType()), BB: CastNull);
    return Address(PHI, Addr.getElementType(),
                   CGM.getClassPointerAlignment(RD: Derived));
  }

  return Addr;
}
445
/// Compute the VTT (virtual table table) argument to pass to a base-class
/// constructor or destructor call, or return null if the callee does not
/// take a VTT parameter.
///
/// \param GD the ctor/dtor being called
/// \param ForVirtualBase whether the callee initializes a virtual base
/// \param Delegating whether this is a delegating constructor call
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return nullptr;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(Val: CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(Val: GD.getDecl())->getParent();

  // Index of the sub-VTT for the callee within our VTT.
  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    // Look up the sub-VTT index for this base subobject by its offset.
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D: RD);
    CharUnits BaseOffset = ForVirtualBase ? Layout.getVBaseClassOffset(VBase: Base)
                                          : Layout.getBaseClassOffset(Base);

    SubVTTIndex =
        CGM.getVTables().getSubVTTIndex(RD, Base: BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  llvm::Value *VTT;
  if (CGM.getCXXABI().NeedsVTTParameter(GD: CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
  }
  return Builder.CreateConstInBoundsGEP1_64(Ty: CGM.GlobalsInt8PtrTy, Ptr: VTT,
                                            Idx0: SubVTTIndex);
}
490
namespace {
/// Call the destructor for a direct base class.
///
/// Pushed onto the EH cleanup stack so that an already-initialized base is
/// destroyed if a later part of the constructor throws.
struct CallBaseDtor final : EHScopeStack::Cleanup {
  const CXXRecordDecl *BaseClass; // the base whose dtor we will call
  bool BaseIsVirtual;             // whether it is a virtual base
  CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // The enclosing function is the derived class's ctor/dtor.
    const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(Val: CGF.CurCodeDecl)->getParent();

    const CXXDestructorDecl *D = BaseClass->getDestructor();
    // We are already inside a destructor, so presumably the object being
    // destroyed should have the expected type.
    QualType ThisTy = D->getFunctionObjectParameterType();
    Address Addr = CGF.GetAddressOfDirectBaseInCompleteClass(
        This: CGF.LoadCXXThisAddress(), Derived: DerivedClass, Base: BaseClass, BaseIsVirtual);
    CGF.EmitCXXDestructorCall(D, Type: Dtor_Base, ForVirtualBase: BaseIsVirtual,
                              /*Delegating=*/false, This: Addr, ThisTy);
  }
};

/// A visitor which checks whether an initializer uses 'this' in a
/// way which requires the vtable to be properly set.
struct DynamicThisUseChecker
    : ConstEvaluatedExprVisitor<DynamicThisUseChecker> {
  typedef ConstEvaluatedExprVisitor<DynamicThisUseChecker> super;

  // Set to true if any (explicit or implicit) CXXThisExpr is seen.
  bool UsesThis;

  DynamicThisUseChecker(const ASTContext &C) : super(C), UsesThis(false) {}

  // Black-list all explicit and implicit references to 'this'.
  //
  // Do we need to worry about external references to 'this' derived
  // from arbitrary code? If so, then anything which runs arbitrary
  // external code might potentially access the vtable.
  void VisitCXXThisExpr(const CXXThisExpr *E) { UsesThis = true; }
};
} // end anonymous namespace
532
533static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
534 DynamicThisUseChecker Checker(C);
535 Checker.Visit(S: Init);
536 return Checker.UsesThis;
537}
538
/// Emit the initialization of one base class from a constructor's
/// mem-initializer list entry, registering an EH cleanup to destroy it if
/// a later initializer throws.
static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit) {
  assert(BaseInit->isBaseInitializer() && "Must have base initializer!");

  Address ThisPtr = CGF.LoadCXXThisAddress();

  const auto *BaseClassDecl = BaseInit->getBaseClass()->castAsCXXRecordDecl();

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(C&: CGF.getContext(), Init: BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
      This: ThisPtr, Derived: ClassDecl, Base: BaseClassDecl, BaseIsVirtual: isBaseVirtual);
  AggValueSlot AggSlot = AggValueSlot::forAddr(
      addr: V, quals: Qualifiers(), isDestructed: AggValueSlot::IsDestructed,
      needsGC: AggValueSlot::DoesNotNeedGCBarriers, isAliased: AggValueSlot::IsNotAliased,
      mayOverlap: CGF.getOverlapForBaseInit(RD: ClassDecl, BaseRD: BaseClassDecl, IsVirtual: isBaseVirtual));

  CGF.EmitAggExpr(E: BaseInit->getInit(), AS: AggSlot);

  // If exceptions are enabled and the base has a non-trivial destructor,
  // arrange to destroy it should a subsequent initializer throw.
  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(Kind: EHCleanup, A: BaseClassDecl,
                                          A: isBaseVirtual);
}
572
573static bool isMemcpyEquivalentSpecialMember(CodeGenModule &CGM,
574 const CXXMethodDecl *D) {
575 auto *CD = dyn_cast<CXXConstructorDecl>(Val: D);
576 if (!(CD && CD->isCopyOrMoveConstructor()) &&
577 !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator())
578 return false;
579
580 // Non-trivially-copyable fields with pointer field protection need to be
581 // copied one by one.
582 ASTContext &Ctx = CGM.getContext();
583 const CXXRecordDecl *Parent = D->getParent();
584 if (!Ctx.arePFPFieldsTriviallyCopyable(RD: Parent) &&
585 Ctx.hasPFPFields(Ty: Ctx.getCanonicalTagType(TD: Parent)))
586 return false;
587
588 // We can emit a memcpy for a trivial copy or move constructor/assignment.
589 if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding())
590 return true;
591
592 // We *must* emit a memcpy for a defaulted union copy or move op.
593 if (D->getParent()->isUnion() && D->isDefaulted())
594 return true;
595
596 return false;
597}
598
599static void EmitLValueForAnyFieldInitialization(CodeGenFunction &CGF,
600 CXXCtorInitializer *MemberInit,
601 LValue &LHS) {
602 FieldDecl *Field = MemberInit->getAnyMember();
603 if (MemberInit->isIndirectMemberInitializer()) {
604 // If we are initializing an anonymous union field, drill down to the field.
605 IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
606 for (const auto *I : IndirectField->chain())
607 LHS = CGF.EmitLValueForFieldInitialization(Base: LHS, Field: cast<FieldDecl>(Val: I));
608 } else {
609 LHS = CGF.EmitLValueForFieldInitialization(Base: LHS, Field);
610 }
611}
612
/// Emit the initialization of one non-static data member from a
/// constructor's mem-initializer list entry, including the memcpy
/// fast path for trivially-copyable arrays in copy/move constructors.
static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  ApplyDebugLocation Loc(CGF, MemberInit->getSourceLocation());
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  CanQualType RecordTy = CGF.getContext().getCanonicalTagType(TD: ClassDecl);
  LValue LHS;

  // If a base constructor is being emitted, create an LValue that has the
  // non-virtual alignment.
  if (CGF.CurGD.getCtorType() == Ctor_Base)
    LHS = CGF.MakeNaturalAlignPointeeAddrLValue(V: ThisPtr, T: RecordTy);
  else
    LHS = CGF.MakeNaturalAlignAddrLValue(V: ThisPtr, T: RecordTy);

  EmitLValueForAnyFieldInitialization(CGF, MemberInit, LHS);

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array =
      CGF.getContext().getAsConstantArrayType(T: FieldType);
  if (Array && Constructor->isDefaulted() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(VAT: Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Val: MemberInit->getInit());
    if (BaseElementTy.isPODType(Context: CGF.getContext()) ||
        (CE &&
         isMemcpyEquivalentSpecialMember(CGM&: CGF.CGM, D: CE->getConstructor()))) {
      // Form an lvalue for the same field in the source object and copy
      // the aggregate directly, bypassing the per-element AST.
      unsigned SrcArgIndex =
          CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args);
      llvm::Value *SrcPtr =
          CGF.Builder.CreateLoad(Addr: CGF.GetAddrOfLocalVar(VD: Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(V: SrcPtr, T: RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(Base: ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(Dest: LHS, Src, EltTy: FieldType,
                            MayOverlap: CGF.getOverlapForFieldInit(FD: Field),
                            isVolatile: LHS.isVolatileQualified());
      // Ensure that we destroy the objects if an exception is thrown later in
      // the constructor.
      QualType::DestructionKind dtorKind = FieldType.isDestructedType();
      if (CGF.needsEHCleanup(kind: dtorKind))
        CGF.pushEHDestroy(dtorKind, addr: LHS.getAddress(), type: FieldType);
      return;
    }
  }

  // General case: evaluate the initializer expression into the field.
  CGF.EmitInitializerForField(Field, LHS, Init: MemberInit->getInit());
}
677
/// Emit the initialization of a single field from \p Init into the lvalue
/// \p LHS, dispatching on the field type's evaluation kind, and push an EH
/// cleanup to destroy the field if a later initializer throws.
void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
                                              Expr *Init) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(T: FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(init: Init, D: Field, lvalue: LHS, capturedByInit: false);
    } else {
      // Non-simple lvalue (e.g. bitfield): evaluate then store through it.
      RValue RHS = RValue::get(V: EmitScalarExpr(E: Init));
      EmitStoreThroughLValue(Src: RHS, Dst: LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(E: Init, dest: LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    AggValueSlot Slot = AggValueSlot::forLValue(
        LV: LHS, isDestructed: AggValueSlot::IsDestructed, needsGC: AggValueSlot::DoesNotNeedGCBarriers,
        isAliased: AggValueSlot::IsNotAliased, mayOverlap: getOverlapForFieldInit(FD: Field),
        isZeroed: AggValueSlot::IsNotZeroed,
        // Checks are made by the code that calls constructor.
        isChecked: AggValueSlot::IsSanitizerChecked);
    EmitAggExpr(E: Init, AS: Slot);
    break;
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(kind: dtorKind))
    pushEHDestroy(dtorKind, addr: LHS.getAddress(), type: FieldType);
}
711
712/// Checks whether the given constructor is a valid subject for the
713/// complete-to-base constructor delegation optimization, i.e.
714/// emitting the complete constructor as a simple call to the base
715/// constructor.
716bool CodeGenFunction::IsConstructorDelegationValid(
717 const CXXConstructorDecl *Ctor) {
718
719 // Currently we disable the optimization for classes with virtual
720 // bases because (1) the addresses of parameter variables need to be
721 // consistent across all initializers but (2) the delegate function
722 // call necessarily creates a second copy of the parameter variable.
723 //
724 // The limiting example (purely theoretical AFAIK):
725 // struct A { A(int &c) { c++; } };
726 // struct B : virtual A {
727 // B(int count) : A(count) { printf("%d\n", count); }
728 // };
729 // ...although even this example could in principle be emitted as a
730 // delegation since the address of the parameter doesn't escape.
731 if (Ctor->getParent()->getNumVBases()) {
732 // TODO: white-list trivial vbase initializers. This case wouldn't
733 // be subject to the restrictions below.
734
735 // TODO: white-list cases where:
736 // - there are no non-reference parameters to the constructor
737 // - the initializers don't access any non-reference parameters
738 // - the initializers don't take the address of non-reference
739 // parameters
740 // - etc.
741 // If we ever add any of the above cases, remember that:
742 // - function-try-blocks will always exclude this optimization
743 // - we need to perform the constructor prologue and cleanup in
744 // EmitConstructorBody.
745
746 return false;
747 }
748
749 // We also disable the optimization for variadic functions because
750 // it's impossible to "re-pass" varargs.
751 if (Ctor->getType()->castAs<FunctionProtoType>()->isVariadic())
752 return false;
753
754 // FIXME: Decide if we can do a delegation of a delegating constructor.
755 if (Ctor->isDelegatingConstructor())
756 return false;
757
758 return true;
759}
760
// Emit code in ctor (Prologue==true) or dtor (Prologue==false)
// to poison the extra field paddings inserted under
// -fsanitize-address-field-padding=1|2.
void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) {
  ASTContext &Context = getContext();
  // The current function is either a constructor or a destructor; in both
  // cases the class whose paddings we (un)poison is its parent record.
  const CXXRecordDecl *ClassDecl =
      Prologue ? cast<CXXConstructorDecl>(Val: CurGD.getDecl())->getParent()
               : cast<CXXDestructorDecl>(Val: CurGD.getDecl())->getParent();
  // Nothing to do unless this record may actually carry the extra
  // inter-field paddings inserted for the field-padding sanitizer.
  if (!ClassDecl->mayInsertExtraPadding())
    return;

  // Per-field size and offset, both in bytes (char units).
  struct SizeAndOffset {
    uint64_t Size;
    uint64_t Offset;
  };

  unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits();
  const ASTRecordLayout &Info = Context.getASTRecordLayout(D: ClassDecl);

  // Populate sizes and offsets of fields.
  SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount());
  for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i)
    SSV[i].Offset =
        Context.toCharUnitsFromBits(BitSize: Info.getFieldOffset(FieldNo: i)).getQuantity();

  size_t NumFields = 0;
  for (const auto *Field : ClassDecl->fields()) {
    const FieldDecl *D = Field;
    auto FieldInfo = Context.getTypeInfoInChars(T: D->getType());
    CharUnits FieldSize = FieldInfo.Width;
    assert(NumFields < SSV.size());
    // Record a zero size for bitfields; the filter below skips zero-size
    // entries, so bitfield "padding" is never poisoned.
    SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity();
    NumFields++;
  }
  assert(NumFields == SSV.size());
  // With at most one field there is no inter-field padding to (un)poison.
  if (SSV.size() <= 1)
    return;

  // We will insert calls to __asan_* run-time functions.
  // LLVM AddressSanitizer pass may decide to inline them later.
  llvm::Type *Args[2] = {IntPtrTy, IntPtrTy};
  llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGM.VoidTy, Params: Args, isVarArg: false);
  llvm::FunctionCallee F = CGM.CreateRuntimeFunction(
      Ty: FTy, Name: Prologue ? "__asan_poison_intra_object_redzone"
                  : "__asan_unpoison_intra_object_redzone");

  llvm::Value *ThisPtr = LoadCXXThis();
  ThisPtr = Builder.CreatePtrToInt(V: ThisPtr, DestTy: IntPtrTy);
  uint64_t TypeSize = Info.getNonVirtualSize().getQuantity();
  // For each field check if it has sufficient padding,
  // if so (un)poison it with a call.
  for (size_t i = 0; i < SSV.size(); i++) {
    uint64_t AsanAlignment = 8;
    // The padding of field i extends from its end up to the start of the
    // next field, or to the end of the non-virtual part of the object for
    // the last field.
    uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset;
    uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size;
    uint64_t EndOffset = SSV[i].Offset + SSV[i].Size;
    // Skip paddings smaller than the ASan shadow granule, zero-size
    // (bitfield) entries, and paddings that do not end on a granule
    // boundary.
    if (PoisonSize < AsanAlignment || !SSV[i].Size ||
        (NextField % AsanAlignment) != 0)
      continue;
    Builder.CreateCall(
        Callee: F, Args: {Builder.CreateAdd(LHS: ThisPtr, RHS: Builder.getIntN(N: PtrSize, C: EndOffset)),
            Builder.getIntN(N: PtrSize, C: PoisonSize)});
  }
}
825
/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  // Under -fsanitize-address-field-padding, poison the inter-field paddings
  // of the object before running any initialization.
  EmitAsanPrologueOrEpilogue(Prologue: true);
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(Val: CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
          CtorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    EmitDelegateCXXConstructorCall(Ctor, CtorType: Ctor_Base, Args, Loc: Ctor->getEndLoc());
    return;
  }

  const FunctionDecl *Definition = nullptr;
  Stmt *Body = Ctor->getBody(Definition);
  assert(Definition == Ctor && "emitting wrong constructor body");

  // Enter the function-try-block before the constructor prologue if
  // applicable. This way an exception thrown by a base or member
  // initializer is caught by the handler as the standard requires.
  bool IsTryBody = isa_and_nonnull<CXXTryStmt>(Val: Body);
  if (IsTryBody)
    EnterCXXTryStmt(S: *cast<CXXTryStmt>(Val: Body), IsFnTryBlock: true);

  incrementProfileCounter(S: Body);
  maybeCreateMCDCCondBitmap();

  // Scope for the EH cleanups pushed by the prologue (destructors for
  // already-constructed bases/members); they are forced below, before we
  // leave the try block.
  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(CD: Ctor, Type: CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(S: cast<CXXTryStmt>(Val: Body)->getTryBlock());
  else if (Body)
    EmitStmt(S: Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(S: *cast<CXXTryStmt>(Val: Body), IsFnTryBlock: true);
}
881
882namespace {
883/// RAII object to indicate that codegen is copying the value representation
884/// instead of the object representation. Useful when copying a struct or
885/// class which has uninitialized members and we're only performing
886/// lvalue-to-rvalue conversion on the object but not its members.
887class CopyingValueRepresentation {
888public:
889 explicit CopyingValueRepresentation(CodeGenFunction &CGF)
890 : CGF(CGF), OldSanOpts(CGF.SanOpts) {
891 CGF.SanOpts.set(K: SanitizerKind::Bool, Value: false);
892 CGF.SanOpts.set(K: SanitizerKind::Enum, Value: false);
893 }
894 ~CopyingValueRepresentation() { CGF.SanOpts = OldSanOpts; }
895
896private:
897 CodeGenFunction &CGF;
898 SanitizerSet OldSanOpts;
899};
900} // end anonymous namespace
901
902namespace {
/// FieldMemcpyizer - Accumulates runs of adjacent, trivially-copyable field
/// copies (from a source object into *this) so they can be lowered to a
/// single memcpy. Subclasses decide which initializers/statements are
/// eligible and when to flush the pending run.
class FieldMemcpyizer {
public:
  FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                  const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(D: ClassDecl)),
        FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
        LastFieldOffset(0), LastAddedFieldIndex(0) {}

  /// Returns true if F's storage may be blindly copied byte-by-byte.
  bool isMemcpyableField(FieldDecl *F) const {
    // Never memcpy fields when we are adding poisoned paddings.
    if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding)
      return false;
    // Volatile accesses and ARC-managed lifetimes can't be folded into a
    // plain memcpy.
    Qualifiers Qual = F->getType().getQualifiers();
    if (Qual.hasVolatile() || Qual.hasObjCLifetime())
      return false;
    // Address-discriminated pointer-auth values are bound to their storage
    // address, so moving the raw bytes would produce an invalid signature.
    if (PointerAuthQualifier Q = F->getType().getPointerAuth();
        Q && Q.isAddressDiscriminated())
      return false;
    // Non-trivially-copyable fields with pointer field protection need to be
    // copied one by one.
    if (!CGF.getContext().arePFPFieldsTriviallyCopyable(RD: ClassDecl) &&
        CGF.getContext().isPFPField(Field: F))
      return false;
    return true;
  }

  /// Extends the pending memcpy region to cover F. Zero-size fields are
  /// ignored since they occupy no storage.
  void addMemcpyableField(FieldDecl *F) {
    if (isEmptyFieldForLayout(Context: CGF.getContext(), FD: F))
      return;
    if (!FirstField)
      addInitialField(F);
    else
      addNextField(F);
  }

  /// Size in char units of the region [FirstByteOffset, end of LastField],
  /// rounded up to a whole number of chars.
  CharUnits getMemcpySize(uint64_t FirstByteOffset) const {
    ASTContext &Ctx = CGF.getContext();
    unsigned LastFieldSize =
        LastField->isBitField()
            ? LastField->getBitWidthValue()
            : Ctx.toBits(
                  CharSize: Ctx.getTypeInfoDataSizeInChars(T: LastField->getType()).Width);
    // + CharWidth - 1 rounds the bit count up before converting to chars.
    uint64_t MemcpySizeBits = LastFieldOffset + LastFieldSize -
                              FirstByteOffset + Ctx.getCharWidth() - 1;
    CharUnits MemcpySize = Ctx.toCharUnitsFromBits(BitSize: MemcpySizeBits);
    return MemcpySize;
  }

  /// Flushes the pending run as a single memcpy. No-op when no fields have
  /// been aggregated (FirstField is null, e.g. right after reset()).
  void emitMemcpy() {
    // Give the subclass a chance to bail out if it feels the memcpy isn't
    // worth it (e.g. Hasn't aggregated enough data).
    if (!FirstField) {
      return;
    }

    uint64_t FirstByteOffset;
    if (FirstField->isBitField()) {
      const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
      const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FD: FirstField);
      // FirstFieldOffset is not appropriate for bitfields,
      // we need to use the storage offset instead.
      FirstByteOffset = CGF.getContext().toBits(CharSize: BFInfo.StorageOffset);
    } else {
      FirstByteOffset = FirstFieldOffset;
    }

    CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
    CanQualType RecordTy = CGF.getContext().getCanonicalTagType(TD: ClassDecl);
    Address ThisPtr = CGF.LoadCXXThisAddress();
    LValue DestLV = CGF.MakeAddrLValue(Addr: ThisPtr, T: RecordTy);
    LValue Dest = CGF.EmitLValueForFieldInitialization(Base: DestLV, Field: FirstField);
    llvm::Value *SrcPtr = CGF.Builder.CreateLoad(Addr: CGF.GetAddrOfLocalVar(VD: SrcRec));
    LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(V: SrcPtr, T: RecordTy);
    LValue Src = CGF.EmitLValueForFieldInitialization(Base: SrcLV, Field: FirstField);

    // For bitfields, copy from the backing storage rather than the value.
    emitMemcpyIR(DestPtr: Dest.isBitField() ? Dest.getBitFieldAddress()
                                  : Dest.getAddress(),
                 SrcPtr: Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(),
                 Size: MemcpySize);
    reset();
  }

  /// Discards the pending run; the next added field starts a new one.
  void reset() { FirstField = nullptr; }

protected:
  CodeGenFunction &CGF;
  const CXXRecordDecl *ClassDecl;

private:
  // Emits the actual memcpy instruction over i8 pointers.
  void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) {
    DestPtr = DestPtr.withElementType(ElemTy: CGF.Int8Ty);
    SrcPtr = SrcPtr.withElementType(ElemTy: CGF.Int8Ty);
    auto *I = CGF.Builder.CreateMemCpy(Dest: DestPtr, Src: SrcPtr, Size: Size.getQuantity());
    CGF.addInstToCurrentSourceAtom(KeyInstruction: I, Backup: nullptr);
  }

  // Starts a new run consisting of just F.
  void addInitialField(FieldDecl *F) {
    FirstField = F;
    LastField = F;
    FirstFieldOffset = RecLayout.getFieldOffset(FieldNo: F->getFieldIndex());
    LastFieldOffset = FirstFieldOffset;
    LastAddedFieldIndex = F->getFieldIndex();
  }

  void addNextField(FieldDecl *F) {
    // For the most part, the following invariant will hold:
    //   F->getFieldIndex() == LastAddedFieldIndex + 1
    // The one exception is that Sema won't add a copy-initializer for an
    // unnamed bitfield, which will show up here as a gap in the sequence.
    assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
           "Cannot aggregate fields out of order.");
    LastAddedFieldIndex = F->getFieldIndex();

    // The 'first' and 'last' fields are chosen by offset, rather than field
    // index. This allows the code to support bitfields, as well as regular
    // fields.
    uint64_t FOffset = RecLayout.getFieldOffset(FieldNo: F->getFieldIndex());
    if (FOffset < FirstFieldOffset) {
      FirstField = F;
      FirstFieldOffset = FOffset;
    } else if (FOffset >= LastFieldOffset) {
      LastField = F;
      LastFieldOffset = FOffset;
    }
  }

  const VarDecl *SrcRec;              // Local variable holding the source object.
  const ASTRecordLayout &RecLayout;
  FieldDecl *FirstField;              // Null when no run is pending.
  FieldDecl *LastField;
  uint64_t FirstFieldOffset, LastFieldOffset; // In bits, from the record layout.
  unsigned LastAddedFieldIndex;
};
1038
/// ConstructorMemcpyizer - FieldMemcpyizer specialization that coalesces
/// member initializers of a defaulted copy/move constructor into memcpys.
class ConstructorMemcpyizer : public FieldMemcpyizer {
private:
  /// Get source argument for copy constructor. Returns null if not a copy
  /// constructor.
  static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF,
                                             const CXXConstructorDecl *CD,
                                             FunctionArgList &Args) {
    if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
      return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)];
    return nullptr;
  }

  // Returns true if a CXXCtorInitializer represents a member initialization
  // that can be rolled into a memcpy.
  bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
    if (!MemcpyableCtor)
      return false;
    FieldDecl *Field = MemberInit->getMember();
    assert(Field && "No field for member init.");
    QualType FieldType = Field->getType();
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Val: MemberInit->getInit());

    // Bail out on non-memcpyable, not-trivially-copyable members.
    if (!(CE &&
          isMemcpyEquivalentSpecialMember(CGM&: CGF.CGM, D: CE->getConstructor())) &&
        !(FieldType.isTriviallyCopyableType(Context: CGF.getContext()) ||
          FieldType->isReferenceType()))
      return false;

    // Bail out on volatile fields.
    if (!isMemcpyableField(F: Field))
      return false;

    // Otherwise we're good.
    return true;
  }

public:
  ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                        FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(),
                        getTrivialCopySource(CGF, CD, Args)),
        ConstructorDecl(CD),
        // Only defaulted copy/move ctors without GC are candidates at all.
        MemcpyableCtor(CD->isDefaulted() && CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) {}

  /// Either queues MemberInit for a future memcpy, or (if it is not
  /// memcpyable) flushes any queued initializers and emits it normally.
  void addMemberInitializer(CXXCtorInitializer *MemberInit) {
    if (isMemberInitMemcpyable(MemberInit)) {
      AggregatedInits.push_back(Elt: MemberInit);
      addMemcpyableField(F: MemberInit->getMember());
    } else {
      emitAggregatedInits();
      EmitMemberInitializer(CGF, ClassDecl: ConstructorDecl->getParent(), MemberInit,
                            Constructor: ConstructorDecl, Args);
    }
  }

  /// Flushes the queued initializers: a single one is emitted normally
  /// (memcpy would not pay off); two or more become one memcpy.
  void emitAggregatedInits() {
    if (AggregatedInits.size() <= 1) {
      // This memcpy is too small to be worthwhile. Fall back on default
      // codegen.
      if (!AggregatedInits.empty()) {
        CopyingValueRepresentation CVR(CGF);
        EmitMemberInitializer(CGF, ClassDecl: ConstructorDecl->getParent(),
                              MemberInit: AggregatedInits[0], Constructor: ConstructorDecl, Args);
        AggregatedInits.clear();
      }
      reset();
      return;
    }

    // The memcpy "constructs" the fields, so their EH destructors must be
    // armed before emitting it.
    pushEHDestructors();
    ApplyAtomGroup Grp(CGF.getDebugInfo());
    emitMemcpy();
    AggregatedInits.clear();
  }

  /// Pushes EH-only destroy cleanups for every queued field that needs one,
  /// mirroring what per-member initialization would have done.
  void pushEHDestructors() {
    Address ThisPtr = CGF.LoadCXXThisAddress();
    CanQualType RecordTy = CGF.getContext().getCanonicalTagType(TD: ClassDecl);
    LValue LHS = CGF.MakeAddrLValue(Addr: ThisPtr, T: RecordTy);

    for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
      CXXCtorInitializer *MemberInit = AggregatedInits[i];
      QualType FieldType = MemberInit->getAnyMember()->getType();
      QualType::DestructionKind dtorKind = FieldType.isDestructedType();
      if (!CGF.needsEHCleanup(kind: dtorKind))
        continue;
      LValue FieldLHS = LHS;
      EmitLValueForAnyFieldInitialization(CGF, MemberInit, LHS&: FieldLHS);
      CGF.pushEHDestroy(dtorKind, addr: FieldLHS.getAddress(), type: FieldType);
    }
  }

  /// Must be called after the last addMemberInitializer to flush the tail.
  void finish() { emitAggregatedInits(); }

private:
  const CXXConstructorDecl *ConstructorDecl;
  bool MemcpyableCtor; // True if this ctor is a candidate for memcpy folding.
  FunctionArgList &Args;
  SmallVector<CXXCtorInitializer *, 16> AggregatedInits; // Pending queue.
};
1142
/// AssignmentMemcpyizer - FieldMemcpyizer specialization that coalesces the
/// per-field statements of a copy/move assignment operator body into memcpys.
class AssignmentMemcpyizer : public FieldMemcpyizer {
private:
  // Returns the memcpyable field copied by the given statement, if one
  // exists. Otherwise returns null.
  //
  // Three statement shapes are recognized:
  //   1. a trivial assignment:          this->x = other.x
  //   2. a memcpy-equivalent call:      this->x.operator=(other.x)
  //   3. an explicit builtin memcpy:    __builtin_memcpy(&x, &other.x, n)
  FieldDecl *getMemcpyableField(Stmt *S) {
    if (!AssignmentsMemcpyable)
      return nullptr;
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: S)) {
      // Recognise trivial assignments.
      if (BO->getOpcode() != BO_Assign)
        return nullptr;
      MemberExpr *ME = dyn_cast<MemberExpr>(Val: BO->getLHS());
      if (!ME)
        return nullptr;
      FieldDecl *Field = dyn_cast<FieldDecl>(Val: ME->getMemberDecl());
      if (!Field || !isMemcpyableField(F: Field))
        return nullptr;
      // Look through an lvalue-to-rvalue (or similar) implicit cast on the
      // RHS to find the source member access.
      Stmt *RHS = BO->getRHS();
      if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(Val: RHS))
        RHS = EC->getSubExpr();
      if (!RHS)
        return nullptr;
      // The RHS must name the same field (on the source object).
      if (MemberExpr *ME2 = dyn_cast<MemberExpr>(Val: RHS)) {
        if (ME2->getMemberDecl() == Field)
          return Field;
      }
      return nullptr;
    } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(Val: S)) {
      // A call to a trivial, memcpy-equivalent special member function,
      // applied to the same field on both sides.
      CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: MCE->getCalleeDecl());
      if (!(MD && isMemcpyEquivalentSpecialMember(CGM&: CGF.CGM, D: MD)))
        return nullptr;
      MemberExpr *IOA = dyn_cast<MemberExpr>(Val: MCE->getImplicitObjectArgument());
      if (!IOA)
        return nullptr;
      FieldDecl *Field = dyn_cast<FieldDecl>(Val: IOA->getMemberDecl());
      if (!Field || !isMemcpyableField(F: Field))
        return nullptr;
      MemberExpr *Arg0 = dyn_cast<MemberExpr>(Val: MCE->getArg(Arg: 0));
      if (!Arg0 || Field != dyn_cast<FieldDecl>(Val: Arg0->getMemberDecl()))
        return nullptr;
      return Field;
    } else if (CallExpr *CE = dyn_cast<CallExpr>(Val: S)) {
      // An explicit __builtin_memcpy(&this->x, &other.x, ...) emitted by
      // Sema for array members.
      FunctionDecl *FD = dyn_cast<FunctionDecl>(Val: CE->getCalleeDecl());
      if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
        return nullptr;
      Expr *DstPtr = CE->getArg(Arg: 0);
      if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(Val: DstPtr))
        DstPtr = DC->getSubExpr();
      UnaryOperator *DUO = dyn_cast<UnaryOperator>(Val: DstPtr);
      if (!DUO || DUO->getOpcode() != UO_AddrOf)
        return nullptr;
      MemberExpr *ME = dyn_cast<MemberExpr>(Val: DUO->getSubExpr());
      if (!ME)
        return nullptr;
      FieldDecl *Field = dyn_cast<FieldDecl>(Val: ME->getMemberDecl());
      if (!Field || !isMemcpyableField(F: Field))
        return nullptr;
      Expr *SrcPtr = CE->getArg(Arg: 1);
      if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(Val: SrcPtr))
        SrcPtr = SC->getSubExpr();
      UnaryOperator *SUO = dyn_cast<UnaryOperator>(Val: SrcPtr);
      if (!SUO || SUO->getOpcode() != UO_AddrOf)
        return nullptr;
      MemberExpr *ME2 = dyn_cast<MemberExpr>(Val: SUO->getSubExpr());
      if (!ME2 || Field != dyn_cast<FieldDecl>(Val: ME2->getMemberDecl()))
        return nullptr;
      return Field;
    }

    return nullptr;
  }

  bool AssignmentsMemcpyable;            // False under ObjC GC: never fold.
  SmallVector<Stmt *, 16> AggregatedStmts; // Statements pending a memcpy.

public:
  AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                       FunctionArgList &Args)
      // The last argument of an assignment operator is the source object.
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
    assert(Args.size() == 2);
  }

  /// Either queues S for a future memcpy, or (if it is not a recognizable
  /// field copy) flushes the queue and emits S normally.
  void emitAssignment(Stmt *S) {
    FieldDecl *F = getMemcpyableField(S);
    if (F) {
      addMemcpyableField(F);
      AggregatedStmts.push_back(Elt: S);
    } else {
      emitAggregatedStmts();
      CGF.EmitStmt(S);
    }
  }

  /// Flushes the queued statements: a single one is emitted normally,
  /// two or more become one memcpy.
  void emitAggregatedStmts() {
    if (AggregatedStmts.size() <= 1) {
      if (!AggregatedStmts.empty()) {
        CopyingValueRepresentation CVR(CGF);
        CGF.EmitStmt(S: AggregatedStmts[0]);
      }
      reset();
      // Note: deliberately no early return here; after reset() the
      // emitMemcpy() below is a no-op (FirstField is null), so control
      // simply falls through to clear the queue.
    }

    ApplyAtomGroup Grp(CGF.getDebugInfo());
    emitMemcpy();
    AggregatedStmts.clear();
  }

  /// Must be called after the last emitAssignment to flush the tail.
  void finish() { emitAggregatedStmts(); }
};
1253
1254} // end anonymous namespace
1255
1256static bool isInitializerOfDynamicClass(const CXXCtorInitializer *BaseInit) {
1257 const Type *BaseType = BaseInit->getBaseClass();
1258 return BaseType->castAsCXXRecordDecl()->isDynamicClass();
1259}
1260
/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType,
                                       FunctionArgList &Args) {
  // A delegating constructor has no prologue of its own; it just forwards
  // to the target constructor.
  if (CD->isDelegatingConstructor())
    return EmitDelegatingCXXConstructorCall(Ctor: CD, Args);

  const CXXRecordDecl *ClassDecl = CD->getParent();

  // Virtual base initializers aren't needed if:
  // - This is a base ctor variant
  // - There are no vbases
  // - The class is abstract, so a complete object of it cannot be constructed
  //
  // The check for an abstract class is necessary because sema may not have
  // marked virtual base destructors referenced.
  bool ConstructVBases = CtorType != Ctor_Base &&
                         ClassDecl->getNumVBases() != 0 &&
                         !ClassDecl->isAbstract();

  // In the Microsoft C++ ABI, there are no constructor variants. Instead, the
  // constructor of a class with virtual bases takes an additional parameter to
  // conditionally construct the virtual bases. Emit that check here.
  llvm::BasicBlock *BaseCtorContinueBB = nullptr;
  if (ConstructVBases &&
      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    BaseCtorContinueBB =
        CGM.getCXXABI().EmitCtorCompleteObjectHandler(CGF&: *this, RD: ClassDecl);
    assert(BaseCtorContinueBB);
  }

  // Create three separate ranges for the different types of initializers.
  // Sema guarantees inits() is ordered: virtual bases first, then
  // non-virtual bases, then members, so two partition points suffice.
  auto AllInits = CD->inits();

  // Find the boundaries between the three groups.
  auto VirtualBaseEnd = std::find_if(
      first: AllInits.begin(), last: AllInits.end(), pred: [](const CXXCtorInitializer *Init) {
        return !(Init->isBaseInitializer() && Init->isBaseVirtual());
      });

  auto NonVirtualBaseEnd = std::find_if(first: VirtualBaseEnd, last: AllInits.end(),
                                        pred: [](const CXXCtorInitializer *Init) {
                                          return !Init->isBaseInitializer();
                                        });

  // Create the three ranges.
  auto VirtualBaseInits = llvm::make_range(x: AllInits.begin(), y: VirtualBaseEnd);
  auto NonVirtualBaseInits =
      llvm::make_range(x: VirtualBaseEnd, y: NonVirtualBaseEnd);
  auto MemberInits = llvm::make_range(x: NonVirtualBaseEnd, y: AllInits.end());

  // Process virtual base initializers, if necessary.
  if (ConstructVBases) {
    for (CXXCtorInitializer *Initializer : VirtualBaseInits) {
      SaveAndRestore ThisRAII(CXXThisValue);
      // Under -fstrict-vtable-pointers, launder 'this' so loads of the vptr
      // after the base ctor runs are not folded across the vptr store.
      if (CGM.getCodeGenOpts().StrictVTablePointers &&
          CGM.getCodeGenOpts().OptimizationLevel > 0 &&
          isInitializerOfDynamicClass(BaseInit: Initializer))
        CXXThisValue = Builder.CreateLaunderInvariantGroup(Ptr: LoadCXXThis());
      EmitBaseInitializer(CGF&: *this, ClassDecl, BaseInit: Initializer);
    }
  }

  if (BaseCtorContinueBB) {
    // Complete object handler should continue to the remaining initializers.
    Builder.CreateBr(Dest: BaseCtorContinueBB);
    EmitBlock(BB: BaseCtorContinueBB);
  }

  // Then, non-virtual base initializers.
  for (CXXCtorInitializer *Initializer : NonVirtualBaseInits) {
    assert(!Initializer->isBaseVirtual());
    SaveAndRestore ThisRAII(CXXThisValue);
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        isInitializerOfDynamicClass(BaseInit: Initializer))
      CXXThisValue = Builder.CreateLaunderInvariantGroup(Ptr: LoadCXXThis());
    EmitBaseInitializer(CGF&: *this, ClassDecl, BaseInit: Initializer);
  }

  // All bases are constructed; now set up this class's vtable pointers
  // before member initializers run (they may call virtual functions).
  InitializeVTablePointers(ClassDecl);

  // And finally, initialize class members.
  FieldConstructionScope FCS(*this, LoadCXXThisAddress());
  // ConstructorMemcpyizer coalesces adjacent trivially-copyable member
  // inits of a defaulted copy/move ctor into memcpys.
  ConstructorMemcpyizer CM(*this, CD, Args);
  for (CXXCtorInitializer *Member : MemberInits) {
    assert(!Member->isBaseInitializer());
    assert(Member->isAnyMemberInitializer() &&
           "Delegating initializer on non-delegating constructor");
    CM.addMemberInitializer(MemberInit: Member);
  }

  // Flush any trailing aggregated member initializers.
  CM.finish();
}
1356
1357static bool FieldHasTrivialDestructorBody(ASTContext &Context,
1358 const FieldDecl *Field);
1359
/// Returns true if destroying an object of type BaseClassDecl (viewed as a
/// subobject of MostDerivedClassDecl) runs no code at all: either the
/// destructor is trivial, or it has an empty body and the same holds
/// recursively for every field and base subobject.
static bool
HasTrivialDestructorBody(ASTContext &Context,
                         const CXXRecordDecl *BaseClassDecl,
                         const CXXRecordDecl *MostDerivedClassDecl) {
  // If the destructor is trivial we don't have to check anything else.
  if (BaseClassDecl->hasTrivialDestructor())
    return true;

  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
    return false;

  // Check fields.
  for (const auto *Field : BaseClassDecl->fields())
    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;

  // Check non-virtual bases.
  for (const auto &I : BaseClassDecl->bases()) {
    if (I.isVirtual())
      continue;

    const auto *NonVirtualBase = I.getType()->castAsCXXRecordDecl();
    if (!HasTrivialDestructorBody(Context, BaseClassDecl: NonVirtualBase,
                                  MostDerivedClassDecl))
      return false;
  }

  // Virtual bases are destroyed only by the most derived class's
  // destructor, so they are checked only at that level of the recursion.
  if (BaseClassDecl == MostDerivedClassDecl) {
    // Check virtual bases.
    for (const auto &I : BaseClassDecl->vbases()) {
      const auto *VirtualBase = I.getType()->castAsCXXRecordDecl();
      if (!HasTrivialDestructorBody(Context, BaseClassDecl: VirtualBase, MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}
1398
1399static bool FieldHasTrivialDestructorBody(ASTContext &Context,
1400 const FieldDecl *Field) {
1401 QualType FieldBaseElementType = Context.getBaseElementType(QT: Field->getType());
1402
1403 auto *FieldClassDecl = FieldBaseElementType->getAsCXXRecordDecl();
1404 if (!FieldClassDecl)
1405 return true;
1406
1407 // The destructor for an implicit anonymous union member is never invoked.
1408 if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
1409 return true;
1410
1411 return HasTrivialDestructorBody(Context, BaseClassDecl: FieldClassDecl, MostDerivedClassDecl: FieldClassDecl);
1412}
1413
1414/// CanSkipVTablePointerInitialization - Check whether we need to initialize
1415/// any vtable pointers before calling this destructor.
1416static bool CanSkipVTablePointerInitialization(CodeGenFunction &CGF,
1417 const CXXDestructorDecl *Dtor) {
1418 const CXXRecordDecl *ClassDecl = Dtor->getParent();
1419 if (!ClassDecl->isDynamicClass())
1420 return true;
1421
1422 // For a final class, the vtable pointer is known to already point to the
1423 // class's vtable.
1424 if (ClassDecl->isEffectivelyFinal())
1425 return true;
1426
1427 if (!Dtor->hasTrivialBody())
1428 return false;
1429
1430 // Check the fields.
1431 for (const auto *Field : ClassDecl->fields())
1432 if (!FieldHasTrivialDestructorBody(Context&: CGF.getContext(), Field))
1433 return false;
1434
1435 return true;
1436}
1437
/// Emits the array-destruction path at the top of a vector deleting
/// destructor (MS ABI). ShouldDeleteCondition is the implicit parameter
/// whose bits select the behavior, as tested below:
///   bit 0 (value 1): call operator delete after destruction
///   bit 1 (value 2): 'this' is an array - destroy every element
///   bit 2 (value 4): use the global ::operator delete[] rather than the
///                    class's own (only checked when the class has one)
/// If the array bit is clear, control falls through to the scalar path.
static void EmitConditionalArrayDtorCall(const CXXDestructorDecl *DD,
                                         CodeGenFunction &CGF,
                                         llvm::Value *ShouldDeleteCondition) {
  Address ThisPtr = CGF.LoadCXXThisAddress();
  llvm::BasicBlock *ScalarBB = CGF.createBasicBlock(name: "dtor.scalar");
  llvm::BasicBlock *callDeleteBB =
      CGF.createBasicBlock(name: "dtor.call_delete_after_array_destroy");
  llvm::BasicBlock *VectorBB = CGF.createBasicBlock(name: "dtor.vector");
  auto *CondTy = cast<llvm::IntegerType>(Val: ShouldDeleteCondition->getType());
  // Branch on bit 1: clear -> scalar destruction, set -> array destruction.
  llvm::Value *CheckTheBitForArrayDestroy = CGF.Builder.CreateAnd(
      LHS: ShouldDeleteCondition, RHS: llvm::ConstantInt::get(Ty: CondTy, V: 2));
  llvm::Value *ShouldDestroyArray =
      CGF.Builder.CreateIsNull(Arg: CheckTheBitForArrayDestroy);
  CGF.Builder.CreateCondBr(Cond: ShouldDestroyArray, True: ScalarBB, False: VectorBB);

  CGF.EmitBlock(BB: VectorBB);

  // Recover the element count and the original allocation from the array
  // cookie that precedes the elements.
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  QualType EltTy = DD->getThisType()->getPointeeType();
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr: ThisPtr, ElementType: EltTy, NumElements&: numElements,
                                      AllocPtr&: allocatedPtr, CookieSize&: cookieSize);

  // Destroy the elements.
  QualType::DestructionKind dtorKind = EltTy.isDestructedType();

  assert(dtorKind);
  assert(numElements && "no element count for a type with a destructor!");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(T: EltTy);
  CharUnits elementAlign =
      ThisPtr.getAlignment().alignmentOfArrayElement(elementSize);

  llvm::Value *arrayBegin = ThisPtr.emitRawPointer(CGF);
  llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
      Ty: ThisPtr.getElementType(), Ptr: arrayBegin, IdxList: numElements, Name: "delete.end");

  // We already checked that the array is not 0-length before entering vector
  // deleting dtor.
  CGF.emitArrayDestroy(begin: arrayBegin, end: arrayEnd, elementType: EltTy, elementAlign,
                       destroyer: CGF.getDestroyer(destructionKind: dtorKind),
                       /*checkZeroLength*/ false, useEHCleanup: CGF.needsEHCleanup(kind: dtorKind));

  llvm::BasicBlock *VectorBBCont = CGF.createBasicBlock(name: "dtor.vector.cont");
  CGF.EmitBlock(BB: VectorBBCont);

  // Branch on bit 0: clear -> just return, set -> call operator delete.
  llvm::Value *CheckTheBitForDeleteCall = CGF.Builder.CreateAnd(
      LHS: ShouldDeleteCondition, RHS: llvm::ConstantInt::get(Ty: CondTy, V: 1));

  llvm::Value *ShouldCallDelete =
      CGF.Builder.CreateIsNull(Arg: CheckTheBitForDeleteCall);
  CGF.Builder.CreateCondBr(Cond: ShouldCallDelete, True: CGF.ReturnBlock.getBlock(),
                           False: callDeleteBB);
  CGF.EmitBlock(BB: callDeleteBB);
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(Val: CGF.CurCodeDecl);
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  if (Dtor->getArrayOperatorDelete()) {
    if (!Dtor->getGlobalArrayOperatorDelete()) {
      // Only one candidate operator delete[]; call it unconditionally.
      CGF.EmitDeleteCall(DeleteFD: Dtor->getArrayOperatorDelete(), Ptr: allocatedPtr,
                         DeleteTy: CGF.getContext().getCanonicalTagType(TD: ClassDecl),
                         NumElements: numElements, CookieSize: cookieSize);
    } else {
      // If global operator[] is set, the class had its own operator delete[].
      // In that case, check the 4th bit. If it is set, we need to call
      // ::delete[].
      llvm::Value *CheckTheBitForGlobDeleteCall = CGF.Builder.CreateAnd(
          LHS: ShouldDeleteCondition, RHS: llvm::ConstantInt::get(Ty: CondTy, V: 4));

      llvm::Value *ShouldCallGlobDelete =
          CGF.Builder.CreateIsNull(Arg: CheckTheBitForGlobDeleteCall);
      llvm::BasicBlock *GlobDelete =
          CGF.createBasicBlock(name: "dtor.call_glob_delete_after_array_destroy");
      llvm::BasicBlock *ClassDelete =
          CGF.createBasicBlock(name: "dtor.call_class_delete_after_array_destroy");
      CGF.Builder.CreateCondBr(Cond: ShouldCallGlobDelete, True: ClassDelete, False: GlobDelete);
      CGF.EmitBlock(BB: ClassDelete);
      CGF.EmitDeleteCall(DeleteFD: Dtor->getArrayOperatorDelete(), Ptr: allocatedPtr,
                         DeleteTy: CGF.getContext().getCanonicalTagType(TD: ClassDecl),
                         NumElements: numElements, CookieSize: cookieSize);
      CGF.EmitBranchThroughCleanup(Dest: CGF.ReturnBlock);

      CGF.EmitBlock(BB: GlobDelete);
      CGF.EmitDeleteCall(DeleteFD: Dtor->getGlobalArrayOperatorDelete(), Ptr: allocatedPtr,
                         DeleteTy: CGF.getContext().getCanonicalTagType(TD: ClassDecl),
                         NumElements: numElements, CookieSize: cookieSize);
    }
  } else {
    // No operators delete[] were found, so emit a trap.
    llvm::CallInst *TrapCall = CGF.EmitTrapCall(IntrID: llvm::Intrinsic::trap);
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    CGF.Builder.CreateUnreachable();
    CGF.Builder.ClearInsertionPoint();
  }

  // Done with the array path: exit via cleanups, then resume the scalar
  // destructor body (reached directly when the array bit was clear).
  CGF.EmitBranchThroughCleanup(Dest: CGF.ReturnBlock);
  CGF.EmitBlock(BB: ScalarBB);
}
1537
/// EmitDestructorBody - Emits the body of the current destructor.
///
/// The current GlobalDecl (CurGD) selects which variant is emitted:
///  - deleting / vector-deleting: delegate to the complete destructor, then
///    the Dtor_Deleting cleanups free the storage;
///  - complete: destroy virtual bases and (when not a function-try-block)
///    delegate to the base variant;
///  - base: run the user-written body, then the epilogue cleanups destroy
///    members and non-virtual bases.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(Val: CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // For an abstract class, non-base destructors are never used (and can't
  // be emitted in general, because vbase dtors may not have been validated
  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
  // in fact emit references to them from other compilations, so emit them
  // as functions containing a trap instruction.
  if (DtorType != Dtor_Base && Dtor->getParent()->isAbstract()) {
    llvm::CallInst *TrapCall = EmitTrapCall(IntrID: llvm::Intrinsic::trap);
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
    return;
  }

  Stmt *Body = Dtor->getBody();
  if (Body) {
    incrementProfileCounter(S: Body);
    maybeCreateMCDCCondBitmap();
  }

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (DtorType == Dtor_Deleting || DtorType == Dtor_VectorDeleting) {
    // A vector deleting dtor with an implicit flag argument may be asked to
    // destroy a whole array; emit that conditional path before the scalar
    // delegation below.
    if (CXXStructorImplicitParamValue && DtorType == Dtor_VectorDeleting)
      EmitConditionalArrayDtorCall(DD: Dtor, CGF&: *this, ShouldDeleteCondition: CXXStructorImplicitParamValue);
    RunCleanupsScope DtorEpilogue(*this);
    // Pushes the (possibly conditional) operator delete call as a cleanup,
    // so it runs after the complete-dtor call emitted just below.
    EnterDtorCleanups(Dtor, Type: Dtor_Deleting);
    if (HaveInsertPoint()) {
      QualType ThisTy = Dtor->getFunctionObjectParameterType();
      EmitCXXDestructorCall(D: Dtor, Type: Dtor_Complete, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, This: LoadCXXThisAddress(), ThisTy);
    }
    return;
  }

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = isa_and_nonnull<CXXTryStmt>(Val: Body);
  if (isTryBody)
    EnterCXXTryStmt(S: *cast<CXXTryStmt>(Val: Body), IsFnTryBlock: true);
  EmitAsanPrologueOrEpilogue(Prologue: false);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (DtorType) {
  case Dtor_Unified:
    llvm_unreachable("not expecting a unified dtor");
  case Dtor_Comdat:
    llvm_unreachable("not expecting a COMDAT");
  case Dtor_Deleting:
    llvm_unreachable("already handled deleting case");
  case Dtor_VectorDeleting:
    llvm_unreachable("already handled vector deleting case");

  case Dtor_Complete:
    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Type: Dtor_Complete);

    if (!isTryBody) {
      QualType ThisTy = Dtor->getFunctionObjectParameterType();
      EmitCXXDestructorCall(D: Dtor, Type: Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, This: LoadCXXThisAddress(), ThisTy);
      break;
    }

    // Fallthrough: act like we're in the base variant.
    [[fallthrough]];

  case Dtor_Base:
    assert(Body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Type: Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(CGF&: *this, Dtor)) {
      // Insert the llvm.launder.invariant.group intrinsic before initializing
      // the vptrs to cancel any previous assumptions we might have made.
      if (CGM.getCodeGenOpts().StrictVTablePointers &&
          CGM.getCodeGenOpts().OptimizationLevel > 0)
        CXXThisValue = Builder.CreateLaunderInvariantGroup(Ptr: LoadCXXThis());
      InitializeVTablePointers(ClassDecl: Dtor->getParent());
    }

    if (isTryBody)
      EmitStmt(S: cast<CXXTryStmt>(Val: Body)->getTryBlock());
    else if (Body)
      EmitStmt(S: Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(Kind: llvm::Attribute::AlwaysInline);

    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(S: *cast<CXXTryStmt>(Val: Body), IsFnTryBlock: true);
}
1661
/// Emit the body of an implicitly-defined copy/move assignment operator.
///
/// Sema synthesizes such a body as a compound statement of member-wise
/// assignments; we walk it statement by statement, letting the
/// AssignmentMemcpyizer coalesce runs of adjacent trivial member copies
/// into single memcpy operations.
void CodeGenFunction::emitImplicitAssignmentOperatorBody(
    FunctionArgList &Args) {
  const auto *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const auto *BodyCS = cast<CompoundStmt>(RootS);

  LexicalScope BodyScope(*this, BodyCS->getSourceRange());

  incrementProfileCounter(BodyCS);
  maybeCreateMCDCCondBitmap();

  AssignmentMemcpyizer Memcpyizer(*this, AssignOp, Args);
  for (auto *Child : BodyCS->body())
    Memcpyizer.emitAssignment(Child);
  Memcpyizer.finish();
}
1680
1681namespace {
1682llvm::Value *LoadThisForDtorDelete(CodeGenFunction &CGF,
1683 const CXXDestructorDecl *DD) {
1684 if (Expr *ThisArg = DD->getOperatorDeleteThisArg())
1685 return CGF.EmitScalarExpr(E: ThisArg);
1686 return CGF.LoadCXXThis();
1687}
1688
1689/// Call the operator delete associated with the current destructor.
1690struct CallDtorDelete final : EHScopeStack::Cleanup {
1691 CallDtorDelete() {}
1692
1693 void Emit(CodeGenFunction &CGF, Flags flags) override {
1694 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(Val: CGF.CurCodeDecl);
1695 const CXXRecordDecl *ClassDecl = Dtor->getParent();
1696 CGF.EmitDeleteCall(DeleteFD: Dtor->getOperatorDelete(),
1697 Ptr: LoadThisForDtorDelete(CGF, DD: Dtor),
1698 DeleteTy: CGF.getContext().getCanonicalTagType(TD: ClassDecl));
1699 }
1700};
1701
// This function implements generation of scalar deleting destructor body for
// the case when the destructor also accepts an implicit flag. Right now only
// Microsoft ABI requires deleting destructors to accept implicit flags.
// The flag indicates whether an operator delete should be called and whether
// it should be a class-specific operator delete or a global one.
//
// Flag bits used below:
//  - bit 0: call operator delete at all;
//  - bit 2: the callsite used ::delete, i.e. the *global* operator delete
//    must be called instead of a class-specific (destroying) one.
//
// \param ShouldDeleteCondition the implicit integer flag argument.
// \param ReturnAfterDelete if true (destroying operator delete), branch to
//        the function's return block after the delete call instead of
//        falling through to the rest of the epilogue.
void EmitConditionalDtorDeleteCall(CodeGenFunction &CGF,
                                   llvm::Value *ShouldDeleteCondition,
                                   bool ReturnAfterDelete) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(Val: CGF.CurCodeDecl);
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  const FunctionDecl *OD = Dtor->getOperatorDelete();
  assert(OD->isDestroyingOperatorDelete() == ReturnAfterDelete &&
         "unexpected value for ReturnAfterDelete");
  auto *CondTy = cast<llvm::IntegerType>(Val: ShouldDeleteCondition->getType());
  // MSVC calls global operator delete inside of the dtor body, but clang
  // aligned with this behavior only after a particular version. This is not
  // ABI-compatible with previous versions.
  ASTContext &Context = CGF.getContext();
  bool CallGlobDelete = Context.getTargetInfo().callGlobalDeleteInDeletingDtor(
      Context.getLangOpts());
  if (CallGlobDelete && OD->isDestroyingOperatorDelete()) {
    llvm::BasicBlock *CallDtor = CGF.createBasicBlock(name: "dtor.call_dtor");
    llvm::BasicBlock *DontCallDtor = CGF.createBasicBlock(name: "dtor.entry_cont");
    // Third bit set signals that global operator delete is called. That means
    // despite class having destroying operator delete which is responsible
    // for calling dtor, we need to call dtor because global operator delete
    // won't do that.
    llvm::Value *Check3rdBit = CGF.Builder.CreateAnd(
        LHS: ShouldDeleteCondition, RHS: llvm::ConstantInt::get(Ty: CondTy, V: 4));
    llvm::Value *ShouldCallDtor = CGF.Builder.CreateIsNull(Arg: Check3rdBit);
    CGF.Builder.CreateCondBr(Cond: ShouldCallDtor, True: DontCallDtor, False: CallDtor);
    CGF.EmitBlock(BB: CallDtor);
    QualType ThisTy = Dtor->getFunctionObjectParameterType();
    CGF.EmitCXXDestructorCall(D: Dtor, Type: Dtor_Complete, /*ForVirtualBase=*/false,
                              /*Delegating=*/false, This: CGF.LoadCXXThisAddress(),
                              ThisTy);
    CGF.Builder.CreateBr(Dest: DontCallDtor);
    CGF.EmitBlock(BB: DontCallDtor);
  }
  llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock(name: "dtor.call_delete");
  llvm::BasicBlock *continueBB = CGF.createBasicBlock(name: "dtor.continue");
  // First bit set signals that operator delete must be called.
  llvm::Value *Check1stBit = CGF.Builder.CreateAnd(
      LHS: ShouldDeleteCondition, RHS: llvm::ConstantInt::get(Ty: CondTy, V: 1));
  llvm::Value *ShouldCallDelete = CGF.Builder.CreateIsNull(Arg: Check1stBit);
  CGF.Builder.CreateCondBr(Cond: ShouldCallDelete, True: continueBB, False: callDeleteBB);

  CGF.EmitBlock(BB: callDeleteBB);
  // Shared tail for both the global and class-specific delete paths.
  auto EmitDeleteAndGoToEnd = [&](const FunctionDecl *DeleteOp) {
    CGF.EmitDeleteCall(DeleteFD: DeleteOp, Ptr: LoadThisForDtorDelete(CGF, DD: Dtor),
                       DeleteTy: Context.getCanonicalTagType(TD: ClassDecl));
    if (ReturnAfterDelete)
      CGF.EmitBranchThroughCleanup(Dest: CGF.ReturnBlock);
    else
      CGF.Builder.CreateBr(Dest: continueBB);
  };
  // If Sema only found a global operator delete previously, the dtor can
  // always call it. Otherwise we need to check the third bit and call the
  // appropriate operator delete, i.e. global or class-specific.
  if (const FunctionDecl *GlobOD = Dtor->getOperatorGlobalDelete();
      isa<CXXMethodDecl>(Val: OD) && GlobOD && CallGlobDelete) {
    // Third bit set signals that global operator delete is called, i.e.
    // ::delete appears on the callsite.
    llvm::Value *CheckTheBitForGlobDeleteCall = CGF.Builder.CreateAnd(
        LHS: ShouldDeleteCondition, RHS: llvm::ConstantInt::get(Ty: CondTy, V: 4));
    llvm::Value *ShouldCallGlobDelete =
        CGF.Builder.CreateIsNull(Arg: CheckTheBitForGlobDeleteCall);
    llvm::BasicBlock *GlobDelete =
        CGF.createBasicBlock(name: "dtor.call_glob_delete");
    llvm::BasicBlock *ClassDelete =
        CGF.createBasicBlock(name: "dtor.call_class_delete");
    CGF.Builder.CreateCondBr(Cond: ShouldCallGlobDelete, True: ClassDelete, False: GlobDelete);
    CGF.EmitBlock(BB: GlobDelete);

    EmitDeleteAndGoToEnd(GlobOD);
    // Fall out of the if: the code below emits the class-specific path.
    CGF.EmitBlock(BB: ClassDelete);
  }
  EmitDeleteAndGoToEnd(OD);
  CGF.EmitBlock(BB: continueBB);
}
1782
/// Cleanup that calls operator delete only when the implicit should-delete
/// flag (passed to an MS ABI deleting destructor) requests it.
struct CallDtorDeleteConditional final : EHScopeStack::Cleanup {
  llvm::Value *ShouldDeleteCondition;

public:
  CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
    // The flag must be present; unconditional deletion uses CallDtorDelete.
    assert(ShouldDeleteCondition != nullptr);
  }

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Not a destroying operator delete here, so do not return early
    // after the delete call.
    EmitConditionalDtorDeleteCall(CGF, ShouldDeleteCondition,
                                  /*ReturnAfterDelete*/ false);
  }
};
1797
/// Cleanup that destroys a single non-static data member of the object
/// currently being destructed.
class DestroyField final : public EHScopeStack::Cleanup {
  const FieldDecl *field;                    // member to destroy
  CodeGenFunction::Destroyer *destroyer;     // how to destroy it
  bool useEHCleanupForArray;                 // guard partial array destruction

public:
  DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
               bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Project the field's lvalue out of *this.
    CanQualType parentTy =
        CGF.getContext().getCanonicalTagType(field->getParent());
    LValue thisLV =
        CGF.MakeAddrLValue(CGF.LoadCXXThisAddress(), parentTy);
    LValue fieldLV = CGF.EmitLValueForField(thisLV, field);
    assert(fieldLV.isSimple());

    // Only request an EH cleanup for partial arrays on the normal path;
    // on the EH path we are already inside a cleanup.
    bool wantEHCleanup = flags.isForNormalCleanup() && useEHCleanupForArray;
    CGF.emitDestroy(fieldLV.getAddress(), field->getType(), destroyer,
                    wantEHCleanup);
  }
};
1822
/// RAII helper that, while alive, makes emitted debug locations appear as
/// if \p Decl had been inlined at the current debug location. On
/// destruction the previous inline scope is restored. A null CGDebugInfo
/// (no -g) makes this a no-op.
class DeclAsInlineDebugLocation {
  CGDebugInfo *DI;
  llvm::MDNode *InlinedAt; // saved outer inline scope, restored in dtor
  std::optional<ApplyDebugLocation> Location;

public:
  DeclAsInlineDebugLocation(CodeGenFunction &CGF, const NamedDecl &Decl)
      : DI(CGF.getDebugInfo()) {
    if (!DI)
      return;
    InlinedAt = DI->getInlinedAt();
    DI->setInlinedAt(CGF.Builder.getCurrentDebugLocation());
    Location.emplace(args&: CGF, args: Decl.getLocation());
  }

  ~DeclAsInlineDebugLocation() {
    if (!DI)
      return;
    // Drop the applied location first, then restore the outer InlinedAt.
    Location.reset();
    DI->setInlinedAt(InlinedAt);
  }
};
1845
/// Emit a call to the named sanitizer dtor-callback runtime function,
/// passing the start of the poisoned region and, when provided, its size
/// in bytes.
static void EmitSanitizerDtorCallback(
    CodeGenFunction &CGF, StringRef Name, llvm::Value *Ptr,
    std::optional<CharUnits::QuantityType> PoisonSize = {}) {
  CodeGenFunction::SanitizerScope SanScope(&CGF);

  // The callback takes (void *ptr[, size_t size]).
  SmallVector<llvm::Value *, 2> CallArgs;
  SmallVector<llvm::Type *, 2> ParamTypes;
  CallArgs.push_back(Ptr);
  ParamTypes.push_back(CGF.VoidPtrTy);
  if (PoisonSize) {
    CallArgs.push_back(llvm::ConstantInt::get(CGF.SizeTy, *PoisonSize));
    ParamTypes.push_back(CGF.SizeTy);
  }

  llvm::FunctionType *CalleeTy =
      llvm::FunctionType::get(CGF.VoidTy, ParamTypes, /*isVarArg=*/false);
  llvm::FunctionCallee Callee = CGF.CGM.CreateRuntimeFunction(CalleeTy, Name);
  CGF.EmitNounwindRuntimeCall(Callee, CallArgs);
}
1866
/// Convenience wrapper: poison \p PoisonSize bytes of field storage
/// starting at \p Ptr via the __sanitizer_dtor_callback_fields runtime hook.
static void
EmitSanitizerDtorFieldsCallback(CodeGenFunction &CGF, llvm::Value *Ptr,
                                CharUnits::QuantityType PoisonSize) {
  EmitSanitizerDtorCallback(CGF, Name: "__sanitizer_dtor_callback_fields", Ptr,
                            PoisonSize);
}
1873
/// Poison base class with a trivial destructor.
///
/// Trivial bases never get their own sanitizing dtor, so the derived
/// class's dtor epilogue poisons their storage directly.
struct SanitizeDtorTrivialBase final : EHScopeStack::Cleanup {
  const CXXRecordDecl *BaseClass;
  bool BaseIsVirtual;
  SanitizeDtorTrivialBase(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // The enclosing dtor belongs to the derived class; locate the base
    // subobject within the complete object being destroyed.
    const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(Val: CGF.CurCodeDecl)->getParent();

    Address Addr = CGF.GetAddressOfDirectBaseInCompleteClass(
        This: CGF.LoadCXXThisAddress(), Derived: DerivedClass, Base: BaseClass, BaseIsVirtual);

    const ASTRecordLayout &BaseLayout =
        CGF.getContext().getASTRecordLayout(D: BaseClass);
    CharUnits BaseSize = BaseLayout.getSize();

    // Nothing to poison for a zero-sized base subobject.
    if (!BaseSize.isPositive())
      return;

    // Use the base class declaration location as inline DebugLocation. All
    // fields of the class are destroyed.
    DeclAsInlineDebugLocation InlineHere(CGF, *BaseClass);
    EmitSanitizerDtorFieldsCallback(CGF, Ptr: Addr.emitRawPointer(CGF),
                                    PoisonSize: BaseSize.getQuantity());

    // Prevent the current stack frame from disappearing from the stack trace.
    CGF.CurFn->addFnAttr(Kind: "disable-tail-calls", Val: "true");
  }
};
1905
/// Poison the storage of the fields with indices [StartIndex, EndIndex)
/// of the class destroyed by \p Dtor. An EndIndex past the last field
/// means "poison through the end of the (non-virtual) object".
class SanitizeDtorFieldRange final : public EHScopeStack::Cleanup {
  const CXXDestructorDecl *Dtor;
  unsigned StartIndex;
  unsigned EndIndex;

public:
  SanitizeDtorFieldRange(const CXXDestructorDecl *Dtor, unsigned StartIndex,
                         unsigned EndIndex)
      : Dtor(Dtor), StartIndex(StartIndex), EndIndex(EndIndex) {}

  // Generate function call for handling object poisoning.
  // Disables tail call elimination, to prevent the current stack frame
  // from disappearing from the stack trace.
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ASTContext &Context = CGF.getContext();
    const ASTRecordLayout &Layout =
        Context.getASTRecordLayout(D: Dtor->getParent());

    // It's the first trivial field so it should be at the beginning of a
    // char; still round the start offset up to a char boundary just in case
    // (field offsets are in bits).
    CharUnits PoisonStart = Context.toCharUnitsFromBits(
        BitSize: Layout.getFieldOffset(FieldNo: StartIndex) + Context.getCharWidth() - 1);
    llvm::ConstantInt *OffsetSizePtr =
        llvm::ConstantInt::get(Ty: CGF.SizeTy, V: PoisonStart.getQuantity());

    // Compute this + PoisonStart as a raw byte offset.
    llvm::Value *OffsetPtr =
        CGF.Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: CGF.LoadCXXThis(), IdxList: OffsetSizePtr);

    // The range ends either at the next field or, for a trailing run, at
    // the end of the non-virtual part of the object.
    CharUnits PoisonEnd;
    if (EndIndex >= Layout.getFieldCount()) {
      PoisonEnd = Layout.getNonVirtualSize();
    } else {
      PoisonEnd = Context.toCharUnitsFromBits(BitSize: Layout.getFieldOffset(FieldNo: EndIndex));
    }
    CharUnits PoisonSize = PoisonEnd - PoisonStart;
    if (!PoisonSize.isPositive())
      return;

    // Use the top field declaration location as inline DebugLocation.
    DeclAsInlineDebugLocation InlineHere(
        CGF, **std::next(x: Dtor->getParent()->field_begin(), n: StartIndex));
    EmitSanitizerDtorFieldsCallback(CGF, Ptr: OffsetPtr, PoisonSize: PoisonSize.getQuantity());

    // Prevent the current stack frame from disappearing from the stack trace.
    CGF.CurFn->addFnAttr(Kind: "disable-tail-calls", Val: "true");
  }
};
1953
/// Cleanup that poisons an object's vtable pointer after destruction under
/// -fsanitize-memory-use-after-dtor.
class SanitizeDtorVTable final : public EHScopeStack::Cleanup {
  const CXXDestructorDecl *Dtor;

public:
  SanitizeDtorVTable(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {}

  // Generate function call for handling vtable pointer poisoning.
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Only dynamic classes carry a vptr; Dtor is kept solely for this check.
    assert(Dtor->getParent()->isDynamicClass());
    (void)Dtor;

    // The vptr callback takes just the object pointer (no size), so pass
    // 'this' directly.
    llvm::Value *ObjPtr = CGF.LoadCXXThis();
    EmitSanitizerDtorCallback(CGF, "__sanitizer_dtor_callback_vptr", ObjPtr);
  }
};
1972
/// Collects consecutive runs of trivially-destructible fields and pushes a
/// single SanitizeDtorFieldRange cleanup per run, so each run's storage is
/// poisoned together in the dtor epilogue. Fields with nontrivial dtors are
/// poisoned by their own destructors and act as run separators here.
class SanitizeDtorCleanupBuilder {
  ASTContext &Context;
  EHScopeStack &EHStack;
  const CXXDestructorDecl *DD;
  std::optional<unsigned> StartIndex; // first field of the current run

public:
  SanitizeDtorCleanupBuilder(ASTContext &Context, EHScopeStack &EHStack,
                             const CXXDestructorDecl *DD)
      : Context(Context), EHStack(EHStack), DD(DD), StartIndex(std::nullopt) {}

  void PushCleanupForField(const FieldDecl *Field) {
    // Storage-less fields never need poisoning.
    if (isEmptyFieldForLayout(Context, Field))
      return;
    unsigned Index = Field->getFieldIndex();
    if (FieldHasTrivialDestructorBody(Context, Field)) {
      // Begin a new run, or silently extend the current one.
      if (!StartIndex)
        StartIndex = Index;
      return;
    }
    // A nontrivially-destructed field terminates the run: flush it.
    if (StartIndex) {
      EHStack.pushCleanup<SanitizeDtorFieldRange>(NormalAndEHCleanup, DD,
                                                  *StartIndex, Index);
      StartIndex.reset();
    }
  }

  void End() {
    // Flush a trailing run; -1 means "through the end of the object".
    if (StartIndex)
      EHStack.pushCleanup<SanitizeDtorFieldRange>(NormalAndEHCleanup, DD,
                                                  *StartIndex, -1);
  }
};
2002} // end anonymous namespace
2003
/// Emit all code that comes at the end of class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
///
/// For a deleting destructor, this also handles the case where a destroying
/// operator delete completely overrides the definition.
///
/// Cleanups are pushed in forward (declaration) order; the cleanup stack
/// pops them in reverse, giving the required destruction order.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) &&
         "Should not emit dtor epilogue for non-exported trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EnterDtorCleanups");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether this is a deleting destructor.
      if (DD->getOperatorDelete()->isDestroyingOperatorDelete())
        EmitConditionalDtorDeleteCall(CGF&: *this, ShouldDeleteCondition: CXXStructorImplicitParamValue,
                                      /*ReturnAfterDelete*/ true);
      else
        EHStack.pushCleanup<CallDtorDeleteConditional>(
            Kind: NormalAndEHCleanup, A: CXXStructorImplicitParamValue);
    } else {
      if (DD->getOperatorDelete()->isDestroyingOperatorDelete()) {
        // A destroying operator delete overrides the destructor body
        // entirely: call it and branch straight to the return block.
        const CXXRecordDecl *ClassDecl = DD->getParent();
        EmitDeleteCall(DeleteFD: DD->getOperatorDelete(),
                       Ptr: LoadThisForDtorDelete(CGF&: *this, DD),
                       DeleteTy: getContext().getCanonicalTagType(TD: ClassDecl));
        EmitBranchThroughCleanup(Dest: ReturnBlock);
      } else {
        EHStack.pushCleanup<CallDtorDelete>(Kind: NormalAndEHCleanup);
      }
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destructs all the virtual bases.
  if (DtorType == Dtor_Complete) {
    // Poison the vtable pointer such that access after the base
    // and member destructors are invoked is invalid.
    if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
        SanOpts.has(K: SanitizerKind::Memory) && ClassDecl->getNumVBases() &&
        ClassDecl->isPolymorphic())
      EHStack.pushCleanup<SanitizeDtorVTable>(Kind: NormalAndEHCleanup, A: DD);

    // We push them in the forward order so that they'll be popped in
    // the reverse order.
    for (const auto &Base : ClassDecl->vbases()) {
      auto *BaseClassDecl = Base.getType()->castAsCXXRecordDecl();
      if (BaseClassDecl->hasTrivialDestructor()) {
        // Under SanitizeMemoryUseAfterDtor, poison the trivial base class
        // memory. For non-trival base classes the same is done in the class
        // destructor.
        if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
            SanOpts.has(K: SanitizerKind::Memory) && !BaseClassDecl->isEmpty())
          EHStack.pushCleanup<SanitizeDtorTrivialBase>(Kind: NormalAndEHCleanup,
                                                       A: BaseClassDecl,
                                                       /*BaseIsVirtual*/ A: true);
      } else {
        EHStack.pushCleanup<CallBaseDtor>(Kind: NormalAndEHCleanup, A: BaseClassDecl,
                                          /*BaseIsVirtual*/ A: true);
      }
    }

    return;
  }

  // The base-destructor phase: non-virtual bases and fields.
  assert(DtorType == Dtor_Base);
  // Poison the vtable pointer if it has no virtual bases, but inherits
  // virtual functions.
  if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
      SanOpts.has(K: SanitizerKind::Memory) && !ClassDecl->getNumVBases() &&
      ClassDecl->isPolymorphic())
    EHStack.pushCleanup<SanitizeDtorVTable>(Kind: NormalAndEHCleanup, A: DD);

  // Destroy non-virtual bases.
  for (const auto &Base : ClassDecl->bases()) {
    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    if (BaseClassDecl->hasTrivialDestructor()) {
      if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
          SanOpts.has(K: SanitizerKind::Memory) && !BaseClassDecl->isEmpty())
        EHStack.pushCleanup<SanitizeDtorTrivialBase>(Kind: NormalAndEHCleanup,
                                                     A: BaseClassDecl,
                                                     /*BaseIsVirtual*/ A: false);
    } else {
      EHStack.pushCleanup<CallBaseDtor>(Kind: NormalAndEHCleanup, A: BaseClassDecl,
                                        /*BaseIsVirtual*/ A: false);
    }
  }

  // Poison fields such that access after their destructors are
  // invoked, and before the base class destructor runs, is invalid.
  bool SanitizeFields = CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
                        SanOpts.has(K: SanitizerKind::Memory);
  SanitizeDtorCleanupBuilder SanitizeBuilder(getContext(), EHStack, DD);

  // Destroy direct fields.
  for (const auto *Field : ClassDecl->fields()) {
    if (SanitizeFields)
      SanitizeBuilder.PushCleanupForField(Field);

    QualType type = Field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind)
      continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion())
      continue;

    CleanupKind cleanupKind = getCleanupKind(kind: dtorKind);
    EHStack.pushCleanup<DestroyField>(
        Kind: cleanupKind, A: Field, A: getDestroyer(destructionKind: dtorKind), A: cleanupKind & EHCleanup);
  }

  // Flush any trailing run of trivial fields collected above.
  if (SanitizeFields)
    SanitizeBuilder.End();
}
2137
2138/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
2139/// constructor for each of several members of an array.
2140///
2141/// \param ctor the constructor to call for each element
2142/// \param arrayType the type of the array to initialize
2143/// \param arrayBegin an arrayType*
2144/// \param zeroInitialize true if each element should be
2145/// zero-initialized before it is constructed
2146void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
2147 const ArrayType *arrayType,
2148 Address arrayBegin,
2149 const CXXConstructExpr *E,
2150 bool NewPointerIsChecked,
2151 bool zeroInitialize) {
2152 QualType elementType;
2153 llvm::Value *numElements =
2154 emitArrayLength(arrayType, baseType&: elementType, addr&: arrayBegin);
2155
2156 EmitCXXAggrConstructorCall(D: ctor, NumElements: numElements, ArrayPtr: arrayBegin, E,
2157 NewPointerIsChecked, ZeroInitialization: zeroInitialize);
2158}
2159
/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBase a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(
    const CXXConstructorDecl *ctor, llvm::Value *numElements, Address arrayBase,
    const CXXConstructExpr *E, bool NewPointerIsChecked, bool zeroInitialize) {
  // It's legal for numElements to be zero. This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays. There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = nullptr;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount = dyn_cast<llvm::ConstantInt>(Val: numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero())
      return;

    // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock(name: "new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(Arg: numElements, Name: "isempty");
    // Both edges of this branch deliberately target loopBB for now: the
    // continuation block doesn't exist yet. The "is empty" edge is patched
    // to skip the loop once contBB is created (see setSuccessor below).
    zeroCheckBranch = Builder.CreateCondBr(Cond: iszero, True: loopBB, False: loopBB);
    EmitBlock(BB: loopBB);
  }

  // Find the end of the array.
  llvm::Type *elementType = arrayBase.getElementType();
  llvm::Value *arrayBegin = arrayBase.emitRawPointer(CGF&: *this);
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(
      Ty: elementType, Ptr: arrayBegin, IdxList: numElements, Name: "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock(name: "arrayctor.loop");
  EmitBlock(BB: loopBB);
  llvm::PHINode *cur =
      Builder.CreatePHI(Ty: arrayBegin->getType(), NumReservedValues: 2, Name: "arrayctor.cur");
  cur->addIncoming(V: arrayBegin, BB: entryBB);

  // Inside the loop body, emit the constructor call on the array element.
  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(Elt: emitConvergenceLoopToken(BB: loopBB));

  // The alignment of the base, adjusted by the size of a single element,
  // provides a conservative estimate of the alignment of every element.
  // (This assumes we never start tracking offsetted alignments.)
  //
  // Note that these are complete objects and so we don't need to
  // use the non-virtual size or alignment.
  CanQualType type = getContext().getCanonicalTagType(TD: ctor->getParent());
  CharUnits eltAlignment = arrayBase.getAlignment().alignmentOfArrayElement(
      elementSize: getContext().getTypeSizeInChars(T: type));
  Address curAddr = Address(cur, elementType, eltAlignment);

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(DestPtr: curAddr, Ty: type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, arrayEnd: cur, elementType: type, elementAlignment: eltAlignment,
                                     destroyer: *destroyer);
    }
    auto currAVS = AggValueSlot::forAddr(
        addr: curAddr, quals: type.getQualifiers(), isDestructed: AggValueSlot::IsDestructed,
        needsGC: AggValueSlot::DoesNotNeedGCBarriers, isAliased: AggValueSlot::IsNotAliased,
        mayOverlap: AggValueSlot::DoesNotOverlap, isZeroed: AggValueSlot::IsNotZeroed,
        isChecked: NewPointerIsChecked ? AggValueSlot::IsSanitizerChecked
                            : AggValueSlot::IsNotSanitizerChecked);
    EmitCXXConstructorCall(D: ctor, Type: Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, ThisAVS: currAVS, E);
  }

  // Go to the next element.
  llvm::Value *next = Builder.CreateInBoundsGEP(
      Ty: elementType, Ptr: cur, IdxList: llvm::ConstantInt::get(Ty: SizeTy, V: 1), Name: "arrayctor.next");
  cur->addIncoming(V: next, BB: Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(LHS: next, RHS: arrayEnd, Name: "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock(name: "arrayctor.cont");
  Builder.CreateCondBr(Cond: done, True: contBB, False: loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch)
    zeroCheckBranch->setSuccessor(idx: 0, NewSucc: contBB);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();

  EmitBlock(BB: contBB);
}
2275
/// Destroyer callback for class-type objects: invokes the complete-object
/// destructor on the object at \p addr. The destructor must be nontrivial
/// (trivial destruction is handled without a destroyer).
void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF, Address addr,
                                       QualType type) {
  const CXXRecordDecl *record = type->castAsCXXRecordDecl();
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete,
                            /*ForVirtualBase=*/false,
                            /*Delegating=*/false, addr, type);
}
2283
/// Emit a call to constructor \p D on the object in \p ThisAVS, evaluating
/// the arguments of \p E. Trivial copy/move constructors are lowered to an
/// aggregate copy instead of a call.
void CodeGenFunction::EmitCXXConstructorCall(
    const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase,
    bool Delegating, AggValueSlot ThisAVS, const CXXConstructExpr *E) {
  CallArgList Args;
  Address This = ThisAVS.getAddress();
  LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace();
  LangAS ThisAS = D->getFunctionObjectParameterType().getAddressSpace();
  llvm::Value *ThisPtr =
      getAsNaturalPointerTo(Addr: This, PointeeType: D->getThisType()->getPointeeType());

  // If the slot lives in a different address space than the constructor's
  // 'this' expects, cast the pointer to the expected address space.
  if (SlotAS != ThisAS) {
    unsigned TargetThisAS = getContext().getTargetAddressSpace(AS: ThisAS);
    llvm::Type *NewType =
        llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: TargetThisAS);
    ThisPtr = performAddrSpaceCast(Src: ThisPtr, DestTy: NewType);
  }

  // Push the this ptr.
  Args.add(rvalue: RValue::get(V: ThisPtr), type: D->getThisType());

  // If this is a trivial constructor, emit a memcpy now before we lose
  // the alignment information on the argument.
  // FIXME: It would be better to preserve alignment information into CallArg.
  if (isMemcpyEquivalentSpecialMember(CGM, D)) {
    assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");

    const Expr *Arg = E->getArg(Arg: 0);
    LValue Src = EmitLValue(E: Arg);
    CanQualType DestTy = getContext().getCanonicalTagType(TD: D->getParent());
    LValue Dest = MakeAddrLValue(Addr: This, T: DestTy);
    EmitAggregateCopyCtor(Dest, Src, MayOverlap: ThisAVS.mayOverlap());
    return;
  }

  // Add the rest of the user-supplied arguments. List-initialization
  // forces strict left-to-right evaluation order.
  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
  EvaluationOrder Order = E->isListInitialization()
                              ? EvaluationOrder::ForceLeftToRight
                              : EvaluationOrder::Default;
  EmitCallArgs(Args, Prototype: FPT, ArgRange: E->arguments(), AC: E->getConstructor(),
               /*ParamsToSkip*/ 0, Order);

  EmitCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args,
                         Overlap: ThisAVS.mayOverlap(), Loc: E->getExprLoc(),
                         NewPointerIsChecked: ThisAVS.isSanitizerChecked());
}
2330
2331static bool canEmitDelegateCallArgs(CodeGenFunction &CGF,
2332 const CXXConstructorDecl *Ctor,
2333 CXXCtorType Type, CallArgList &Args) {
2334 // We can't forward a variadic call.
2335 if (Ctor->isVariadic())
2336 return false;
2337
2338 if (CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2339 // If the parameters are callee-cleanup, it's not safe to forward.
2340 for (auto *P : Ctor->parameters())
2341 if (P->needsDestruction(Ctx: CGF.getContext()))
2342 return false;
2343
2344 // Likewise if they're inalloca.
2345 const CGFunctionInfo &Info =
2346 CGF.CGM.getTypes().arrangeCXXConstructorCall(Args, D: Ctor, CtorKind: Type, ExtraPrefixArgs: 0, ExtraSuffixArgs: 0);
2347 if (Info.usesInAlloca())
2348 return false;
2349 }
2350
2351 // Anything else should be OK.
2352 return true;
2353}
2354
/// Emit a call to constructor \p D of kind \p Type on the object at \p This,
/// using the already-evaluated arguments in \p Args ('this' at index 0).
///
/// Trivial constructors are lowered to nothing or an aggregate copy;
/// inherited constructors whose arguments cannot be forwarded are inlined
/// instead of called; otherwise a direct call is emitted after any
/// ABI-specific implicit arguments (e.g. the VTT) are inserted.
void CodeGenFunction::EmitCXXConstructorCall(
    const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase,
    bool Delegating, Address This, CallArgList &Args,
    AggValueSlot::Overlap_t Overlap, SourceLocation Loc,
    bool NewPointerIsChecked, llvm::CallBase **CallOrInvoke) {
  const CXXRecordDecl *ClassDecl = D->getParent();

  // Sanitizer type check on the pointer unless the caller already did it.
  if (!NewPointerIsChecked)
    EmitTypeCheck(TCK: CodeGenFunction::TCK_ConstructorCall, Loc, Addr: This,
                  Type: getContext().getCanonicalTagType(TD: ClassDecl),
                  Alignment: CharUnits::Zero());

  // A trivial default constructor performs no initialization at all.
  if (D->isTrivial() && D->isDefaultConstructor()) {
    assert(Args.size() == 1 && "trivial default ctor with args");
    return;
  }

  // If this is a trivial constructor, just emit what's needed. If this is a
  // union copy constructor, we must emit a memcpy, because the AST does not
  // model that copy.
  if (isMemcpyEquivalentSpecialMember(CGM, D)) {
    assert(Args.size() == 2 && "unexpected argcount for trivial ctor");
    QualType SrcTy = D->getParamDecl(i: 0)->getType().getNonReferenceType();
    Address Src = makeNaturalAddressForPointer(
        Ptr: Args[1].getRValue(CGF&: *this).getScalarVal(), T: SrcTy);
    LValue SrcLVal = MakeAddrLValue(Addr: Src, T: SrcTy);
    CanQualType DestTy = getContext().getCanonicalTagType(TD: ClassDecl);
    LValue DestLVal = MakeAddrLValue(Addr: This, T: DestTy);
    EmitAggregateCopyCtor(Dest: DestLVal, Src: SrcLVal, MayOverlap: Overlap);
    return;
  }

  bool PassPrototypeArgs = true;
  // Check whether we can actually emit the constructor before trying to do so.
  if (auto Inherited = D->getInheritedConstructor()) {
    PassPrototypeArgs = getTypes().inheritingCtorHasParams(Inherited, Type);
    if (PassPrototypeArgs && !canEmitDelegateCallArgs(CGF&: *this, Ctor: D, Type, Args)) {
      // Arguments cannot be forwarded as a call (variadic or callee-cleanup);
      // inline the inherited constructor's initialization instead.
      EmitInlinedInheritingCXXConstructorCall(Ctor: D, CtorType: Type, ForVirtualBase,
                                              Delegating, Args);
      return;
    }
  }

  // Insert any ABI-specific implicit constructor arguments.
  CGCXXABI::AddedStructorArgCounts ExtraArgs =
      CGM.getCXXABI().addImplicitConstructorArgs(CGF&: *this, D, Type, ForVirtualBase,
                                                 Delegating, Args);

  // Emit the call.
  llvm::Constant *CalleePtr = CGM.getAddrOfCXXStructor(GD: GlobalDecl(D, Type));
  const CGFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall(
      Args, D, CtorKind: Type, ExtraPrefixArgs: ExtraArgs.Prefix, ExtraSuffixArgs: ExtraArgs.Suffix, PassProtoArgs: PassPrototypeArgs);
  CGCallee Callee = CGCallee::forDirect(functionPtr: CalleePtr, abstractInfo: GlobalDecl(D, Type));
  EmitCall(CallInfo: Info, Callee, ReturnValue: ReturnValueSlot(), Args, CallOrInvoke, IsMustTail: false, Loc);

  // Generate vtable assumptions if we're constructing a complete object
  // with a vtable.  We don't do this for base subobjects for two reasons:
  // first, it's incorrect for classes with virtual bases, and second, we're
  // about to overwrite the vptrs anyway.
  // We also have to make sure if we can refer to vtable:
  // - Otherwise we can refer to vtable if it's safe to speculatively emit.
  // FIXME: If vtable is used by ctor/dtor, or if vtable is external and we are
  // sure that definition of vtable is not hidden,
  // then we are always safe to refer to it.
  // FIXME: It looks like InstCombine is very inefficient on dealing with
  // assumes. Make assumption loads require -fstrict-vtable-pointers
  // temporarily.
  if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      ClassDecl->isDynamicClass() && Type != Ctor_Base &&
      CGM.getCXXABI().canSpeculativelyEmitVTable(RD: ClassDecl) &&
      CGM.getCodeGenOpts().StrictVTablePointers)
    EmitVTableAssumptionLoads(ClassDecl, This);
}
2428
/// Emit a call to the constructor that a C++11 inheriting constructor
/// ('using Base::Base') inherits from.
///
/// The arguments come from one of three places: only 'this' when the virtual
/// base containing the inherited constructor is constructed elsewhere, the
/// saved arguments of a previously inlined inheriting constructor, or the
/// parameters of the enclosing constructor forwarded one by one.
void CodeGenFunction::EmitInheritedCXXConstructorCall(
    const CXXConstructorDecl *D, bool ForVirtualBase, Address This,
    bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) {
  CallArgList Args;
  CallArg ThisArg(RValue::get(V: getAsNaturalPointerTo(
                      Addr: This, PointeeType: D->getThisType()->getPointeeType())),
                  D->getThisType());

  // Forward the parameters.
  if (InheritedFromVBase &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // Nothing to do; this construction is not responsible for constructing
    // the base class containing the inherited constructor.
    // FIXME: Can we just pass undef's for the remaining arguments if we don't
    // have constructor variants?
    Args.push_back(Elt: ThisArg);
  } else if (!CXXInheritedCtorInitExprArgs.empty()) {
    // The inheriting constructor was inlined; just inject its arguments.
    assert(CXXInheritedCtorInitExprArgs.size() >= D->getNumParams() &&
           "wrong number of parameters for inherited constructor call");
    Args = CXXInheritedCtorInitExprArgs;
    Args[0] = ThisArg;
  } else {
    // The inheriting constructor was not inlined. Emit delegating arguments.
    Args.push_back(Elt: ThisArg);
    const auto *OuterCtor = cast<CXXConstructorDecl>(Val: CurCodeDecl);
    assert(OuterCtor->getNumParams() == D->getNumParams());
    assert(!OuterCtor->isVariadic() && "should have been inlined");

    for (const auto *Param : OuterCtor->parameters()) {
      assert(getContext().hasSameUnqualifiedType(
          OuterCtor->getParamDecl(Param->getFunctionScopeIndex())->getType(),
          Param->getType()));
      EmitDelegateCallArg(args&: Args, param: Param, loc: E->getLocation());

      // Forward __attribute__(pass_object_size).
      if (Param->hasAttr<PassObjectSizeAttr>()) {
        auto *POSParam = SizeArguments[Param];
        assert(POSParam && "missing pass_object_size value for forwarding");
        EmitDelegateCallArg(args&: Args, param: POSParam, loc: E->getLocation());
      }
    }
  }

  EmitCXXConstructorCall(D, Type: Ctor_Base, ForVirtualBase, /*Delegating*/ false,
                         This, Args, Overlap: AggValueSlot::MayOverlap, Loc: E->getLocation(),
                         /*NewPointerIsChecked*/ true);
}
2477
/// Emit an inheriting constructor by inlining the inherited constructor's
/// initialization directly into the current function. Used when the
/// argument list cannot be forwarded as a real call
/// (see canEmitDelegateCallArgs).
void CodeGenFunction::EmitInlinedInheritingCXXConstructorCall(
    const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase,
    bool Delegating, CallArgList &Args) {
  GlobalDecl GD(Ctor, CtorType);
  InlinedInheritingConstructorScope Scope(*this, GD);
  ApplyInlineDebugLocation DebugScope(*this, GD);
  RunCleanupsScope RunCleanups(*this);

  // Save the arguments to be passed to the inherited constructor.
  CXXInheritedCtorInitExprArgs = Args;

  FunctionArgList Params;
  QualType RetType = BuildFunctionArgList(GD: CurGD, Args&: Params);
  FnRetTy = RetType;

  // Insert any ABI-specific implicit constructor arguments.
  CGM.getCXXABI().addImplicitConstructorArgs(CGF&: *this, D: Ctor, Type: CtorType,
                                             ForVirtualBase, Delegating, Args);

  // Emit a simplified prolog. We only need to emit the implicit params.
  assert(Args.size() >= Params.size() && "too few arguments for call");
  for (unsigned I = 0, N = Args.size(); I != N; ++I) {
    if (I < Params.size() && isa<ImplicitParamDecl>(Val: Params[I])) {
      const RValue &RV = Args[I].getRValue(CGF&: *this);
      assert(!RV.isComplex() && "complex indirect params not supported");
      ParamValue Val = RV.isScalar()
                           ? ParamValue::forDirect(value: RV.getScalarVal())
                           : ParamValue::forIndirect(addr: RV.getAggregateAddress());
      EmitParmDecl(D: *Params[I], Arg: Val, ArgNo: I + 1);
    }
  }

  // Create a return value slot if the ABI implementation wants one.
  // FIXME: This is dumb, we should ask the ABI not to try to set the return
  // value instead.
  if (!RetType->isVoidType())
    ReturnValue = CreateIRTempWithoutCast(T: RetType, Name: "retval.inhctor");

  CGM.getCXXABI().EmitInstanceFunctionProlog(CGF&: *this);
  CXXThisValue = CXXABIThisValue;

  // Directly emit the constructor initializers.
  EmitCtorPrologue(CD: Ctor, CtorType, Args&: Params);
}
2522
/// Emit an llvm.assume that the vptr described by \p Vptr inside the object
/// at \p This equals the statically-known vtable address point, enabling
/// devirtualization under -fstrict-vtable-pointers.
void CodeGenFunction::EmitVTableAssumptionLoad(const VPtr &Vptr, Address This) {
  llvm::Value *VTableGlobal =
      CGM.getCXXABI().getVTableAddressPoint(Base: Vptr.Base, VTableClass: Vptr.VTableClass);
  // No known address point (e.g. it cannot be referenced) -> nothing to assume.
  if (!VTableGlobal)
    return;

  // We can just use the base offset in the complete class.
  CharUnits NonVirtualOffset = Vptr.Base.getBaseOffset();

  // Step to the subobject whose vptr we are describing.
  if (!NonVirtualOffset.isZero())
    This =
        ApplyNonVirtualAndVirtualOffset(CGF&: *this, addr: This, nonVirtualOffset: NonVirtualOffset, virtualOffset: nullptr,
                                        derivedClass: Vptr.VTableClass, nearestVBase: Vptr.NearestVBase);

  // Load the stored vptr, compare to the known address point, and assume
  // equality.
  llvm::Value *VPtrValue =
      GetVTablePtr(This, VTableTy: VTableGlobal->getType(), VTableClass: Vptr.VTableClass);
  llvm::Value *Cmp =
      Builder.CreateICmpEQ(LHS: VPtrValue, RHS: VTableGlobal, Name: "cmp.vtables");
  Builder.CreateAssumption(Cond: Cmp);
}
2543
2544void CodeGenFunction::EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl,
2545 Address This) {
2546 if (CGM.getCXXABI().doStructorsInitializeVPtrs(VTableClass: ClassDecl))
2547 for (const VPtr &Vptr : getVTablePointers(VTableClass: ClassDecl))
2548 EmitVTableAssumptionLoad(Vptr, This);
2549}
2550
/// Emit a call to copy constructor \p D, copying the object at \p Src into
/// \p This, with any remaining arguments evaluated from \p E.
void CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(
    const CXXConstructorDecl *D, Address This, Address Src,
    const CXXConstructExpr *E) {
  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  // NOTE(review): the pointee type passed to getAsNaturalPointerTo here is
  // D->getThisType() itself (a pointer type), unlike other callers which pass
  // its pointee -- confirm this is intended.
  Args.add(rvalue: RValue::get(V: getAsNaturalPointerTo(Addr: This, PointeeType: D->getThisType())),
           type: D->getThisType());

  // Push the src ptr, cast to the IR type of the first parameter.
  QualType QT = *(FPT->param_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(T: QT);
  llvm::Value *Val = getAsNaturalPointerTo(Addr: Src, PointeeType: D->getThisType());
  llvm::Value *SrcVal = Builder.CreateBitCast(V: Val, DestTy: t);
  Args.add(rvalue: RValue::get(V: SrcVal), type: QT);

  // Skip over first argument (Src).
  EmitCallArgs(Args, Prototype: FPT, ArgRange: drop_begin(RangeOrContainer: E->arguments(), N: 1), AC: E->getConstructor(),
               /*ParamsToSkip*/ 1);

  EmitCXXConstructorCall(D, Type: Ctor_Complete, /*ForVirtualBase*/ false,
                         /*Delegating*/ false, This, Args,
                         Overlap: AggValueSlot::MayOverlap, Loc: E->getExprLoc(),
                         /*NewPointerIsChecked*/ false);
}
2578
/// Emit a call from one constructor variant to another (e.g. complete to
/// base), forwarding the current function's parameters \p Args unchanged.
void CodeGenFunction::EmitDelegateCXXConstructorCall(
    const CXXConstructorDecl *Ctor, CXXCtorType CtorType,
    const FunctionArgList &Args, SourceLocation Loc) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  Address This = LoadCXXThisAddress();
  DelegateArgs.add(rvalue: RValue::get(V: getAsNaturalPointerTo(
                       Addr: This, PointeeType: (*I)->getType()->getPointeeType())),
                   type: (*I)->getType());
  ++I;

  // FIXME: The location of the VTT parameter in the parameter list is
  // specific to the Itanium ABI and shouldn't be hardcoded here.
  if (CGM.getCXXABI().NeedsVTTParameter(GD: CurGD)) {
    assert(I != E && "cannot skip vtt parameter, already done with args");
    assert((*I)->getType()->isPointerType() &&
           "skipping parameter not of vtt type");
    // The callee computes its own VTT; do not forward ours.
    ++I;
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    // FIXME: per-argument source location
    EmitDelegateCallArg(args&: DelegateArgs, param, loc: Loc);
  }

  EmitCXXConstructorCall(D: Ctor, Type: CtorType, /*ForVirtualBase=*/false,
                         /*Delegating=*/true, This, Args&: DelegateArgs,
                         Overlap: AggValueSlot::MayOverlap, Loc,
                         /*NewPointerIsChecked=*/true);
}
2615
namespace {
/// EH cleanup pushed by a delegating constructor: if the delegated-to
/// constructor completes but the rest of the delegating constructor throws,
/// the fully-constructed object must be destroyed.
struct CallDelegatingCtorDtor final : EHScopeStack::Cleanup {
  const CXXDestructorDecl *Dtor; // Destructor to invoke.
  Address Addr;                  // Address of the object ('this').
  CXXDtorType Type;              // Complete vs. base destruction.

  CallDelegatingCtorDtor(const CXXDestructorDecl *D, Address Addr,
                         CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // We are calling the destructor from within the constructor.
    // Therefore, "this" should have the expected type.
    QualType ThisTy = Dtor->getFunctionObjectParameterType();
    CGF.EmitCXXDestructorCall(D: Dtor, Type, /*ForVirtualBase=*/false,
                              /*Delegating=*/true, This: Addr, ThisTy);
  }
};
} // end anonymous namespace
2635
/// Emit the body of a C++11 delegating constructor: evaluate the single
/// delegating initializer into 'this', and arrange for the object to be
/// destroyed if the remainder of the constructor throws.
void CodeGenFunction::EmitDelegatingCXXConstructorCall(
    const CXXConstructorDecl *Ctor, const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  Address ThisPtr = LoadCXXThisAddress();

  AggValueSlot AggSlot = AggValueSlot::forAddr(
      addr: ThisPtr, quals: Qualifiers(), isDestructed: AggValueSlot::IsDestructed,
      needsGC: AggValueSlot::DoesNotNeedGCBarriers, isAliased: AggValueSlot::IsNotAliased,
      mayOverlap: AggValueSlot::MayOverlap, isZeroed: AggValueSlot::IsNotZeroed,
      // Checks are made by the code that calls constructor.
      isChecked: AggValueSlot::IsSanitizerChecked);

  // The sole initializer of a delegating constructor is the target
  // constructor call.
  EmitAggExpr(E: Ctor->init_begin()[0]->getInit(), AS: AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    // Destroy at the same level we constructed: complete-object destructor
    // for a complete-object constructor, base for base.
    CXXDtorType Type =
        CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(
        Kind: EHCleanup, A: ClassDecl->getDestructor(), A: ThisPtr, A: Type);
  }
}
2660
/// Emit a call to destructor \p DD of kind \p Type on the object at \p This,
/// delegating all ABI-specific handling to the C++ ABI implementation.
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating, Address This,
                                            QualType ThisTy) {
  CGM.getCXXABI().EmitDestructorCall(CGF&: *this, DD, Type, ForVirtualBase,
                                     Delegating, This, ThisTy);
}
2669
namespace {
/// Cleanup that runs the complete-object destructor on a local object when
/// its scope exits (normally or via exception).
struct CallLocalDtor final : EHScopeStack::Cleanup {
  const CXXDestructorDecl *Dtor; // Destructor to invoke.
  Address Addr;                  // Address of the object to destroy.
  QualType Ty;                   // Static type of the object.

  CallLocalDtor(const CXXDestructorDecl *D, Address Addr, QualType Ty)
      : Dtor(D), Addr(Addr), Ty(Ty) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitCXXDestructorCall(DD: Dtor, Type: Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false, This: Addr, ThisTy: Ty);
  }
};
} // end anonymous namespace
2686
/// Push a normal-and-EH cleanup that destroys the object of type \p T at
/// \p Addr with destructor \p D when the current scope exits.
void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            QualType T, Address Addr) {
  EHStack.pushCleanup<CallLocalDtor>(Kind: NormalAndEHCleanup, A: D, A: Addr, A: T);
}
2691
2692void CodeGenFunction::PushDestructorCleanup(QualType T, Address Addr) {
2693 CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
2694 if (!ClassDecl)
2695 return;
2696 if (ClassDecl->hasTrivialDestructor())
2697 return;
2698
2699 const CXXDestructorDecl *D = ClassDecl->getDestructor();
2700 assert(D && D->isUsed() && "destructor not marked as used!");
2701 PushDestructorCleanup(D, T, Addr);
2702}
2703
/// Store the vtable address point described by \p Vptr into the object that
/// 'this' points to, applying virtual and non-virtual base offsets, pointer
/// authentication, TBAA, and invariant.group metadata as configured.
void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
  // Compute the address point.
  llvm::Value *VTableAddressPoint =
      CGM.getCXXABI().getVTableAddressPointInStructor(
          CGF&: *this, RD: Vptr.VTableClass, Base: Vptr.Base, NearestVBase: Vptr.NearestVBase);

  // The ABI may decline to produce an address point (nothing to store).
  if (!VTableAddressPoint)
    return;

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = nullptr;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (CGM.getCXXABI().isVirtualOffsetNeededForVTableField(CGF&: *this, Vptr)) {
    // We need to use the virtual base offset offset because the virtual base
    // might have a different offset in the most derived class.

    VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(
        CGF&: *this, This: LoadCXXThisAddress(), ClassDecl: Vptr.VTableClass, BaseClassDecl: Vptr.NearestVBase);
    NonVirtualOffset = Vptr.OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Vptr.Base.getBaseOffset();
  }

  // Apply the offsets.
  Address VTableField = LoadCXXThisAddress();
  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(
        CGF&: *this, addr: VTableField, nonVirtualOffset: NonVirtualOffset, virtualOffset: VirtualOffset, derivedClass: Vptr.VTableClass,
        nearestVBase: Vptr.NearestVBase);

  // Finally, store the address point. Use the same LLVM types as the field to
  // support optimization.
  unsigned GlobalsAS = CGM.getDataLayout().getDefaultGlobalsAddressSpace();
  llvm::Type *PtrTy = llvm::PointerType::get(C&: CGM.getLLVMContext(), AddressSpace: GlobalsAS);
  // vtable field is derived from `this` pointer, therefore they should be in
  // the same addr space. Note that this might not be LLVM address space 0.
  VTableField = VTableField.withElementType(ElemTy: PtrTy);

  // Sign the address point if vtable pointer authentication is enabled.
  if (auto AuthenticationInfo = CGM.getVTablePointerAuthInfo(
          Context: this, Record: Vptr.Base.getBase(), StorageAddress: VTableField.emitRawPointer(CGF&: *this)))
    VTableAddressPoint =
        EmitPointerAuthSign(Info: *AuthenticationInfo, Pointer: VTableAddressPoint);

  llvm::StoreInst *Store = Builder.CreateStore(Val: VTableAddressPoint, Addr: VTableField);
  TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTablePtrType: PtrTy);
  CGM.DecorateInstructionWithTBAA(Inst: Store, TBAAInfo);
  if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      CGM.getCodeGenOpts().StrictVTablePointers)
    CGM.DecorateInstructionWithInvariantGroup(I: Store, RD: Vptr.VTableClass);
}
2756
2757CodeGenFunction::VPtrsVector
2758CodeGenFunction::getVTablePointers(const CXXRecordDecl *VTableClass) {
2759 CodeGenFunction::VPtrsVector VPtrsResult;
2760 VisitedVirtualBasesSetTy VBases;
2761 getVTablePointers(Base: BaseSubobject(VTableClass, CharUnits::Zero()),
2762 /*NearestVBase=*/nullptr,
2763 /*OffsetFromNearestVBase=*/CharUnits::Zero(),
2764 /*BaseIsNonVirtualPrimaryBase=*/false, VTableClass, VBases,
2765 vptrs&: VPtrsResult);
2766 return VPtrsResult;
2767}
2768
/// Recursively collect, into \p Vptrs, one entry for each vtable pointer
/// that must be initialized in an object of type \p VTableClass, walking
/// \p Base and its virtual and non-virtual dynamic bases. Each virtual base
/// is visited at most once (tracked in \p VBases); a non-virtual primary
/// base shares its vptr with the class above it and produces no entry.
void CodeGenFunction::getVTablePointers(BaseSubobject Base,
                                        const CXXRecordDecl *NearestVBase,
                                        CharUnits OffsetFromNearestVBase,
                                        bool BaseIsNonVirtualPrimaryBase,
                                        const CXXRecordDecl *VTableClass,
                                        VisitedVirtualBasesSetTy &VBases,
                                        VPtrsVector &Vptrs) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    VPtr Vptr = {.Base: Base, .NearestVBase: NearestVBase, .OffsetFromNearestVBase: OffsetFromNearestVBase, .VTableClass: VTableClass};
    Vptrs.push_back(Elt: Vptr);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (const auto &I : RD->bases()) {
    auto *BaseDecl = I.getType()->castAsCXXRecordDecl();
    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I.isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(Ptr: BaseDecl).second)
        continue;

      // Virtual base offsets come from the most derived class's layout.
      const ASTRecordLayout &Layout =
          getContext().getASTRecordLayout(D: VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(VBase: BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      // Non-virtual base offsets are relative to the current class.
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D: RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(Base: BaseDecl);
      BaseOffsetFromNearestVBase =
          OffsetFromNearestVBase + Layout.getBaseClassOffset(Base: BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    getVTablePointers(
        Base: BaseSubobject(BaseDecl, BaseOffset),
        NearestVBase: I.isVirtual() ? BaseDecl : NearestVBase, OffsetFromNearestVBase: BaseOffsetFromNearestVBase,
        BaseIsNonVirtualPrimaryBase: BaseDeclIsNonVirtualPrimaryBase, VTableClass, VBases, Vptrs);
  }
}
2823
2824void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
2825 // Ignore classes without a vtable.
2826 if (!RD->isDynamicClass())
2827 return;
2828
2829 // Initialize the vtable pointers for this class and all of its bases.
2830 if (CGM.getCXXABI().doStructorsInitializeVPtrs(VTableClass: RD))
2831 for (const VPtr &Vptr : getVTablePointers(VTableClass: RD))
2832 InitializeVTablePointer(Vptr);
2833
2834 if (RD->getNumVBases())
2835 CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(CGF&: *this, RD);
2836}
2837
/// Load the vtable pointer from the object at \p This, attaching
/// vtable-pointer TBAA and, when enabled, pointer authentication and
/// invariant.group metadata. \p AuthMode controls how an authenticated vptr
/// is handled (authenticate, authenticate-and-trap, or strip).
llvm::Value *CodeGenFunction::GetVTablePtr(Address This, llvm::Type *VTableTy,
                                           const CXXRecordDecl *RD,
                                           VTableAuthMode AuthMode) {
  Address VTablePtrSrc = This.withElementType(ElemTy: VTableTy);
  llvm::Instruction *VTable = Builder.CreateLoad(Addr: VTablePtrSrc, Name: "vtable");
  TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTablePtrType: VTableTy);
  CGM.DecorateInstructionWithTBAA(Inst: VTable, TBAAInfo);

  if (auto AuthenticationInfo =
          CGM.getVTablePointerAuthInfo(Context: this, Record: RD, StorageAddress: This.emitRawPointer(CGF&: *this))) {
    if (AuthMode != VTableAuthMode::UnsafeUbsanStrip) {
      VTable = cast<llvm::Instruction>(
          Val: EmitPointerAuthAuth(Info: *AuthenticationInfo, Pointer: VTable));
      if (AuthMode == VTableAuthMode::MustTrap) {
        // This is clearly suboptimal but until we have an ability
        // to rely on the authentication intrinsic trapping and force
        // an authentication to occur we don't really have a choice.
        VTable =
            cast<llvm::Instruction>(Val: Builder.CreateBitCast(V: VTable, DestTy: Int8PtrTy));
        // A volatile load through the authenticated pointer forces a fault
        // if authentication failed.
        Builder.CreateLoad(Addr: RawAddress(VTable, Int8Ty, CGM.getPointerAlign()),
                           /* IsVolatile */ true);
      }
    } else {
      // Strip the signature without verifying it (UBSan diagnostics only).
      VTable = cast<llvm::Instruction>(Val: EmitPointerAuthAuth(
          Info: CGPointerAuthInfo(0, PointerAuthenticationMode::Strip, false, false,
                             nullptr),
          Pointer: VTable));
    }
  }

  if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      CGM.getCodeGenOpts().StrictVTablePointers)
    CGM.DecorateInstructionWithInvariantGroup(I: VTable, RD);

  return VTable;
}
2874
2875// If a class has a single non-virtual base and does not introduce or override
2876// virtual member functions or fields, it will have the same layout as its base.
2877// This function returns the least derived such class.
2878//
2879// Casting an instance of a base class to such a derived class is technically
2880// undefined behavior, but it is a relatively common hack for introducing member
2881// functions on class instances with specific properties (e.g. llvm::Operator)
2882// that works under most compilers and should not have security implications, so
2883// we allow it by default. It can be disabled with -fsanitize=cfi-cast-strict.
2884static const CXXRecordDecl *
2885LeastDerivedClassWithSameLayout(const CXXRecordDecl *RD) {
2886 if (!RD->field_empty())
2887 return RD;
2888
2889 if (RD->getNumVBases() != 0)
2890 return RD;
2891
2892 if (RD->getNumBases() != 1)
2893 return RD;
2894
2895 for (const CXXMethodDecl *MD : RD->methods()) {
2896 if (MD->isVirtual()) {
2897 // Virtual member functions are only ok if they are implicit destructors
2898 // because the implicit destructor will have the same semantics as the
2899 // base class's destructor if no fields are added.
2900 if (isa<CXXDestructorDecl>(Val: MD) && MD->isImplicit())
2901 continue;
2902 return RD;
2903 }
2904 }
2905
2906 return LeastDerivedClassWithSameLayout(
2907 RD: RD->bases_begin()->getType()->getAsCXXRecordDecl());
2908}
2909
/// Emit the per-virtual-call type metadata code for \p VTable: a CFI vcall
/// check when that sanitizer is enabled, otherwise llvm.type.test/assume
/// intrinsics supporting whole-program and speculative devirtualization.
void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
                                                   llvm::Value *VTable,
                                                   SourceLocation Loc) {
  if (SanOpts.has(K: SanitizerKind::CFIVCall))
    EmitVTablePtrCheckForCall(RD, VTable, TCK: CodeGenFunction::CFITCK_VCall, Loc);
  // Emit the intrinsics of (type_test and assume) for the features of WPD and
  // speculative devirtualization. For WPD, emit the intrinsics only for the
  // case of non_public LTO visibility.
  // TODO: refactor this condition and similar ones into a function (e.g.,
  // ShouldEmitDevirtualizationMD) to encapsulate the details of the different
  // types of devirtualization.
  else if ((CGM.getCodeGenOpts().WholeProgramVTables &&
            !CGM.AlwaysHasLTOVisibilityPublic(RD)) ||
           CGM.getCodeGenOpts().DevirtualizeSpeculatively) {
    CanQualType Ty = CGM.getContext().getCanonicalTagType(TD: RD);
    llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(T: Ty);
    llvm::Value *TypeId = llvm::MetadataAsValue::get(Context&: CGM.getLLVMContext(), MD);

    // If we already know that the call has hidden LTO visibility, emit
    // @llvm.type.test(). Otherwise emit @llvm.public.type.test(), which WPD
    // will convert to @llvm.type.test() if we assert at link time that we have
    // whole program visibility.
    llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
                                  ? llvm::Intrinsic::type_test
                                  : llvm::Intrinsic::public_type_test;
    llvm::Value *TypeTest =
        Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: {VTable, TypeId});
    Builder.CreateCall(Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::assume), Args: TypeTest);
  }
}
2940
2941/// Converts the CFITypeCheckKind into SanitizerKind::SanitizerOrdinal and
2942/// llvm::SanitizerStatKind.
2943static std::pair<SanitizerKind::SanitizerOrdinal, llvm::SanitizerStatKind>
2944SanitizerInfoFromCFICheckKind(CodeGenFunction::CFITypeCheckKind TCK) {
2945 switch (TCK) {
2946 case CodeGenFunction::CFITCK_VCall:
2947 return std::make_pair(x: SanitizerKind::SO_CFIVCall, y: llvm::SanStat_CFI_VCall);
2948 case CodeGenFunction::CFITCK_NVCall:
2949 return std::make_pair(x: SanitizerKind::SO_CFINVCall,
2950 y: llvm::SanStat_CFI_NVCall);
2951 case CodeGenFunction::CFITCK_DerivedCast:
2952 return std::make_pair(x: SanitizerKind::SO_CFIDerivedCast,
2953 y: llvm::SanStat_CFI_DerivedCast);
2954 case CodeGenFunction::CFITCK_UnrelatedCast:
2955 return std::make_pair(x: SanitizerKind::SO_CFIUnrelatedCast,
2956 y: llvm::SanStat_CFI_UnrelatedCast);
2957 case CodeGenFunction::CFITCK_ICall:
2958 case CodeGenFunction::CFITCK_NVMFCall:
2959 case CodeGenFunction::CFITCK_VMFCall:
2960 llvm_unreachable("unexpected sanitizer kind");
2961 }
2962 llvm_unreachable("Unknown CFITypeCheckKind enum");
2963}
2964
2965void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXRecordDecl *RD,
2966 llvm::Value *VTable,
2967 CFITypeCheckKind TCK,
2968 SourceLocation Loc) {
2969 if (!SanOpts.has(K: SanitizerKind::CFICastStrict))
2970 RD = LeastDerivedClassWithSameLayout(RD);
2971
2972 auto [Ordinal, _] = SanitizerInfoFromCFICheckKind(TCK);
2973 SanitizerDebugLocation SanScope(this, {Ordinal},
2974 SanitizerHandler::CFICheckFail);
2975
2976 EmitVTablePtrCheck(RD, VTable, TCK, Loc);
2977}
2978
/// Emit a CFI check that the vtable of the object at \p Derived is
/// consistent with a cast to dynamic class type \p T. When \p MayBeNull,
/// the check is guarded by a null test and skipped for null pointers.
void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, Address Derived,
                                                bool MayBeNull,
                                                CFITypeCheckKind TCK,
                                                SourceLocation Loc) {
  if (!getLangOpts().CPlusPlus)
    return;

  const auto *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl)
    return;

  // Only complete, dynamic classes have a vtable to check.
  if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass())
    return;

  if (!SanOpts.has(K: SanitizerKind::CFICastStrict))
    ClassDecl = LeastDerivedClassWithSameLayout(RD: ClassDecl);

  auto [Ordinal, _] = SanitizerInfoFromCFICheckKind(TCK);
  SanitizerDebugLocation SanScope(this, {Ordinal},
                                  SanitizerHandler::CFICheckFail);

  llvm::BasicBlock *ContBlock = nullptr;

  // Branch around the check when the pointer might be null.
  if (MayBeNull) {
    llvm::Value *DerivedNotNull =
        Builder.CreateIsNotNull(Arg: Derived.emitRawPointer(CGF&: *this), Name: "cast.nonnull");

    llvm::BasicBlock *CheckBlock = createBasicBlock(name: "cast.check");
    ContBlock = createBasicBlock(name: "cast.cont");

    Builder.CreateCondBr(Cond: DerivedNotNull, True: CheckBlock, False: ContBlock);

    EmitBlock(BB: CheckBlock);
  }

  llvm::Value *VTable;
  std::tie(args&: VTable, args&: ClassDecl) =
      CGM.getCXXABI().LoadVTablePtr(CGF&: *this, This: Derived, RD: ClassDecl);

  EmitVTablePtrCheck(RD: ClassDecl, VTable, TCK, Loc);

  if (MayBeNull) {
    Builder.CreateBr(Dest: ContBlock);
    EmitBlock(BB: ContBlock);
  }
}
3025
/// Emit the actual CFI vtable check: an llvm.type.test of \p VTable against
/// the type metadata for \p RD, then the failure handling appropriate to the
/// build mode — cross-DSO slow path, trap, or a diagnostic handler.
void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
                                         llvm::Value *VTable,
                                         CFITypeCheckKind TCK,
                                         SourceLocation Loc) {
  assert(IsSanitizerScope);

  // Without cross-DSO CFI or hidden LTO visibility, the vtable may live in
  // another DSO and the type metadata is unavailable; skip the check.
  if (!CGM.getCodeGenOpts().SanitizeCfiCrossDso &&
      !CGM.HasHiddenLTOVisibility(RD))
    return;

  auto [M, SSK] = SanitizerInfoFromCFICheckKind(TCK);

  // Honor the no-sanitize list for this type.
  std::string TypeName = RD->getQualifiedNameAsString();
  if (getContext().getNoSanitizeList().containsType(
          Mask: SanitizerMask::bitPosToMask(Pos: M), MangledTypeName: TypeName))
    return;

  EmitSanitizerStatReport(SSK);

  CanQualType T = CGM.getContext().getCanonicalTagType(TD: RD);
  llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(T);
  llvm::Value *TypeId = llvm::MetadataAsValue::get(Context&: getLLVMContext(), MD);

  llvm::Value *TypeTest = Builder.CreateCall(
      Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test), Args: {VTable, TypeId});

  // Static data passed to the diagnostic handler on failure.
  llvm::Constant *StaticData[] = {
      llvm::ConstantInt::get(Ty: Int8Ty, V: TCK),
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(T),
  };

  // Cross-DSO mode routes failures through the slow path so other DSOs can
  // resolve the type.
  auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
  if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
    EmitCfiSlowPathCheck(Ordinal: M, Cond: TypeTest, TypeId: CrossDsoTypeId, Ptr: VTable, StaticArgs: StaticData);
    return;
  }

  // Trapping mode: no diagnostic, just a trap on failure.
  if (CGM.getCodeGenOpts().SanitizeTrap.has(O: M)) {
    bool NoMerge = !CGM.getCodeGenOpts().SanitizeMergeHandlers.has(O: M);
    EmitTrapCheck(Checked: TypeTest, CheckHandlerID: SanitizerHandler::CFICheckFail, NoMerge);
    return;
  }

  // Diagnostic mode: also test against "all-vtables" so the handler can tell
  // a wrong vtable from a non-vtable pointer.
  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
      Context&: CGM.getLLVMContext(),
      MD: llvm::MDString::get(Context&: CGM.getLLVMContext(), Str: "all-vtables"));
  llvm::Value *ValidVtable = Builder.CreateCall(
      Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test), Args: {VTable, AllVtables});
  EmitCheck(Checked: std::make_pair(x&: TypeTest, y&: M), Check: SanitizerHandler::CFICheckFail,
            StaticArgs: StaticData, DynamicArgs: {VTable, ValidVtable});
}
3078
3079bool CodeGenFunction::ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) {
3080 if ((!CGM.getCodeGenOpts().WholeProgramVTables ||
3081 !CGM.HasHiddenLTOVisibility(RD)) &&
3082 !CGM.getCodeGenOpts().DevirtualizeSpeculatively)
3083 return false;
3084
3085 if (CGM.getCodeGenOpts().VirtualFunctionElimination)
3086 return true;
3087
3088 if (!SanOpts.has(K: SanitizerKind::CFIVCall) ||
3089 !CGM.getCodeGenOpts().SanitizeTrap.has(K: SanitizerKind::CFIVCall))
3090 return false;
3091
3092 std::string TypeName = RD->getQualifiedNameAsString();
3093 return !getContext().getNoSanitizeList().containsType(Mask: SanitizerKind::CFIVCall,
3094 MangledTypeName: TypeName);
3095}
3096
/// Load the virtual function pointer at \p VTableByteOffset from \p VTable
/// using llvm.type.checked.load, which fuses the slot load with a CFI-vcall
/// type test. Returns the loaded pointer cast to \p VTableTy.
llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
    const CXXRecordDecl *RD, llvm::Value *VTable, llvm::Type *VTableTy,
    uint64_t VTableByteOffset) {
  auto CheckOrdinal = SanitizerKind::SO_CFIVCall;
  auto CheckHandler = SanitizerHandler::CFICheckFail;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);

  EmitSanitizerStatReport(SSK: llvm::SanStat_CFI_VCall);

  // Build the type-metadata identifier for this class; the intrinsic tests
  // the vtable against it.
  CanQualType T = CGM.getContext().getCanonicalTagType(TD: RD);
  llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(T);
  llvm::Value *TypeId = llvm::MetadataAsValue::get(Context&: CGM.getLLVMContext(), MD);

  // Relative vtable layouts store 32-bit offsets instead of pointers and use
  // a dedicated intrinsic variant.
  auto CheckedLoadIntrinsic = CGM.getVTables().useRelativeLayout()
                                  ? llvm::Intrinsic::type_checked_load_relative
                                  : llvm::Intrinsic::type_checked_load;
  llvm::Value *CheckedLoad = Builder.CreateCall(
      Callee: CGM.getIntrinsic(IID: CheckedLoadIntrinsic),
      Args: {VTable, llvm::ConstantInt::get(Ty: Int32Ty, V: VTableByteOffset), TypeId});

  // Element 1 of the returned aggregate is the i1 result of the type test.
  llvm::Value *CheckResult = Builder.CreateExtractValue(Agg: CheckedLoad, Idxs: 1);

  // Only report a CFI failure when CFI-vcall is active and the class is not
  // suppressed by the no-sanitize list; otherwise the intrinsic's check
  // result is left for the optimizer alone.
  std::string TypeName = RD->getQualifiedNameAsString();
  if (SanOpts.has(K: SanitizerKind::CFIVCall) &&
      !getContext().getNoSanitizeList().containsType(Mask: SanitizerKind::CFIVCall,
                                                     MangledTypeName: TypeName)) {
    EmitCheck(Checked: std::make_pair(x&: CheckResult, y&: CheckOrdinal), Check: CheckHandler, StaticArgs: {}, DynamicArgs: {});
  }

  // Element 0 is the loaded slot value; cast it to the expected type.
  return Builder.CreateBitCast(V: Builder.CreateExtractValue(Agg: CheckedLoad, Idxs: 0),
                               DestTy: VTableTy);
}
3129
/// Emit a call forwarding the prepared \p callArgs to \p callOperator and
/// propagate its result as this function's return value. \p calleeFnInfo and
/// \p calleePtr may be pre-supplied by the caller (used for the inalloca
/// __impl delegation); when null they are derived from \p callOperator.
void CodeGenFunction::EmitForwardingCallToLambda(
    const CXXMethodDecl *callOperator, CallArgList &callArgs,
    const CGFunctionInfo *calleeFnInfo, llvm::Constant *calleePtr) {
  // Get the address of the call operator.
  if (!calleeFnInfo)
    calleeFnInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD: callOperator);

  if (!calleePtr)
    calleePtr =
        CGM.GetAddrOfFunction(GD: GlobalDecl(callOperator),
                              Ty: CGM.getTypes().GetFunctionType(Info: *calleeFnInfo));

  // Prepare the return slot. If the callee returns indirectly (sret) and the
  // result is not scalar-evaluated, have the callee construct directly into
  // this function's own return slot.
  const FunctionProtoType *FPT =
      callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getReturnType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(T: calleeFnInfo->getReturnType()))
    returnSlot =
        ReturnValueSlot(ReturnValue, resultType.isVolatileQualified(),
                        /*IsUnused=*/false, /*IsExternallyDestructed=*/true);

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  auto callee = CGCallee::forDirect(functionPtr: calleePtr, abstractInfo: GlobalDecl(callOperator));
  RValue RV = EmitCall(CallInfo: *calleeFnInfo, Callee: callee, ReturnValue: returnSlot, Args: callArgs);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull()) {
    // Under ARC, retain the autoreleased return value before handing it back.
    if (getLangOpts().ObjCAutoRefCount && resultType->isObjCRetainableType()) {
      RV = RValue::get(V: EmitARCRetainAutoreleasedReturnValue(value: RV.getScalarVal()));
    }
    EmitReturnOfRValue(RV, Ty: resultType);
  } else
    // The callee already populated the return slot (or the result is void);
    // just run cleanups and branch to the function's return block.
    EmitBranchThroughCleanup(Dest: ReturnBlock);
}
3171
3172void CodeGenFunction::EmitLambdaBlockInvokeBody() {
3173 const BlockDecl *BD = BlockInfo->getBlockDecl();
3174 const VarDecl *variable = BD->capture_begin()->getVariable();
3175 const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
3176 const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
3177
3178 if (CallOp->isVariadic()) {
3179 // FIXME: Making this work correctly is nasty because it requires either
3180 // cloning the body of the call operator or making the call operator
3181 // forward.
3182 CGM.ErrorUnsupported(D: CurCodeDecl, Type: "lambda conversion to variadic function");
3183 return;
3184 }
3185
3186 // Start building arguments for forwarding call
3187 CallArgList CallArgs;
3188
3189 CanQualType ThisType =
3190 getContext().getPointerType(T: getContext().getCanonicalTagType(TD: Lambda));
3191 Address ThisPtr = GetAddrOfBlockDecl(var: variable);
3192 CallArgs.add(rvalue: RValue::get(V: getAsNaturalPointerTo(Addr: ThisPtr, PointeeType: ThisType)), type: ThisType);
3193
3194 // Add the rest of the parameters.
3195 for (auto *param : BD->parameters())
3196 EmitDelegateCallArg(args&: CallArgs, param, loc: param->getBeginLoc());
3197
3198 assert(!Lambda->isGenericLambda() &&
3199 "generic lambda interconversion to block not implemented");
3200 EmitForwardingCallToLambda(callOperator: CallOp, callArgs&: CallArgs);
3201}
3202
3203void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
3204 if (MD->isVariadic()) {
3205 // FIXME: Making this work correctly is nasty because it requires either
3206 // cloning the body of the call operator or making the call operator
3207 // forward.
3208 CGM.ErrorUnsupported(D: MD, Type: "lambda conversion to variadic function");
3209 return;
3210 }
3211
3212 const CXXRecordDecl *Lambda = MD->getParent();
3213
3214 // Start building arguments for forwarding call
3215 CallArgList CallArgs;
3216
3217 CanQualType LambdaType = getContext().getCanonicalTagType(TD: Lambda);
3218 CanQualType ThisType = getContext().getPointerType(T: LambdaType);
3219 Address ThisPtr = CreateMemTemp(T: LambdaType, Name: "unused.capture");
3220 CallArgs.add(rvalue: RValue::get(V: ThisPtr.emitRawPointer(CGF&: *this)), type: ThisType);
3221
3222 EmitLambdaDelegatingInvokeBody(MD, CallArgs);
3223}
3224
/// Shared tail for the lambda static invoker and the inalloca call-op thunk:
/// finish building \p CallArgs from \p MD's parameters (the 'this' argument
/// was already added by the caller) and forward the call to the lambda's
/// call operator.
void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
                                                     CallArgList &CallArgs) {
  // Add the rest of the forwarded parameters.
  for (auto *Param : MD->parameters())
    EmitDelegateCallArg(args&: CallArgs, param: Param, loc: Param->getBeginLoc());

  const CXXRecordDecl *Lambda = MD->getParent();
  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // For a generic lambda, find the corresponding call operator specialization
  // to which the call to the static-invoker shall be forwarded. The invoker
  // MD is itself a template specialization, so per the asserts below the
  // matching call-operator specialization must already exist.
  if (Lambda->isGenericLambda()) {
    assert(MD->isFunctionTemplateSpecialization());
    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate =
        CallOp->getDescribedFunctionTemplate();
    void *InsertPos = nullptr;
    FunctionDecl *CorrespondingCallOpSpecialization =
        CallOpTemplate->findSpecialization(Args: TAL->asArray(), InsertPos);
    assert(CorrespondingCallOpSpecialization);
    CallOp = cast<CXXMethodDecl>(Val: CorrespondingCallOpSpecialization);
  }

  // Special lambda forwarding when there are inalloca parameters: delegate
  // to the separately emitted __impl function rather than calling the call
  // operator directly.
  if (hasInAllocaArg(MD)) {
    const CGFunctionInfo *ImplFnInfo = nullptr;
    llvm::Function *ImplFn = nullptr;
    EmitLambdaInAllocaImplFn(CallOp, ImplFnInfo: &ImplFnInfo, ImplFn: &ImplFn);

    EmitForwardingCallToLambda(callOperator: CallOp, callArgs&: CallArgs, calleeFnInfo: ImplFnInfo, calleePtr: ImplFn);
    return;
  }

  EmitForwardingCallToLambda(callOperator: CallOp, callArgs&: CallArgs);
}
3259
3260void CodeGenFunction::EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD) {
3261 if (MD->isVariadic()) {
3262 // FIXME: Making this work correctly is nasty because it requires either
3263 // cloning the body of the call operator or making the call operator
3264 // forward.
3265 CGM.ErrorUnsupported(D: MD, Type: "lambda conversion to variadic function");
3266 return;
3267 }
3268
3269 // Forward %this argument.
3270 CallArgList CallArgs;
3271 CanQualType LambdaType = getContext().getCanonicalTagType(TD: MD->getParent());
3272 CanQualType ThisType = getContext().getPointerType(T: LambdaType);
3273 llvm::Value *ThisArg = CurFn->getArg(i: 0);
3274 CallArgs.add(rvalue: RValue::get(V: ThisArg), type: ThisType);
3275
3276 EmitLambdaDelegatingInvokeBody(MD, CallArgs);
3277}
3278
/// Materialize (or reuse) the function that holds the real body of an
/// inalloca lambda call operator \p CallOp; the static invoker delegates to
/// it. On return, *ImplFnInfo describes its delegate-call ABI and *ImplFn is
/// the llvm::Function.
void CodeGenFunction::EmitLambdaInAllocaImplFn(
    const CXXMethodDecl *CallOp, const CGFunctionInfo **ImplFnInfo,
    llvm::Function **ImplFn) {
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeCXXMethodDeclaration(MD: CallOp);
  llvm::Function *CallOpFn =
      cast<llvm::Function>(Val: CGM.GetAddrOfFunction(GD: GlobalDecl(CallOp)));

  // Emit function containing the original call op body. __invoke will delegate
  // to this function. Arrange it as a delegate call with the call operator's
  // own argument types and ext info.
  SmallVector<CanQualType, 4> ArgTypes;
  for (auto I = FnInfo.arg_begin(); I != FnInfo.arg_end(); ++I)
    ArgTypes.push_back(Elt: I->type);
  *ImplFnInfo = &CGM.getTypes().arrangeLLVMFunctionInfo(
      returnType: FnInfo.getReturnType(), opts: FnInfoOpts::IsDelegateCall, argTypes: ArgTypes,
      info: FnInfo.getExtInfo(), paramInfos: {}, args: FnInfo.getRequiredArgs());

  // Create mangled name as if this was a method named __impl. If for some
  // reason the name doesn't look as expected then just tack __impl to the
  // front.
  // TODO: Use the name mangler to produce the right name instead of using
  // string replacement.
  // NOTE(review): StringRef::find_first_of("<lambda") finds the first
  // occurrence of ANY of those characters ('<', 'l', 'a', ...), not the
  // substring, and the `if (size_t Pos = ...)` condition treats npos
  // (nonzero) as a hit — this is a heuristic, consistent with the TODO above.
  StringRef CallOpName = CallOpFn->getName();
  std::string ImplName;
  if (size_t Pos = CallOpName.find_first_of(Chars: "<lambda"))
    ImplName = ("?__impl@" + CallOpName.drop_front(N: Pos)).str();
  else
    ImplName = ("__impl" + CallOpName).str();

  // Reuse the __impl function if it has already been emitted in this module.
  llvm::Function *Fn = CallOpFn->getParent()->getFunction(Name: ImplName);
  if (!Fn) {
    Fn = llvm::Function::Create(Ty: CGM.getTypes().GetFunctionType(Info: **ImplFnInfo),
                                Linkage: llvm::GlobalValue::InternalLinkage, N: ImplName,
                                M&: CGM.getModule());
    CGM.SetInternalFunctionAttributes(GD: CallOp, F: Fn, FI: **ImplFnInfo);

    // Generate __impl's body from the call operator's own definition.
    const GlobalDecl &GD = GlobalDecl(CallOp);
    const auto *D = cast<FunctionDecl>(Val: GD.getDecl());
    CodeGenFunction(CGM).GenerateCode(GD, Fn, FnInfo: **ImplFnInfo);
    CGM.SetLLVMFunctionAttributesForDefinition(D, F: Fn);
  }
  *ImplFn = Fn;
}
3322