//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//
12
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
26
27namespace {
28struct MemberCallInfo {
29 RequiredArgs ReqArgs;
30 // Number of prefix arguments for the call. Ignores the `this` pointer.
31 unsigned PrefixSize;
32};
33} // namespace
34
35static MemberCallInfo
36commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD,
37 llvm::Value *This, llvm::Value *ImplicitParam,
38 QualType ImplicitParamTy, const CallExpr *CE,
39 CallArgList &Args, CallArgList *RtlArgs) {
40 auto *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
41
42 assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
43 isa<CXXOperatorCallExpr>(CE));
44 assert(MD->isImplicitObjectMemberFunction() &&
45 "Trying to emit a member or operator call expr on a static method!");
46
47 // Push the this ptr.
48 const CXXRecordDecl *RD =
49 CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(GD);
50 Args.add(rvalue: RValue::get(V: This), type: CGF.getTypes().DeriveThisType(RD, MD));
51
52 // If there is an implicit parameter (e.g. VTT), emit it.
53 if (ImplicitParam) {
54 Args.add(rvalue: RValue::get(V: ImplicitParam), type: ImplicitParamTy);
55 }
56
57 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
58 RequiredArgs required = RequiredArgs::forPrototypePlus(prototype: FPT, additional: Args.size());
59 unsigned PrefixSize = Args.size() - 1;
60
61 // And the rest of the call args.
62 if (RtlArgs) {
63 // Special case: if the caller emitted the arguments right-to-left already
64 // (prior to emitting the *this argument), we're done. This happens for
65 // assignment operators.
66 Args.addFrom(other: *RtlArgs);
67 } else if (CE) {
68 // Special case: skip first argument of CXXOperatorCall (it is "this").
69 unsigned ArgsToSkip = 0;
70 if (const auto *Op = dyn_cast<CXXOperatorCallExpr>(Val: CE)) {
71 if (const auto *M = dyn_cast<CXXMethodDecl>(Val: Op->getCalleeDecl()))
72 ArgsToSkip =
73 static_cast<unsigned>(!M->isExplicitObjectMemberFunction());
74 }
75 CGF.EmitCallArgs(Args, Prototype: FPT, ArgRange: drop_begin(RangeOrContainer: CE->arguments(), N: ArgsToSkip),
76 AC: CE->getDirectCallee());
77 } else {
78 assert(
79 FPT->getNumParams() == 0 &&
80 "No CallExpr specified for function with non-zero number of arguments");
81 }
82 return {.ReqArgs: required, .PrefixSize: PrefixSize};
83}
84
85RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
86 const CXXMethodDecl *MD, const CGCallee &Callee,
87 ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam,
88 QualType ImplicitParamTy, const CallExpr *CE, CallArgList *RtlArgs,
89 llvm::CallBase **CallOrInvoke) {
90 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
91 CallArgList Args;
92 MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
93 CGF&: *this, GD: MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
94 auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
95 args: Args, type: FPT, required: CallInfo.ReqArgs, numPrefixArgs: CallInfo.PrefixSize);
96 return EmitCall(CallInfo: FnInfo, Callee, ReturnValue, Args, CallOrInvoke,
97 IsMustTail: CE && CE == MustTailCall,
98 Loc: CE ? CE->getExprLoc() : SourceLocation());
99}
100
101RValue CodeGenFunction::EmitCXXDestructorCall(
102 GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
103 llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
104 llvm::CallBase **CallOrInvoke) {
105 const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Val: Dtor.getDecl());
106
107 assert(!ThisTy.isNull());
108 assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
109 "Pointer/Object mixup");
110
111 LangAS SrcAS = ThisTy.getAddressSpace();
112 LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
113 if (SrcAS != DstAS) {
114 QualType DstTy = DtorDecl->getThisType();
115 llvm::Type *NewType = CGM.getTypes().ConvertType(T: DstTy);
116 This = performAddrSpaceCast(Src: This, DestTy: NewType);
117 }
118
119 CallArgList Args;
120 commonEmitCXXMemberOrOperatorCall(CGF&: *this, GD: Dtor, This, ImplicitParam,
121 ImplicitParamTy, CE, Args, RtlArgs: nullptr);
122 return EmitCall(CallInfo: CGM.getTypes().arrangeCXXStructorDeclaration(GD: Dtor), Callee,
123 ReturnValue: ReturnValueSlot(), Args, CallOrInvoke,
124 IsMustTail: CE && CE == MustTailCall,
125 Loc: CE ? CE->getExprLoc() : SourceLocation{});
126}
127
128RValue
129CodeGenFunction::EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
130 QualType DestroyedType = E->getDestroyedType();
131 if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
132 // Automatic Reference Counting:
133 // If the pseudo-expression names a retainable object with weak or
134 // strong lifetime, the object shall be released.
135 Expr *BaseExpr = E->getBase();
136 Address BaseValue = Address::invalid();
137 Qualifiers BaseQuals;
138
139 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
140 if (E->isArrow()) {
141 BaseValue = EmitPointerWithAlignment(Addr: BaseExpr);
142 const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
143 BaseQuals = PTy->getPointeeType().getQualifiers();
144 } else {
145 LValue BaseLV = EmitLValue(E: BaseExpr);
146 BaseValue = BaseLV.getAddress();
147 QualType BaseTy = BaseExpr->getType();
148 BaseQuals = BaseTy.getQualifiers();
149 }
150
151 switch (DestroyedType.getObjCLifetime()) {
152 case Qualifiers::OCL_None:
153 case Qualifiers::OCL_ExplicitNone:
154 case Qualifiers::OCL_Autoreleasing:
155 break;
156
157 case Qualifiers::OCL_Strong:
158 EmitARCRelease(
159 value: Builder.CreateLoad(Addr: BaseValue, IsVolatile: DestroyedType.isVolatileQualified()),
160 precise: ARCPreciseLifetime);
161 break;
162
163 case Qualifiers::OCL_Weak:
164 EmitARCDestroyWeak(addr: BaseValue);
165 break;
166 }
167 } else {
168 // C++ [expr.pseudo]p1:
169 // The result shall only be used as the operand for the function call
170 // operator (), and the result of such a call has type void. The only
171 // effect is the evaluation of the postfix-expression before the dot or
172 // arrow.
173 EmitIgnoredExpr(E: E->getBase());
174 }
175
176 return RValue::get(V: nullptr);
177}
178
179static CXXRecordDecl *getCXXRecord(const Expr *E) {
180 QualType T = E->getType();
181 if (const PointerType *PTy = T->getAs<PointerType>())
182 T = PTy->getPointeeType();
183 return T->castAsCXXRecordDecl();
184}
185
186// Note: This function also emit constructor calls to support a MSVC
187// extensions allowing explicit constructor function call.
188RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
189 ReturnValueSlot ReturnValue,
190 llvm::CallBase **CallOrInvoke) {
191 const Expr *callee = CE->getCallee()->IgnoreParens();
192
193 if (isa<BinaryOperator>(Val: callee))
194 return EmitCXXMemberPointerCallExpr(E: CE, ReturnValue, CallOrInvoke);
195
196 const MemberExpr *ME = cast<MemberExpr>(Val: callee);
197 const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: ME->getMemberDecl());
198
199 if (MD->isStatic()) {
200 // The method is static, emit it as we would a regular call.
201 CGCallee callee =
202 CGCallee::forDirect(functionPtr: CGM.GetAddrOfFunction(GD: MD), abstractInfo: GlobalDecl(MD));
203 return EmitCall(FnType: getContext().getPointerType(T: MD->getType()), Callee: callee, E: CE,
204 ReturnValue, /*Chain=*/nullptr, CallOrInvoke);
205 }
206
207 bool HasQualifier = ME->hasQualifier();
208 NestedNameSpecifier Qualifier = ME->getQualifier();
209 bool IsArrow = ME->isArrow();
210 const Expr *Base = ME->getBase();
211
212 return EmitCXXMemberOrOperatorMemberCallExpr(CE, MD, ReturnValue,
213 HasQualifier, Qualifier, IsArrow,
214 Base, CallOrInvoke);
215}
216
217RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
218 const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
219 bool HasQualifier, NestedNameSpecifier Qualifier, bool IsArrow,
220 const Expr *Base, llvm::CallBase **CallOrInvoke) {
221 assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
222
223 // Compute the object pointer.
224 bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;
225
226 const CXXMethodDecl *DevirtualizedMethod = nullptr;
227 if (CanUseVirtualCall &&
228 MD->getDevirtualizedMethod(Base, IsAppleKext: getLangOpts().AppleKext)) {
229 const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
230 DevirtualizedMethod = MD->getCorrespondingMethodInClass(RD: BestDynamicDecl);
231 assert(DevirtualizedMethod);
232 const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
233 const Expr *Inner = Base->IgnoreParenBaseCasts();
234 if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
235 MD->getReturnType().getCanonicalType())
236 // If the return types are not the same, this might be a case where more
237 // code needs to run to compensate for it. For example, the derived
238 // method might return a type that inherits form from the return
239 // type of MD and has a prefix.
240 // For now we just avoid devirtualizing these covariant cases.
241 DevirtualizedMethod = nullptr;
242 else if (getCXXRecord(E: Inner) == DevirtualizedClass)
243 // If the class of the Inner expression is where the dynamic method
244 // is defined, build the this pointer from it.
245 Base = Inner;
246 else if (getCXXRecord(E: Base) != DevirtualizedClass) {
247 // If the method is defined in a class that is not the best dynamic
248 // one or the one of the full expression, we would have to build
249 // a derived-to-base cast to compute the correct this pointer, but
250 // we don't have support for that yet, so do a virtual call.
251 DevirtualizedMethod = nullptr;
252 }
253 }
254
255 bool TrivialForCodegen =
256 MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
257 bool TrivialAssignment =
258 TrivialForCodegen &&
259 (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
260 !MD->getParent()->mayInsertExtraPadding();
261
262 // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
263 // operator before the LHS.
264 CallArgList RtlArgStorage;
265 CallArgList *RtlArgs = nullptr;
266 LValue TrivialAssignmentRHS;
267 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(Val: CE)) {
268 if (OCE->isAssignmentOp()) {
269 if (TrivialAssignment) {
270 TrivialAssignmentRHS = EmitLValue(E: CE->getArg(Arg: 1));
271 } else {
272 RtlArgs = &RtlArgStorage;
273 EmitCallArgs(Args&: *RtlArgs, Prototype: MD->getType()->castAs<FunctionProtoType>(),
274 ArgRange: drop_begin(RangeOrContainer: CE->arguments(), N: 1), AC: CE->getDirectCallee(),
275 /*ParamsToSkip*/ 0, Order: EvaluationOrder::ForceRightToLeft);
276 }
277 }
278 }
279
280 LValue This;
281 if (IsArrow) {
282 LValueBaseInfo BaseInfo;
283 TBAAAccessInfo TBAAInfo;
284 Address ThisValue = EmitPointerWithAlignment(Addr: Base, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo);
285 This = MakeAddrLValue(Addr: ThisValue, T: Base->getType()->getPointeeType(),
286 BaseInfo, TBAAInfo);
287 } else {
288 This = EmitLValue(E: Base);
289 }
290
291 if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Val: MD)) {
292 // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
293 // constructing a new complete object of type Ctor.
294 assert(!RtlArgs);
295 assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
296 CallArgList Args;
297 commonEmitCXXMemberOrOperatorCall(
298 CGF&: *this, GD: {Ctor, Ctor_Complete}, This: This.getPointer(CGF&: *this),
299 /*ImplicitParam=*/nullptr,
300 /*ImplicitParamTy=*/QualType(), CE, Args, RtlArgs: nullptr);
301
302 EmitCXXConstructorCall(D: Ctor, Type: Ctor_Complete, /*ForVirtualBase=*/false,
303 /*Delegating=*/false, This: This.getAddress(), Args,
304 Overlap: AggValueSlot::DoesNotOverlap, Loc: CE->getExprLoc(),
305 /*NewPointerIsChecked=*/false, CallOrInvoke);
306 return RValue::get(V: nullptr);
307 }
308
309 if (TrivialForCodegen) {
310 if (isa<CXXDestructorDecl>(Val: MD))
311 return RValue::get(V: nullptr);
312
313 if (TrivialAssignment) {
314 // We don't like to generate the trivial copy/move assignment operator
315 // when it isn't necessary; just produce the proper effect here.
316 // It's important that we use the result of EmitLValue here rather than
317 // emitting call arguments, in order to preserve TBAA information from
318 // the RHS.
319 LValue RHS = isa<CXXOperatorCallExpr>(Val: CE) ? TrivialAssignmentRHS
320 : EmitLValue(E: *CE->arg_begin());
321 EmitAggregateAssign(Dest: This, Src: RHS, EltTy: CE->getType());
322 return RValue::get(V: This.getPointer(CGF&: *this));
323 }
324
325 assert(MD->getParent()->mayInsertExtraPadding() &&
326 "unknown trivial member function");
327 }
328
329 // Compute the function type we're calling.
330 const CXXMethodDecl *CalleeDecl =
331 DevirtualizedMethod ? DevirtualizedMethod : MD;
332 const CGFunctionInfo *FInfo = nullptr;
333 if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(Val: CalleeDecl))
334 FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
335 GD: GlobalDecl(Dtor, Dtor_Complete));
336 else
337 FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD: CalleeDecl);
338
339 llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(Info: *FInfo);
340
341 // C++11 [class.mfct.non-static]p2:
342 // If a non-static member function of a class X is called for an object that
343 // is not of type X, or of a type derived from X, the behavior is undefined.
344 SourceLocation CallLoc;
345 ASTContext &C = getContext();
346 if (CE)
347 CallLoc = CE->getExprLoc();
348
349 SanitizerSet SkippedChecks;
350 if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(Val: CE)) {
351 auto *IOA = CMCE->getImplicitObjectArgument();
352 bool IsImplicitObjectCXXThis = IsWrappedCXXThis(E: IOA);
353 if (IsImplicitObjectCXXThis)
354 SkippedChecks.set(K: SanitizerKind::Alignment, Value: true);
355 if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(Val: IOA))
356 SkippedChecks.set(K: SanitizerKind::Null, Value: true);
357 }
358
359 if (sanitizePerformTypeCheck())
360 EmitTypeCheck(TCK: CodeGenFunction::TCK_MemberCall, Loc: CallLoc,
361 V: This.emitRawPointer(CGF&: *this),
362 Type: C.getCanonicalTagType(TD: CalleeDecl->getParent()),
363 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
364
365 // C++ [class.virtual]p12:
366 // Explicit qualification with the scope operator (5.1) suppresses the
367 // virtual call mechanism.
368 //
369 // We also don't emit a virtual call if the base expression has a record type
370 // because then we know what the type is.
371 bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
372
373 if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(Val: CalleeDecl)) {
374 assert(CE->arguments().empty() &&
375 "Destructor shouldn't have explicit parameters");
376 assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
377 if (UseVirtualCall) {
378 CGM.getCXXABI().EmitVirtualDestructorCall(
379 CGF&: *this, Dtor, DtorType: Dtor_Complete, This: This.getAddress(),
380 E: cast<CXXMemberCallExpr>(Val: CE), CallOrInvoke);
381 } else {
382 GlobalDecl GD(Dtor, Dtor_Complete);
383 CGCallee Callee;
384 if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
385 Callee = BuildAppleKextVirtualCall(MD: Dtor, Qual: Qualifier, Ty);
386 else if (!DevirtualizedMethod)
387 Callee =
388 CGCallee::forDirect(functionPtr: CGM.getAddrOfCXXStructor(GD, FnInfo: FInfo, FnType: Ty), abstractInfo: GD);
389 else {
390 Callee = CGCallee::forDirect(functionPtr: CGM.GetAddrOfFunction(GD, Ty), abstractInfo: GD);
391 }
392
393 QualType ThisTy =
394 IsArrow ? Base->getType()->getPointeeType() : Base->getType();
395 EmitCXXDestructorCall(Dtor: GD, Callee, This: This.getPointer(CGF&: *this), ThisTy,
396 /*ImplicitParam=*/nullptr,
397 /*ImplicitParamTy=*/QualType(), CE, CallOrInvoke);
398 }
399 return RValue::get(V: nullptr);
400 }
401
402 // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
403 // 'CalleeDecl' instead.
404
405 CGCallee Callee;
406 if (UseVirtualCall) {
407 Callee = CGCallee::forVirtual(CE, MD, Addr: This.getAddress(), FTy: Ty);
408 } else {
409 if (SanOpts.has(K: SanitizerKind::CFINVCall) &&
410 MD->getParent()->isDynamicClass()) {
411 llvm::Value *VTable;
412 const CXXRecordDecl *RD;
413 std::tie(args&: VTable, args&: RD) = CGM.getCXXABI().LoadVTablePtr(
414 CGF&: *this, This: This.getAddress(), RD: CalleeDecl->getParent());
415 EmitVTablePtrCheckForCall(RD, VTable, TCK: CFITCK_NVCall, Loc: CE->getBeginLoc());
416 }
417
418 if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
419 Callee = BuildAppleKextVirtualCall(MD, Qual: Qualifier, Ty);
420 else if (!DevirtualizedMethod)
421 Callee =
422 CGCallee::forDirect(functionPtr: CGM.GetAddrOfFunction(GD: MD, Ty), abstractInfo: GlobalDecl(MD));
423 else {
424 Callee =
425 CGCallee::forDirect(functionPtr: CGM.GetAddrOfFunction(GD: DevirtualizedMethod, Ty),
426 abstractInfo: GlobalDecl(DevirtualizedMethod));
427 }
428 }
429
430 if (MD->isVirtual()) {
431 Address NewThisAddr =
432 CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
433 CGF&: *this, GD: CalleeDecl, This: This.getAddress(), VirtualCall: UseVirtualCall);
434 This.setAddress(NewThisAddr);
435 }
436
437 return EmitCXXMemberOrOperatorCall(
438 MD: CalleeDecl, Callee, ReturnValue, This: This.getPointer(CGF&: *this),
439 /*ImplicitParam=*/nullptr, ImplicitParamTy: QualType(), CE, RtlArgs, CallOrInvoke);
440}
441
442RValue
443CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
444 ReturnValueSlot ReturnValue,
445 llvm::CallBase **CallOrInvoke) {
446 const BinaryOperator *BO =
447 cast<BinaryOperator>(Val: E->getCallee()->IgnoreParens());
448 const Expr *BaseExpr = BO->getLHS();
449 const Expr *MemFnExpr = BO->getRHS();
450
451 const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
452 const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
453 const auto *RD = MPT->getMostRecentCXXRecordDecl();
454
455 // Emit the 'this' pointer.
456 Address This = Address::invalid();
457 if (BO->getOpcode() == BO_PtrMemI)
458 This = EmitPointerWithAlignment(Addr: BaseExpr, BaseInfo: nullptr, TBAAInfo: nullptr, IsKnownNonNull: KnownNonNull);
459 else
460 This = EmitLValue(E: BaseExpr, IsKnownNonNull: KnownNonNull).getAddress();
461
462 CanQualType ClassType = CGM.getContext().getCanonicalTagType(TD: RD);
463 EmitTypeCheck(TCK: TCK_MemberCall, Loc: E->getExprLoc(), V: This.emitRawPointer(CGF&: *this),
464 Type: ClassType);
465
466 // Get the member function pointer.
467 llvm::Value *MemFnPtr = EmitScalarExpr(E: MemFnExpr);
468
469 // Ask the ABI to load the callee. Note that This is modified.
470 llvm::Value *ThisPtrForCall = nullptr;
471 CGCallee Callee = CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(
472 CGF&: *this, E: BO, This, ThisPtrForCall, MemPtr: MemFnPtr, MPT);
473
474 CallArgList Args;
475
476 QualType ThisType = getContext().getPointerType(T: ClassType);
477
478 // Push the this ptr.
479 Args.add(rvalue: RValue::get(V: ThisPtrForCall), type: ThisType);
480
481 RequiredArgs required = RequiredArgs::forPrototypePlus(prototype: FPT, additional: 1);
482
483 // And the rest of the call args
484 EmitCallArgs(Args, Prototype: FPT, ArgRange: E->arguments());
485 return EmitCall(CallInfo: CGM.getTypes().arrangeCXXMethodCall(args: Args, type: FPT, required,
486 /*PrefixSize=*/numPrefixArgs: 0),
487 Callee, ReturnValue, Args, CallOrInvoke, IsMustTail: E == MustTailCall,
488 Loc: E->getExprLoc());
489}
490
491RValue CodeGenFunction::EmitCXXOperatorMemberCallExpr(
492 const CXXOperatorCallExpr *E, const CXXMethodDecl *MD,
493 ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke) {
494 assert(MD->isImplicitObjectMemberFunction() &&
495 "Trying to emit a member call expr on a static method!");
496 return EmitCXXMemberOrOperatorMemberCallExpr(
497 CE: E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/std::nullopt,
498 /*IsArrow=*/false, Base: E->getArg(Arg: 0), CallOrInvoke);
499}
500
501RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
502 ReturnValueSlot ReturnValue,
503 llvm::CallBase **CallOrInvoke) {
504 // Emit as a device kernel call if CUDA device code is to be generated.
505 // TODO: implement for HIP
506 if (!getLangOpts().HIP && getLangOpts().CUDAIsDevice)
507 return CGM.getCUDARuntime().EmitCUDADeviceKernelCallExpr(
508 CGF&: *this, E, ReturnValue, CallOrInvoke);
509 return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(CGF&: *this, E, ReturnValue,
510 CallOrInvoke);
511}
512
513static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
514 Address DestPtr,
515 const CXXRecordDecl *Base) {
516 if (Base->isEmpty())
517 return;
518
519 DestPtr = DestPtr.withElementType(ElemTy: CGF.Int8Ty);
520
521 const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(D: Base);
522 CharUnits NVSize = Layout.getNonVirtualSize();
523
524 // We cannot simply zero-initialize the entire base sub-object if vbptrs are
525 // present, they are initialized by the most derived class before calling the
526 // constructor.
527 SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
528 Stores.emplace_back(Args: CharUnits::Zero(), Args&: NVSize);
529
530 // Each store is split by the existence of a vbptr.
531 CharUnits VBPtrWidth = CGF.getPointerSize();
532 std::vector<CharUnits> VBPtrOffsets =
533 CGF.CGM.getCXXABI().getVBPtrOffsets(RD: Base);
534 for (CharUnits VBPtrOffset : VBPtrOffsets) {
535 // Stop before we hit any virtual base pointers located in virtual bases.
536 if (VBPtrOffset >= NVSize)
537 break;
538 std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
539 CharUnits LastStoreOffset = LastStore.first;
540
541 CharUnits SplitBeforeOffset = LastStoreOffset;
542 CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
543 assert(!SplitBeforeSize.isNegative() && "negative store size!");
544 if (!SplitBeforeSize.isZero())
545 Stores.emplace_back(Args&: SplitBeforeOffset, Args&: SplitBeforeSize);
546
547 CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
548 CharUnits SplitAfterSize = NVSize - SplitAfterOffset;
549 assert(!SplitAfterSize.isNegative() && "negative store size!");
550 if (!SplitAfterSize.isZero())
551 Stores.emplace_back(Args&: SplitAfterOffset, Args&: SplitAfterSize);
552 }
553
554 // If the type contains a pointer to data member we can't memset it to zero.
555 // Instead, create a null constant and copy it to the destination.
556 // TODO: there are other patterns besides zero that we can usefully memset,
557 // like -1, which happens to be the pattern used by member-pointers.
558 // TODO: isZeroInitializable can be over-conservative in the case where a
559 // virtual base contains a member pointer.
560 llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Record: Base);
561 if (!NullConstantForBase->isNullValue()) {
562 llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
563 CGF.CGM.getModule(), NullConstantForBase->getType(),
564 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
565 NullConstantForBase, Twine());
566
567 CharUnits Align =
568 std::max(a: Layout.getNonVirtualAlignment(), b: DestPtr.getAlignment());
569 NullVariable->setAlignment(Align.getAsAlign());
570
571 Address SrcPtr(NullVariable, CGF.Int8Ty, Align);
572
573 // Get and call the appropriate llvm.memcpy overload.
574 for (std::pair<CharUnits, CharUnits> Store : Stores) {
575 CharUnits StoreOffset = Store.first;
576 CharUnits StoreSize = Store.second;
577 llvm::Value *StoreSizeVal = CGF.CGM.getSize(numChars: StoreSize);
578 CGF.Builder.CreateMemCpy(
579 Dest: CGF.Builder.CreateConstInBoundsByteGEP(Addr: DestPtr, Offset: StoreOffset),
580 Src: CGF.Builder.CreateConstInBoundsByteGEP(Addr: SrcPtr, Offset: StoreOffset),
581 Size: StoreSizeVal);
582 }
583
584 // Otherwise, just memset the whole thing to zero. This is legal
585 // because in LLVM, all default initializers (other than the ones we just
586 // handled above) are guaranteed to have a bit pattern of all zeros.
587 } else {
588 for (std::pair<CharUnits, CharUnits> Store : Stores) {
589 CharUnits StoreOffset = Store.first;
590 CharUnits StoreSize = Store.second;
591 llvm::Value *StoreSizeVal = CGF.CGM.getSize(numChars: StoreSize);
592 CGF.Builder.CreateMemSet(
593 Dest: CGF.Builder.CreateConstInBoundsByteGEP(Addr: DestPtr, Offset: StoreOffset),
594 Value: CGF.Builder.getInt8(C: 0), Size: StoreSizeVal);
595 }
596 }
597}
598
599void CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
600 AggValueSlot Dest) {
601 assert(!Dest.isIgnored() && "Must have a destination!");
602 const CXXConstructorDecl *CD = E->getConstructor();
603
604 // If we require zero initialization before (or instead of) calling the
605 // constructor, as can be the case with a non-user-provided default
606 // constructor, emit the zero initialization now, unless destination is
607 // already zeroed.
608 if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
609 switch (E->getConstructionKind()) {
610 case CXXConstructionKind::Delegating:
611 case CXXConstructionKind::Complete:
612 EmitNullInitialization(DestPtr: Dest.getAddress(), Ty: E->getType());
613 break;
614 case CXXConstructionKind::VirtualBase:
615 case CXXConstructionKind::NonVirtualBase:
616 EmitNullBaseClassInitialization(CGF&: *this, DestPtr: Dest.getAddress(),
617 Base: CD->getParent());
618 break;
619 }
620 }
621
622 // If this is a call to a trivial default constructor, do nothing.
623 if (CD->isTrivial() && CD->isDefaultConstructor())
624 return;
625
626 // Elide the constructor if we're constructing from a temporary.
627 if (getLangOpts().ElideConstructors && E->isElidable()) {
628 // FIXME: This only handles the simplest case, where the source object
629 // is passed directly as the first argument to the constructor.
630 // This should also handle stepping though implicit casts and
631 // conversion sequences which involve two steps, with a
632 // conversion operator followed by a converting constructor.
633 const Expr *SrcObj = E->getArg(Arg: 0);
634 assert(SrcObj->isTemporaryObject(getContext(), CD->getParent()));
635 assert(
636 getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
637 EmitAggExpr(E: SrcObj, AS: Dest);
638 return;
639 }
640
641 if (const ArrayType *arrayType = getContext().getAsArrayType(T: E->getType())) {
642 EmitCXXAggrConstructorCall(D: CD, ArrayTy: arrayType, ArrayPtr: Dest.getAddress(), E,
643 NewPointerIsChecked: Dest.isSanitizerChecked());
644 } else {
645 CXXCtorType Type = Ctor_Complete;
646 bool ForVirtualBase = false;
647 bool Delegating = false;
648
649 switch (E->getConstructionKind()) {
650 case CXXConstructionKind::Delegating:
651 // We should be emitting a constructor; GlobalDecl will assert this
652 Type = CurGD.getCtorType();
653 Delegating = true;
654 break;
655
656 case CXXConstructionKind::Complete:
657 Type = Ctor_Complete;
658 break;
659
660 case CXXConstructionKind::VirtualBase:
661 ForVirtualBase = true;
662 [[fallthrough]];
663
664 case CXXConstructionKind::NonVirtualBase:
665 Type = Ctor_Base;
666 }
667
668 // Call the constructor.
669 EmitCXXConstructorCall(D: CD, Type, ForVirtualBase, Delegating, ThisAVS: Dest, E);
670 }
671}
672
673void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
674 const Expr *Exp) {
675 if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Val: Exp))
676 Exp = E->getSubExpr();
677 assert(isa<CXXConstructExpr>(Exp) &&
678 "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
679 const CXXConstructExpr *E = cast<CXXConstructExpr>(Val: Exp);
680 const CXXConstructorDecl *CD = E->getConstructor();
681 RunCleanupsScope Scope(*this);
682
683 // If we require zero initialization before (or instead of) calling the
684 // constructor, as can be the case with a non-user-provided default
685 // constructor, emit the zero initialization now.
686 // FIXME. Do I still need this for a copy ctor synthesis?
687 if (E->requiresZeroInitialization())
688 EmitNullInitialization(DestPtr: Dest, Ty: E->getType());
689
690 assert(!getContext().getAsConstantArrayType(E->getType()) &&
691 "EmitSynthesizedCXXCopyCtor - Copied-in Array");
692 EmitSynthesizedCXXCopyCtorCall(D: CD, This: Dest, Src, E);
693}
694
695static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
696 const CXXNewExpr *E) {
697 if (!E->isArray())
698 return CharUnits::Zero();
699
700 // No cookie is required if the operator new[] being used is the
701 // reserved placement operator new[].
702 if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
703 return CharUnits::Zero();
704
705 return CGF.CGM.getCXXABI().GetArrayCookieSize(expr: E);
706}
707
708static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
709 const CXXNewExpr *e,
710 unsigned minElements,
711 llvm::Value *&numElements,
712 llvm::Value *&sizeWithoutCookie) {
713 QualType type = e->getAllocatedType();
714
715 if (!e->isArray()) {
716 CharUnits typeSize = CGF.getContext().getTypeSizeInChars(T: type);
717 sizeWithoutCookie =
718 llvm::ConstantInt::get(Ty: CGF.SizeTy, V: typeSize.getQuantity());
719 return sizeWithoutCookie;
720 }
721
722 // The width of size_t.
723 unsigned sizeWidth = CGF.SizeTy->getBitWidth();
724
725 // Figure out the cookie size.
726 llvm::APInt cookieSize(sizeWidth,
727 CalculateCookiePadding(CGF, E: e).getQuantity());
728
729 // Emit the array size expression.
730 // We multiply the size of all dimensions for NumElements.
731 // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
732 numElements = ConstantEmitter(CGF).tryEmitAbstract(
733 E: *e->getArraySize(), T: (*e->getArraySize())->getType());
734 if (!numElements)
735 numElements = CGF.EmitScalarExpr(E: *e->getArraySize());
736 assert(isa<llvm::IntegerType>(numElements->getType()));
737
738 // The number of elements can be have an arbitrary integer type;
739 // essentially, we need to multiply it by a constant factor, add a
740 // cookie size, and verify that the result is representable as a
741 // size_t. That's just a gloss, though, and it's wrong in one
742 // important way: if the count is negative, it's an error even if
743 // the cookie size would bring the total size >= 0.
744 bool isSigned =
745 (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
746 llvm::IntegerType *numElementsType =
747 cast<llvm::IntegerType>(Val: numElements->getType());
748 unsigned numElementsWidth = numElementsType->getBitWidth();
749
750 // Compute the constant factor.
751 llvm::APInt arraySizeMultiplier(sizeWidth, 1);
752 while (const ConstantArrayType *CAT =
753 CGF.getContext().getAsConstantArrayType(T: type)) {
754 type = CAT->getElementType();
755 arraySizeMultiplier *= CAT->getSize();
756 }
757
758 CharUnits typeSize = CGF.getContext().getTypeSizeInChars(T: type);
759 llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
760 typeSizeMultiplier *= arraySizeMultiplier;
761
762 // This will be a size_t.
763 llvm::Value *size;
764
765 // If someone is doing 'new int[42]' there is no need to do a dynamic check.
766 // Don't bloat the -O0 code.
767 if (llvm::ConstantInt *numElementsC =
768 dyn_cast<llvm::ConstantInt>(Val: numElements)) {
769 const llvm::APInt &count = numElementsC->getValue();
770
771 bool hasAnyOverflow = false;
772
773 // If 'count' was a negative number, it's an overflow.
774 if (isSigned && count.isNegative())
775 hasAnyOverflow = true;
776
777 // We want to do all this arithmetic in size_t. If numElements is
778 // wider than that, check whether it's already too big, and if so,
779 // overflow.
780 else if (numElementsWidth > sizeWidth &&
781 numElementsWidth - sizeWidth > count.countl_zero())
782 hasAnyOverflow = true;
783
784 // Okay, compute a count at the right width.
785 llvm::APInt adjustedCount = count.zextOrTrunc(width: sizeWidth);
786
787 // If there is a brace-initializer, we cannot allocate fewer elements than
788 // there are initializers. If we do, that's treated like an overflow.
789 if (adjustedCount.ult(RHS: minElements))
790 hasAnyOverflow = true;
791
792 // Scale numElements by that. This might overflow, but we don't
793 // care because it only overflows if allocationSize does, too, and
794 // if that overflows then we shouldn't use this.
795 numElements =
796 llvm::ConstantInt::get(Ty: CGF.SizeTy, V: adjustedCount * arraySizeMultiplier);
797
798 // Compute the size before cookie, and track whether it overflowed.
799 bool overflow;
800 llvm::APInt allocationSize =
801 adjustedCount.umul_ov(RHS: typeSizeMultiplier, Overflow&: overflow);
802 hasAnyOverflow |= overflow;
803
804 // Add in the cookie, and check whether it's overflowed.
805 if (cookieSize != 0) {
806 // Save the current size without a cookie. This shouldn't be
807 // used if there was overflow.
808 sizeWithoutCookie = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: allocationSize);
809
810 allocationSize = allocationSize.uadd_ov(RHS: cookieSize, Overflow&: overflow);
811 hasAnyOverflow |= overflow;
812 }
813
814 // On overflow, produce a -1 so operator new will fail.
815 if (hasAnyOverflow) {
816 size = llvm::Constant::getAllOnesValue(Ty: CGF.SizeTy);
817 } else {
818 size = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: allocationSize);
819 }
820
821 // Otherwise, we might need to use the overflow intrinsics.
822 } else {
823 // There are up to five conditions we need to test for:
824 // 1) if isSigned, we need to check whether numElements is negative;
825 // 2) if numElementsWidth > sizeWidth, we need to check whether
826 // numElements is larger than something representable in size_t;
827 // 3) if minElements > 0, we need to check whether numElements is smaller
828 // than that.
829 // 4) we need to compute
830 // sizeWithoutCookie := numElements * typeSizeMultiplier
831 // and check whether it overflows; and
832 // 5) if we need a cookie, we need to compute
833 // size := sizeWithoutCookie + cookieSize
834 // and check whether it overflows.
835
836 llvm::Value *hasOverflow = nullptr;
837
838 // If numElementsWidth > sizeWidth, then one way or another, we're
839 // going to have to do a comparison for (2), and this happens to
840 // take care of (1), too.
841 if (numElementsWidth > sizeWidth) {
842 llvm::APInt threshold =
843 llvm::APInt::getOneBitSet(numBits: numElementsWidth, BitNo: sizeWidth);
844
845 llvm::Value *thresholdV =
846 llvm::ConstantInt::get(Ty: numElementsType, V: threshold);
847
848 hasOverflow = CGF.Builder.CreateICmpUGE(LHS: numElements, RHS: thresholdV);
849 numElements = CGF.Builder.CreateTrunc(V: numElements, DestTy: CGF.SizeTy);
850
851 // Otherwise, if we're signed, we want to sext up to size_t.
852 } else if (isSigned) {
853 if (numElementsWidth < sizeWidth)
854 numElements = CGF.Builder.CreateSExt(V: numElements, DestTy: CGF.SizeTy);
855
856 // If there's a non-1 type size multiplier, then we can do the
857 // signedness check at the same time as we do the multiply
858 // because a negative number times anything will cause an
859 // unsigned overflow. Otherwise, we have to do it here. But at least
860 // in this case, we can subsume the >= minElements check.
861 if (typeSizeMultiplier == 1)
862 hasOverflow = CGF.Builder.CreateICmpSLT(
863 LHS: numElements, RHS: llvm::ConstantInt::get(Ty: CGF.SizeTy, V: minElements));
864
865 // Otherwise, zext up to size_t if necessary.
866 } else if (numElementsWidth < sizeWidth) {
867 numElements = CGF.Builder.CreateZExt(V: numElements, DestTy: CGF.SizeTy);
868 }
869
870 assert(numElements->getType() == CGF.SizeTy);
871
872 if (minElements) {
873 // Don't allow allocation of fewer elements than we have initializers.
874 if (!hasOverflow) {
875 hasOverflow = CGF.Builder.CreateICmpULT(
876 LHS: numElements, RHS: llvm::ConstantInt::get(Ty: CGF.SizeTy, V: minElements));
877 } else if (numElementsWidth > sizeWidth) {
878 // The other existing overflow subsumes this check.
879 // We do an unsigned comparison, since any signed value < -1 is
880 // taken care of either above or below.
881 hasOverflow = CGF.Builder.CreateOr(
882 LHS: hasOverflow,
883 RHS: CGF.Builder.CreateICmpULT(
884 LHS: numElements, RHS: llvm::ConstantInt::get(Ty: CGF.SizeTy, V: minElements)));
885 }
886 }
887
888 size = numElements;
889
890 // Multiply by the type size if necessary. This multiplier
891 // includes all the factors for nested arrays.
892 //
893 // This step also causes numElements to be scaled up by the
894 // nested-array factor if necessary. Overflow on this computation
895 // can be ignored because the result shouldn't be used if
896 // allocation fails.
897 if (typeSizeMultiplier != 1) {
898 llvm::Function *umul_with_overflow =
899 CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::umul_with_overflow, Tys: CGF.SizeTy);
900
901 llvm::Value *tsmV =
902 llvm::ConstantInt::get(Ty: CGF.SizeTy, V: typeSizeMultiplier);
903 llvm::Value *result =
904 CGF.Builder.CreateCall(Callee: umul_with_overflow, Args: {size, tsmV});
905
906 llvm::Value *overflowed = CGF.Builder.CreateExtractValue(Agg: result, Idxs: 1);
907 if (hasOverflow)
908 hasOverflow = CGF.Builder.CreateOr(LHS: hasOverflow, RHS: overflowed);
909 else
910 hasOverflow = overflowed;
911
912 size = CGF.Builder.CreateExtractValue(Agg: result, Idxs: 0);
913
914 // Also scale up numElements by the array size multiplier.
915 if (arraySizeMultiplier != 1) {
916 // If the base element type size is 1, then we can re-use the
917 // multiply we just did.
918 if (typeSize.isOne()) {
919 assert(arraySizeMultiplier == typeSizeMultiplier);
920 numElements = size;
921
922 // Otherwise we need a separate multiply.
923 } else {
924 llvm::Value *asmV =
925 llvm::ConstantInt::get(Ty: CGF.SizeTy, V: arraySizeMultiplier);
926 numElements = CGF.Builder.CreateMul(LHS: numElements, RHS: asmV);
927 }
928 }
929 } else {
930 // numElements doesn't need to be scaled.
931 assert(arraySizeMultiplier == 1);
932 }
933
934 // Add in the cookie size if necessary.
935 if (cookieSize != 0) {
936 sizeWithoutCookie = size;
937
938 llvm::Function *uadd_with_overflow =
939 CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::uadd_with_overflow, Tys: CGF.SizeTy);
940
941 llvm::Value *cookieSizeV = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: cookieSize);
942 llvm::Value *result =
943 CGF.Builder.CreateCall(Callee: uadd_with_overflow, Args: {size, cookieSizeV});
944
945 llvm::Value *overflowed = CGF.Builder.CreateExtractValue(Agg: result, Idxs: 1);
946 if (hasOverflow)
947 hasOverflow = CGF.Builder.CreateOr(LHS: hasOverflow, RHS: overflowed);
948 else
949 hasOverflow = overflowed;
950
951 size = CGF.Builder.CreateExtractValue(Agg: result, Idxs: 0);
952 }
953
954 // If we had any possibility of dynamic overflow, make a select to
955 // overwrite 'size' with an all-ones value, which should cause
956 // operator new to throw.
957 if (hasOverflow)
958 size = CGF.Builder.CreateSelect(
959 C: hasOverflow, True: llvm::Constant::getAllOnesValue(Ty: CGF.SizeTy), False: size);
960 }
961
962 if (cookieSize == 0)
963 sizeWithoutCookie = size;
964 else
965 assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
966
967 return size;
968}
969
970static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
971 QualType AllocType, Address NewPtr,
972 AggValueSlot::Overlap_t MayOverlap) {
973 // FIXME: Refactor with EmitExprAsInit.
974 switch (CGF.getEvaluationKind(T: AllocType)) {
975 case TEK_Scalar:
976 CGF.EmitScalarInit(init: Init, D: nullptr, lvalue: CGF.MakeAddrLValue(Addr: NewPtr, T: AllocType),
977 capturedByInit: false);
978 return;
979 case TEK_Complex:
980 CGF.EmitComplexExprIntoLValue(E: Init, dest: CGF.MakeAddrLValue(Addr: NewPtr, T: AllocType),
981 /*isInit*/ true);
982 return;
983 case TEK_Aggregate: {
984 AggValueSlot Slot = AggValueSlot::forAddr(
985 addr: NewPtr, quals: AllocType.getQualifiers(), isDestructed: AggValueSlot::IsDestructed,
986 needsGC: AggValueSlot::DoesNotNeedGCBarriers, isAliased: AggValueSlot::IsNotAliased,
987 mayOverlap: MayOverlap, isZeroed: AggValueSlot::IsNotZeroed,
988 isChecked: AggValueSlot::IsSanitizerChecked);
989 CGF.EmitAggExpr(E: Init, AS: Slot);
990 return;
991 }
992 }
993 llvm_unreachable("bad evaluation kind");
994}
995
/// Emit the initialization for a 'new[]' expression.
///
/// \param E the new-expression being emitted.
/// \param ElementType the base (non-array) element type being allocated.
/// \param ElementTy the LLVM type corresponding to \p ElementType.
/// \param BeginPtr the start of the allocated (cookie-adjusted) storage.
/// \param NumElements total number of base elements to initialize.
/// \param AllocSizeWithoutCookie allocation size excluding any array cookie;
///        used to size the memset fast path.
///
/// Strategy, in order: emit any explicit initializer-list / string-literal
/// elements one by one, then handle the trailing elements via (a) a
/// constructor-call loop, (b) a single memset when zero-initialization
/// suffices, or (c) a generic element-at-a-time loop with a PHI over the
/// current element pointer. Partial-destruction EH cleanups are threaded
/// through an 'array.init.end' alloca so a throwing initializer destroys
/// exactly the elements constructed so far.
void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  // Number of elements consumed by the explicit initializer list (scaled by
  // nested-array counts for multidimensional new).
  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  CleanupDeactivationScope deactivation(*this);
  bool pushedCleanup = false;

  CharUnits ElementSize = getContext().getTypeSizeInChars(T: ElementType);
  CharUnits ElementAlign =
      BeginPtr.getAlignment().alignmentOfArrayElement(elementSize: ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(T: ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          Ty: RemainingSize->getType(),
          V: getContext().getTypeSizeInChars(T: ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(LHS: RemainingSize, RHS: InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(Dest: CurPtr, Value: Builder.getInt8(C: 0), Size: RemainingSize, IsVolatile: false);
    return true;
  };

  // Classify the initializer form. For non-InitListExpr initializers, look
  // through parens/implicit casts to find a paren-list init, string literal,
  // or @encode expression.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(Val: Init);
  const CXXParenListInitExpr *CPLIE = nullptr;
  const StringLiteral *SL = nullptr;
  const ObjCEncodeExpr *OCEE = nullptr;
  const Expr *IgnoreParen = nullptr;
  if (!ILE) {
    IgnoreParen = Init->IgnoreParenImpCasts();
    CPLIE = dyn_cast<CXXParenListInitExpr>(Val: IgnoreParen);
    SL = dyn_cast<StringLiteral>(Val: IgnoreParen);
    OCEE = dyn_cast<ObjCEncodeExpr>(Val: IgnoreParen);
  }

  // If the initializer is an initializer list, first do the explicit elements.
  if (ILE || CPLIE || SL || OCEE) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if ((ILE && ILE->isStringLiteralInit()) || SL || OCEE) {
      if (!ILE)
        Init = IgnoreParen;
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
      AggValueSlot Slot = AggValueSlot::forAddr(
          addr: CurPtr, quals: ElementType.getQualifiers(), isDestructed: AggValueSlot::IsDestructed,
          needsGC: AggValueSlot::DoesNotNeedGCBarriers, isAliased: AggValueSlot::IsNotAliased,
          mayOverlap: AggValueSlot::DoesNotOverlap, isZeroed: AggValueSlot::IsNotZeroed,
          isChecked: AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(E: ILE ? ILE->getInit(Init: 0) : Init, AS: Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(Val: Init->getType()->getAsArrayTypeUnsafe())
              ->getZExtSize();
      CurPtr = Builder.CreateConstInBoundsGEP(Addr: CurPtr, Index: InitListElements,
                                              Name: "string.init.end");

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(Val: NumElements);
      if (!ConstNum || !ConstNum->equalsInt(V: InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    ArrayRef<const Expr *> InitExprs =
        ILE ? ILE->inits() : CPLIE->getInitExprs();
    InitListElements = InitExprs.size();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            Val: AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(T: AllocType);
      CurPtr = CurPtr.withElementType(ElemTy: ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CA: CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (DtorKind) {
      AllocaTrackerRAII AllocaTracker(*this);
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex. Therefore we go through an
      // alloca.
      llvm::Instruction *DominatingIP =
          Builder.CreateFlagLoad(Addr: llvm::ConstantInt::getNullValue(Ty: Int8PtrTy));
      EndOfInit = CreateTempAlloca(Ty: BeginPtr.getType(), align: getPointerAlign(),
                                   Name: "array.init.end");
      pushIrregularPartialArrayCleanup(arrayBegin: BeginPtr.emitRawPointer(CGF&: *this),
                                       arrayEndPointer: EndOfInit, elementType: ElementType, elementAlignment: ElementAlign,
                                       destroyer: getDestroyer(destructionKind: DtorKind));
      cast<EHCleanupScope>(Val&: *EHStack.find(sp: EHStack.stable_begin()))
          .AddAuxAllocas(Allocas: AllocaTracker.Take());
      DeferredDeactivationCleanupStack.push_back(
          Elt: {.Cleanup: EHStack.stable_begin(), .DominatingIP: DominatingIP});
      pushedCleanup = true;
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    unsigned i = 0;
    for (const Expr *IE : InitExprs) {
      // Tell the cleanup that it needs to destroy up to this
      // element. TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        Builder.CreateStore(Val: CurPtr.emitRawPointer(CGF&: *this), Addr: EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(CGF&: *this, Init: IE, AllocType: IE->getType(), NewPtr: CurPtr,
                              MayOverlap: AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(Ty: CurPtr.getElementType(),
                                                 Ptr: CurPtr.emitRawPointer(CGF&: *this),
                                                 IdxList: Builder.getSize(N: 1),
                                                 Name: "array.exp.next"),
                       CurPtr.getElementType(),
                       StartAlign.alignmentAtOffset(offset: (++i) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE ? ILE->getArrayFiller() : CPLIE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Val: Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = CurPtr.withElementType(ElemTy: BeginPtr.getElementType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(Val: NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Val: Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If new expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(Val: CurPtr.emitRawPointer(CGF&: *this), Addr: EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          LHS: NumElements,
          RHS: llvm::ConstantInt::get(Ty: NumElements->getType(), V: InitListElements));
    EmitCXXAggrConstructorCall(D: Ctor, NumElements, ArrayPtr: CurPtr, E: CCE,
                               /*NewPointerIsChecked*/ true,
                               ZeroInitialization: CCE->requiresZeroInitialization());
    if (getContext().getTargetInfo().emitVectorDeletingDtors(
            getContext().getLangOpts())) {
      CXXDestructorDecl *Dtor = Ctor->getParent()->getDestructor();
      if (Dtor && Dtor->isVirtual())
        CGM.requireVectorDestructorDefinition(RD: Ctor->getParent());
    }
    return;
  }

  // If this is value-initialization, we can usually use memset.
  // NOTE: IVIE is declared outside the 'if' so that 'Init' can keep pointing
  // at it after the block ends.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Val: Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Val: Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Val: Init)) {
    if (const RecordType *RType =
            ILE->getType()->getAsCanonical<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        const RecordDecl *RD = RType->getDecl()->getDefinitionOrSelf();
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RD->fields())
          if (!Field->isUnnamedBitField())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(Val: ILE->getInit(Init: i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

  // Fallback: generic per-element initialization loop over the remaining
  // elements, driven by a PHI over the current element pointer.

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock(name: "new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock(name: "new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr = Builder.CreateInBoundsGEP(
      Ty: BeginPtr.getElementType(), Ptr: BeginPtr.emitRawPointer(CGF&: *this), IdxList: NumElements,
      Name: "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty = Builder.CreateICmpEQ(LHS: CurPtr.emitRawPointer(CGF&: *this),
                                                RHS: EndPtr, Name: "array.isempty");
    Builder.CreateCondBr(Cond: IsEmpty, True: ContBB, False: LoopBB);
  }

  // Enter the loop.
  EmitBlock(BB: LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
      Builder.CreatePHI(Ty: CurPtr.getType(), NumReservedValues: 2, Name: "array.cur");
  CurPtrPhi->addIncoming(V: CurPtr.emitRawPointer(CGF&: *this), BB: EntryBB);

  CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(Val: CurPtr.emitRawPointer(CGF&: *this), Addr: EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!pushedCleanup && needsEHCleanup(kind: DtorKind)) {
    llvm::Instruction *DominatingIP =
        Builder.CreateFlagLoad(Addr: llvm::ConstantInt::getNullValue(Ty: Int8PtrTy));
    pushRegularPartialArrayCleanup(arrayBegin: BeginPtr.emitRawPointer(CGF&: *this),
                                   arrayEnd: CurPtr.emitRawPointer(CGF&: *this), elementType: ElementType,
                                   elementAlignment: ElementAlign, destroyer: getDestroyer(destructionKind: DtorKind));
    DeferredDeactivationCleanupStack.push_back(
        Elt: {.Cleanup: EHStack.stable_begin(), .DominatingIP: DominatingIP});
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(CGF&: *this, Init, AllocType: Init->getType(), NewPtr: CurPtr,
                          MayOverlap: AggValueSlot::DoesNotOverlap);

  // Leave the Cleanup if we entered one.
  deactivation.ForceDeactivate();

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr = Builder.CreateConstInBoundsGEP1_32(
      Ty: ElementTy, Ptr: CurPtr.emitRawPointer(CGF&: *this), Idx0: 1, Name: "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(LHS: NextPtr, RHS: EndPtr, Name: "array.atend");
  Builder.CreateCondBr(Cond: IsEnd, True: ContBB, False: LoopBB);
  CurPtrPhi->addIncoming(V: NextPtr, BB: Builder.GetInsertBlock());

  EmitBlock(BB: ContBB);
}
1321
1322static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
1323 QualType ElementType, llvm::Type *ElementTy,
1324 Address NewPtr, llvm::Value *NumElements,
1325 llvm::Value *AllocSizeWithoutCookie) {
1326 ApplyDebugLocation DL(CGF, E);
1327 if (E->isArray())
1328 CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, BeginPtr: NewPtr, NumElements,
1329 AllocSizeWithoutCookie);
1330 else if (const Expr *Init = E->getInitializer())
1331 StoreAnyExprIntoOneUnit(CGF, Init, AllocType: E->getAllocatedType(), NewPtr,
1332 MayOverlap: AggValueSlot::DoesNotOverlap);
1333}
1334
1335/// Emit a call to an operator new or operator delete function, as implicitly
1336/// created by new-expressions and delete-expressions.
1337static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
1338 const FunctionDecl *CalleeDecl,
1339 const FunctionProtoType *CalleeType,
1340 const CallArgList &Args) {
1341 llvm::CallBase *CallOrInvoke;
1342 llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(GD: CalleeDecl);
1343 CGCallee Callee = CGCallee::forDirect(functionPtr: CalleePtr, abstractInfo: GlobalDecl(CalleeDecl));
1344 RValue RV = CGF.EmitCall(CallInfo: CGF.CGM.getTypes().arrangeFreeFunctionCall(
1345 Args, Ty: CalleeType, /*ChainCall=*/false),
1346 Callee, ReturnValue: ReturnValueSlot(), Args, CallOrInvoke: &CallOrInvoke);
1347
1348 /// C++1y [expr.new]p10:
1349 /// [In a new-expression,] an implementation is allowed to omit a call
1350 /// to a replaceable global allocation function.
1351 ///
1352 /// We model such elidable calls with the 'builtin' attribute.
1353 llvm::Function *Fn = dyn_cast<llvm::Function>(Val: CalleePtr);
1354 if (CalleeDecl->isReplaceableGlobalAllocationFunction() && Fn &&
1355 Fn->hasFnAttribute(Kind: llvm::Attribute::NoBuiltin)) {
1356 CallOrInvoke->addFnAttr(Kind: llvm::Attribute::Builtin);
1357 }
1358
1359 return RV;
1360}
1361
1362RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
1363 const CallExpr *TheCall,
1364 bool IsDelete) {
1365 CallArgList Args;
1366 EmitCallArgs(Args, Prototype: Type, ArgRange: TheCall->arguments());
1367 // Find the allocation or deallocation function that we're calling.
1368 ASTContext &Ctx = getContext();
1369 DeclarationName Name =
1370 Ctx.DeclarationNames.getCXXOperatorName(Op: IsDelete ? OO_Delete : OO_New);
1371
1372 for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
1373 if (auto *FD = dyn_cast<FunctionDecl>(Val: Decl))
1374 if (Ctx.hasSameType(T1: FD->getType(), T2: QualType(Type, 0))) {
1375 RValue RV = EmitNewDeleteCall(CGF&: *this, CalleeDecl: FD, CalleeType: Type, Args);
1376 if (auto *CB = dyn_cast_if_present<llvm::CallBase>(Val: RV.getScalarVal())) {
1377 if (SanOpts.has(K: SanitizerKind::AllocToken)) {
1378 // Set !alloc_token metadata.
1379 EmitAllocToken(CB, E: TheCall);
1380 }
1381 }
1382 return RV;
1383 }
1384 llvm_unreachable("predeclared global operator new/delete is missing");
1385}
1386
namespace {
/// A cleanup to call the given 'operator delete' function upon abnormal
/// exit from a new expression. Templated on a traits type that deals with
/// ensuring that the arguments dominate the cleanup if necessary.
template <typename Traits>
class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
  /// Type used to hold llvm::Value*s.
  typedef typename Traits::ValueTy ValueTy;
  /// Type used to hold RValues.
  typedef typename Traits::RValueTy RValueTy;
  /// One saved placement argument: its value plus the type it was passed as.
  struct PlacementArg {
    RValueTy ArgValue;
    QualType ArgType;
  };

  // Packed into a single word alongside the alignment flag below.
  unsigned NumPlacementArgs : 30;
  LLVM_PREFERRED_TYPE(AlignedAllocationMode)
  unsigned PassAlignmentToPlacementDelete : 1;
  const FunctionDecl *OperatorDelete;
  RValueTy TypeIdentity;
  ValueTy Ptr;
  ValueTy AllocSize;
  CharUnits AllocAlign;

  // The placement arguments live in trailing storage allocated immediately
  // after this object; getExtraSize() below reserves the space.
  PlacementArg *getPlacementArgs() {
    return reinterpret_cast<PlacementArg *>(this + 1);
  }

public:
  /// Extra bytes (beyond sizeof(*this)) needed to hold the placement args.
  static size_t getExtraSize(size_t NumPlacementArgs) {
    return NumPlacementArgs * sizeof(PlacementArg);
  }

  CallDeleteDuringNew(size_t NumPlacementArgs,
                      const FunctionDecl *OperatorDelete, RValueTy TypeIdentity,
                      ValueTy Ptr, ValueTy AllocSize,
                      const ImplicitAllocationParameters &IAP,
                      CharUnits AllocAlign)
      : NumPlacementArgs(NumPlacementArgs),
        PassAlignmentToPlacementDelete(isAlignedAllocation(Mode: IAP.PassAlignment)),
        OperatorDelete(OperatorDelete), TypeIdentity(TypeIdentity), Ptr(Ptr),
        AllocSize(AllocSize), AllocAlign(AllocAlign) {}

  /// Record the I'th placement argument; must be called for every index
  /// before the cleanup can fire.
  void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
    assert(I < NumPlacementArgs && "index out of range");
    getPlacementArgs()[I] = {Arg, Type};
  }

  /// Build the argument list for 'operator delete' — type-identity (for
  /// type-aware delete), pointer, optional size, optional alignment, then
  /// any placement arguments — and emit the call.
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
    CallArgList DeleteArgs;
    unsigned FirstNonTypeArg = 0;
    TypeAwareAllocationMode TypeAwareDeallocation = TypeAwareAllocationMode::No;
    if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
      TypeAwareDeallocation = TypeAwareAllocationMode::Yes;
      QualType SpecializedTypeIdentity = FPT->getParamType(i: 0);
      ++FirstNonTypeArg;
      DeleteArgs.add(rvalue: Traits::get(CGF, TypeIdentity), type: SpecializedTypeIdentity);
    }
    // The first argument after type-identity parameter (if any) is always
    // a void* (or C* for a destroying operator delete for class type C).
    DeleteArgs.add(rvalue: Traits::get(CGF, Ptr), type: FPT->getParamType(i: FirstNonTypeArg));

    // Figure out what other parameters we should be implicitly passing.
    UsualDeleteParams Params;
    if (NumPlacementArgs) {
      // A placement deallocation function is implicitly passed an alignment
      // if the placement allocation function was, but is never passed a size.
      Params.Alignment =
          alignedAllocationModeFromBool(IsAligned: PassAlignmentToPlacementDelete);
      Params.TypeAwareDelete = TypeAwareDeallocation;
      Params.Size = isTypeAwareAllocation(Mode: Params.TypeAwareDelete);
    } else {
      // For a non-placement new-expression, 'operator delete' can take a
      // size and/or an alignment if it has the right parameters.
      Params = OperatorDelete->getUsualDeleteParams();
    }

    assert(!Params.DestroyingDelete &&
           "should not call destroying delete in a new-expression");

    // The second argument can be a std::size_t (for non-placement delete).
    if (Params.Size)
      DeleteArgs.add(rvalue: Traits::get(CGF, AllocSize),
                     type: CGF.getContext().getSizeType());

    // The next (second or third) argument can be a std::align_val_t, which
    // is an enum whose underlying type is std::size_t.
    // FIXME: Use the right type as the parameter type. Note that in a call
    // to operator delete(size_t, ...), we may not have it available.
    if (isAlignedAllocation(Mode: Params.Alignment))
      DeleteArgs.add(rvalue: RValue::get(V: llvm::ConstantInt::get(
                         Ty: CGF.SizeTy, V: AllocAlign.getQuantity())),
                     type: CGF.getContext().getSizeType());

    // Pass the rest of the arguments, which must match exactly.
    for (unsigned I = 0; I != NumPlacementArgs; ++I) {
      auto Arg = getPlacementArgs()[I];
      DeleteArgs.add(rvalue: Traits::get(CGF, Arg.ArgValue), type: Arg.ArgType);
    }

    // Call 'operator delete'.
    EmitNewDeleteCall(CGF, CalleeDecl: OperatorDelete, CalleeType: FPT, Args: DeleteArgs);
  }
};
} // namespace
1493
/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
///
/// \param TypeIdentity the type-identity argument for a type-aware
///        operator delete (unused by the cleanup otherwise).
/// \param NewPtr the pointer returned by the allocation.
/// \param AllocSize the size passed to the allocation function.
/// \param AllocAlign the alignment of the allocation.
/// \param NewArgs the already-emitted argument list of the 'operator new'
///        call; placement arguments are extracted from it.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF, const CXXNewExpr *E,
                                  RValue TypeIdentity, Address NewPtr,
                                  llvm::Value *AllocSize, CharUnits AllocAlign,
                                  const CallArgList &NewArgs) {
  // Placement args follow the implicit args in NewArgs.
  unsigned NumNonPlacementArgs = E->getNumImplicitArgs();

  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    // Direct traits: values are used as-is, no saving/restoring needed.
    struct DirectCleanupTraits {
      typedef llvm::Value *ValueTy;
      typedef RValue RValueTy;
      static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
      static RValue get(CodeGenFunction &, RValueTy V) { return V; }
    };

    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;

    DirectCleanup *Cleanup = CGF.EHStack.pushCleanupWithExtra<DirectCleanup>(
        Kind: EHCleanup, N: E->getNumPlacementArgs(), A: E->getOperatorDelete(),
        A: TypeIdentity, A: NewPtr.emitRawPointer(CGF), A: AllocSize,
        A: E->implicitAllocationParameters(), A: AllocAlign);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
      auto &Arg = NewArgs[I + NumNonPlacementArgs];
      Cleanup->setPlacementArg(I, Arg: Arg.getRValue(CGF), Type: Arg.Ty);
    }

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
      DominatingValue<RValue>::save(CGF, value: RValue::get(Addr: NewPtr, CGF));
  DominatingValue<RValue>::saved_type SavedAllocSize =
      DominatingValue<RValue>::save(CGF, value: RValue::get(V: AllocSize));
  DominatingValue<RValue>::saved_type SavedTypeIdentity =
      DominatingValue<RValue>::save(CGF, value: TypeIdentity);
  // Conditional traits: values are stored in dominating-value form and
  // restored when the cleanup actually fires.
  struct ConditionalCleanupTraits {
    typedef DominatingValue<RValue>::saved_type ValueTy;
    typedef DominatingValue<RValue>::saved_type RValueTy;
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
      return V.restore(CGF);
    }
  };
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;

  ConditionalCleanup *Cleanup =
      CGF.EHStack.pushCleanupWithExtra<ConditionalCleanup>(
          Kind: EHCleanup, N: E->getNumPlacementArgs(), A: E->getOperatorDelete(),
          A: SavedTypeIdentity, A: SavedNewPtr, A: SavedAllocSize,
          A: E->implicitAllocationParameters(), A: AllocAlign);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
    Cleanup->setPlacementArg(
        I, Arg: DominatingValue<RValue>::save(CGF, value: Arg.getRValue(CGF)), Type: Arg.Ty);
  }

  // Mark the cleanup as a full-expression cleanup (conditional case only).
  CGF.initFullExprCleanup();
}
1555
/// Emit a C++ new-expression: the allocation call, array cookie (if any),
/// the initializer, and the null-check / operator-delete-on-throw plumbing.
/// Returns the (possibly PHI'd) pointer value of the whole expression.
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(QT: E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();

  // If there is a brace-initializer or C++20 parenthesized initializer, cannot
  // allocate fewer elements than inits.
  unsigned minElements = 0;
  // Index of the align_val_t parameter; bumped by one if a type-identity tag
  // parameter precedes the size parameter (type-aware allocation).
  unsigned IndexOfAlignArg = 1;
  if (E->isArray() && E->hasInitializer()) {
    const Expr *Init = E->getInitializer();
    const InitListExpr *ILE = dyn_cast<InitListExpr>(Val: Init);
    const CXXParenListInitExpr *CPLIE = dyn_cast<CXXParenListInitExpr>(Val: Init);
    const Expr *IgnoreParen = Init->IgnoreParenImpCasts();
    if ((ILE && ILE->isStringLiteralInit()) ||
        isa<StringLiteral>(Val: IgnoreParen) || isa<ObjCEncodeExpr>(Val: IgnoreParen)) {
      // String-literal init: the array type carries the element count.
      minElements =
          cast<ConstantArrayType>(Val: Init->getType()->getAsArrayTypeUnsafe())
              ->getZExtSize();
    } else if (ILE || CPLIE) {
      minElements = ILE ? ILE->getNumInits() : CPLIE->getInitExprs().size();
    }
  }

  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize = EmitCXXNewAllocSize(
      CGF&: *this, e: E, minElements, numElements, sizeWithoutCookie&: allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(T: allocType);

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  Address allocation = Address::invalid();
  CallArgList allocatorArgs;
  // The emitted std::type_identity tag, if this is a type-aware allocation;
  // forwarded to EnterNewDeleteCleanup below.
  RValue TypeIdentityArg;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(E->getNumPlacementArgs() == 1);
    const Expr *arg = *E->placement_arguments().begin();

    LValueBaseInfo BaseInfo;
    allocation = EmitPointerWithAlignment(Addr: arg, BaseInfo: &BaseInfo);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
      allocation.setAlignment(allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(rvalue: RValue::get(V: allocSize), type: getContext().getSizeType());
      allocatorArgs.add(rvalue: RValue::get(Addr: allocation, CGF&: *this), type: arg->getType());
    }

  } else {
    const FunctionProtoType *allocatorType =
        allocator->getType()->castAs<FunctionProtoType>();
    ImplicitAllocationParameters IAP = E->implicitAllocationParameters();
    unsigned ParamsToSkip = 0;
    if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
      // Type-aware allocation: the first parameter is a std::type_identity
      // tag, materialized here as a value-initialized temporary.
      QualType SpecializedTypeIdentity = allocatorType->getParamType(i: 0);
      CXXScalarValueInitExpr TypeIdentityParam(SpecializedTypeIdentity, nullptr,
                                               SourceLocation());
      TypeIdentityArg = EmitAnyExprToTemp(E: &TypeIdentityParam);
      allocatorArgs.add(rvalue: TypeIdentityArg, type: SpecializedTypeIdentity);
      ++ParamsToSkip;
      ++IndexOfAlignArg;
    }
    // The allocation size is the first argument.
    QualType sizeType = getContext().getSizeType();
    allocatorArgs.add(rvalue: RValue::get(V: allocSize), type: sizeType);
    ++ParamsToSkip;

    if (allocSize != allocSizeWithoutCookie) {
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
      allocAlign = std::max(a: allocAlign, b: cookieAlign);
    }

    // The allocation alignment may be passed as the second argument.
    if (isAlignedAllocation(Mode: IAP.PassAlignment)) {
      QualType AlignValT = sizeType;
      if (allocatorType->getNumParams() > IndexOfAlignArg) {
        AlignValT = allocatorType->getParamType(i: IndexOfAlignArg);
        assert(getContext().hasSameUnqualifiedType(
                   AlignValT->castAsEnumDecl()->getIntegerType(), sizeType) &&
               "wrong type for alignment parameter");
        ++ParamsToSkip;
      } else {
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
      }
      allocatorArgs.add(
          rvalue: RValue::get(V: llvm::ConstantInt::get(Ty: SizeTy, V: allocAlign.getQuantity())),
          type: AlignValT);
    }

    // FIXME: Why do we not pass a CalleeDecl here?
    EmitCallArgs(Args&: allocatorArgs, Prototype: allocatorType, ArgRange: E->placement_arguments(),
                 /*AC*/ AbstractCallee(), /*ParamsToSkip*/ ParamsToSkip);

    RValue RV =
        EmitNewDeleteCall(CGF&: *this, CalleeDecl: allocator, CalleeType: allocatorType, Args: allocatorArgs);

    if (auto *newCall = dyn_cast<llvm::CallBase>(Val: RV.getScalarVal())) {
      if (auto *CGDI = getDebugInfo()) {
        // Set !heapallocsite metadata on the call to operator new.
        CGDI->addHeapAllocSiteMetadata(CallSite: newCall, AllocatedTy: allocType, Loc: E->getExprLoc());
      }
      if (SanOpts.has(K: SanitizerKind::AllocToken)) {
        // Set !alloc_token metadata.
        EmitAllocToken(CB: newCall, AllocType: allocType);
      }
    }

    // If this was a call to a global replaceable allocation function that does
    // not take an alignment argument, the allocator is known to produce
    // storage that's suitably aligned for any object that fits, up to a known
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
    CharUnits allocationAlign = allocAlign;
    if (!E->passAlignment() &&
        allocator->isReplaceableGlobalAllocationFunction()) {
      unsigned AllocatorAlign = llvm::bit_floor(Value: std::min<uint64_t>(
          a: Target.getNewAlign(), b: getContext().getTypeSize(T: allocType)));
      allocationAlign = std::max(
          a: allocationAlign, b: getContext().toCharUnitsFromBits(BitSize: AllocatorAlign));
    }

    allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec or is the reserved placement new) and we have an
  // interesting initializer or will be running sanitizers on the
  // initialization.
  bool nullCheck = E->shouldNullCheckAllocation() &&
                   (!allocType.isPODType(Context: getContext()) || E->hasInitializer() ||
                    sanitizePerformTypeCheck());

  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(CGF&: *this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock(name: "new.notnull");
    contBB = createBasicBlock(name: "new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(Addr: allocation, Name: "new.isnull");
    Builder.CreateCondBr(Cond: isNull, True: contBB, False: notNullBB);
    EmitBlock(BB: notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  // Placeholder instruction marking the dominating point for conditional
  // cleanup deactivation; erased once the cleanup is deactivated.
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(CGF&: *this, E, TypeIdentity: TypeIdentityArg, NewPtr: allocation, AllocSize: allocSize,
                          AllocAlign: allocAlign, NewArgs: allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    // Write the array cookie and advance the pointer past it.
    allocation = CGM.getCXXABI().InitializeArrayCookie(
        CGF&: *this, NewPtr: allocation, NumElements: numElements, expr: E, ElementType: allocType);
  }

  llvm::Type *elementTy = ConvertTypeForMem(T: allocType);
  Address result = allocation.withElementType(ElemTy: elementTy);

  // Passing pointer through launder.invariant.group to avoid propagation of
  // vptrs information which may be included in previous type.
  // To not break LTO with different optimizations levels, we do it regardless
  // of optimization level.
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
      allocator->isReservedGlobalPlacementOperator())
    result = Builder.CreateLaunderInvariantGroup(Addr: result);

  // Emit sanitizer checks for pointer value now, so that in the case of an
  // array it was checked only once and not at each constructor call. We may
  // have already checked that the pointer is non-null.
  // FIXME: If we have an array cookie and a potentially-throwing allocator,
  // we'll null check the wrong pointer here.
  SanitizerSet SkippedChecks;
  SkippedChecks.set(K: SanitizerKind::Null, Value: nullCheck);
  EmitTypeCheck(TCK: CodeGenFunction::TCK_ConstructorCall,
                Loc: E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
                Addr: result, Type: allocType, Alignment: result.getAlignment(), SkippedChecks,
                ArraySize: numElements);

  EmitNewInitializer(CGF&: *this, E, ElementType: allocType, ElementTy: elementTy, NewPtr: result, NumElements: numElements,
                     AllocSizeWithoutCookie: allocSizeWithoutCookie);
  llvm::Value *resultPtr = result.emitRawPointer(CGF&: *this);

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(Cleanup: operatorDeleteCleanup, DominatingIP: cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(CGF&: *this);

    // Join the not-null and null paths; the null path yields a null pointer.
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(BB: contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(Ty: resultPtr->getType(), NumReservedValues: 2);
    PHI->addIncoming(V: resultPtr, BB: notNullBB);
    PHI->addIncoming(V: llvm::Constant::getNullValue(Ty: resultPtr->getType()),
                     BB: nullCheckBB);

    resultPtr = PHI;
  }

  return resultPtr;
}
1787
/// Emit a call to a usual 'operator delete', passing whichever of the usual
/// implicit arguments (type-identity tag, destroying-delete tag, size,
/// alignment) the function's signature declares.
///
/// \param DeleteFD    The 'operator delete' being called.
/// \param DeletePtr   The pointer to deallocate.
/// \param DeleteTy    The (base element) type being deleted; used to compute
///                    the size and alignment arguments.
/// \param NumElements For array delete, the element count (may be null).
/// \param CookieSize  For array delete, the size of the array cookie.
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *DeletePtr, QualType DeleteTy,
                                     llvm::Value *NumElements,
                                     CharUnits CookieSize) {
  assert((!NumElements && CookieSize.isZero()) ||
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);

  const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
  CallArgList DeleteArgs;

  auto Params = DeleteFD->getUsualDeleteParams();
  // Walked in lockstep with the implicit arguments we add; each consumed
  // parameter type must correspond to the argument emitted for it.
  auto ParamTypeIt = DeleteFTy->param_type_begin();

  // Tag arguments (type-identity / destroying-delete) are passed as empty
  // aggregates via a temporary alloca; at most one is emitted per call.
  std::optional<llvm::AllocaInst *> TagAlloca;
  auto EmitTag = [&](QualType TagType, const char *TagName) {
    assert(!TagAlloca);
    llvm::Type *Ty = getTypes().ConvertType(T: TagType);
    CharUnits Align = CGM.getNaturalTypeAlignment(T: TagType);
    llvm::AllocaInst *TagAllocation = CreateTempAlloca(Ty, Name: TagName);
    TagAllocation->setAlignment(Align.getAsAlign());
    DeleteArgs.add(rvalue: RValue::getAggregate(addr: Address(TagAllocation, Ty, Align)),
                   type: TagType);
    TagAlloca = TagAllocation;
  };

  // Pass std::type_identity tag if present
  if (isTypeAwareAllocation(Mode: Params.TypeAwareDelete))
    EmitTag(*ParamTypeIt++, "typeaware.delete.tag");

  // Pass the pointer itself.
  QualType ArgTy = *ParamTypeIt++;
  DeleteArgs.add(rvalue: RValue::get(V: DeletePtr), type: ArgTy);

  // Pass the std::destroying_delete tag if present.
  if (Params.DestroyingDelete)
    EmitTag(*ParamTypeIt++, "destroying.delete.tag");

  // Pass the size if the delete function has a size_t parameter.
  if (Params.Size) {
    QualType SizeType = *ParamTypeIt++;
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(T: DeleteTy);
    llvm::Value *Size = llvm::ConstantInt::get(Ty: ConvertType(T: SizeType),
                                               V: DeleteTypeSize.getQuantity());

    // For array new, multiply by the number of elements.
    if (NumElements)
      Size = Builder.CreateMul(LHS: Size, RHS: NumElements);

    // If there is a cookie, add the cookie size.
    if (!CookieSize.isZero())
      Size = Builder.CreateAdd(
          LHS: Size, RHS: llvm::ConstantInt::get(Ty: SizeTy, V: CookieSize.getQuantity()));

    DeleteArgs.add(rvalue: RValue::get(V: Size), type: SizeType);
  }

  // Pass the alignment if the delete function has an align_val_t parameter.
  if (isAlignedAllocation(Mode: Params.Alignment)) {
    QualType AlignValType = *ParamTypeIt++;
    CharUnits DeleteTypeAlign =
        getContext().toCharUnitsFromBits(BitSize: getContext().getTypeAlignIfKnown(
            T: DeleteTy, NeedsPreferredAlignment: true /* NeedsPreferredAlignment */));
    llvm::Value *Align = llvm::ConstantInt::get(Ty: ConvertType(T: AlignValType),
                                                V: DeleteTypeAlign.getQuantity());
    DeleteArgs.add(rvalue: RValue::get(V: Align), type: AlignValType);
  }

  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
         "unknown parameter to usual delete function");

  // Emit the call to delete.
  EmitNewDeleteCall(CGF&: *this, CalleeDecl: DeleteFD, CalleeType: DeleteFTy, Args: DeleteArgs);

  // If call argument lowering didn't use a generated tag argument alloca we
  // remove them
  if (TagAlloca && (*TagAlloca)->use_empty())
    (*TagAlloca)->eraseFromParent();
}
namespace {
/// Calls the given 'operator delete' on a single object.
///
/// Pushed as an EH/normal cleanup so 'operator delete' still runs if the
/// destructor (or initialization, for the new-expression path) throws.
struct CallObjectDelete final : EHScopeStack::Cleanup {
  llvm::Value *Ptr;                    // Pointer to deallocate.
  const FunctionDecl *OperatorDelete;  // The 'operator delete' to call.
  QualType ElementType;                // Type used for size/align computation.

  CallObjectDelete(llvm::Value *Ptr, const FunctionDecl *OperatorDelete,
                   QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

  // Emit the single-object delete call when the cleanup fires.
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitDeleteCall(DeleteFD: OperatorDelete, DeletePtr: Ptr, DeleteTy: ElementType);
  }
};
} // namespace
1882
/// Push a normal-and-EH cleanup that calls the given 'operator delete' on
/// \p CompletePtr, sized/aligned for \p ElementType.
void CodeGenFunction::pushCallObjectDeleteCleanup(
    const FunctionDecl *OperatorDelete, llvm::Value *CompletePtr,
    QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(Kind: NormalAndEHCleanup, A: CompletePtr,
                                        A: OperatorDelete, A: ElementType);
}
1889
1890/// Emit the code for deleting a single object with a destroying operator
1891/// delete. If the element type has a non-virtual destructor, Ptr has already
1892/// been converted to the type of the parameter of 'operator delete'. Otherwise
1893/// Ptr points to an object of the static type.
1894static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
1895 const CXXDeleteExpr *DE, Address Ptr,
1896 QualType ElementType) {
1897 auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
1898 if (Dtor && Dtor->isVirtual())
1899 CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1900 Dtor);
1901 else
1902 CGF.EmitDeleteCall(DeleteFD: DE->getOperatorDelete(), DeletePtr: Ptr.emitRawPointer(CGF),
1903 DeleteTy: ElementType);
1904}
1905
1906static CXXDestructorDecl *TryDevirtualizeDtorCall(const CXXDeleteExpr *E,
1907 CXXDestructorDecl *Dtor,
1908 const LangOptions &LO) {
1909 assert(Dtor && Dtor->isVirtual() && "virtual dtor is expected");
1910 const Expr *DBase = E->getArgument();
1911 if (auto *MaybeDevirtualizedDtor = dyn_cast_or_null<CXXDestructorDecl>(
1912 Val: Dtor->getDevirtualizedMethod(Base: DBase, IsAppleKext: LO.AppleKext))) {
1913 const CXXRecordDecl *DevirtualizedClass =
1914 MaybeDevirtualizedDtor->getParent();
1915 if (declaresSameEntity(D1: getCXXRecord(E: DBase), D2: DevirtualizedClass)) {
1916 // Devirtualized to the class of the base type (the type of the
1917 // whole expression).
1918 return MaybeDevirtualizedDtor;
1919 }
1920 // Devirtualized to some other type. Would need to cast the this
1921 // pointer to that type but we don't have support for that yet, so
1922 // do a virtual call. FIXME: handle the case where it is
1923 // devirtualized to the derived type (the type of the inner
1924 // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
1925 }
1926 return nullptr;
1927}
1928
/// Emit the code for deleting a single object: run the destructor (direct,
/// devirtualized, or virtual) and call the non-destroying 'operator delete'.
/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
/// if not.
static bool EmitObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
                             Address Ptr, QualType ElementType,
                             llvm::BasicBlock *UnconditionalDeleteBlock) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(TCK: CodeGenFunction::TCK_MemberCall, Loc: DE->getExprLoc(), Addr: Ptr,
                    Type: ElementType);

  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  // Destroying operator delete is handled on a separate path entirely.
  assert(!OperatorDelete->isDestroyingOperatorDelete());

  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  CXXDestructorDecl *Dtor = nullptr;
  if (const auto *RD = ElementType->getAsCXXRecordDecl()) {
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (auto *DevirtualizedDtor =
                TryDevirtualizeDtorCall(E: DE, Dtor, LO: CGF.CGM.getLangOpts())) {
          Dtor = DevirtualizedDtor;
        } else {
          // The virtual-delete path emits both the dtor call and the delete,
          // so nothing is left to do here.
          CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                      Dtor);
          return false;
        }
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(
      Kind: NormalAndEHCleanup, A: Ptr.emitRawPointer(CGF), A: OperatorDelete, A: ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(D: Dtor, Type: Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false, This: Ptr, ThisTy: ElementType);
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
    // ObjC ownership-qualified pointees are released instead of destroyed.
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      CGF.EmitARCDestroyStrong(addr: Ptr, precise: ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(addr: Ptr);
      break;
    }
  }

  // When optimizing for size, call 'operator delete' unconditionally.
  if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
    CGF.EmitBlock(BB: UnconditionalDeleteBlock);
    CGF.PopCleanupBlock();
    return true;
  }

  CGF.PopCleanupBlock();
  return false;
}
2003
namespace {
/// Calls the given 'operator delete' on an array of objects.
///
/// Pushed as an EH/normal cleanup so the deallocation still happens if an
/// element destructor throws. The stored pointer is the allocation pointer
/// (before the array cookie), as recovered by ReadArrayCookie.
struct CallArrayDelete final : EHScopeStack::Cleanup {
  llvm::Value *Ptr;                    // Allocation pointer to deallocate.
  const FunctionDecl *OperatorDelete;  // The array 'operator delete'.
  llvm::Value *NumElements;            // Element count from the cookie.
  QualType ElementType;                // Element type for size computation.
  CharUnits CookieSize;                // Cookie size added to the byte size.

  CallArrayDelete(llvm::Value *Ptr, const FunctionDecl *OperatorDelete,
                  llvm::Value *NumElements, QualType ElementType,
                  CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

  // Emit the array delete call when the cleanup fires.
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitDeleteCall(DeleteFD: OperatorDelete, DeletePtr: Ptr, DeleteTy: ElementType, NumElements,
                       CookieSize);
  }
};
} // namespace
2025
/// Emit the code for deleting an array of objects: read the array cookie to
/// recover the allocation pointer and element count, destroy the elements in
/// order, then call the array 'operator delete'.
static void EmitArrayDelete(CodeGenFunction &CGF, const CXXDeleteExpr *E,
                            Address deletedPtr, QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  // The ABI recovers the original allocation pointer (and, if there is a
  // cookie, the element count) from the pointer being deleted.
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr: deletedPtr, expr: E, ElementType: elementType,
                                      NumElements&: numElements, AllocPtr&: allocatedPtr, CookieSize&: cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(Kind: NormalAndEHCleanup, A: allocatedPtr,
                                           A: operatorDelete, A: numElements,
                                           A: elementType, A: cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(T: elementType);
    CharUnits elementAlign =
        deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);

    llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF);
    llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
        Ty: deletedPtr.getElementType(), Ptr: arrayBegin, IdxList: numElements, Name: "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(begin: arrayBegin, end: arrayEnd, elementType, elementAlign,
                         destroyer: CGF.getDestroyer(destructionKind: dtorKind),
                         /*checkZeroLength*/ true,
                         useEHCleanup: CGF.needsEHCleanup(kind: dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}
2067
/// Emit a C++ delete-expression: null-check the operand, then dispatch to the
/// destroying-delete, array-delete, vector-deleting-dtor, or single-object
/// path as appropriate.
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Addr: Arg);

  // Null check the pointer.
  //
  // We could avoid this null check if we can determine that the object
  // destruction is trivial and doesn't require an array cookie; we can
  // unconditionally perform the operator delete call in that case. For now, we
  // assume that deleted pointers are null rarely enough that it's better to
  // keep the branch. This might be worth revisiting for a -O0 code size win.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock(name: "delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock(name: "delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Addr: Ptr, Name: "isnull");

  Builder.CreateCondBr(Cond: IsNull, True: DeleteEnd, False: DeleteNotNull);
  EmitBlock(BB: DeleteNotNull);
  // Past the branch the pointer is known non-null.
  Ptr.setKnownNonNull();

  QualType DeleteTy = E->getDestroyedType();

  // A destroying operator delete overrides the entire operation of the
  // delete expression.
  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
    EmitDestroyingObjectDelete(CGF&: *this, DE: E, Ptr, ElementType: DeleteTy);
    EmitBlock(BB: DeleteEnd);
    return;
  }

  // We might be deleting a pointer to array.
  DeleteTy = getContext().getBaseElementType(QT: DeleteTy);
  Ptr = Ptr.withElementType(ElemTy: ConvertTypeForMem(T: DeleteTy));

  // Targets using vector deleting destructors (e.g. the MS ABI) handle array
  // delete of a class with a virtual destructor via a virtual call, unless
  // the destructor call can be devirtualized.
  if (E->isArrayForm() &&
      CGM.getContext().getTargetInfo().emitVectorDeletingDtors(
          CGM.getContext().getLangOpts())) {
    if (auto *RD = DeleteTy->getAsCXXRecordDecl()) {
      auto *Dtor = RD->getDestructor();
      if (Dtor && Dtor->isVirtual()) {
        // Emit normal loop over the array elements if we can easily
        // devirtualize destructor call.
        // Emit virtual call to vector deleting destructor otherwise.
        if (!TryDevirtualizeDtorCall(E, Dtor, LO: CGM.getLangOpts())) {
          llvm::Value *NumElements = nullptr;
          llvm::Value *AllocatedPtr = nullptr;
          CharUnits CookieSize;
          llvm::BasicBlock *BodyBB = createBasicBlock(name: "vdtor.call");
          llvm::BasicBlock *DoneBB = createBasicBlock(name: "vdtor.nocall");
          // Check array cookie to see if the array has length 0. Don't call
          // the destructor in that case.
          CGM.getCXXABI().ReadArrayCookie(CGF&: *this, Ptr, expr: E, ElementType: DeleteTy, NumElements,
                                          AllocPtr&: AllocatedPtr, CookieSize);

          auto *CondTy = cast<llvm::IntegerType>(Val: NumElements->getType());
          llvm::Value *IsEmpty = Builder.CreateICmpEQ(
              LHS: NumElements, RHS: llvm::ConstantInt::get(Ty: CondTy, V: 0));
          Builder.CreateCondBr(Cond: IsEmpty, True: DoneBB, False: BodyBB);

          // Delete cookie for empty array.
          const FunctionDecl *OperatorDelete = E->getOperatorDelete();
          EmitBlock(BB: DoneBB);
          EmitDeleteCall(DeleteFD: OperatorDelete, DeletePtr: AllocatedPtr, DeleteTy, NumElements,
                         CookieSize);
          EmitBranch(Block: DeleteEnd);

          // Non-empty array: the vector deleting destructor both destroys
          // the elements and deallocates.
          EmitBlock(BB: BodyBB);
          CGM.getCXXABI().emitVirtualObjectDelete(CGF&: *this, DE: E, Ptr, ElementType: DeleteTy,
                                                  Dtor);
          EmitBlock(BB: DeleteEnd);
          return;
        }
      }
    }
  }

  if (E->isArrayForm()) {
    EmitArrayDelete(CGF&: *this, E, deletedPtr: Ptr, elementType: DeleteTy);
    EmitBlock(BB: DeleteEnd);
  } else {
    // EmitObjectDelete returns true if it already emitted DeleteEnd
    // (the unconditional-delete, optimize-for-size path).
    if (!EmitObjectDelete(CGF&: *this, DE: E, Ptr, ElementType: DeleteTy, UnconditionalDeleteBlock: DeleteEnd))
      EmitBlock(BB: DeleteEnd);
  }
}
2152
/// Emit a typeid of a polymorphic glvalue by loading the type_info through
/// the object's vtable, with an optional explicit null-pointer check that
/// throws std::bad_typeid.
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy,
                                         bool HasNullCheck) {
  // Get the vtable pointer.
  Address ThisPtr = CGF.EmitLValue(E).getAddress();

  QualType SrcRecordTy = E->getType();

  // C++ [class.cdtor]p4:
  //   If the operand of typeid refers to the object under construction or
  //   destruction and the static type of the operand is neither the constructor
  //   or destructor’s class nor one of its bases, the behavior is undefined.
  CGF.EmitTypeCheck(TCK: CodeGenFunction::TCK_DynamicOperation, Loc: E->getExprLoc(),
                    Addr: ThisPtr, Type: SrcRecordTy);

  // Whether we need an explicit null pointer check. For example, with the
  // Microsoft ABI, if this is a call to __RTtypeid, the null pointer check and
  // exception throw is inside the __RTtypeid(nullptr) call
  if (HasNullCheck &&
      CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(SrcRecordTy)) {
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock(name: "typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock(name: "typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Addr: ThisPtr);
    CGF.Builder.CreateCondBr(Cond: IsNull, True: BadTypeidBlock, False: EndBlock);

    // Null operand: throw std::bad_typeid ([expr.typeid]p2).
    CGF.EmitBlock(BB: BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(BB: EndBlock);
  }

  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}
2188
/// Emit a typeid expression: either the address of the static RTTI descriptor
/// or, for a potentially-evaluated polymorphic glvalue, a lookup through the
/// vtable.
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  // Ideally, we would like to use GlobalsInt8PtrTy here, however, we cannot,
  // primarily because the result of applying typeid is a value of type
  // type_info, which is declared & defined by the standard library
  // implementation and expects to operate on the generic (default) AS.
  // https://reviews.llvm.org/D157452 has more context, and a possible solution.
  llvm::Type *PtrTy = Int8PtrTy;
  LangAS GlobAS = CGM.GetGlobalVarAddressSpace(D: nullptr);

  // Cast the descriptor from the global-variable address space to the
  // default one when they differ.
  auto MaybeASCast = [=](llvm::Constant *TypeInfo) {
    if (GlobAS == LangAS::Default)
      return TypeInfo;
    return CGM.performAddrSpaceCast(Src: TypeInfo, DestTy: PtrTy);
  };

  // typeid(T): always the static descriptor.
  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
        CGM.GetAddrOfRTTIDescriptor(Ty: E->getTypeOperand(Context: getContext()));
    return MaybeASCast(TypeInfo);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  // If the operand is already most derived object, no need to look up vtable.
  if (E->isPotentiallyEvaluated() && !E->isMostDerived(Context: getContext()))
    return EmitTypeidFromVTable(CGF&: *this, E: E->getExprOperand(), StdTypeInfoPtrTy: PtrTy,
                                HasNullCheck: E->hasNullCheck());

  // Unevaluated operand or statically-known dynamic type: use the static
  // descriptor for the operand's type.
  QualType OperandTy = E->getExprOperand()->getType();
  return MaybeASCast(CGM.GetAddrOfRTTIDescriptor(Ty: OperandTy));
}
2223
2224static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
2225 QualType DestTy) {
2226 llvm::Type *DestLTy = CGF.ConvertType(T: DestTy);
2227 if (DestTy->isPointerType())
2228 return llvm::Constant::getNullValue(Ty: DestLTy);
2229
2230 /// C++ [expr.dynamic.cast]p9:
2231 /// A failed cast to reference type throws std::bad_cast
2232 if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
2233 return nullptr;
2234
2235 CGF.Builder.ClearInsertionPoint();
2236 return llvm::PoisonValue::get(T: DestLTy);
2237}
2238
/// Emit a dynamic_cast expression.  \p ThisAddr is the address of the source
/// object and \p DCE the cast expression.  Returns the resulting pointer (or
/// the value used to bind a reference), joining the null and non-null paths
/// with a phi when a null check of the source was emitted.
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
                                              const CXXDynamicCastExpr *DCE) {
  CGM.EmitExplicitCastExprType(E: DCE, CGF: this);
  QualType DestTy = DCE->getTypeAsWritten();

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  bool IsDynamicCastToVoid = DestTy->isVoidPointerType();
  // Dig out the record types on both sides; the source may be a pointer or
  // a glvalue (reference cast), the destination a pointer, void*, or
  // reference.
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (IsDynamicCastToVoid) {
    SrcRecordTy = SrcTy->getPointeeType();
    // No DestRecordTy.
  } else if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  // C++ [class.cdtor]p5:
  //   If the operand of the dynamic_cast refers to the object under
  //   construction or destruction and the static type of the operand is not a
  //   pointer to or object of the constructor or destructor’s own class or one
  //   of its bases, the dynamic_cast results in undefined behavior.
  EmitTypeCheck(TCK: TCK_DynamicOperation, Loc: DCE->getExprLoc(), Addr: ThisAddr, Type: SrcRecordTy);

  // Casts Sema already proved must fail (or null operands): emit the null
  // result / bad_cast directly, with no runtime check.
  if (DCE->isAlwaysNull()) {
    if (llvm::Value *T = EmitDynamicCastToNull(CGF&: *this, DestTy)) {
      // Expression emission is expected to retain a valid insertion point.
      if (!Builder.GetInsertBlock())
        EmitBlock(BB: createBasicBlock(name: "dynamic_cast.unreachable"));
      return T;
    }
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // If the destination is effectively final, the cast succeeds if and only
  // if the dynamic type of the pointer is exactly the destination type.
  // (Only at -O1 and above, and only when the ABI supports the vptr
  // comparison; not applicable for casts to void*.)
  bool IsExact = !IsDynamicCastToVoid &&
                 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
                 DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
                 CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy);

  // For the exact strategy, ask the ABI how to perform the vptr comparison.
  // No info means the cast can never succeed: fold to the null result.
  std::optional<CGCXXABI::ExactDynamicCastInfo> ExactCastInfo;
  if (IsExact) {
    ExactCastInfo = CGM.getCXXABI().getExactDynamicCastInfo(SrcRecordTy, DestTy,
                                                            DestRecordTy);
    if (!ExactCastInfo) {
      llvm::Value *NullValue = EmitDynamicCastToNull(CGF&: *this, DestTy);
      if (!Builder.GetInsertBlock())
        EmitBlock(BB: createBasicBlock(name: "dynamic_cast.unreachable"));
      return NullValue;
    }
  }

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      IsExact || CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(
                     SrcIsPtr: SrcTy->isPointerType(), SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock(name: "dynamic_cast.end");

  // Branch around the cast proper when the source pointer is null.
  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock(name: "dynamic_cast.null");
    CastNotNull = createBasicBlock(name: "dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Addr: ThisAddr);
    Builder.CreateCondBr(Cond: IsNull, True: CastNull, False: CastNotNull);
    EmitBlock(BB: CastNotNull);
  }

  // Emit the cast using one of three strategies: to-void (walk to the most
  // derived object), exact (compare the vptr), or the generic ABI runtime
  // call.
  llvm::Value *Value;
  if (IsDynamicCastToVoid) {
    Value = CGM.getCXXABI().emitDynamicCastToVoid(CGF&: *this, Value: ThisAddr, SrcRecordTy);
  } else if (IsExact) {
    // If the destination type is effectively final, this pointer points to the
    // right type if and only if its vptr has the right value.
    Value = CGM.getCXXABI().emitExactDynamicCast(
        CGF&: *this, Value: ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastInfo: *ExactCastInfo,
        CastSuccess: CastEnd, CastFail: CastNull);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().emitDynamicCastCall(CGF&: *this, Value: ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
  }
  // The emission above may have moved the insertion point; re-capture the
  // block that actually flows into CastEnd so the phi's incoming edge is
  // correct.
  CastNotNull = Builder.GetInsertBlock();

  llvm::Value *NullValue = nullptr;
  if (ShouldNullCheckSrcValue) {
    EmitBranch(Block: CastEnd);

    // Null-source path: produce the null result (or throw bad_cast for a
    // reference destination), then fall through to the join block.
    EmitBlock(BB: CastNull);
    NullValue = EmitDynamicCastToNull(CGF&: *this, DestTy);
    CastNull = Builder.GetInsertBlock();

    EmitBranch(Block: CastEnd);
  }

  EmitBlock(BB: CastEnd);

  // Join the non-null and null paths.  CastNull may be null here either
  // because no null check was emitted or because the null path ended in an
  // unreachable throw (insertion point cleared).
  if (CastNull) {
    llvm::PHINode *PHI = Builder.CreatePHI(Ty: Value->getType(), NumReservedValues: 2);
    PHI->addIncoming(V: Value, BB: CastNotNull);
    PHI->addIncoming(V: NullValue, BB: CastNull);

    Value = PHI;
  }

  return Value;
}
2360