//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
}

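/// Shared helper for member, operator, and destructor calls: pushes the
/// `this` pointer, any implicit parameter (e.g. the VTT), and the remaining
/// call arguments onto \p Args, and reports the prefix-argument count and
/// required-argument info needed to arrange the call.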
static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isImplicitObjectMemberFunction() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(GD);
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = 0;
    if (const auto *Op = dyn_cast<CXXOperatorCallExpr>(CE)) {
      if (const auto *M = dyn_cast<CXXMethodDecl>(Op->getCalleeDecl()))
        ArgsToSkip =
            static_cast<unsigned>(!M->isExplicitObjectMemberFunction());
    }
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
}

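/// Emit a call to a non-static member function or overloaded operator once
/// the callee and the `this` pointer have already been computed.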
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam,
    QualType ImplicitParamTy, const CallExpr *CE, CallArgList *RtlArgs,
    llvm::CallBase **CallOrInvoke) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args, CallOrInvoke,
                  CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation());
}

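/// Emit a call to the given destructor, casting the `this` pointer to the
/// destructor's address space first if it differs from the object's.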
RValue CodeGenFunction::EmitCXXDestructorCall(
    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
    llvm::CallBase **CallOrInvoke) {
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());

  assert(!ThisTy.isNull());
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
         "Pointer/Object mixup");

  LangAS SrcAS = ThisTy.getAddressSpace();
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
  if (SrcAS != DstAS) {
    QualType DstTy = DtorDecl->getThisType();
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, NewType);
  }

  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, Dtor, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
                  ReturnValueSlot(), Args, CallOrInvoke,
                  CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation{});
}

RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
    const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    // If the pseudo-expression names a retainable object with weak or
    // strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress();
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    // The result shall only be used as the operand for the function call
    // operator (), and the result of such a call has type void. The only
    // effect is the evaluation of the postfix-expression before the dot or
    // arrow.
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue,
                                              llvm::CallBase **CallOrInvoke) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue, CallOrInvoke);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee =
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue, /*Chain=*/nullptr, CallOrInvoke);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(CE, MD, ReturnValue,
                                               HasQualifier, Qualifier, IsArrow,
                                               Base, CallOrInvoke);
}

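/// Common emission path for member calls and operator calls on an implicit
/// object argument: attempts devirtualization, handles the trivial and
/// MSVC-extension constructor/destructor cases, and otherwise forms the
/// callee (virtual or direct) before delegating to
/// EmitCXXMemberOrOperatorCall.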
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base, llvm::CallBase **CallOrInvoke) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->IgnoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return type of MD
      // and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }

  bool TrivialForCodegen =
      MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
  bool TrivialAssignment =
      TrivialForCodegen &&
      (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      !MD->getParent()->mayInsertExtraPadding();

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  LValue TrivialAssignmentRHS;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      if (TrivialAssignment) {
        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
      } else {
        RtlArgs = &RtlArgStorage;
        EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                     drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                     /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft);
      }
    }
  }

  LValue This;
  if (IsArrow) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType()->getPointeeType(),
                          BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
    // constructing a new complete object of type Ctor.
    assert(!RtlArgs);
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
    CallArgList Args;
    commonEmitCXXMemberOrOperatorCall(
        *this, {Ctor, Ctor_Complete}, This.getPointer(*this),
        /*ImplicitParam=*/nullptr,
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);

    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, This.getAddress(), Args,
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
                           /*NewPointerIsChecked=*/false, CallOrInvoke);
    return RValue::get(nullptr);
  }

  if (TrivialForCodegen) {
    if (isa<CXXDestructorDecl>(MD))
      return RValue::get(nullptr);

    if (TrivialAssignment) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      // It's important that we use the result of EmitLValue here rather than
      // emitting call arguments, in order to preserve TBAA information from
      // the RHS.
      LValue RHS = isa<CXXOperatorCallExpr>(CE)
                       ? TrivialAssignmentRHS
                       : EmitLValue(*CE->arg_begin());
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This.getPointer(*this));
    }

    assert(MD->getParent()->mayInsertExtraPadding() &&
           "unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        GlobalDecl(Dtor, Dtor_Complete));
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  // If a non-static member function of a class X is called for an object that
  // is not of type X, or of a type derived from X, the behavior is undefined.
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }

  if (sanitizePerformTypeCheck())
    EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
                  This.emitRawPointer(*this),
                  C.getRecordType(CalleeDecl->getParent()),
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // C++ [class.virtual]p12:
  // Explicit qualification with the scope operator (5.1) suppresses the
  // virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(
          *this, Dtor, Dtor_Complete, This.getAddress(),
          cast<CXXMemberCallExpr>(CE), CallOrInvoke);
    } else {
      GlobalDecl GD(Dtor, Dtor_Complete);
      CGCallee Callee;
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
      else {
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
      }

      QualType ThisTy =
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
      EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
                            /*ImplicitParam=*/nullptr,
                            /*ImplicitParamTy=*/QualType(), CE, CallOrInvoke);
    }
    return RValue::get(nullptr);
  }

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  CGCallee Callee;
  if (UseVirtualCall) {
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
          *this, This.getAddress(), CalleeDecl->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
    else {
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
                              GlobalDecl(DevirtualizedMethod));
    }
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(*this),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs, CallOrInvoke);
}

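/// Emit a call through a pointer to member function, e.g. (obj.*memfn)(args)
/// or (ptr->*memfn)(args).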
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue,
                                              llvm::CallBase **CallOrInvoke) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
  const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
  const auto *RD = MPT->getMostRecentCXXRecordDecl();

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
  else
    This = EmitLValue(BaseExpr, KnownNonNull).getAddress();

  EmitTypeCheck(
      TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
      QualType(MPT->getMostRecentCXXRecordDecl()->getTypeForDecl(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee. Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                                      ThisPtrForCall, MemFnPtr,
                                                      MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args, CallOrInvoke, E == MustTailCall,
                  E->getExprLoc());
}

RValue CodeGenFunction::EmitCXXOperatorMemberCallExpr(
    const CXXOperatorCallExpr *E, const CXXMethodDecl *MD,
    ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke) {
  assert(MD->isImplicitObjectMemberFunction() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0), CallOrInvoke);
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue,
                                               llvm::CallBase **CallOrInvoke) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue,
                                                     CallOrInvoke);
}

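/// Zero-initialize the storage of a base class subobject, skipping over any
/// vbptr slots, which the most derived class sets up itself.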
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = DestPtr.withElementType(CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling the
  // constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align =
        std::max(Layout.getNonVirtualAlignment(), DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getAsAlign());

    Address SrcPtr(NullVariable, CGF.Int8Ty, Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

    // Otherwise, just memset the whole thing to zero. This is legal
    // because in LLVM, all default initializers (other than the ones we just
    // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructionKind::Delegating:
    case CXXConstructionKind::Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructionKind::VirtualBase:
    case CXXConstructionKind::NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    // FIXME: This only handles the simplest case, where the source object
    // is passed directly as the first argument to the constructor.
    // This should also handle stepping through implicit casts and
    // conversion sequences which involve two steps, with a
    // conversion operator followed by a converting constructor.
    const Expr *SrcObj = E->getArg(0);
    assert(SrcObj->isTemporaryObject(getContext(), CD->getParent()));
    assert(
        getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
    EmitAggExpr(SrcObj, Dest);
    return;
  }

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                               Dest.isSanitizerChecked());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructionKind::Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructionKind::Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructionKind::VirtualBase:
      ForVirtualBase = true;
      [[fallthrough]];

    case CXXConstructionKind::NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
  }
}

void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

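/// Compute the total allocation size for a new-expression, including any
/// array cookie, producing an all-ones value on overflow so that the
/// subsequent call to operator new fails. The element count and the size
/// without the cookie are returned through the reference parameters.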
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = ConstantEmitter(CGF).tryEmitAbstract(
      *e->getArraySize(), (*e->getArraySize())->getType());
  if (!numElements)
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t. That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
          dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t. If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countl_zero())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that. This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie. This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that.
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold =
          llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow. Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                          llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                          llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary. This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary. Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Function *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
          llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
              llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Function *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                      llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

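/// Emit a single initializer into one element (or the single object) of a
/// new-expression, dispatching on whether the allocated type is scalar,
/// complex, or aggregate.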
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

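/// Emit the initializer for an array new-expression: explicit initializer
/// list or string-literal elements first, then the remaining elements via a
/// memset, a constructor-call loop, or an element-by-element loop, pushing
/// partial-destruction cleanups as needed.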
void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  CleanupDeactivationScope deactivation(*this);
  bool pushedCleanup = false;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
      BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  const InitListExpr *ILE = dyn_cast<InitListExpr>(Init);
  const CXXParenListInitExpr *CPLIE = nullptr;
  const StringLiteral *SL = nullptr;
  const ObjCEncodeExpr *OCEE = nullptr;
  const Expr *IgnoreParen = nullptr;
  if (!ILE) {
    IgnoreParen = Init->IgnoreParenImpCasts();
    CPLIE = dyn_cast<CXXParenListInitExpr>(IgnoreParen);
    SL = dyn_cast<StringLiteral>(IgnoreParen);
    OCEE = dyn_cast<ObjCEncodeExpr>(IgnoreParen);
  }

  // If the initializer is an initializer list, first do the explicit elements.
  if (ILE || CPLIE || SL || OCEE) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if ((ILE && ILE->isStringLiteralInit()) || SL || OCEE) {
      if (!ILE)
        Init = IgnoreParen;
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE ? ILE->getInit(0) : Init, Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
              ->getZExtSize();
      CurPtr = Builder.CreateConstInBoundsGEP(
          CurPtr, InitListElements, "string.init.end");

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    ArrayRef<const Expr *> InitExprs =
        ILE ? ILE->inits() : CPLIE->getInitExprs();
    InitListElements = InitExprs.size();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = CurPtr.withElementType(ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (DtorKind) {
      AllocaTrackerRAII AllocaTracker(*this);
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex. Therefore we go through an
      // alloca.
      llvm::Instruction *DominatingIP =
          Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy));
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      pushIrregularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
                                       EndOfInit, ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      cast<EHCleanupScope>(*EHStack.find(EHStack.stable_begin()))
          .AddAuxAllocas(AllocaTracker.Take());
      DeferredDeactivationCleanupStack.push_back(
          {EHStack.stable_begin(), DominatingIP});
      pushedCleanup = true;
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    unsigned i = 0;
    for (const Expr *IE : InitExprs) {
      // Tell the cleanup that it needs to destroy up to this
      // element. TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, IE, IE->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
                                                 CurPtr.emitRawPointer(*this),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       CurPtr.getElementType(),
                       StartAlign.alignmentAtOffset((++i) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE ? ILE->getArrayFiller() : CPLIE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = CurPtr.withElementType(BeginPtr.getElementType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If new expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               /*NewPointerIsChecked*/true,
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitField())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr = Builder.CreateInBoundsGEP(
      BeginPtr.getElementType(), BeginPtr.emitRawPointer(*this), NumElements,
      "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr.emitRawPointer(*this),
                                                EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
      Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.emitRawPointer(*this), EntryBB);

  CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!pushedCleanup && needsEHCleanup(DtorKind)) {
    llvm::Instruction *DominatingIP =
        Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy));
    pushRegularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
                                   CurPtr.emitRawPointer(*this), ElementType,
                                   ElementAlign, getDestroyer(DtorKind));
    DeferredDeactivationCleanupStack.push_back(
        {EHStack.stable_begin(), DominatingIP});
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
                          AggValueSlot::DoesNotOverlap);

  // Leave the Cleanup if we entered one.
  deactivation.ForceDeactivate();

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr = Builder.CreateConstInBoundsGEP1_32(
      ElementTy, CurPtr.emitRawPointer(*this), 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
                            AggValueSlot::DoesNotOverlap);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *CalleeDecl,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::CallBase *CallOrInvoke;
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
  CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*ChainCall=*/false),
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    CallOrInvoke->addFnAttr(llvm::Attribute::Builtin);
  }

  return RV;
}

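/// Look up the predeclared global operator new or operator delete matching
/// the given prototype and emit a call to it; used when lowering the
/// __builtin_operator_new and __builtin_operator_delete builtins.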
1362RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
1363 const CallExpr *TheCall,
1364 bool IsDelete) {
1365 CallArgList Args;
1366 EmitCallArgs(Args, Prototype: Type, ArgRange: TheCall->arguments());
1367 // Find the allocation or deallocation function that we're calling.
1368 ASTContext &Ctx = getContext();
1369 DeclarationName Name = Ctx.DeclarationNames
1370 .getCXXOperatorName(Op: IsDelete ? OO_Delete : OO_New);
1371
1372 for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
1373 if (auto *FD = dyn_cast<FunctionDecl>(Val: Decl))
1374 if (Ctx.hasSameType(T1: FD->getType(), T2: QualType(Type, 0)))
1375 return EmitNewDeleteCall(CGF&: *this, CalleeDecl: FD, CalleeType: Type, Args);
1376 llvm_unreachable("predeclared global operator new/delete is missing");
1377}
1378
1379namespace {
1380/// The parameters to pass to a usual operator delete.
1381struct UsualDeleteParams {
1382 TypeAwareAllocationMode TypeAwareDelete = TypeAwareAllocationMode::No;
1383 bool DestroyingDelete = false;
1384 bool Size = false;
1385 AlignedAllocationMode Alignment = AlignedAllocationMode::No;
1386};
1387}
1388
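// Illustrative examples of usual deallocation signatures and the implicit
// parameters they are passed (not exhaustive):
//   void operator delete(void *) noexcept;                        // {}
//   void operator delete(void *, std::size_t) noexcept;           // {Size}
//   void operator delete(void *, std::align_val_t) noexcept;      // {Alignment}
//   void operator delete(void *, std::size_t,
//                        std::align_val_t) noexcept;              // {Size, Alignment}
//   void operator delete(C *, std::destroying_delete_t) noexcept; // {DestroyingDelete}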
1389static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
1390 UsualDeleteParams Params;
1391
1392 const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
1393 auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
1394
1395 if (FD->isTypeAwareOperatorNewOrDelete()) {
1396 Params.TypeAwareDelete = TypeAwareAllocationMode::Yes;
1397 assert(AI != AE);
1398 ++AI;
1399 }
1400
1401 // The first argument after the type-identity parameter (if any) is
1402 // always a void* (or C* for a destroying operator delete for class
1403 // type C).
1404 ++AI;
1405
1406 // The next parameter may be a std::destroying_delete_t.
1407 if (FD->isDestroyingOperatorDelete()) {
1408 assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
1409 Params.DestroyingDelete = true;
1410 assert(AI != AE);
1411 ++AI;
1412 }
1413
1414 // Figure out what other parameters we should be implicitly passing.
1415 if (AI != AE && (*AI)->isIntegerType()) {
1416 Params.Size = true;
1417 ++AI;
1418 } else
1419 assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
1420
1421 if (AI != AE && (*AI)->isAlignValT()) {
1422 Params.Alignment = AlignedAllocationMode::Yes;
1423 ++AI;
1424 } else
1425 assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
1426
1427 assert(AI == AE && "unexpected usual deallocation function parameter");
1428 return Params;
1429}
1430
1431namespace {
1432 /// A cleanup to call the given 'operator delete' function upon abnormal
1433 /// exit from a new expression. Templated on a traits type that deals with
1434 /// ensuring that the arguments dominate the cleanup if necessary.
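 ///
 /// For example (illustrative), if the constructor throws in
 ///   new (arena, 16) T(args)
 /// this cleanup passes (arena, 16) back to the matching placement
 /// 'operator delete(void *, Arena &, int)' before the exception propagates.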
1435 template<typename Traits>
1436 class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
1437 /// Type used to hold llvm::Value*s.
1438 typedef typename Traits::ValueTy ValueTy;
1439 /// Type used to hold RValues.
1440 typedef typename Traits::RValueTy RValueTy;
1441 struct PlacementArg {
1442 RValueTy ArgValue;
1443 QualType ArgType;
1444 };
1445
1446 unsigned NumPlacementArgs : 30;
1447 LLVM_PREFERRED_TYPE(AlignedAllocationMode)
1448 unsigned PassAlignmentToPlacementDelete : 1;
1449 const FunctionDecl *OperatorDelete;
1450 RValueTy TypeIdentity;
1451 ValueTy Ptr;
1452 ValueTy AllocSize;
1453 CharUnits AllocAlign;
1454
1455 PlacementArg *getPlacementArgs() {
1456 return reinterpret_cast<PlacementArg *>(this + 1);
1457 }
1458
1459 public:
1460 static size_t getExtraSize(size_t NumPlacementArgs) {
1461 return NumPlacementArgs * sizeof(PlacementArg);
1462 }
1463
1464 CallDeleteDuringNew(size_t NumPlacementArgs,
1465 const FunctionDecl *OperatorDelete,
1466 RValueTy TypeIdentity, ValueTy Ptr, ValueTy AllocSize,
1467 const ImplicitAllocationParameters &IAP,
1468 CharUnits AllocAlign)
1469 : NumPlacementArgs(NumPlacementArgs),
1470 PassAlignmentToPlacementDelete(
1471 isAlignedAllocation(Mode: IAP.PassAlignment)),
1472 OperatorDelete(OperatorDelete), TypeIdentity(TypeIdentity), Ptr(Ptr),
1473 AllocSize(AllocSize), AllocAlign(AllocAlign) {}
1474
1475 void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
1476 assert(I < NumPlacementArgs && "index out of range");
1477 getPlacementArgs()[I] = {Arg, Type};
1478 }
1479
1480 void Emit(CodeGenFunction &CGF, Flags flags) override {
1481 const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
1482 CallArgList DeleteArgs;
1483 unsigned FirstNonTypeArg = 0;
1484 TypeAwareAllocationMode TypeAwareDeallocation =
1485 TypeAwareAllocationMode::No;
1486 if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
1487 TypeAwareDeallocation = TypeAwareAllocationMode::Yes;
1488 QualType SpecializedTypeIdentity = FPT->getParamType(i: 0);
1489 ++FirstNonTypeArg;
1490 DeleteArgs.add(rvalue: Traits::get(CGF, TypeIdentity), type: SpecializedTypeIdentity);
1491 }
1492 // The first argument after the type-identity parameter (if any) is always
1493 // a void* (or C* for a destroying operator delete for class type C).
1494 DeleteArgs.add(rvalue: Traits::get(CGF, Ptr), type: FPT->getParamType(i: FirstNonTypeArg));
1495
1496 // Figure out what other parameters we should be implicitly passing.
1497 UsualDeleteParams Params;
1498 if (NumPlacementArgs) {
1499 // A placement deallocation function is implicitly passed an alignment
1500 // if the placement allocation function was, but is never passed a size.
1501 Params.Alignment =
1502 alignedAllocationModeFromBool(IsAligned: PassAlignmentToPlacementDelete);
1503 Params.TypeAwareDelete = TypeAwareDeallocation;
1504 Params.Size = isTypeAwareAllocation(Mode: Params.TypeAwareDelete);
1505 } else {
1506 // For a non-placement new-expression, 'operator delete' can take a
1507 // size and/or an alignment if it has the right parameters.
1508 Params = getUsualDeleteParams(FD: OperatorDelete);
1509 }
1510
1511 assert(!Params.DestroyingDelete &&
1512 "should not call destroying delete in a new-expression");
1513
1514 // The second argument can be a std::size_t (for non-placement delete).
1515 if (Params.Size)
1516 DeleteArgs.add(rvalue: Traits::get(CGF, AllocSize),
1517 type: CGF.getContext().getSizeType());
1518
1519 // The next (second or third) argument can be a std::align_val_t, which
1520 // is an enum whose underlying type is std::size_t.
1521 // FIXME: Use the right type as the parameter type. Note that in a call
1522 // to operator delete(size_t, ...), we may not have it available.
1523 if (isAlignedAllocation(Mode: Params.Alignment))
1524 DeleteArgs.add(rvalue: RValue::get(V: llvm::ConstantInt::get(
1525 Ty: CGF.SizeTy, V: AllocAlign.getQuantity())),
1526 type: CGF.getContext().getSizeType());
1527
1528 // Pass the rest of the arguments, which must match exactly.
1529 for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1530 auto Arg = getPlacementArgs()[I];
1531 DeleteArgs.add(rvalue: Traits::get(CGF, Arg.ArgValue), type: Arg.ArgType);
1532 }
1533
1534 // Call 'operator delete'.
1535 EmitNewDeleteCall(CGF, CalleeDecl: OperatorDelete, CalleeType: FPT, Args: DeleteArgs);
1536 }
1537 };
1538}
1539
1540/// Enter a cleanup to call 'operator delete' if the initializer in a
1541/// new-expression throws.
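///
/// For example (illustrative), in
///   p = cond ? new (buf) T(f()) : nullptr;
/// the allocation is emitted inside a conditional branch, so the pointer,
/// size, and placement arguments are saved via DominatingValue<RValue>::save
/// so that they still dominate the cleanup when it fires.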
1542static void EnterNewDeleteCleanup(CodeGenFunction &CGF, const CXXNewExpr *E,
1543 RValue TypeIdentity, Address NewPtr,
1544 llvm::Value *AllocSize, CharUnits AllocAlign,
1545 const CallArgList &NewArgs) {
1546 unsigned NumNonPlacementArgs = E->getNumImplicitArgs();
1547
1548 // If we're not inside a conditional branch, then the cleanup will
1549 // dominate and we can do the easier (and more efficient) thing.
1550 if (!CGF.isInConditionalBranch()) {
1551 struct DirectCleanupTraits {
1552 typedef llvm::Value *ValueTy;
1553 typedef RValue RValueTy;
1554 static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
1555 static RValue get(CodeGenFunction &, RValueTy V) { return V; }
1556 };
1557
1558 typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
1559
1560 DirectCleanup *Cleanup = CGF.EHStack.pushCleanupWithExtra<DirectCleanup>(
1561 Kind: EHCleanup, N: E->getNumPlacementArgs(), A: E->getOperatorDelete(),
1562 A: TypeIdentity, A: NewPtr.emitRawPointer(CGF), A: AllocSize,
1563 A: E->implicitAllocationParameters(), A: AllocAlign);
1564 for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
1565 auto &Arg = NewArgs[I + NumNonPlacementArgs];
1566 Cleanup->setPlacementArg(I, Arg: Arg.getRValue(CGF), Type: Arg.Ty);
1567 }
1568
1569 return;
1570 }
1571
1572 // Otherwise, we need to save all this stuff.
1573 DominatingValue<RValue>::saved_type SavedNewPtr =
1574 DominatingValue<RValue>::save(CGF, value: RValue::get(Addr: NewPtr, CGF));
1575 DominatingValue<RValue>::saved_type SavedAllocSize =
1576 DominatingValue<RValue>::save(CGF, value: RValue::get(V: AllocSize));
1577 DominatingValue<RValue>::saved_type SavedTypeIdentity =
1578 DominatingValue<RValue>::save(CGF, value: TypeIdentity);
1579 struct ConditionalCleanupTraits {
1580 typedef DominatingValue<RValue>::saved_type ValueTy;
1581 typedef DominatingValue<RValue>::saved_type RValueTy;
1582 static RValue get(CodeGenFunction &CGF, ValueTy V) {
1583 return V.restore(CGF);
1584 }
1585 };
1586 typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
1587
1588 ConditionalCleanup *Cleanup =
1589 CGF.EHStack.pushCleanupWithExtra<ConditionalCleanup>(
1590 Kind: EHCleanup, N: E->getNumPlacementArgs(), A: E->getOperatorDelete(),
1591 A: SavedTypeIdentity, A: SavedNewPtr, A: SavedAllocSize,
1592 A: E->implicitAllocationParameters(), A: AllocAlign);
1593 for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
1594 auto &Arg = NewArgs[I + NumNonPlacementArgs];
1595 Cleanup->setPlacementArg(
1596 I, Arg: DominatingValue<RValue>::save(CGF, value: Arg.getRValue(CGF)), Type: Arg.Ty);
1597 }
1598
1599 CGF.initFullExprCleanup();
1600}
1601
1602llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1603 // The element type being allocated.
1604 QualType allocType = getContext().getBaseElementType(QT: E->getAllocatedType());
1605
1606 // 1. Build a call to the allocation function.
1607 FunctionDecl *allocator = E->getOperatorNew();
1608
1609 // If there is a brace-initializer or C++20 parenthesized initializer, we
1610 // cannot allocate fewer elements than there are initializers.
1611 unsigned minElements = 0;
1612 unsigned IndexOfAlignArg = 1;
1613 if (E->isArray() && E->hasInitializer()) {
1614 const Expr *Init = E->getInitializer();
1615 const InitListExpr *ILE = dyn_cast<InitListExpr>(Val: Init);
1616 const CXXParenListInitExpr *CPLIE = dyn_cast<CXXParenListInitExpr>(Val: Init);
1617 const Expr *IgnoreParen = Init->IgnoreParenImpCasts();
1618 if ((ILE && ILE->isStringLiteralInit()) ||
1619 isa<StringLiteral>(Val: IgnoreParen) || isa<ObjCEncodeExpr>(Val: IgnoreParen)) {
1620 minElements =
1621 cast<ConstantArrayType>(Val: Init->getType()->getAsArrayTypeUnsafe())
1622 ->getZExtSize();
1623 } else if (ILE || CPLIE) {
1624 minElements = ILE ? ILE->getNumInits() : CPLIE->getInitExprs().size();
1625 }
1626 }
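 // For example (illustrative), 'new int[n]{1, 2, 3}' yields minElements == 3,
 // and a string-literal initializer contributes the literal's array bound.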
1627
1628 llvm::Value *numElements = nullptr;
1629 llvm::Value *allocSizeWithoutCookie = nullptr;
1630 llvm::Value *allocSize =
1631 EmitCXXNewAllocSize(CGF&: *this, e: E, minElements, numElements,
1632 sizeWithoutCookie&: allocSizeWithoutCookie);
1633 CharUnits allocAlign = getContext().getTypeAlignInChars(T: allocType);
1634
1635 // Emit the allocation call. If the allocator is a global placement
1636 // operator, just "inline" it directly.
1637 Address allocation = Address::invalid();
1638 CallArgList allocatorArgs;
1639 RValue TypeIdentityArg;
1640 if (allocator->isReservedGlobalPlacementOperator()) {
1641 assert(E->getNumPlacementArgs() == 1);
1642 const Expr *arg = *E->placement_arguments().begin();
1643
1644 LValueBaseInfo BaseInfo;
1645 allocation = EmitPointerWithAlignment(Addr: arg, BaseInfo: &BaseInfo);
1646
1647 // The pointer expression will, in many cases, be an opaque void*.
1648 // In these cases, discard the computed alignment and use the
1649 // formal alignment of the allocated type.
1650 if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
1651 allocation.setAlignment(allocAlign);
1652
1653 // Set up allocatorArgs for the call to operator delete if it's not
1654 // the reserved global operator.
1655 if (E->getOperatorDelete() &&
1656 !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1657 allocatorArgs.add(rvalue: RValue::get(V: allocSize), type: getContext().getSizeType());
1658 allocatorArgs.add(rvalue: RValue::get(Addr: allocation, CGF&: *this), type: arg->getType());
1659 }
1660
1661 } else {
1662 const FunctionProtoType *allocatorType =
1663 allocator->getType()->castAs<FunctionProtoType>();
1664 ImplicitAllocationParameters IAP = E->implicitAllocationParameters();
1665 unsigned ParamsToSkip = 0;
1666 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
1667 QualType SpecializedTypeIdentity = allocatorType->getParamType(i: 0);
1668 CXXScalarValueInitExpr TypeIdentityParam(SpecializedTypeIdentity, nullptr,
1669 SourceLocation());
1670 TypeIdentityArg = EmitAnyExprToTemp(E: &TypeIdentityParam);
1671 allocatorArgs.add(rvalue: TypeIdentityArg, type: SpecializedTypeIdentity);
1672 ++ParamsToSkip;
1673 ++IndexOfAlignArg;
1674 }
1675 // The allocation size is the first argument.
1676 QualType sizeType = getContext().getSizeType();
1677 allocatorArgs.add(rvalue: RValue::get(V: allocSize), type: sizeType);
1678 ++ParamsToSkip;
1679
1680 if (allocSize != allocSizeWithoutCookie) {
1681 CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
1682 allocAlign = std::max(a: allocAlign, b: cookieAlign);
1683 }
1684
1685 // The allocation alignment may be passed as the second argument.
1686 if (isAlignedAllocation(Mode: IAP.PassAlignment)) {
1687 QualType AlignValT = sizeType;
1688 if (allocatorType->getNumParams() > IndexOfAlignArg) {
1689 AlignValT = allocatorType->getParamType(i: IndexOfAlignArg);
1690 assert(getContext().hasSameUnqualifiedType(
1691 AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
1692 sizeType) &&
1693 "wrong type for alignment parameter");
1694 ++ParamsToSkip;
1695 } else {
1696 // Corner case, passing alignment to 'operator new(size_t, ...)'.
1697 assert(allocator->isVariadic() && "can't pass alignment to allocator");
1698 }
1699 allocatorArgs.add(
1700 rvalue: RValue::get(V: llvm::ConstantInt::get(Ty: SizeTy, V: allocAlign.getQuantity())),
1701 type: AlignValT);
1702 }
1703
1704 // FIXME: Why do we not pass a CalleeDecl here?
1705 EmitCallArgs(Args&: allocatorArgs, Prototype: allocatorType, ArgRange: E->placement_arguments(),
1706 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
1707
1708 RValue RV =
1709 EmitNewDeleteCall(CGF&: *this, CalleeDecl: allocator, CalleeType: allocatorType, Args: allocatorArgs);
1710
1711 // Set !heapallocsite metadata on the call to operator new.
1712 if (getDebugInfo())
1713 if (auto *newCall = dyn_cast<llvm::CallBase>(Val: RV.getScalarVal()))
1714 getDebugInfo()->addHeapAllocSiteMetadata(CallSite: newCall, AllocatedTy: allocType,
1715 Loc: E->getExprLoc());
1716
1717 // If this was a call to a global replaceable allocation function that does
1718 // not take an alignment argument, the allocator is known to produce
1719 // storage that's suitably aligned for any object that fits, up to a known
1720 // threshold. Otherwise assume it's suitably aligned for the allocated type.
1721 CharUnits allocationAlign = allocAlign;
1722 if (!E->passAlignment() &&
1723 allocator->isReplaceableGlobalAllocationFunction()) {
1724 unsigned AllocatorAlign = llvm::bit_floor(Value: std::min<uint64_t>(
1725 a: Target.getNewAlign(), b: getContext().getTypeSize(T: allocType)));
1726 allocationAlign = std::max(
1727 a: allocationAlign, b: getContext().toCharUnitsFromBits(BitSize: AllocatorAlign));
1728 }
1729
1730 allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
1731 }
1732
1733 // Emit a null check on the allocation result if the allocation
1734 // function is allowed to return null (because it has a non-throwing
1735 // exception spec or is the reserved placement new) and we have an
1736 // interesting initializer or will be running sanitizers on the initialization.
1737 bool nullCheck = E->shouldNullCheckAllocation() &&
1738 (!allocType.isPODType(Context: getContext()) || E->hasInitializer() ||
1739 sanitizePerformTypeCheck());
1740
1741 llvm::BasicBlock *nullCheckBB = nullptr;
1742 llvm::BasicBlock *contBB = nullptr;
1743
1744 // The null-check means that the initializer is conditionally
1745 // evaluated.
1746 ConditionalEvaluation conditional(*this);
1747
1748 if (nullCheck) {
1749 conditional.begin(CGF&: *this);
1750
1751 nullCheckBB = Builder.GetInsertBlock();
1752 llvm::BasicBlock *notNullBB = createBasicBlock(name: "new.notnull");
1753 contBB = createBasicBlock(name: "new.cont");
1754
1755 llvm::Value *isNull = Builder.CreateIsNull(Addr: allocation, Name: "new.isnull");
1756 Builder.CreateCondBr(Cond: isNull, True: contBB, False: notNullBB);
1757 EmitBlock(BB: notNullBB);
1758 }
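 // With the null check in place, the emitted CFG is roughly (illustrative):
 //   <current block>: %isnull = icmp eq ptr %alloc, null
 //                    br i1 %isnull, label %new.cont, label %new.notnull
 //   new.notnull:     ... cookie, initialization ...
 //   new.cont:        phi merging the initialized pointer with null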
1759
1760 // If there's an operator delete, enter a cleanup to call it if an
1761 // exception is thrown.
1762 EHScopeStack::stable_iterator operatorDeleteCleanup;
1763 llvm::Instruction *cleanupDominator = nullptr;
1764 if (E->getOperatorDelete() &&
1765 !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1766 EnterNewDeleteCleanup(CGF&: *this, E, TypeIdentity: TypeIdentityArg, NewPtr: allocation, AllocSize: allocSize,
1767 AllocAlign: allocAlign, NewArgs: allocatorArgs);
1768 operatorDeleteCleanup = EHStack.stable_begin();
1769 cleanupDominator = Builder.CreateUnreachable();
1770 }
1771
1772 assert((allocSize == allocSizeWithoutCookie) ==
1773 CalculateCookiePadding(*this, E).isZero());
1774 if (allocSize != allocSizeWithoutCookie) {
1775 assert(E->isArray());
1776 allocation = CGM.getCXXABI().InitializeArrayCookie(CGF&: *this, NewPtr: allocation,
1777 NumElements: numElements,
1778 expr: E, ElementType: allocType);
1779 }
1780
1781 llvm::Type *elementTy = ConvertTypeForMem(T: allocType);
1782 Address result = allocation.withElementType(ElemTy: elementTy);
1783
1784 // Pass the pointer through launder.invariant.group to avoid propagating
1785 // vptr information that may be associated with the previous type.
1786 // To avoid breaking LTO across different optimization levels, do this
1787 // regardless of the optimization level.
1788 if (CGM.getCodeGenOpts().StrictVTablePointers &&
1789 allocator->isReservedGlobalPlacementOperator())
1790 result = Builder.CreateLaunderInvariantGroup(Addr: result);
1791
1792 // Emit sanitizer checks for pointer value now, so that in the case of an
1793 // array it is checked only once and not at each constructor call. We may
1794 // have already checked that the pointer is non-null.
1795 // FIXME: If we have an array cookie and a potentially-throwing allocator,
1796 // we'll null check the wrong pointer here.
1797 SanitizerSet SkippedChecks;
1798 SkippedChecks.set(K: SanitizerKind::Null, Value: nullCheck);
1799 EmitTypeCheck(TCK: CodeGenFunction::TCK_ConstructorCall,
1800 Loc: E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
1801 Addr: result, Type: allocType, Alignment: result.getAlignment(), SkippedChecks,
1802 ArraySize: numElements);
1803
1804 EmitNewInitializer(CGF&: *this, E, ElementType: allocType, ElementTy: elementTy, NewPtr: result, NumElements: numElements,
1805 AllocSizeWithoutCookie: allocSizeWithoutCookie);
1806 llvm::Value *resultPtr = result.emitRawPointer(CGF&: *this);
1807
1808 // Deactivate the 'operator delete' cleanup if we finished
1809 // initialization.
1810 if (operatorDeleteCleanup.isValid()) {
1811 DeactivateCleanupBlock(Cleanup: operatorDeleteCleanup, DominatingIP: cleanupDominator);
1812 cleanupDominator->eraseFromParent();
1813 }
1814
1815 if (nullCheck) {
1816 conditional.end(CGF&: *this);
1817
1818 llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1819 EmitBlock(BB: contBB);
1820
1821 llvm::PHINode *PHI = Builder.CreatePHI(Ty: resultPtr->getType(), NumReservedValues: 2);
1822 PHI->addIncoming(V: resultPtr, BB: notNullBB);
1823 PHI->addIncoming(V: llvm::Constant::getNullValue(Ty: resultPtr->getType()),
1824 BB: nullCheckBB);
1825
1826 resultPtr = PHI;
1827 }
1828
1829 return resultPtr;
1830}
1831
1832void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1833 llvm::Value *DeletePtr, QualType DeleteTy,
1834 llvm::Value *NumElements,
1835 CharUnits CookieSize) {
1836 assert((!NumElements && CookieSize.isZero()) ||
1837 DeleteFD->getOverloadedOperator() == OO_Array_Delete);
1838
1839 const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
1840 CallArgList DeleteArgs;
1841
1842 auto Params = getUsualDeleteParams(FD: DeleteFD);
1843 auto ParamTypeIt = DeleteFTy->param_type_begin();
1844
1845 std::optional<llvm::AllocaInst *> TagAlloca;
1846 auto EmitTag = [&](QualType TagType, const char *TagName) {
1847 assert(!TagAlloca);
1848 llvm::Type *Ty = getTypes().ConvertType(T: TagType);
1849 CharUnits Align = CGM.getNaturalTypeAlignment(T: TagType);
1850 llvm::AllocaInst *TagAllocation = CreateTempAlloca(Ty, Name: TagName);
1851 TagAllocation->setAlignment(Align.getAsAlign());
1852 DeleteArgs.add(rvalue: RValue::getAggregate(addr: Address(TagAllocation, Ty, Align)),
1853 type: TagType);
1854 TagAlloca = TagAllocation;
1855 };
1856
1857 // Pass the std::type_identity tag if present.
1858 if (isTypeAwareAllocation(Mode: Params.TypeAwareDelete))
1859 EmitTag(*ParamTypeIt++, "typeaware.delete.tag");
1860
1861 // Pass the pointer itself.
1862 QualType ArgTy = *ParamTypeIt++;
1863 DeleteArgs.add(rvalue: RValue::get(V: DeletePtr), type: ArgTy);
1864
1865 // Pass the std::destroying_delete tag if present.
1866 if (Params.DestroyingDelete)
1867 EmitTag(*ParamTypeIt++, "destroying.delete.tag");
1868
1869 // Pass the size if the delete function has a size_t parameter.
1870 if (Params.Size) {
1871 QualType SizeType = *ParamTypeIt++;
1872 CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(T: DeleteTy);
1873 llvm::Value *Size = llvm::ConstantInt::get(Ty: ConvertType(T: SizeType),
1874 V: DeleteTypeSize.getQuantity());
1875
1876 // For array new, multiply by the number of elements.
1877 if (NumElements)
1878 Size = Builder.CreateMul(LHS: Size, RHS: NumElements);
1879
1880 // If there is a cookie, add the cookie size.
1881 if (!CookieSize.isZero())
1882 Size = Builder.CreateAdd(
1883 LHS: Size, RHS: llvm::ConstantInt::get(Ty: SizeTy, V: CookieSize.getQuantity()));
1884
1885 DeleteArgs.add(rvalue: RValue::get(V: Size), type: SizeType);
1886 }
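 // Illustrative arithmetic: deleting an array of ten 4-byte elements with an
 // 8-byte cookie passes 4 * 10 + 8 == 48 as the size argument.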
1887
1888 // Pass the alignment if the delete function has an align_val_t parameter.
1889 if (isAlignedAllocation(Mode: Params.Alignment)) {
1890 QualType AlignValType = *ParamTypeIt++;
1891 CharUnits DeleteTypeAlign =
1892 getContext().toCharUnitsFromBits(BitSize: getContext().getTypeAlignIfKnown(
1893 T: DeleteTy, NeedsPreferredAlignment: true /* NeedsPreferredAlignment */));
1894 llvm::Value *Align = llvm::ConstantInt::get(Ty: ConvertType(T: AlignValType),
1895 V: DeleteTypeAlign.getQuantity());
1896 DeleteArgs.add(rvalue: RValue::get(V: Align), type: AlignValType);
1897 }
1898
1899 assert(ParamTypeIt == DeleteFTy->param_type_end() &&
1900 "unknown parameter to usual delete function");
1901
1902 // Emit the call to delete.
1903 EmitNewDeleteCall(CGF&: *this, CalleeDecl: DeleteFD, CalleeType: DeleteFTy, Args: DeleteArgs);
1904
1905 // If call argument lowering didn't use the generated tag argument alloca,
1906 // remove it.
1907 if (TagAlloca && (*TagAlloca)->use_empty())
1908 (*TagAlloca)->eraseFromParent();
1909}
1910namespace {
1911 /// Calls the given 'operator delete' on a single object.
1912 struct CallObjectDelete final : EHScopeStack::Cleanup {
1913 llvm::Value *Ptr;
1914 const FunctionDecl *OperatorDelete;
1915 QualType ElementType;
1916
1917 CallObjectDelete(llvm::Value *Ptr,
1918 const FunctionDecl *OperatorDelete,
1919 QualType ElementType)
1920 : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1921
1922 void Emit(CodeGenFunction &CGF, Flags flags) override {
1923 CGF.EmitDeleteCall(DeleteFD: OperatorDelete, DeletePtr: Ptr, DeleteTy: ElementType);
1924 }
1925 };
1926}
1927
1928void
1929CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1930 llvm::Value *CompletePtr,
1931 QualType ElementType) {
1932 EHStack.pushCleanup<CallObjectDelete>(Kind: NormalAndEHCleanup, A: CompletePtr,
1933 A: OperatorDelete, A: ElementType);
1934}
1935
1936/// Emit the code for deleting a single object with a destroying operator
1937/// delete. If the element type has a non-virtual destructor, Ptr has already
1938/// been converted to the type of the parameter of 'operator delete'. Otherwise
1939/// Ptr points to an object of the static type.
1940static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
1941 const CXXDeleteExpr *DE, Address Ptr,
1942 QualType ElementType) {
1943 auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
1944 if (Dtor && Dtor->isVirtual())
1945 CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1946 Dtor);
1947 else
1948 CGF.EmitDeleteCall(DeleteFD: DE->getOperatorDelete(), DeletePtr: Ptr.emitRawPointer(CGF),
1949 DeleteTy: ElementType);
1950}
1951
1952/// Emit the code for deleting a single object.
1953/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
1954/// if not.
1955static bool EmitObjectDelete(CodeGenFunction &CGF,
1956 const CXXDeleteExpr *DE,
1957 Address Ptr,
1958 QualType ElementType,
1959 llvm::BasicBlock *UnconditionalDeleteBlock) {
1960 // C++11 [expr.delete]p3:
1961 // If the static type of the object to be deleted is different from its
1962 // dynamic type, the static type shall be a base class of the dynamic type
1963 // of the object to be deleted and the static type shall have a virtual
1964 // destructor or the behavior is undefined.
1965 CGF.EmitTypeCheck(TCK: CodeGenFunction::TCK_MemberCall, Loc: DE->getExprLoc(), Addr: Ptr,
1966 Type: ElementType);
1967
1968 const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
1969 assert(!OperatorDelete->isDestroyingOperatorDelete());
1970
1971 // Find the destructor for the type, if applicable. If the
1972 // destructor is virtual, we'll just emit the vcall and return.
1973 const CXXDestructorDecl *Dtor = nullptr;
1974 if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1975 CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: RT->getDecl());
1976 if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1977 Dtor = RD->getDestructor();
1978
1979 if (Dtor->isVirtual()) {
1980 bool UseVirtualCall = true;
1981 const Expr *Base = DE->getArgument();
1982 if (auto *DevirtualizedDtor =
1983 dyn_cast_or_null<const CXXDestructorDecl>(
1984 Val: Dtor->getDevirtualizedMethod(
1985 Base, IsAppleKext: CGF.CGM.getLangOpts().AppleKext))) {
1986 UseVirtualCall = false;
1987 const CXXRecordDecl *DevirtualizedClass =
1988 DevirtualizedDtor->getParent();
1989 if (declaresSameEntity(D1: getCXXRecord(E: Base), D2: DevirtualizedClass)) {
1990 // Devirtualized to the class of the base type (the type of the
1991 // whole expression).
1992 Dtor = DevirtualizedDtor;
1993 } else {
1994 // Devirtualized to some other type. Would need to cast the this
1995 // pointer to that type but we don't have support for that yet, so
1996 // do a virtual call. FIXME: handle the case where it is
1997 // devirtualized to the derived type (the type of the inner
1998 // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
1999 UseVirtualCall = true;
2000 }
2001 }
2002 if (UseVirtualCall) {
2003 CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
2004 Dtor);
2005 return false;
2006 }
2007 }
2008 }
2009 }
2010
2011 // Make sure that we call delete even if the dtor throws.
2012 // This doesn't have to be a conditional cleanup because we're going
2013 // to pop it off in a second.
2014 CGF.EHStack.pushCleanup<CallObjectDelete>(
2015 Kind: NormalAndEHCleanup, A: Ptr.emitRawPointer(CGF), A: OperatorDelete, A: ElementType);
2016
2017 if (Dtor)
2018 CGF.EmitCXXDestructorCall(D: Dtor, Type: Dtor_Complete,
2019 /*ForVirtualBase=*/false,
2020 /*Delegating=*/false,
2021 This: Ptr, ThisTy: ElementType);
2022 else if (auto Lifetime = ElementType.getObjCLifetime()) {
2023 switch (Lifetime) {
2024 case Qualifiers::OCL_None:
2025 case Qualifiers::OCL_ExplicitNone:
2026 case Qualifiers::OCL_Autoreleasing:
2027 break;
2028
2029 case Qualifiers::OCL_Strong:
2030 CGF.EmitARCDestroyStrong(addr: Ptr, precise: ARCPreciseLifetime);
2031 break;
2032
2033 case Qualifiers::OCL_Weak:
2034 CGF.EmitARCDestroyWeak(addr: Ptr);
2035 break;
2036 }
2037 }
2038
2039 // When optimizing for size, call 'operator delete' unconditionally.
2040 if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
2041 CGF.EmitBlock(BB: UnconditionalDeleteBlock);
2042 CGF.PopCleanupBlock();
2043 return true;
2044 }
2045
2046 CGF.PopCleanupBlock();
2047 return false;
2048}
2049
2050namespace {
2051 /// Calls the given 'operator delete' on an array of objects.
2052 struct CallArrayDelete final : EHScopeStack::Cleanup {
2053 llvm::Value *Ptr;
2054 const FunctionDecl *OperatorDelete;
2055 llvm::Value *NumElements;
2056 QualType ElementType;
2057 CharUnits CookieSize;
2058
2059 CallArrayDelete(llvm::Value *Ptr,
2060 const FunctionDecl *OperatorDelete,
2061 llvm::Value *NumElements,
2062 QualType ElementType,
2063 CharUnits CookieSize)
2064 : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
2065 ElementType(ElementType), CookieSize(CookieSize) {}
2066
2067 void Emit(CodeGenFunction &CGF, Flags flags) override {
2068 CGF.EmitDeleteCall(DeleteFD: OperatorDelete, DeletePtr: Ptr, DeleteTy: ElementType, NumElements,
2069 CookieSize);
2070 }
2071 };
2072}
2073
2074/// Emit the code for deleting an array of objects.
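///
/// The ABI-provided array cookie (read via ReadArrayCookie below) supplies
/// the element count and the original allocation pointer; on the Itanium
/// ABI, for instance, the count is typically a size_t stored immediately
/// before the first element. (Illustrative summary.)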
2075static void EmitArrayDelete(CodeGenFunction &CGF,
2076 const CXXDeleteExpr *E,
2077 Address deletedPtr,
2078 QualType elementType) {
2079 llvm::Value *numElements = nullptr;
2080 llvm::Value *allocatedPtr = nullptr;
2081 CharUnits cookieSize;
2082 CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr: deletedPtr, expr: E, ElementType: elementType,
2083 NumElements&: numElements, AllocPtr&: allocatedPtr, CookieSize&: cookieSize);
2084
2085 assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
2086
2087 // Make sure that we call delete even if one of the dtors throws.
2088 const FunctionDecl *operatorDelete = E->getOperatorDelete();
2089 CGF.EHStack.pushCleanup<CallArrayDelete>(Kind: NormalAndEHCleanup,
2090 A: allocatedPtr, A: operatorDelete,
2091 A: numElements, A: elementType,
2092 A: cookieSize);
2093
2094 // Destroy the elements.
2095 if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
2096 assert(numElements && "no element count for a type with a destructor!");
2097
2098 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(T: elementType);
2099 CharUnits elementAlign =
2100 deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
2101
2102 llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF);
2103 llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
2104 Ty: deletedPtr.getElementType(), Ptr: arrayBegin, IdxList: numElements, Name: "delete.end");
2105
2106 // Note that it is legal to allocate a zero-length array, and we
2107 // can never fold the check away because the length should always
2108 // come from a cookie.
2109 CGF.emitArrayDestroy(begin: arrayBegin, end: arrayEnd, elementType, elementAlign,
2110 destroyer: CGF.getDestroyer(destructionKind: dtorKind),
2111 /*checkZeroLength*/ true,
2112 useEHCleanup: CGF.needsEHCleanup(kind: dtorKind));
2113 }
2114
2115 // Pop the cleanup block.
2116 CGF.PopCleanupBlock();
2117}
2118
2119void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
2120 const Expr *Arg = E->getArgument();
2121 Address Ptr = EmitPointerWithAlignment(Addr: Arg);
2122
2123 // Null check the pointer.
2124 //
2125 // We could avoid this null check if we can determine that the object
2126 // destruction is trivial and doesn't require an array cookie; we can
2127 // unconditionally perform the operator delete call in that case. For now, we
2128 // assume that deleted pointers are null rarely enough that it's better to
2129 // keep the branch. This might be worth revisiting for a -O0 code size win.
2130 llvm::BasicBlock *DeleteNotNull = createBasicBlock(name: "delete.notnull");
2131 llvm::BasicBlock *DeleteEnd = createBasicBlock(name: "delete.end");
2132
2133 llvm::Value *IsNull = Builder.CreateIsNull(Addr: Ptr, Name: "isnull");
2134
2135 Builder.CreateCondBr(Cond: IsNull, True: DeleteEnd, False: DeleteNotNull);
2136 EmitBlock(BB: DeleteNotNull);
2137 Ptr.setKnownNonNull();
2138
2139 QualType DeleteTy = E->getDestroyedType();
2140
2141 // A destroying operator delete overrides the entire operation of the
2142 // delete expression.
2143 if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
2144 EmitDestroyingObjectDelete(CGF&: *this, DE: E, Ptr, ElementType: DeleteTy);
2145 EmitBlock(BB: DeleteEnd);
2146 return;
2147 }
2148
2149 // We might be deleting a pointer to array. If so, GEP down to the
2150 // first non-array element.
2151 // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
2152 if (DeleteTy->isConstantArrayType()) {
2153 llvm::Value *Zero = Builder.getInt32(C: 0);
2154 SmallVector<llvm::Value*,8> GEP;
2155
2156 GEP.push_back(Elt: Zero); // point at the outermost array
2157
2158 // For each layer of array type we're pointing at:
2159 while (const ConstantArrayType *Arr
2160 = getContext().getAsConstantArrayType(T: DeleteTy)) {
2161 // 1. Unpeel the array type.
2162 DeleteTy = Arr->getElementType();
2163
2164 // 2. GEP to the first element of the array.
2165 GEP.push_back(Elt: Zero);
2166 }
2167
2168 Ptr = Builder.CreateInBoundsGEP(Addr: Ptr, IdxList: GEP, ElementType: ConvertTypeForMem(T: DeleteTy),
2169 Align: Ptr.getAlignment(), Name: "del.first");
2170 }
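 // For example (illustrative), for 'delete [] p' with 'A (*p)[3][7]' the loop
 // above unpeels DeleteTy down to 'A' and emits roughly:
 //   %del.first = getelementptr inbounds [3 x [7 x %struct.A]], ptr %p,
 //                i32 0, i32 0, i32 0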
2171
2172 assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
2173
2174 if (E->isArrayForm()) {
2175 EmitArrayDelete(CGF&: *this, E, deletedPtr: Ptr, elementType: DeleteTy);
2176 EmitBlock(BB: DeleteEnd);
2177 } else {
2178 if (!EmitObjectDelete(CGF&: *this, DE: E, Ptr, ElementType: DeleteTy, UnconditionalDeleteBlock: DeleteEnd))
2179 EmitBlock(BB: DeleteEnd);
2180 }
2181}
2182
2183static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
2184 llvm::Type *StdTypeInfoPtrTy,
2185 bool HasNullCheck) {
2186 // Get the vtable pointer.
2187 Address ThisPtr = CGF.EmitLValue(E).getAddress();
2188
2189 QualType SrcRecordTy = E->getType();
2190
2191 // C++ [class.cdtor]p4:
2192 // If the operand of typeid refers to the object under construction or
2193 // destruction and the static type of the operand is neither the constructor
2194 // or destructor’s class nor one of its bases, the behavior is undefined.
2195 CGF.EmitTypeCheck(TCK: CodeGenFunction::TCK_DynamicOperation, Loc: E->getExprLoc(),
2196 Addr: ThisPtr, Type: SrcRecordTy);
2197
2198 // Whether we need an explicit null pointer check. For example, with the
2199 // Microsoft ABI, if this is a call to __RTtypeid, the null pointer check and
2200 // exception throw are inside the __RTtypeid(nullptr) call.
2201 if (HasNullCheck &&
2202 CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(SrcRecordTy)) {
2203 llvm::BasicBlock *BadTypeidBlock =
2204 CGF.createBasicBlock(name: "typeid.bad_typeid");
2205 llvm::BasicBlock *EndBlock = CGF.createBasicBlock(name: "typeid.end");
2206
2207 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Addr: ThisPtr);
2208 CGF.Builder.CreateCondBr(Cond: IsNull, True: BadTypeidBlock, False: EndBlock);
2209
2210 CGF.EmitBlock(BB: BadTypeidBlock);
2211 CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
2212 CGF.EmitBlock(BB: EndBlock);
2213 }
2214
2215 return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
2216 StdTypeInfoPtrTy);
2217}
2218
2219llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
2220 // Ideally, we would like to use GlobalsInt8PtrTy here; however, we cannot,
2221 // primarily because the result of applying typeid is a value of type
2222 // type_info, which is declared & defined by the standard library
2223 // implementation and expects to operate on the generic (default) AS.
2224 // https://reviews.llvm.org/D157452 has more context, and a possible solution.
2225 llvm::Type *PtrTy = Int8PtrTy;
2226 LangAS GlobAS = CGM.GetGlobalVarAddressSpace(D: nullptr);
2227
2228 auto MaybeASCast = [=](auto &&TypeInfo) {
2229 if (GlobAS == LangAS::Default)
2230 return TypeInfo;
2231 return getTargetHooks().performAddrSpaceCast(CGM, TypeInfo, GlobAS, PtrTy);
2232 };
2233
2234 if (E->isTypeOperand()) {
2235 llvm::Constant *TypeInfo =
2236 CGM.GetAddrOfRTTIDescriptor(Ty: E->getTypeOperand(Context: getContext()));
2237 return MaybeASCast(TypeInfo);
2238 }
2239
2240 // C++ [expr.typeid]p2:
2241 // When typeid is applied to a glvalue expression whose type is a
2242 // polymorphic class type, the result refers to a std::type_info object
2243 // representing the type of the most derived object (that is, the dynamic
2244 // type) to which the glvalue refers.
2245 // If the operand is already the most derived object, there is no need to
2246 // look up the vtable.
2246 if (E->isPotentiallyEvaluated() && !E->isMostDerived(Context: getContext()))
2247 return EmitTypeidFromVTable(CGF&: *this, E: E->getExprOperand(), StdTypeInfoPtrTy: PtrTy,
2248 HasNullCheck: E->hasNullCheck());
2249
2250 QualType OperandTy = E->getExprOperand()->getType();
2251 return MaybeASCast(CGM.GetAddrOfRTTIDescriptor(Ty: OperandTy));
2252}
2253
2254static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
2255 QualType DestTy) {
2256 llvm::Type *DestLTy = CGF.ConvertType(T: DestTy);
2257 if (DestTy->isPointerType())
2258 return llvm::Constant::getNullValue(Ty: DestLTy);
2259
2260 /// C++ [expr.dynamic.cast]p9:
2261 /// A failed cast to reference type throws std::bad_cast
2262 if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
2263 return nullptr;
2264
2265 CGF.Builder.ClearInsertionPoint();
2266 return llvm::PoisonValue::get(T: DestLTy);
2267}
2268
2269llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
2270 const CXXDynamicCastExpr *DCE) {
2271 CGM.EmitExplicitCastExprType(E: DCE, CGF: this);
2272 QualType DestTy = DCE->getTypeAsWritten();
2273
2274 QualType SrcTy = DCE->getSubExpr()->getType();
2275
2276 // C++ [expr.dynamic.cast]p7:
2277 // If T is "pointer to cv void," then the result is a pointer to the most
2278 // derived object pointed to by v.
2279 bool IsDynamicCastToVoid = DestTy->isVoidPointerType();
2280 QualType SrcRecordTy;
2281 QualType DestRecordTy;
2282 if (IsDynamicCastToVoid) {
2283 SrcRecordTy = SrcTy->getPointeeType();
2284 // No DestRecordTy.
2285 } else if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
2286 SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
2287 DestRecordTy = DestPTy->getPointeeType();
2288 } else {
2289 SrcRecordTy = SrcTy;
2290 DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
2291 }
2292
2293 // C++ [class.cdtor]p5:
2294 // If the operand of the dynamic_cast refers to the object under
2295 // construction or destruction and the static type of the operand is not a
2296 // pointer to or object of the constructor or destructor’s own class or one
2297 // of its bases, the dynamic_cast results in undefined behavior.
2298 EmitTypeCheck(TCK: TCK_DynamicOperation, Loc: DCE->getExprLoc(), Addr: ThisAddr, Type: SrcRecordTy);
2299
2300 if (DCE->isAlwaysNull()) {
2301 if (llvm::Value *T = EmitDynamicCastToNull(CGF&: *this, DestTy)) {
2302 // Expression emission is expected to retain a valid insertion point.
2303 if (!Builder.GetInsertBlock())
2304 EmitBlock(BB: createBasicBlock(name: "dynamic_cast.unreachable"));
2305 return T;
2306 }
2307 }
2308
2309 assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
2310
2311 // If the destination is effectively final, the cast succeeds if and only
2312 // if the dynamic type of the pointer is exactly the destination type.
2313 bool IsExact = !IsDynamicCastToVoid &&
2314 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
2315 DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
2316 CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy);
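 // For example (illustrative), with 'struct D final : B {};' a
 // 'dynamic_cast<D *>(b)' can be lowered to comparing b's vptr against D's
 // vtable rather than calling the runtime (__dynamic_cast on Itanium).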
2317
2318 // C++ [expr.dynamic.cast]p4:
2319 // If the value of v is a null pointer value in the pointer case, the result
2320 // is the null pointer value of type T.
2321 bool ShouldNullCheckSrcValue =
2322 IsExact || CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(
2323 SrcIsPtr: SrcTy->isPointerType(), SrcRecordTy);
2324
2325 llvm::BasicBlock *CastNull = nullptr;
2326 llvm::BasicBlock *CastNotNull = nullptr;
2327 llvm::BasicBlock *CastEnd = createBasicBlock(name: "dynamic_cast.end");
2328
2329 if (ShouldNullCheckSrcValue) {
2330 CastNull = createBasicBlock(name: "dynamic_cast.null");
2331 CastNotNull = createBasicBlock(name: "dynamic_cast.notnull");
2332
2333 llvm::Value *IsNull = Builder.CreateIsNull(Addr: ThisAddr);
2334 Builder.CreateCondBr(Cond: IsNull, True: CastNull, False: CastNotNull);
2335 EmitBlock(BB: CastNotNull);
2336 }
2337
2338 llvm::Value *Value;
2339 if (IsDynamicCastToVoid) {
2340 Value = CGM.getCXXABI().emitDynamicCastToVoid(CGF&: *this, Value: ThisAddr, SrcRecordTy);
2341 } else if (IsExact) {
2342 // If the destination type is effectively final, this pointer points to the
2343 // right type if and only if its vptr has the right value.
2344 Value = CGM.getCXXABI().emitExactDynamicCast(
2345 CGF&: *this, Value: ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastSuccess: CastEnd, CastFail: CastNull);
2346 } else {
2347 assert(DestRecordTy->isRecordType() &&
2348 "destination type must be a record type!");
2349 Value = CGM.getCXXABI().emitDynamicCastCall(CGF&: *this, Value: ThisAddr, SrcRecordTy,
2350 DestTy, DestRecordTy, CastEnd);
2351 }
2352 CastNotNull = Builder.GetInsertBlock();
2353
2354 llvm::Value *NullValue = nullptr;
2355 if (ShouldNullCheckSrcValue) {
2356 EmitBranch(Block: CastEnd);
2357
2358 EmitBlock(BB: CastNull);
2359 NullValue = EmitDynamicCastToNull(CGF&: *this, DestTy);
2360 CastNull = Builder.GetInsertBlock();
2361
2362 EmitBranch(Block: CastEnd);
2363 }
2364
2365 EmitBlock(BB: CastEnd);
2366
2367 if (CastNull) {
2368 llvm::PHINode *PHI = Builder.CreatePHI(Ty: Value->getType(), NumReservedValues: 2);
2369 PHI->addIncoming(V: Value, BB: CastNotNull);
2370 PHI->addIncoming(V: NullValue, BB: CastNull);
2371
2372 Value = PHI;
2373 }
2374
2375 return Value;
2376}
2377