1//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Objective-C code as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGObjCRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "CodeGenPGO.h"
18#include "ConstantEmitter.h"
19#include "TargetInfo.h"
20#include "clang/AST/ASTContext.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/AST/NSAPI.h"
24#include "clang/AST/StmtObjC.h"
25#include "clang/Basic/Diagnostic.h"
26#include "clang/CodeGen/CGFunctionInfo.h"
27#include "clang/CodeGen/CodeGenABITypes.h"
28#include "llvm/Analysis/ObjCARCUtil.h"
29#include "llvm/BinaryFormat/MachO.h"
30#include "llvm/IR/Constants.h"
31#include "llvm/IR/DataLayout.h"
32#include "llvm/IR/InlineAsm.h"
33#include <optional>
34using namespace clang;
35using namespace CodeGen;
36
37typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
38static TryEmitResult
39tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
40static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
41 QualType ET,
42 RValue Result);
43
44/// Given the address of a variable of pointer type, find the correct
45/// null to store into it.
46static llvm::Constant *getNullForVariable(Address addr) {
47 llvm::Type *type = addr.getElementType();
48 return llvm::ConstantPointerNull::get(T: cast<llvm::PointerType>(Val: type));
49}
50
51/// Emits an instance of NSConstantString representing the object.
52llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
53{
54 llvm::Constant *C =
55 CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
56 return C;
57}
58
59/// EmitObjCBoxedExpr - This routine generates code to call
60/// the appropriate expression boxing method. This will either be
61/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
62/// or [NSValue valueWithBytes:objCType:].
63///
64llvm::Value *
65CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
66 // If decided in Sema constant initializers are supported by the runtime, not
67 // disabled, and the contents can be emitted as a constant NSNumber subclass;
68 // use the ConstEmitter
69 if (E->isExpressibleAsConstantInitializer()) {
70 ConstantEmitter ConstEmitter(CGM);
71 return ConstEmitter.tryEmitAbstract(E, T: E->getType());
72 }
73
74 // Generate the correct selector for this literal's concrete type.
75 // Get the method.
76 const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
77 const Expr *SubExpr = E->getSubExpr();
78
79 if (E->isExpressibleAsConstantInitializer()) {
80 ConstantEmitter ConstEmitter(CGM);
81 return ConstEmitter.tryEmitAbstract(E, T: E->getType());
82 }
83
84 assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
85 Selector Sel = BoxingMethod->getSelector();
86
87 // Generate a reference to the class pointer, which will be the receiver.
88 // Assumes that the method was introduced in the class that should be
89 // messaged (avoids pulling it out of the result type).
90 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
91 const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
92 llvm::Value *Receiver = Runtime.GetClass(CGF&: *this, OID: ClassDecl);
93
94 CallArgList Args;
95 const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
96 QualType ArgQT = ArgDecl->getType().getUnqualifiedType();
97
98 // ObjCBoxedExpr supports boxing of structs and unions
99 // via [NSValue valueWithBytes:objCType:]
100 const QualType ValueType(SubExpr->getType().getCanonicalType());
101 if (ValueType->isObjCBoxableRecordType()) {
102 // Emit CodeGen for first parameter
103 // and cast value to correct type
104 Address Temporary = CreateMemTemp(T: SubExpr->getType());
105 EmitAnyExprToMem(E: SubExpr, Location: Temporary, Quals: Qualifiers(), /*isInit*/ IsInitializer: true);
106 llvm::Value *BitCast = Builder.CreateBitCast(
107 V: Temporary.emitRawPointer(CGF&: *this), DestTy: ConvertType(T: ArgQT));
108 Args.add(rvalue: RValue::get(V: BitCast), type: ArgQT);
109
110 // Create char array to store type encoding
111 std::string Str;
112 getContext().getObjCEncodingForType(T: ValueType, S&: Str);
113 llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();
114
115 // Cast type encoding to correct type
116 const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
117 QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
118 llvm::Value *Cast = Builder.CreateBitCast(V: GV, DestTy: ConvertType(T: EncodingQT));
119
120 Args.add(rvalue: RValue::get(V: Cast), type: EncodingQT);
121 } else {
122 Args.add(rvalue: EmitAnyExpr(E: SubExpr), type: ArgQT);
123 }
124
125 RValue result = Runtime.GenerateMessageSend(
126 CGF&: *this, ReturnSlot: ReturnValueSlot(), ResultType: BoxingMethod->getReturnType(), Sel, Receiver,
127 CallArgs: Args, Class: ClassDecl, Method: BoxingMethod);
128 return Builder.CreateBitCast(V: result.getScalarVal(),
129 DestTy: ConvertType(T: E->getType()));
130}
131
/// Emit an Objective-C array or dictionary literal. The elements (and keys,
/// for dictionaries) are evaluated into stack buffer(s) which are then handed
/// to \p MethodWithObjects — the +arrayWithObjects:count: or
/// +dictionaryWithObjects:forKeys:count: class method — via a message send.
llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  // Exactly one of ALE/DLE will be non-null: E must be either an array
  // literal or a dictionary literal.
  const ObjCDictionaryLiteral *DLE = nullptr;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(Val: E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(Val: E);

  // If Sema determined the literal is expressible as a compile-time
  // constant, emit it directly with no runtime call.
  const bool CanBeExpressedAsConstant =
      ALE ? ALE->isExpressibleAsConstantInitializer()
          : DLE->isExpressibleAsConstantInitializer();
  if (CanBeExpressedAsConstant) {
    ConstantEmitter ConstEmitter(CGM);
    return ConstEmitter.tryEmitAbstract(E, T: E->getType());
  }

  // Optimize empty collections by referencing constants, when available and
  // constant initializers aren't supported
  uint64_t NumElements = ALE ? ALE->getNumElements() : DLE->getNumElements();

  if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
    StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Constant *Constant =
        CGM.CreateRuntimeVariable(Ty: ConvertType(T: IdTy), Name: ConstantName);
    LValue LV = MakeNaturalAlignAddrLValue(V: Constant, T: IdTy);
    llvm::Value *Ptr = EmitLoadOfScalar(lvalue: LV, Loc: E->getBeginLoc());
    // The shared empty-collection singleton never changes, so mark the load
    // invariant to let the optimizer hoist/CSE it.
    cast<llvm::LoadInst>(Val: Ptr)->setMetadata(
        KindID: llvm::LLVMContext::MD_invariant_load,
        Node: llvm::MDNode::get(Context&: getLLVMContext(), MDs: {}));
    return Builder.CreateBitCast(V: Ptr, DestTy: ConvertType(T: E->getType()));
  }

  // Compute the type of the array we're initializing.
  llvm::APInt APNumElements(Context.getTypeSize(T: Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType = Context.getConstantArrayType(
      EltTy: ElementType, ArySize: APNumElements, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal,
      /*IndexTypeQuals=*/0);

  // Allocate the temporary array(s).
  Address Objects = CreateMemTemp(T: ElementArrayType, Name: "objects");
  Address Keys = Address::invalid();
  if (DLE)
    Keys = CreateMemTemp(T: ElementArrayType, Name: "keys");

  // In ARC, we may need to do extra work to keep all the keys and
  // values alive until after the call.
  SmallVector<llvm::Value *, 16> NeededObjects;
  bool TrackNeededObjects =
      (getLangOpts().ObjCAutoRefCount &&
       CGM.getCodeGenOpts().OptimizationLevel != 0);

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the element and store it to the appropriate array slot.
      const Expr *Rhs = ALE->getElement(Index: i);
      LValue LV = MakeAddrLValue(Addr: Builder.CreateConstArrayGEP(Addr: Objects, Index: i),
                                 T: ElementType, Source: AlignmentSource::Decl);

      llvm::Value *value = EmitScalarExpr(E: Rhs);
      EmitStoreThroughLValue(Src: RValue::get(V: value), Dst: LV, isInit: true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(Elt: value);
      }
    } else {
      // Emit the key and store it to the appropriate array slot.
      const Expr *Key = DLE->getKeyValueElement(Index: i).Key;
      LValue KeyLV = MakeAddrLValue(Addr: Builder.CreateConstArrayGEP(Addr: Keys, Index: i),
                                    T: ElementType, Source: AlignmentSource::Decl);
      llvm::Value *keyValue = EmitScalarExpr(E: Key);
      EmitStoreThroughLValue(Src: RValue::get(V: keyValue), Dst: KeyLV, /*isInit=*/true);

      // Emit the value and store it to the appropriate array slot.
      const Expr *Value = DLE->getKeyValueElement(Index: i).Value;
      LValue ValueLV = MakeAddrLValue(Addr: Builder.CreateConstArrayGEP(Addr: Objects, Index: i),
                                      T: ElementType, Source: AlignmentSource::Decl);
      llvm::Value *valueValue = EmitScalarExpr(E: Value);
      EmitStoreThroughLValue(Src: RValue::get(V: valueValue), Dst: ValueLV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(Elt: keyValue);
        NeededObjects.push_back(Elt: valueValue);
      }
    }
  }

  // Generate the argument list: objects buffer, then (for dictionaries) the
  // keys buffer, then the element count — matching the method's parameters.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(rvalue: RValue::get(Addr: Objects, CGF&: *this), type: ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(rvalue: RValue::get(Addr: Keys, CGF&: *this), type: ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
      llvm::ConstantInt::get(Ty: CGM.getTypes().ConvertType(T: ArgQT), V: NumElements);
  Args.add(rvalue: RValue::get(V: Count), type: ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  assert(InterfacePointerType && "Unexpected InterfacePointerType - null");
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(CGF&: *this, OID: Class);

  // Generate the message send.
  RValue result = Runtime.GenerateMessageSend(
      CGF&: *this, ReturnSlot: ReturnValueSlot(), ResultType: MethodWithObjects->getReturnType(), Sel,
      Receiver, CallArgs: Args, Class, Method: MethodWithObjects);

  // The above message send needs these objects, but in ARC they are
  // passed in a buffer that is essentially __unsafe_unretained.
  // Therefore we must prevent the optimizer from releasing them until
  // after the call.
  if (TrackNeededObjects) {
    EmitARCIntrinsicUse(values: NeededObjects);
  }

  return Builder.CreateBitCast(V: result.getScalarVal(),
                               DestTy: ConvertType(T: E->getType()));
}
264
265llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
266 return EmitObjCCollectionLiteral(E, MethodWithObjects: E->getArrayWithObjectsMethod());
267}
268
269llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
270 const ObjCDictionaryLiteral *E) {
271 return EmitObjCCollectionLiteral(E, MethodWithObjects: E->getDictWithObjectsMethod());
272}
273
274/// Emit a selector.
275llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
276 // Untyped selector.
277 // Note that this implementation allows for non-constant strings to be passed
278 // as arguments to @selector(). Currently, the only thing preventing this
279 // behaviour is the type checking in the front end.
280 return CGM.getObjCRuntime().GetSelector(CGF&: *this, Sel: E->getSelector());
281}
282
283llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
284 // FIXME: This should pass the Decl not the name.
285 return CGM.getObjCRuntime().GenerateProtocolRef(CGF&: *this, OPD: E->getProtocol());
286}
287
288/// Adjust the type of an Objective-C object that doesn't match up due
289/// to type erasure at various points, e.g., related result types or the use
290/// of parameterized classes.
291static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
292 RValue Result) {
293 if (!ExpT->isObjCRetainableType())
294 return Result;
295
296 // If the converted types are the same, we're done.
297 llvm::Type *ExpLLVMTy = CGF.ConvertType(T: ExpT);
298 if (ExpLLVMTy == Result.getScalarVal()->getType())
299 return Result;
300
301 // We have applied a substitution. Cast the rvalue appropriately.
302 return RValue::get(V: CGF.Builder.CreateBitCast(V: Result.getScalarVal(),
303 DestTy: ExpLLVMTy));
304}
305
/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
///
/// Returns true when the caller must retain+autorelease the receiver so the
/// inner pointer stays valid, and false when the receiver already has
/// precise-lifetime semantics and no extension is needed.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(Val: receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Anything other than a simple l-value-to-r-value load must be extended.
    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(Val: receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Look through OVEs (again, now on the loaded l-value).
    if (auto opaque = dyn_cast<OpaqueValueExpr>(Val: receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(Val: receiver) || isa<ObjCIvarRefExpr>(Val: receiver))
      return false;

    // Otherwise, check for variables.
    // Note the DeclRef check deliberately uses the cast's operand, not the
    // OVE-stripped 'receiver'.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(Val: ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(Val: declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}
365
366/// Given an expression of ObjC pointer type, check whether it was
367/// immediately loaded from an ARC __weak l-value.
368static const Expr *findWeakLValue(const Expr *E) {
369 assert(E->getType()->isObjCRetainableType());
370 E = E->IgnoreParens();
371 if (auto CE = dyn_cast<CastExpr>(Val: E)) {
372 if (CE->getCastKind() == CK_LValueToRValue) {
373 if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
374 return CE->getSubExpr();
375 }
376 }
377
378 return nullptr;
379}
380
/// The ObjC runtime may provide entrypoints that are likely to be faster
/// than an ordinary message send of the appropriate selector.
///
/// The entrypoints are guaranteed to be equivalent to just sending the
/// corresponding message. If the entrypoint is implemented naively as just a
/// message send, using it is a trade-off: it sacrifices a few cycles of
/// overhead to save a small amount of code. However, it's possible for
/// runtimes to detect and special-case classes that use "standard"
/// behavior; if that's dynamically a large proportion of all objects, using
/// the entrypoint will also be faster than using a message send.
///
/// If the runtime does support a required entrypoint, then this method will
/// generate a call and return the resulting value. Otherwise it will return
/// std::nullopt and the caller can generate a msgSend instead.
static std::optional<llvm::Value *> tryGenerateSpecializedMessageSend(
    CodeGenFunction &CGF, QualType ResultType, llvm::Value *Receiver,
    const CallArgList &Args, Selector Sel, const ObjCMethodDecl *method,
    bool isClassMessage) {
  auto &CGM = CGF.CGM;
  // The whole transformation can be disabled via a codegen option.
  if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
    return std::nullopt;

  auto &Runtime = CGM.getLangOpts().ObjCRuntime;
  switch (Sel.getMethodFamily()) {
  case OMF_alloc:
    if (isClassMessage &&
        Runtime.shouldUseRuntimeFunctionsForAlloc() &&
        ResultType->isObjCObjectPointerType()) {
        // [Foo alloc] -> objc_alloc(Foo) or
        // [self alloc] -> objc_alloc(self)
        if (Sel.isUnarySelector() && Sel.getNameForSlot(argIndex: 0) == "alloc")
          return CGF.EmitObjCAlloc(value: Receiver, returnType: CGF.ConvertType(T: ResultType));
        // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo) or
        // [self allocWithZone:nil] -> objc_allocWithZone(self)
        // Only folded when the zone argument is a literal null pointer.
        if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
            Args.size() == 1 && Args.front().getType()->isPointerType() &&
            Sel.getNameForSlot(argIndex: 0) == "allocWithZone") {
          const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
          if (isa<llvm::ConstantPointerNull>(Val: arg))
            return CGF.EmitObjCAllocWithZone(value: Receiver,
                                            returnType: CGF.ConvertType(T: ResultType));
          return std::nullopt;
        }
    }
    break;

  case OMF_autorelease:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCAutorelease(value: Receiver, returnType: CGF.ConvertType(T: ResultType));
    break;

  case OMF_retain:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCRetainNonBlock(value: Receiver, returnType: CGF.ConvertType(T: ResultType));
    break;

  case OMF_release:
    if (ResultType->isVoidType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease()) {
      // -release produces no value: return an engaged optional holding
      // nullptr to signal "handled, nothing to return".
      CGF.EmitObjCRelease(value: Receiver, precise: ARCPreciseLifetime);
      return nullptr;
    }
    break;

  default:
    break;
  }
  return std::nullopt;
}
455
456CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend(
457 CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType,
458 Selector Sel, llvm::Value *Receiver, const CallArgList &Args,
459 const ObjCInterfaceDecl *OID, const ObjCMethodDecl *Method,
460 bool isClassMessage) {
461 if (std::optional<llvm::Value *> SpecializedResult =
462 tryGenerateSpecializedMessageSend(CGF, ResultType, Receiver, Args,
463 Sel, method: Method, isClassMessage)) {
464 return RValue::get(V: *SpecializedResult);
465 }
466 return GenerateMessageSend(CGF, ReturnSlot: Return, ResultType, Sel, Receiver, CallArgs: Args, Class: OID,
467 Method);
468}
469
470static void AppendFirstImpliedRuntimeProtocols(
471 const ObjCProtocolDecl *PD,
472 llvm::UniqueVector<const ObjCProtocolDecl *> &PDs) {
473 if (!PD->isNonRuntimeProtocol()) {
474 const auto *Can = PD->getCanonicalDecl();
475 PDs.insert(Entry: Can);
476 return;
477 }
478
479 for (const auto *ParentPD : PD->protocols())
480 AppendFirstImpliedRuntimeProtocols(PD: ParentPD, PDs);
481}
482
/// Compute the list of protocols to emit for the runtime from [begin, end):
/// runtime protocols are kept as-is, while each protocol marked non-runtime
/// is replaced by its "first-implied" runtime ancestors — skipping any that
/// are already implied (transitively inherited) by another protocol in the
/// resulting list.
std::vector<const ObjCProtocolDecl *>
CGObjCRuntime::GetRuntimeProtocolList(ObjCProtocolDecl::protocol_iterator begin,
                                      ObjCProtocolDecl::protocol_iterator end) {
  std::vector<const ObjCProtocolDecl *> RuntimePds;
  llvm::DenseSet<const ObjCProtocolDecl *> NonRuntimePDs;

  // Partition the input into runtime and non-runtime protocols, working with
  // canonical declarations so the set lookups below agree.
  for (; begin != end; ++begin) {
    const auto *It = *begin;
    const auto *Can = It->getCanonicalDecl();
    if (Can->isNonRuntimeProtocol())
      NonRuntimePDs.insert(V: Can);
    else
      RuntimePds.push_back(x: Can);
  }

  // If there are no non-runtime protocols then we can just stop now.
  if (NonRuntimePDs.empty())
    return RuntimePds;

  // Else we have to search through the non-runtime protocol's inheritancy
  // hierarchy DAG stopping whenever a branch either finds a runtime protocol or
  // a non-runtime protocol without any parents. These are the "first-implied"
  // protocols from a non-runtime protocol.
  llvm::UniqueVector<const ObjCProtocolDecl *> FirstImpliedProtos;
  for (const auto *PD : NonRuntimePDs)
    AppendFirstImpliedRuntimeProtocols(PD, PDs&: FirstImpliedProtos);

  // Walk the Runtime list to get all protocols implied via the inclusion of
  // this protocol, e.g. all protocols it inherits from including itself.
  llvm::DenseSet<const ObjCProtocolDecl *> AllImpliedProtocols;
  for (const auto *PD : RuntimePds) {
    const auto *Can = PD->getCanonicalDecl();
    AllImpliedProtocols.insert(V: Can);
    Can->getImpliedProtocols(IPs&: AllImpliedProtocols);
  }

  // Similar to above, walk the list of first-implied protocols to find the set
  // all the protocols implied excluding the listed protocols themselves since
  // they are not yet a part of the `RuntimePds` list.
  for (const auto *PD : FirstImpliedProtos) {
    PD->getImpliedProtocols(IPs&: AllImpliedProtocols);
  }

  // From the first-implied list we have to finish building the final protocol
  // list. If a protocol in the first-implied list was already implied via some
  // inheritance path through some other protocols then it would be redundant to
  // add it here and so we skip over it.
  for (const auto *PD : FirstImpliedProtos) {
    if (!AllImpliedProtocols.contains(V: PD)) {
      RuntimePds.push_back(x: PD);
    }
  }

  return RuntimePds;
}
538
/// Instead of '[[MyClass alloc] init]', try to generate
/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
/// caller side, as well as the optimized objc_alloc.
///
/// Returns std::nullopt when the expression does not match the exact
/// alloc+init pattern or the deployment runtime lacks the combined
/// entrypoint; the caller then emits ordinary message sends.
static std::optional<llvm::Value *>
tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
  auto &Runtime = CGF.getLangOpts().ObjCRuntime;
  if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit())
    return std::nullopt;

  // Match the exact pattern '[[MyClass alloc] init]'.
  Selector Sel = OME->getSelector();
  if (OME->getReceiverKind() != ObjCMessageExpr::Instance ||
      !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
      Sel.getNameForSlot(argIndex: 0) != "init")
    return std::nullopt;

  // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]'
  // with 'cls' a Class.
  auto *SubOME =
      dyn_cast<ObjCMessageExpr>(Val: OME->getInstanceReceiver()->IgnoreParenCasts());
  if (!SubOME)
    return std::nullopt;
  Selector SubSel = SubOME->getSelector();

  if (!SubOME->getType()->isObjCObjectPointerType() ||
      !SubSel.isUnarySelector() || SubSel.getNameForSlot(argIndex: 0) != "alloc")
    return std::nullopt;

  // Emit the receiver of the inner +alloc message: either an arbitrary
  // expression of type Class, or a direct class reference.
  llvm::Value *Receiver = nullptr;
  switch (SubOME->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    if (!SubOME->getInstanceReceiver()->getType()->isObjCClassType())
      return std::nullopt;
    Receiver = CGF.EmitScalarExpr(E: SubOME->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    QualType ReceiverType = SubOME->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->castAs<ObjCObjectType>();
    const ObjCInterfaceDecl *ID = ObjTy->getInterface();
    assert(ID && "null interface should be impossible here");
    Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, OID: ID);
    break;
  }
  case ObjCMessageExpr::SuperInstance:
  case ObjCMessageExpr::SuperClass:
    // Super sends dispatch differently; don't fold them.
    return std::nullopt;
  }

  return CGF.EmitObjCAllocInit(value: Receiver, resultType: CGF.ConvertType(T: OME->getType()));
}
590
/// Emit a general Objective-C message send. The receiver and call arguments
/// are computed here in runtime-independent code; only the lookup mechanism
/// and the first two implicit arguments vary between runtimes, so the actual
/// send is delegated to the CGObjCRuntime.
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes. We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // If the method is -retain, and the receiver's being loaded from
  // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
  if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
      method->getMethodFamily() == OMF_retain) {
    if (auto lvalueExpr = findWeakLValue(E: E->getInstanceReceiver())) {
      LValue lvalue = EmitLValue(E: lvalueExpr);
      llvm::Value *result = EmitARCLoadWeakRetained(addr: lvalue.getAddress());
      return AdjustObjCObjectType(CGF&: *this, ExpT: E->getType(), Result: RValue::get(V: result));
    }
  }

  // Try to fold the whole '[[Cls alloc] init]' pattern into a single
  // runtime call.
  if (std::optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(CGF&: *this, OME: E))
    return AdjustObjCObjectType(CGF&: *this, ExpT: E->getType(), Result: RValue::get(V: *Val));

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out. We don't want to Block_copy block receivers,
  // though.
  bool retainSelf =
    (!isDelegateInit &&
     CGM.getLangOpts().ObjCAutoRefCount &&
     method &&
     method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = nullptr;
  // Find the receiver
  QualType ReceiverType;
  llvm::Value *Receiver = nullptr;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    isClassMessage = ReceiverType->isObjCClassType();
    if (retainSelf) {
      // Try to fold the retain into the evaluation of the receiver itself;
      // if that succeeded (getInt()), no separate retain is needed.
      TryEmitResult ter = tryEmitARCRetainScalarExpr(CGF&: *this,
                                                   e: E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E: E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    OID = ReceiverType->castAs<ObjCObjectType>()->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(CGF&: *this, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(value: Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(message: E))
    Receiver = EmitARCRetainAutorelease(type: ReceiverType, value: Receiver);

  QualType ResultType = method ? method->getReturnType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, Prototype: method, ArgRange: E->arguments(), /*AC*/AbstractCallee(method));

  // For delegate init calls in ARC, do an unsafe store of null into
  // self. This represents the call taking direct ownership of that
  // value. We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");

    // Do an unsafe store of null into self.
    Address selfAddr =
      GetAddrOfLocalVar(VD: cast<ObjCMethodDecl>(Val: CurCodeDecl)->getSelfDecl());
    Builder.CreateStore(Val: getNullForVariable(addr: selfAddr), Addr: selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(Val: CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(Val: OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(CGF&: *this, ReturnSlot: Return, ResultType,
                                              Sel: E->getSelector(),
                                              Class: OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Self: Receiver,
                                              IsClassMessage: isClassMessage,
                                              CallArgs: Args,
                                              Method: method);
  } else {
    // Call runtime methods directly if we can.
    result = Runtime.GeneratePossiblySpecializedMessageSend(
        CGF&: *this, Return, ResultType, Sel: E->getSelector(), Receiver, Args, OID,
        Method: method, isClassMessage);
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self. This takes ownership of the value.
  if (isDelegateInit) {
    Address selfAddr =
      GetAddrOfLocalVar(VD: cast<ObjCMethodDecl>(Val: CurCodeDecl)->getSelfDecl());
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy = selfAddr.getElementType();
    newSelf = Builder.CreateBitCast(V: newSelf, DestTy: selfTy);

    Builder.CreateStore(Val: newSelf, Addr: selfAddr);
  }

  return AdjustObjCObjectType(CGF&: *this, ExpT: E->getType(), Result: result);
}
738
739namespace {
740struct FinishARCDealloc final : EHScopeStack::Cleanup {
741 void Emit(CodeGenFunction &CGF, Flags flags) override {
742 const ObjCMethodDecl *method = cast<ObjCMethodDecl>(Val: CGF.CurCodeDecl);
743
744 const ObjCImplDecl *impl = cast<ObjCImplDecl>(Val: method->getDeclContext());
745 const ObjCInterfaceDecl *iface = impl->getClassInterface();
746 if (!iface->getSuperClass()) return;
747
748 bool isCategory = isa<ObjCCategoryImplDecl>(Val: impl);
749
750 // Call [super dealloc] if we have a superclass.
751 llvm::Value *self = CGF.LoadObjCSelf();
752
753 CallArgList args;
754 CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnSlot: ReturnValueSlot(),
755 ResultType: CGF.getContext().VoidTy,
756 Sel: method->getSelector(),
757 Class: iface,
758 isCategoryImpl: isCategory,
759 Self: self,
760 /*is class msg*/ IsClassMessage: false,
761 CallArgs: args,
762 Method: method);
763 }
764};
765}
766
/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
///
/// \param OMD the method being emitted.
/// \param CD the container (interface, category, or implementation) the
///        method belongs to; forwarded to the runtime for mangling.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD) {
  SourceLocation StartLoc = OMD->getBeginLoc();
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (OMD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(MD: OMD);
  if (OMD->isDirectMethod()) {
    // Default hidden visibility
    Fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.isObjCDirectPreconditionThunkEnabled()) {
      // However, if we expose the symbol, and the decl (property or method)
      // have visibility attribute set ...
      const NamedDecl *Decl = OMD;
      if (const auto *PD = OMD->findPropertyDecl()) {
        Decl = PD;
      }
      // ... then respect source level visibility setting
      if (auto V = Decl->getExplicitVisibility(kind: NamedDecl::VisibilityForValue)) {
        Fn->setVisibility(CGM.GetLLVMVisibility(V: *V));
      }
    }
    CGM.SetLLVMFunctionAttributes(GD: OMD, Info: FI, F: Fn, /*IsThunk=*/false);
    CGM.SetLLVMFunctionAttributesForDefinition(D: OMD, F: Fn);
  } else {
    CGM.SetInternalFunctionAttributes(GD: OMD, F: Fn, FI);
  }

  // All methods receive 'self'; only non-direct methods also receive '_cmd'.
  args.push_back(Elt: OMD->getSelfDecl());
  if (!OMD->isDirectMethod())
    args.push_back(Elt: OMD->getCmdDecl());

  args.append(in_start: OMD->param_begin(), in_end: OMD->param_end());

  CurGD = OMD;
  CurEHLocation = OMD->getEndLoc();

  StartFunction(GD: OMD, RetTy: OMD->getReturnType(), Fn, FnInfo: FI, Args: args,
                Loc: OMD->getLocation(), StartLoc);

  if (OMD->isDirectMethod()) {
    // Runtime-specific prologue for direct methods.
    CGM.getObjCRuntime().GenerateDirectMethodPrologue(CGF&: *this, Fn, OMD, CD);
  }

  // In ARC, certain methods get an extra cleanup.
  // Specifically, an instance -dealloc gets a cleanup that chains to
  // [super dealloc] (see FinishARCDealloc above).
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
      OMD->getSelector().getIdentifierInfoForSlot(argIndex: 0);
    if (ident->isStr(Str: "dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(Kind: getARCCleanupKind());
  }
}
828
829static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
830 LValue lvalue, QualType type);
831
832/// Generate an Objective-C method. An Objective-C method is a C function with
833/// its pointer, name, and types registered in the class structure.
834void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
835 StartObjCMethod(OMD, CD: OMD->getClassInterface());
836 PGO->assignRegionCounters(GD: GlobalDecl(OMD), Fn: CurFn);
837 assert(isa<CompoundStmt>(OMD->getBody()));
838 incrementProfileCounter(S: OMD->getBody());
839 EmitCompoundStmtWithoutScope(S: *cast<CompoundStmt>(Val: OMD->getBody()));
840 FinishFunction(EndLoc: OMD->getBodyRBrace());
841}
842
843/// emitStructGetterCall - Call the runtime function to load a property
844/// into the return value slot.
845static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
846 bool isAtomic, bool hasStrong) {
847 ASTContext &Context = CGF.getContext();
848
849 llvm::Value *src =
850 CGF.EmitLValueForIvar(ObjectTy: CGF.TypeOfSelfObject(), Base: CGF.LoadObjCSelf(), Ivar: ivar, CVRQualifiers: 0)
851 .getPointer(CGF);
852
853 // objc_copyStruct (ReturnValue, &structIvar,
854 // sizeof (Type of Ivar), isAtomic, false);
855 CallArgList args;
856
857 llvm::Value *dest = CGF.ReturnValue.emitRawPointer(CGF);
858 args.add(rvalue: RValue::get(V: dest), type: Context.VoidPtrTy);
859 args.add(rvalue: RValue::get(V: src), type: Context.VoidPtrTy);
860
861 CharUnits size = CGF.getContext().getTypeSizeInChars(T: ivar->getType());
862 args.add(rvalue: RValue::get(V: CGF.CGM.getSize(numChars: size)), type: Context.getSizeType());
863 args.add(rvalue: RValue::get(V: CGF.Builder.getInt1(V: isAtomic)), type: Context.BoolTy);
864 args.add(rvalue: RValue::get(V: CGF.Builder.getInt1(V: hasStrong)), type: Context.BoolTy);
865
866 llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
867 CGCallee callee = CGCallee::forDirect(functionPtr: fn);
868 CGF.EmitCall(CallInfo: CGF.getTypes().arrangeBuiltinFunctionCall(resultType: Context.VoidTy, args),
869 Callee: callee, ReturnValue: ReturnValueSlot(), Args: args);
870}
871
872/// Determine whether the given architecture supports unaligned atomic
873/// accesses. They don't have to be fast, just faster than a function
874/// call and a mutex.
875static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
876 // FIXME: Allow unaligned atomic load/store on x86. (It is not
877 // currently supported by the backend.)
878 return false;
879}
880
881/// Return the maximum size that permits atomic accesses for the given
882/// architecture.
883static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
884 llvm::Triple::ArchType arch) {
885 // ARM has 8-byte atomic accesses, but it's not clear whether we
886 // want to rely on them here.
887
888 // In the default case, just assume that any size up to a pointer is
889 // fine given adequate alignment.
890 return CharUnits::fromQuantity(Quantity: CGM.PointerSizeInBytes);
891}
892
namespace {
  /// Describes how a synthesized \@property accessor pair should be
  /// emitted.  Built once per property implementation by the constructor
  /// below; consumed by generateObjCGetterBody / generateObjCSetterBody.
  class PropertyImplStrategy {
  public:
    enum StrategyKind {
      /// The 'native' strategy is to use the architecture's provided
      /// reads and writes.
      Native,

      /// Use objc_setProperty and objc_getProperty.
      GetSetProperty,

      /// Use objc_setProperty for the setter, but use expression
      /// evaluation for the getter.
      SetPropertyAndExpressionGet,

      /// Use objc_copyStruct.
      CopyStruct,

      /// The 'expression' strategy is to emit normal assignment or
      /// lvalue-to-rvalue expressions.
      Expression
    };

    StrategyKind getKind() const { return StrategyKind(Kind); }

    bool hasStrongMember() const { return HasStrong; }
    bool isAtomic() const { return IsAtomic; }
    bool isCopy() const { return IsCopy; }

    CharUnits getIvarSize() const { return IvarSize; }
    CharUnits getIvarAlignment() const { return IvarAlignment; }

    PropertyImplStrategy(CodeGenModule &CGM,
                         const ObjCPropertyImplDecl *propImpl);

  private:
    LLVM_PREFERRED_TYPE(StrategyKind)
    unsigned Kind : 8;
    LLVM_PREFERRED_TYPE(bool)
    unsigned IsAtomic : 1;
    LLVM_PREFERRED_TYPE(bool)
    unsigned IsCopy : 1;
    LLVM_PREFERRED_TYPE(bool)
    unsigned HasStrong : 1;

    // Size and alignment of the backing ivar, used to decide whether a
    // native atomic load/store is possible.
    CharUnits IvarSize;
    CharUnits IvarAlignment;
  };
}
942
/// Pick an implementation strategy for the given property synthesis.
/// The decision depends on the setter kind (copy/retain), atomicity,
/// GC/ARC mode, and finally the ivar's size and alignment relative to
/// what the target can access atomically.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                     const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  auto TInfo = CGM.getContext().getTypeInfoInChars(ivarType);
  IvarSize = TInfo.Width;
  IvarAlignment = TInfo.Align;

  // If we have a copy property, we always have to use setProperty.
  // If the property is atomic we need to use getProperty, but in
  // the nonatomic case we can just use expression.
  if (IsCopy) {
    Kind = IsAtomic ? GetSetProperty : SetPropertyAndExpressionGet;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong.  This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though;  we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty.  However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions.  This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const auto *RD = ivarType->getAsRecordDecl())
      HasStrong = RD->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers.  This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up.  We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
    CGM.getTarget().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}
1071
1072/// Generate an Objective-C property getter function.
1073///
1074/// The given Decl must be an ObjCImplementationDecl. \@synthesize
1075/// is illegal within a category.
1076void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
1077 const ObjCPropertyImplDecl *PID) {
1078 llvm::Constant *AtomicHelperFn =
1079 CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
1080 ObjCMethodDecl *OMD = PID->getGetterMethodDecl();
1081 assert(OMD && "Invalid call to generate getter (empty method)");
1082 StartObjCMethod(OMD, CD: IMP->getClassInterface());
1083
1084 generateObjCGetterBody(classImpl: IMP, propImpl: PID, GetterMothodDecl: OMD, AtomicHelperFn);
1085
1086 FinishFunction(EndLoc: OMD->getEndLoc());
1087}
1088
1089static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
1090 const Expr *getter = propImpl->getGetterCXXConstructor();
1091 if (!getter) return true;
1092
1093 // Sema only makes only of these when the ivar has a C++ class type,
1094 // so the form is pretty constrained.
1095
1096 // If the property has a reference type, we might just be binding a
1097 // reference, in which case the result will be a gl-value. We should
1098 // treat this as a non-trivial operation.
1099 if (getter->isGLValue())
1100 return false;
1101
1102 // If we selected a trivial copy-constructor, we're okay.
1103 if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(Val: getter))
1104 return (construct->getConstructor()->isTrivial());
1105
1106 // The constructor might require cleanups (in which case it's never
1107 // trivial).
1108 assert(isa<ExprWithCleanups>(getter));
1109 return false;
1110}
1111
1112/// emitCPPObjectAtomicGetterCall - Call the runtime function to
1113/// copy the ivar into the resturn slot.
1114static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
1115 llvm::Value *returnAddr,
1116 ObjCIvarDecl *ivar,
1117 llvm::Constant *AtomicHelperFn) {
1118 // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
1119 // AtomicHelperFn);
1120 CallArgList args;
1121
1122 // The 1st argument is the return Slot.
1123 args.add(rvalue: RValue::get(V: returnAddr), type: CGF.getContext().VoidPtrTy);
1124
1125 // The 2nd argument is the address of the ivar.
1126 llvm::Value *ivarAddr =
1127 CGF.EmitLValueForIvar(ObjectTy: CGF.TypeOfSelfObject(), Base: CGF.LoadObjCSelf(), Ivar: ivar, CVRQualifiers: 0)
1128 .getPointer(CGF);
1129 args.add(rvalue: RValue::get(V: ivarAddr), type: CGF.getContext().VoidPtrTy);
1130
1131 // Third argument is the helper function.
1132 args.add(rvalue: RValue::get(V: AtomicHelperFn), type: CGF.getContext().VoidPtrTy);
1133
1134 llvm::FunctionCallee copyCppAtomicObjectFn =
1135 CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
1136 CGCallee callee = CGCallee::forDirect(functionPtr: copyCppAtomicObjectFn);
1137 CGF.EmitCall(
1138 CallInfo: CGF.getTypes().arrangeBuiltinFunctionCall(resultType: CGF.getContext().VoidTy, args),
1139 Callee: callee, ReturnValue: ReturnValueSlot(), Args: args);
1140}
1141
1142// emitCmdValueForGetterSetterBody - Handle emitting the load necessary for
1143// the `_cmd` selector argument for getter/setter bodies. For direct methods,
1144// this returns an undefined/poison value; this matches behavior prior to `_cmd`
1145// being removed from the direct method ABI as the getter/setter caller would
1146// never load one. For non-direct methods, this emits a load of the implicit
1147// `_cmd` storage.
1148static llvm::Value *emitCmdValueForGetterSetterBody(CodeGenFunction &CGF,
1149 ObjCMethodDecl *MD) {
1150 if (MD->isDirectMethod()) {
1151 // Direct methods do not have a `_cmd` argument. Emit an undefined/poison
1152 // value. This will be passed to objc_getProperty/objc_setProperty, which
1153 // has not appeared bothered by the `_cmd` argument being undefined before.
1154 llvm::Type *selType = CGF.ConvertType(T: CGF.getContext().getObjCSelType());
1155 return llvm::PoisonValue::get(T: selType);
1156 }
1157
1158 return CGF.Builder.CreateLoad(Addr: CGF.GetAddrOfLocalVar(VD: MD->getCmdDecl()), Name: "cmd");
1159}
1160
/// generateObjCGetterBody - Emit the body of a synthesized property getter,
/// copying the backing ivar (or the result of the getter's C++ constructor)
/// into the return slot.  The access strategy — native atomic load,
/// objc_getProperty, objc_copyStruct, or plain expression emission — is
/// chosen by PropertyImplStrategy.
void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  // Non-trivially-copyable C structs: use the C-struct copy constructor,
  // or the runtime's locking helper when the property is atomic.
  if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    if (!AtomicHelperFn) {
      LValue Src =
          EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
      LValue Dst = MakeAddrLValue(ReturnValue, ivar->getType());
      callCStructCopyConstructor(Dst, Src);
    } else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
                                     propImpl->getGetterCXXConstructor(),
                                     /* NRVOCandidate=*/nullptr);
      EmitReturnStmt(*ret);
    }
    else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = propImpl->getGetterMethodDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
    llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);

    // Perform an atomic load.  This does not impose ordering constraints.
    Address ivarAddr = LV.getAddress();
    ivarAddr = ivarAddr.withElementType(bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAtomic(llvm::AtomicOrdering::Unordered);
    llvm::Value *ivarVal = load;
    // Pointer-auth-qualified ivars must be resigned from the storage
    // schema to the schema of the return type.
    if (PointerAuthQualifier PAQ = ivar->getType().getPointerAuth()) {
      CGPointerAuthInfo SrcInfo = EmitPointerAuthInfo(PAQ, ivarAddr);
      CGPointerAuthInfo TargetInfo =
          CGM.getPointerAuthInfoForType(getterMethod->getReturnType());
      ivarVal = emitPointerAuthResign(ivarVal, ivar->getType(), SrcInfo,
                                      TargetInfo, /*isKnownNonNull=*/false);
    }

    // Store that value into the return address.  Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
    llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
    uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
    if (ivarSize > retTySize) {
      bitcastType = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
      ivarVal = Builder.CreateTrunc(ivarVal, bitcastType);
    }
    Builder.CreateStore(ivarVal, ReturnValue.withElementType(bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  case PropertyImplStrategy::GetSetProperty: {
    llvm::FunctionCallee getPropertyFn =
        CGM.getObjCRuntime().GetPropertyGetFunction();

    if (ivar->getType().getPointerAuth()) {
      // This currently cannot be hit, but if we ever allow objc pointers
      // to be signed, this will become possible.  Reaching here would
      // require a copy, weak, etc property backed by an authenticated
      // pointer.
      CGM.ErrorUnsupported(propImpl,
                           "Obj-C getter requiring pointer authentication");
      return;
    }

    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }
    CGCallee callee = CGCallee::forDirect(getPropertyFn);

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd = emitCmdValueForGetterSetterBody(*this, getterMethod);
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffsetAsPointerDiff(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    llvm::CallBase *CallInstruction;
    RValue RV = EmitCall(getTypes().arrangeBuiltinFunctionCall(
                             getContext().getObjCIdType(), args),
                         callee, ReturnValueSlot(), args, &CallInstruction);
    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
      call->setTailCall();

    // We need to fix the type here.  Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(
        RV.getScalarVal(),
        getTypes().ConvertType(getterMethod->getReturnType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
    AutoreleaseResult = false;

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    auto EvaluationKind = getEvaluationKind(ivarType);
    assert(!ivarType.getPointerAuth() || EvaluationKind == TEK_Scalar);
    switch (EvaluationKind) {
    case TEK_Complex: {
      ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
      EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                         /*init*/ true);
      return;
    }
    case TEK_Aggregate: {
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
                        /* Src= */ LV, ivarType, getOverlapForReturnValue());
      return;
    }
    case TEK_Scalar: {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        if (ivarType.getPointerAuth()) {
          CGM.ErrorUnsupported(propImpl,
                               "Obj-C getter for authenticated reference type");
          return;
        }
        value = LV.getAddress().emitRawPointer(*this);
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          if (getLangOpts().ObjCAutoRefCount) {
            value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
          } else {
            value = EmitARCLoadWeak(LV.getAddress());
          }

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          // Pointer-auth-qualified ivars need a resign from the storage
          // schema to the return type's schema.
          if (PointerAuthQualifier PAQ = ivar->getType().getPointerAuth()) {
            Address ivarAddr = LV.getAddress();
            llvm::LoadInst *LoadInst = Builder.CreateLoad(ivarAddr, "load");
            llvm::Value *Load = LoadInst;
            auto SrcInfo = EmitPointerAuthInfo(PAQ, ivarAddr);
            auto TargetInfo =
                CGM.getPointerAuthInfoForType(getterMethod->getReturnType());
            Load = emitPointerAuthResign(Load, ivarType, SrcInfo, TargetInfo,
                                         /*isKnownNonNull=*/false);
            value = Load;
          } else
            value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();

          AutoreleaseResult = false;
        }

        value = Builder.CreateBitCast(
            value, ConvertType(GetterMethodDecl->getReturnType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
      return;
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}
1384
1385/// emitStructSetterCall - Call the runtime function to store the value
1386/// from the first formal parameter into the given ivar.
1387static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
1388 ObjCIvarDecl *ivar) {
1389 // objc_copyStruct (&structIvar, &Arg,
1390 // sizeof (struct something), true, false);
1391 CallArgList args;
1392
1393 // The first argument is the address of the ivar.
1394 llvm::Value *ivarAddr =
1395 CGF.EmitLValueForIvar(ObjectTy: CGF.TypeOfSelfObject(), Base: CGF.LoadObjCSelf(), Ivar: ivar, CVRQualifiers: 0)
1396 .getPointer(CGF);
1397 ivarAddr = CGF.Builder.CreateBitCast(V: ivarAddr, DestTy: CGF.Int8PtrTy);
1398 args.add(rvalue: RValue::get(V: ivarAddr), type: CGF.getContext().VoidPtrTy);
1399
1400 // The second argument is the address of the parameter variable.
1401 ParmVarDecl *argVar = *OMD->param_begin();
1402 DeclRefExpr argRef(CGF.getContext(), argVar, false,
1403 argVar->getType().getNonReferenceType(), VK_LValue,
1404 SourceLocation());
1405 llvm::Value *argAddr = CGF.EmitLValue(E: &argRef).getPointer(CGF);
1406 args.add(rvalue: RValue::get(V: argAddr), type: CGF.getContext().VoidPtrTy);
1407
1408 // The third argument is the sizeof the type.
1409 llvm::Value *size =
1410 CGF.CGM.getSize(numChars: CGF.getContext().getTypeSizeInChars(T: ivar->getType()));
1411 args.add(rvalue: RValue::get(V: size), type: CGF.getContext().getSizeType());
1412
1413 // The fourth argument is the 'isAtomic' flag.
1414 args.add(rvalue: RValue::get(V: CGF.Builder.getTrue()), type: CGF.getContext().BoolTy);
1415
1416 // The fifth argument is the 'hasStrong' flag.
1417 // FIXME: should this really always be false?
1418 args.add(rvalue: RValue::get(V: CGF.Builder.getFalse()), type: CGF.getContext().BoolTy);
1419
1420 llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
1421 CGCallee callee = CGCallee::forDirect(functionPtr: fn);
1422 CGF.EmitCall(
1423 CallInfo: CGF.getTypes().arrangeBuiltinFunctionCall(resultType: CGF.getContext().VoidTy, args),
1424 Callee: callee, ReturnValue: ReturnValueSlot(), Args: args);
1425}
1426
1427/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
1428/// the value from the first formal parameter into the given ivar, using
1429/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
1430static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
1431 ObjCMethodDecl *OMD,
1432 ObjCIvarDecl *ivar,
1433 llvm::Constant *AtomicHelperFn) {
1434 // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
1435 // AtomicHelperFn);
1436 CallArgList args;
1437
1438 // The first argument is the address of the ivar.
1439 llvm::Value *ivarAddr =
1440 CGF.EmitLValueForIvar(ObjectTy: CGF.TypeOfSelfObject(), Base: CGF.LoadObjCSelf(), Ivar: ivar, CVRQualifiers: 0)
1441 .getPointer(CGF);
1442 args.add(rvalue: RValue::get(V: ivarAddr), type: CGF.getContext().VoidPtrTy);
1443
1444 // The second argument is the address of the parameter variable.
1445 ParmVarDecl *argVar = *OMD->param_begin();
1446 DeclRefExpr argRef(CGF.getContext(), argVar, false,
1447 argVar->getType().getNonReferenceType(), VK_LValue,
1448 SourceLocation());
1449 llvm::Value *argAddr = CGF.EmitLValue(E: &argRef).getPointer(CGF);
1450 args.add(rvalue: RValue::get(V: argAddr), type: CGF.getContext().VoidPtrTy);
1451
1452 // Third argument is the helper function.
1453 args.add(rvalue: RValue::get(V: AtomicHelperFn), type: CGF.getContext().VoidPtrTy);
1454
1455 llvm::FunctionCallee fn =
1456 CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
1457 CGCallee callee = CGCallee::forDirect(functionPtr: fn);
1458 CGF.EmitCall(
1459 CallInfo: CGF.getTypes().arrangeBuiltinFunctionCall(resultType: CGF.getContext().VoidTy, args),
1460 Callee: callee, ReturnValue: ReturnValueSlot(), Args: args);
1461}
1462
1463
1464static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
1465 Expr *setter = PID->getSetterCXXAssignment();
1466 if (!setter) return true;
1467
1468 // Sema only makes only of these when the ivar has a C++ class type,
1469 // so the form is pretty constrained.
1470
1471 // An operator call is trivial if the function it calls is trivial.
1472 // This also implies that there's nothing non-trivial going on with
1473 // the arguments, because operator= can only be trivial if it's a
1474 // synthesized assignment operator and therefore both parameters are
1475 // references.
1476 if (CallExpr *call = dyn_cast<CallExpr>(Val: setter)) {
1477 if (const FunctionDecl *callee
1478 = dyn_cast_or_null<FunctionDecl>(Val: call->getCalleeDecl()))
1479 if (callee->isTrivial())
1480 return true;
1481 return false;
1482 }
1483
1484 assert(isa<ExprWithCleanups>(setter));
1485 return false;
1486}
1487
1488static bool UseOptimizedSetter(CodeGenModule &CGM) {
1489 if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
1490 return false;
1491 return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
1492}
1493
1494void
1495CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1496 const ObjCPropertyImplDecl *propImpl,
1497 llvm::Constant *AtomicHelperFn) {
1498 ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
1499 ObjCMethodDecl *setterMethod = propImpl->getSetterMethodDecl();
1500
1501 if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
1502 ParmVarDecl *PVD = *setterMethod->param_begin();
1503 if (!AtomicHelperFn) {
1504 // Call the move assignment operator instead of calling the copy
1505 // assignment operator and destructor.
1506 LValue Dst = EmitLValueForIvar(ObjectTy: TypeOfSelfObject(), Base: LoadObjCSelf(), Ivar: ivar,
1507 /*quals*/ CVRQualifiers: 0);
1508 LValue Src = MakeAddrLValue(Addr: GetAddrOfLocalVar(VD: PVD), T: ivar->getType());
1509 callCStructMoveAssignmentOperator(Dst, Src);
1510 } else {
1511 // If atomic, assignment is called via a locking api.
1512 emitCPPObjectAtomicSetterCall(CGF&: *this, OMD: setterMethod, ivar, AtomicHelperFn);
1513 }
1514 // Decativate the destructor for the setter parameter.
1515 DeactivateCleanupBlock(Cleanup: CalleeDestructedParamCleanups[PVD], DominatingIP: AllocaInsertPt);
1516 return;
1517 }
1518
1519 // Just use the setter expression if Sema gave us one and it's
1520 // non-trivial.
1521 if (!hasTrivialSetExpr(PID: propImpl)) {
1522 if (!AtomicHelperFn)
1523 // If non-atomic, assignment is called directly.
1524 EmitStmt(S: propImpl->getSetterCXXAssignment());
1525 else
1526 // If atomic, assignment is called via a locking api.
1527 emitCPPObjectAtomicSetterCall(CGF&: *this, OMD: setterMethod, ivar,
1528 AtomicHelperFn);
1529 return;
1530 }
1531
1532 PropertyImplStrategy strategy(CGM, propImpl);
1533 switch (strategy.getKind()) {
1534 case PropertyImplStrategy::Native: {
1535 // We don't need to do anything for a zero-size struct.
1536 if (strategy.getIvarSize().isZero())
1537 return;
1538
1539 Address argAddr = GetAddrOfLocalVar(VD: *setterMethod->param_begin());
1540
1541 LValue ivarLValue =
1542 EmitLValueForIvar(ObjectTy: TypeOfSelfObject(), Base: LoadObjCSelf(), Ivar: ivar, /*quals*/ CVRQualifiers: 0);
1543 Address ivarAddr = ivarLValue.getAddress();
1544
1545 // Currently, all atomic accesses have to be through integer
1546 // types, so there's no point in trying to pick a prettier type.
1547 llvm::Type *castType = llvm::Type::getIntNTy(
1548 C&: getLLVMContext(), N: getContext().toBits(CharSize: strategy.getIvarSize()));
1549
1550 // Cast both arguments to the chosen operation type.
1551 argAddr = argAddr.withElementType(ElemTy: castType);
1552 ivarAddr = ivarAddr.withElementType(ElemTy: castType);
1553
1554 llvm::Value *load = Builder.CreateLoad(Addr: argAddr);
1555
1556 if (PointerAuthQualifier PAQ = ivar->getType().getPointerAuth()) {
1557 QualType PropertyType = propImpl->getPropertyDecl()->getType();
1558 CGPointerAuthInfo SrcInfo = CGM.getPointerAuthInfoForType(type: PropertyType);
1559 CGPointerAuthInfo TargetInfo = EmitPointerAuthInfo(Qualifier: PAQ, StorageAddress: ivarAddr);
1560 load = emitPointerAuthResign(Pointer: load, PointerType: ivar->getType(), CurAuthInfo: SrcInfo, NewAuthInfo: TargetInfo,
1561 /*isKnownNonNull=*/IsKnownNonNull: false);
1562 }
1563
1564 // Perform an atomic store. There are no memory ordering requirements.
1565 llvm::StoreInst *store = Builder.CreateStore(Val: load, Addr: ivarAddr);
1566 store->setAtomic(Ordering: llvm::AtomicOrdering::Unordered);
1567 return;
1568 }
1569
1570 case PropertyImplStrategy::GetSetProperty:
1571 case PropertyImplStrategy::SetPropertyAndExpressionGet: {
1572
1573 llvm::FunctionCallee setOptimizedPropertyFn = nullptr;
1574 llvm::FunctionCallee setPropertyFn = nullptr;
1575 if (UseOptimizedSetter(CGM)) {
1576 // 10.8 and iOS 6.0 code and GC is off
1577 setOptimizedPropertyFn =
1578 CGM.getObjCRuntime().GetOptimizedPropertySetFunction(
1579 atomic: strategy.isAtomic(), copy: strategy.isCopy());
1580 if (!setOptimizedPropertyFn) {
1581 CGM.ErrorUnsupported(D: propImpl, Type: "Obj-C optimized setter - NYI");
1582 return;
1583 }
1584 }
1585 else {
1586 setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
1587 if (!setPropertyFn) {
1588 CGM.ErrorUnsupported(D: propImpl, Type: "Obj-C setter requiring atomic copy");
1589 return;
1590 }
1591 }
1592
1593 // Emit objc_setProperty((id) self, _cmd, offset, arg,
1594 // <is-atomic>, <is-copy>).
1595 llvm::Value *cmd = emitCmdValueForGetterSetterBody(CGF&: *this, MD: setterMethod);
1596 llvm::Value *self =
1597 Builder.CreateBitCast(V: LoadObjCSelf(), DestTy: VoidPtrTy);
1598 llvm::Value *ivarOffset =
1599 EmitIvarOffsetAsPointerDiff(Interface: classImpl->getClassInterface(), Ivar: ivar);
1600 Address argAddr = GetAddrOfLocalVar(VD: *setterMethod->param_begin());
1601 llvm::Value *arg = Builder.CreateLoad(Addr: argAddr, Name: "arg");
1602 arg = Builder.CreateBitCast(V: arg, DestTy: VoidPtrTy);
1603
1604 CallArgList args;
1605 args.add(rvalue: RValue::get(V: self), type: getContext().getObjCIdType());
1606 args.add(rvalue: RValue::get(V: cmd), type: getContext().getObjCSelType());
1607 if (setOptimizedPropertyFn) {
1608 args.add(rvalue: RValue::get(V: arg), type: getContext().getObjCIdType());
1609 args.add(rvalue: RValue::get(V: ivarOffset), type: getContext().getPointerDiffType());
1610 CGCallee callee = CGCallee::forDirect(functionPtr: setOptimizedPropertyFn);
1611 EmitCall(CallInfo: getTypes().arrangeBuiltinFunctionCall(resultType: getContext().VoidTy, args),
1612 Callee: callee, ReturnValue: ReturnValueSlot(), Args: args);
1613 } else {
1614 args.add(rvalue: RValue::get(V: ivarOffset), type: getContext().getPointerDiffType());
1615 args.add(rvalue: RValue::get(V: arg), type: getContext().getObjCIdType());
1616 args.add(rvalue: RValue::get(V: Builder.getInt1(V: strategy.isAtomic())),
1617 type: getContext().BoolTy);
1618 args.add(rvalue: RValue::get(V: Builder.getInt1(V: strategy.isCopy())),
1619 type: getContext().BoolTy);
1620 // FIXME: We shouldn't need to get the function info here, the runtime
1621 // already should have computed it to build the function.
1622 CGCallee callee = CGCallee::forDirect(functionPtr: setPropertyFn);
1623 EmitCall(CallInfo: getTypes().arrangeBuiltinFunctionCall(resultType: getContext().VoidTy, args),
1624 Callee: callee, ReturnValue: ReturnValueSlot(), Args: args);
1625 }
1626
1627 return;
1628 }
1629
1630 case PropertyImplStrategy::CopyStruct:
1631 emitStructSetterCall(CGF&: *this, OMD: setterMethod, ivar);
1632 return;
1633
1634 case PropertyImplStrategy::Expression:
1635 break;
1636 }
1637
1638 // Otherwise, fake up some ASTs and emit a normal assignment.
1639 ValueDecl *selfDecl = setterMethod->getSelfDecl();
1640 DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(),
1641 VK_LValue, SourceLocation());
1642 ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack, selfDecl->getType(),
1643 CK_LValueToRValue, &self, VK_PRValue,
1644 FPOptionsOverride());
1645 ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
1646 SourceLocation(), SourceLocation(),
1647 &selfLoad, true, true);
1648
1649 ParmVarDecl *argDecl = *setterMethod->param_begin();
1650 QualType argType = argDecl->getType().getNonReferenceType();
1651 DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue,
1652 SourceLocation());
1653 ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
1654 argType.getUnqualifiedType(), CK_LValueToRValue,
1655 &arg, VK_PRValue, FPOptionsOverride());
1656
1657 // The property type can differ from the ivar type in some situations with
1658 // Objective-C pointer types, we can always bit cast the RHS in these cases.
1659 // The following absurdity is just to ensure well-formed IR.
1660 CastKind argCK = CK_NoOp;
1661 if (ivarRef.getType()->isObjCObjectPointerType()) {
1662 if (argLoad.getType()->isObjCObjectPointerType())
1663 argCK = CK_BitCast;
1664 else if (argLoad.getType()->isBlockPointerType())
1665 argCK = CK_BlockPointerToObjCPointerCast;
1666 else
1667 argCK = CK_CPointerToObjCPointerCast;
1668 } else if (ivarRef.getType()->isBlockPointerType()) {
1669 if (argLoad.getType()->isBlockPointerType())
1670 argCK = CK_BitCast;
1671 else
1672 argCK = CK_AnyPointerToBlockPointerCast;
1673 } else if (ivarRef.getType()->isPointerType()) {
1674 argCK = CK_BitCast;
1675 } else if (argLoad.getType()->isAtomicType() &&
1676 !ivarRef.getType()->isAtomicType()) {
1677 argCK = CK_AtomicToNonAtomic;
1678 } else if (!argLoad.getType()->isAtomicType() &&
1679 ivarRef.getType()->isAtomicType()) {
1680 argCK = CK_NonAtomicToAtomic;
1681 }
1682 ImplicitCastExpr argCast(ImplicitCastExpr::OnStack, ivarRef.getType(), argCK,
1683 &argLoad, VK_PRValue, FPOptionsOverride());
1684 Expr *finalArg = &argLoad;
1685 if (!getContext().hasSameUnqualifiedType(T1: ivarRef.getType(),
1686 T2: argLoad.getType()))
1687 finalArg = &argCast;
1688
1689 BinaryOperator *assign = BinaryOperator::Create(
1690 C: getContext(), lhs: &ivarRef, rhs: finalArg, opc: BO_Assign, ResTy: ivarRef.getType(),
1691 VK: VK_PRValue, OK: OK_Ordinary, opLoc: SourceLocation(), FPFeatures: FPOptionsOverride());
1692 EmitStmt(S: assign);
1693}
1694
1695/// Generate an Objective-C property setter function.
1696///
1697/// The given Decl must be an ObjCImplementationDecl. \@synthesize
1698/// is illegal within a category.
1699void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
1700 const ObjCPropertyImplDecl *PID) {
1701 llvm::Constant *AtomicHelperFn =
1702 CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
1703 ObjCMethodDecl *OMD = PID->getSetterMethodDecl();
1704 assert(OMD && "Invalid call to generate setter (empty method)");
1705 StartObjCMethod(OMD, CD: IMP->getClassInterface());
1706
1707 generateObjCSetterBody(classImpl: IMP, propImpl: PID, AtomicHelperFn);
1708
1709 FinishFunction(EndLoc: OMD->getEndLoc());
1710}
1711
1712namespace {
1713 struct DestroyIvar final : EHScopeStack::Cleanup {
1714 private:
1715 llvm::Value *addr;
1716 const ObjCIvarDecl *ivar;
1717 CodeGenFunction::Destroyer *destroyer;
1718 bool useEHCleanupForArray;
1719 public:
1720 DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
1721 CodeGenFunction::Destroyer *destroyer,
1722 bool useEHCleanupForArray)
1723 : addr(addr), ivar(ivar), destroyer(destroyer),
1724 useEHCleanupForArray(useEHCleanupForArray) {}
1725
1726 void Emit(CodeGenFunction &CGF, Flags flags) override {
1727 LValue lvalue
1728 = CGF.EmitLValueForIvar(ObjectTy: CGF.TypeOfSelfObject(), Base: addr, Ivar: ivar, /*CVR*/ CVRQualifiers: 0);
1729 CGF.emitDestroy(addr: lvalue.getAddress(), type: ivar->getType(), destroyer,
1730 useEHCleanupForArray: flags.isForNormalCleanup() && useEHCleanupForArray);
1731 }
1732 };
1733}
1734
1735/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
1736static void destroyARCStrongWithStore(CodeGenFunction &CGF,
1737 Address addr,
1738 QualType type) {
1739 llvm::Value *null = getNullForVariable(addr);
1740 CGF.EmitARCStoreStrongCall(addr, value: null, /*ignored*/ resultIgnored: true);
1741}
1742
1743static void emitCXXDestructMethod(CodeGenFunction &CGF,
1744 ObjCImplementationDecl *impl) {
1745 CodeGenFunction::RunCleanupsScope scope(CGF);
1746
1747 llvm::Value *self = CGF.LoadObjCSelf();
1748
1749 const ObjCInterfaceDecl *iface = impl->getClassInterface();
1750 for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
1751 ivar; ivar = ivar->getNextIvar()) {
1752 QualType type = ivar->getType();
1753
1754 // Check whether the ivar is a destructible type.
1755 QualType::DestructionKind dtorKind = type.isDestructedType();
1756 if (!dtorKind) continue;
1757
1758 CodeGenFunction::Destroyer *destroyer = nullptr;
1759
1760 // Use a call to objc_storeStrong to destroy strong ivars, for the
1761 // general benefit of the tools.
1762 if (dtorKind == QualType::DK_objc_strong_lifetime) {
1763 destroyer = destroyARCStrongWithStore;
1764
1765 // Otherwise use the default for the destruction kind.
1766 } else {
1767 destroyer = CGF.getDestroyer(destructionKind: dtorKind);
1768 }
1769
1770 CleanupKind cleanupKind = CGF.getCleanupKind(kind: dtorKind);
1771
1772 CGF.EHStack.pushCleanup<DestroyIvar>(Kind: cleanupKind, A: self, A: ivar, A: destroyer,
1773 A: cleanupKind & EHCleanup);
1774 }
1775
1776 assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
1777}
1778
1779void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1780 ObjCMethodDecl *MD,
1781 bool ctor) {
1782 MD->createImplicitParams(Context&: CGM.getContext(), ID: IMP->getClassInterface());
1783 StartObjCMethod(OMD: MD, CD: IMP->getClassInterface());
1784
1785 // Emit .cxx_construct.
1786 if (ctor) {
1787 // Suppress the final autorelease in ARC.
1788 AutoreleaseResult = false;
1789
1790 for (const auto *IvarInit : IMP->inits()) {
1791 FieldDecl *Field = IvarInit->getAnyMember();
1792 ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Val: Field);
1793 LValue LV = EmitLValueForIvar(ObjectTy: TypeOfSelfObject(),
1794 Base: LoadObjCSelf(), Ivar, CVRQualifiers: 0);
1795 EmitAggExpr(E: IvarInit->getInit(),
1796 AS: AggValueSlot::forLValue(LV, isDestructed: AggValueSlot::IsDestructed,
1797 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
1798 isAliased: AggValueSlot::IsNotAliased,
1799 mayOverlap: AggValueSlot::DoesNotOverlap));
1800 }
1801 // constructor returns 'self'.
1802 CodeGenTypes &Types = CGM.getTypes();
1803 QualType IdTy(CGM.getContext().getObjCIdType());
1804 llvm::Value *SelfAsId =
1805 Builder.CreateBitCast(V: LoadObjCSelf(), DestTy: Types.ConvertType(T: IdTy));
1806 EmitReturnOfRValue(RV: RValue::get(V: SelfAsId), Ty: IdTy);
1807
1808 // Emit .cxx_destruct.
1809 } else {
1810 emitCXXDestructMethod(CGF&: *this, impl: IMP);
1811 }
1812 FinishFunction();
1813}
1814
1815llvm::Value *CodeGenFunction::LoadObjCSelf() {
1816 VarDecl *Self = cast<ObjCMethodDecl>(Val: CurFuncDecl)->getSelfDecl();
1817 DeclRefExpr DRE(getContext(), Self,
1818 /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
1819 Self->getType(), VK_LValue, SourceLocation());
1820 return EmitLoadOfScalar(lvalue: EmitDeclRefLValue(E: &DRE), Loc: SourceLocation());
1821}
1822
1823QualType CodeGenFunction::TypeOfSelfObject() {
1824 const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(Val: CurFuncDecl);
1825 ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
1826 const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
1827 Val: getContext().getCanonicalType(T: selfDecl->getType()));
1828 return PTy->getPointeeType();
1829}
1830
/// Emit an Objective-C fast enumeration (for..in) statement.  The loop
/// repeatedly sends countByEnumeratingWithState:objects:count: to the
/// collection to fetch batches of elements, checks the mutations counter
/// before each element, and runs the body once per element.
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
  llvm::FunctionCallee EnumerationMutationFnPtr =
      CGM.getObjCRuntime().EnumerationMutationFunction();
  if (!EnumerationMutationFnPtr) {
    CGM.ErrorUnsupported(S: &S, Type: "Obj-C fast enumeration for this runtime");
    return;
  }
  CGCallee EnumerationMutationFn =
      CGCallee::forDirect(functionPtr: EnumerationMutationFnPtr);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, Loc: S.getSourceRange().getBegin());

  RunCleanupsScope ForScope(*this);

  // The local variable comes into scope immediately.
  AutoVarEmission variable = AutoVarEmission::invalid();
  if (const DeclStmt *SD = dyn_cast<DeclStmt>(Val: S.getElement()))
    variable = EmitAutoVarAlloca(var: *cast<VarDecl>(Val: SD->getSingleDecl()));

  JumpDest LoopEnd = getJumpDestInCurrentScope(Name: "forcoll.end");

  // Fast enumeration state.
  QualType StateTy = CGM.getObjCFastEnumerationStateType();
  Address StatePtr = CreateMemTemp(T: StateTy, Name: "state.ptr");
  EmitNullInitialization(DestPtr: StatePtr, Ty: StateTy);

  // Number of elements in the items array.
  static const unsigned NumItems = 16;

  // Fetch the countByEnumeratingWithState:objects:count: selector.
  const IdentifierInfo *II[] = {
      &CGM.getContext().Idents.get(Name: "countByEnumeratingWithState"),
      &CGM.getContext().Idents.get(Name: "objects"),
      &CGM.getContext().Idents.get(Name: "count")};
  Selector FastEnumSel =
      CGM.getContext().Selectors.getSelector(NumArgs: std::size(II), IIV: &II[0]);

  QualType ItemsTy = getContext().getConstantArrayType(
      EltTy: getContext().getObjCIdType(), ArySize: llvm::APInt(32, NumItems), SizeExpr: nullptr,
      ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
  Address ItemsPtr = CreateMemTemp(T: ItemsTy, Name: "items.ptr");

  // Emit the collection pointer. In ARC, we do a retain.
  llvm::Value *Collection;
  if (getLangOpts().ObjCAutoRefCount) {
    Collection = EmitARCRetainScalarExpr(expr: S.getCollection());

    // Enter a cleanup to do the release.
    EmitObjCConsumeObject(T: S.getCollection()->getType(), Ptr: Collection);
  } else {
    Collection = EmitScalarExpr(E: S.getCollection());
  }

  // The 'continue' label needs to appear within the cleanup for the
  // collection object.
  JumpDest AfterBody = getJumpDestInCurrentScope(Name: "forcoll.next");

  // Send it our message:
  CallArgList Args;

  // The first argument is a temporary of the enumeration-state type.
  Args.add(rvalue: RValue::get(Addr: StatePtr, CGF&: *this), type: getContext().getPointerType(T: StateTy));

  // The second argument is a temporary array with space for NumItems
  // pointers.  We'll actually be loading elements from the array
  // pointer written into the control state; this buffer is so that
  // collections that *aren't* backed by arrays can still queue up
  // batches of elements.
  Args.add(rvalue: RValue::get(Addr: ItemsPtr, CGF&: *this), type: getContext().getPointerType(T: ItemsTy));

  // The third argument is the capacity of that temporary array.
  llvm::Type *NSUIntegerTy = ConvertType(T: getContext().getNSUIntegerType());
  llvm::Constant *Count = llvm::ConstantInt::get(Ty: NSUIntegerTy, V: NumItems);
  Args.add(rvalue: RValue::get(V: Count), type: getContext().getNSUIntegerType());

  // Start the enumeration.
  RValue CountRV =
      CGM.getObjCRuntime().GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(),
                                               ResultType: getContext().getNSUIntegerType(),
                                               Sel: FastEnumSel, Receiver: Collection, CallArgs: Args);

  // The initial number of objects that were returned in the buffer.
  llvm::Value *initialBufferLimit = CountRV.getScalarVal();

  // Blocks for the empty-collection fast path and for the main loop setup.
  llvm::BasicBlock *EmptyBB = createBasicBlock(name: "forcoll.empty");
  llvm::BasicBlock *LoopInitBB = createBasicBlock(name: "forcoll.loopinit");

  llvm::Value *zero = llvm::Constant::getNullValue(Ty: NSUIntegerTy);

  // If the limit pointer was zero to begin with, the collection is
  // empty; skip all this. Set the branch weight assuming this has the same
  // probability of exiting the loop as any other loop exit.
  uint64_t EntryCount = getCurrentProfileCount();
  Builder.CreateCondBr(
      Cond: Builder.CreateICmpEQ(LHS: initialBufferLimit, RHS: zero, Name: "iszero"), True: EmptyBB,
      False: LoopInitBB,
      BranchWeights: createProfileWeights(TrueCount: EntryCount, FalseCount: getProfileCount(S: S.getBody())));

  // Otherwise, initialize the loop.
  EmitBlock(BB: LoopInitBB);

  // Save the initial mutations value.  This is the value at an
  // address that was written into the state object by
  // countByEnumeratingWithState:objects:count:.
  Address StateMutationsPtrPtr =
      Builder.CreateStructGEP(Addr: StatePtr, Index: 2, Name: "mutationsptr.ptr");
  llvm::Value *StateMutationsPtr
    = Builder.CreateLoad(Addr: StateMutationsPtrPtr, Name: "mutationsptr");

  llvm::Type *UnsignedLongTy = ConvertType(T: getContext().UnsignedLongTy);
  llvm::Value *initialMutations =
      Builder.CreateAlignedLoad(Ty: UnsignedLongTy, Addr: StateMutationsPtr,
                                Align: getPointerAlign(), Name: "forcoll.initial-mutations");

  // Start looping.  This is the point we return to whenever we have a
  // fresh, non-empty batch of objects.
  llvm::BasicBlock *LoopBodyBB = createBasicBlock(name: "forcoll.loopbody");
  EmitBlock(BB: LoopBodyBB);

  // The current index into the buffer.
  llvm::PHINode *index = Builder.CreatePHI(Ty: NSUIntegerTy, NumReservedValues: 3, Name: "forcoll.index");
  index->addIncoming(V: zero, BB: LoopInitBB);

  // The current buffer size.
  llvm::PHINode *count = Builder.CreatePHI(Ty: NSUIntegerTy, NumReservedValues: 3, Name: "forcoll.count");
  count->addIncoming(V: initialBufferLimit, BB: LoopInitBB);

  incrementProfileCounter(S: &S);

  // Check whether the mutations value has changed from where it was
  // at start.  StateMutationsPtr should actually be invariant between
  // refreshes.
  StateMutationsPtr = Builder.CreateLoad(Addr: StateMutationsPtrPtr, Name: "mutationsptr");
  llvm::Value *currentMutations
    = Builder.CreateAlignedLoad(Ty: UnsignedLongTy, Addr: StateMutationsPtr,
                                Align: getPointerAlign(), Name: "statemutations");

  llvm::BasicBlock *WasMutatedBB = createBasicBlock(name: "forcoll.mutated");
  llvm::BasicBlock *WasNotMutatedBB = createBasicBlock(name: "forcoll.notmutated");

  Builder.CreateCondBr(Cond: Builder.CreateICmpEQ(LHS: currentMutations, RHS: initialMutations),
                       True: WasNotMutatedBB, False: WasMutatedBB);

  // If so, call the enumeration-mutation function.
  EmitBlock(BB: WasMutatedBB);
  llvm::Type *ObjCIdType = ConvertType(T: getContext().getObjCIdType());
  llvm::Value *V =
    Builder.CreateBitCast(V: Collection, DestTy: ObjCIdType);
  CallArgList Args2;
  Args2.add(rvalue: RValue::get(V), type: getContext().getObjCIdType());
  // FIXME: We shouldn't need to get the function info here, the runtime already
  // should have computed it to build the function.
  EmitCall(
      CallInfo: CGM.getTypes().arrangeBuiltinFunctionCall(resultType: getContext().VoidTy, args: Args2),
      Callee: EnumerationMutationFn, ReturnValue: ReturnValueSlot(), Args: Args2);

  // Otherwise, or if the mutation function returns, just continue.
  EmitBlock(BB: WasNotMutatedBB);

  // Initialize the element variable.
  RunCleanupsScope elementVariableScope(*this);
  // Bookkeeping for how the current element is bound: either the declared
  // loop variable, or an arbitrary lvalue re-evaluated each iteration.
  bool elementIsVariable;
  LValue elementLValue;
  QualType elementType;
  if (const DeclStmt *SD = dyn_cast<DeclStmt>(Val: S.getElement())) {
    // Initialize the variable, in case it's a __block variable or something.
    EmitAutoVarInit(emission: variable);

    const VarDecl *D = cast<VarDecl>(Val: SD->getSingleDecl());
    DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false,
                        D->getType(), VK_LValue, SourceLocation());
    elementLValue = EmitLValue(E: &tempDRE);
    elementType = D->getType();
    elementIsVariable = true;

    if (D->isARCPseudoStrong())
      elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
  } else {
    elementLValue = LValue(); // suppress warning
    elementType = cast<Expr>(Val: S.getElement())->getType();
    elementIsVariable = false;
  }
  llvm::Type *convertedElementType = ConvertType(T: elementType);

  // Fetch the buffer out of the enumeration state.
  // TODO: this pointer should actually be invariant between
  // refreshes, which would help us do certain loop optimizations.
  Address StateItemsPtr =
      Builder.CreateStructGEP(Addr: StatePtr, Index: 1, Name: "stateitems.ptr");
  llvm::Value *EnumStateItems =
    Builder.CreateLoad(Addr: StateItemsPtr, Name: "stateitems");

  // Fetch the value at the current index from the buffer.
  llvm::Value *CurrentItemPtr = Builder.CreateInBoundsGEP(
      Ty: ObjCIdType, Ptr: EnumStateItems, IdxList: index, Name: "currentitem.ptr");
  llvm::Value *CurrentItem =
    Builder.CreateAlignedLoad(Ty: ObjCIdType, Addr: CurrentItemPtr, Align: getPointerAlign());

  if (SanOpts.has(K: SanitizerKind::ObjCCast)) {
    // Before using an item from the collection, check that the implicit cast
    // from id to the element type is valid. This is done with instrumentation
    // roughly corresponding to:
    //
    //   if (![item isKindOfClass:expectedCls]) { /* emit diagnostic */ }
    const ObjCObjectPointerType *ObjPtrTy =
        elementType->getAsObjCInterfacePointerType();
    const ObjCInterfaceType *InterfaceTy =
        ObjPtrTy ? ObjPtrTy->getInterfaceType() : nullptr;
    if (InterfaceTy) {
      auto CheckOrdinal = SanitizerKind::SO_ObjCCast;
      auto CheckHandler = SanitizerHandler::InvalidObjCCast;
      SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
      auto &C = CGM.getContext();
      assert(InterfaceTy->getDecl() && "No decl for ObjC interface type");
      Selector IsKindOfClassSel = GetUnarySelector(name: "isKindOfClass", Ctx&: C);
      CallArgList IsKindOfClassArgs;
      llvm::Value *Cls =
          CGM.getObjCRuntime().GetClass(CGF&: *this, OID: InterfaceTy->getDecl());
      IsKindOfClassArgs.add(rvalue: RValue::get(V: Cls), type: C.getObjCClassType());
      llvm::Value *IsClass =
          CGM.getObjCRuntime()
              .GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(), ResultType: C.BoolTy,
                                   Sel: IsKindOfClassSel, Receiver: CurrentItem,
                                   CallArgs: IsKindOfClassArgs)
              .getScalarVal();
      llvm::Constant *StaticData[] = {
          EmitCheckSourceLocation(Loc: S.getBeginLoc()),
          EmitCheckTypeDescriptor(T: QualType(InterfaceTy, 0))};
      EmitCheck(Checked: {{IsClass, CheckOrdinal}}, Check: CheckHandler,
                StaticArgs: ArrayRef<llvm::Constant *>(StaticData), DynamicArgs: CurrentItem);
    }
  }

  // Cast that value to the right type.
  CurrentItem = Builder.CreateBitCast(V: CurrentItem, DestTy: convertedElementType,
                                      Name: "currentitem");

  // Make sure we have an l-value.  Yes, this gets evaluated every
  // time through the loop.
  if (!elementIsVariable) {
    elementLValue = EmitLValue(E: cast<Expr>(Val: S.getElement()));
    EmitStoreThroughLValue(Src: RValue::get(V: CurrentItem), Dst: elementLValue);
  } else {
    EmitStoreThroughLValue(Src: RValue::get(V: CurrentItem), Dst: elementLValue,
                           /*isInit*/ true);
  }

  // If we do have an element variable, this assignment is the end of
  // its initialization.
  if (elementIsVariable)
    EmitAutoVarCleanups(emission: variable);

  // Perform the loop body, setting up break and continue labels.
  BreakContinueStack.push_back(Elt: BreakContinue(S, LoopEnd, AfterBody));
  {
    RunCleanupsScope Scope(*this);
    EmitStmt(S: S.getBody());
  }
  BreakContinueStack.pop_back();

  // Destroy the element variable now.
  elementVariableScope.ForceCleanup();

  // Check whether there are more elements.
  EmitBlock(BB: AfterBody.getBlock());

  llvm::BasicBlock *FetchMoreBB = createBasicBlock(name: "forcoll.refetch");

  // First we check in the local buffer.
  llvm::Value *indexPlusOne =
      Builder.CreateNUWAdd(LHS: index, RHS: llvm::ConstantInt::get(Ty: NSUIntegerTy, V: 1));

  // If we haven't overrun the buffer yet, we can continue.
  // Set the branch weights based on the simplifying assumption that this is
  // like a while-loop, i.e., ignoring that the false branch fetches more
  // elements and then returns to the loop.
  Builder.CreateCondBr(
      Cond: Builder.CreateICmpULT(LHS: indexPlusOne, RHS: count), True: LoopBodyBB, False: FetchMoreBB,
      BranchWeights: createProfileWeights(TrueCount: getProfileCount(S: S.getBody()), FalseCount: EntryCount));

  // Wire the incremented index (count unchanged) back into the loop PHIs.
  index->addIncoming(V: indexPlusOne, BB: AfterBody.getBlock());
  count->addIncoming(V: count, BB: AfterBody.getBlock());

  // Otherwise, we have to fetch more elements.
  EmitBlock(BB: FetchMoreBB);

  CountRV =
      CGM.getObjCRuntime().GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(),
                                               ResultType: getContext().getNSUIntegerType(),
                                               Sel: FastEnumSel, Receiver: Collection, CallArgs: Args);

  // If we got a zero count, we're done.
  llvm::Value *refetchCount = CountRV.getScalarVal();

  // (note that the message send might split FetchMoreBB)
  index->addIncoming(V: zero, BB: Builder.GetInsertBlock());
  count->addIncoming(V: refetchCount, BB: Builder.GetInsertBlock());

  Builder.CreateCondBr(Cond: Builder.CreateICmpEQ(LHS: refetchCount, RHS: zero),
                       True: EmptyBB, False: LoopBodyBB);

  // No more elements.
  EmitBlock(BB: EmptyBB);

  if (!elementIsVariable) {
    // If the element was not a declaration, set it to be null.

    llvm::Value *null = llvm::Constant::getNullValue(Ty: convertedElementType);
    elementLValue = EmitLValue(E: cast<Expr>(Val: S.getElement()));
    EmitStoreThroughLValue(Src: RValue::get(V: null), Dst: elementLValue);
  }

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, Loc: S.getSourceRange().getEnd());

  ForScope.ForceCleanup();
  EmitBlock(BB: LoopEnd.getBlock());
}
2151
2152void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
2153 CGM.getObjCRuntime().EmitTryStmt(CGF&: *this, S);
2154}
2155
2156void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
2157 CGM.getObjCRuntime().EmitThrowStmt(CGF&: *this, S);
2158}
2159
2160void CodeGenFunction::EmitObjCAtSynchronizedStmt(
2161 const ObjCAtSynchronizedStmt &S) {
2162 CGM.getObjCRuntime().EmitSynchronizedStmt(CGF&: *this, S);
2163}
2164
2165namespace {
2166 struct CallObjCRelease final : EHScopeStack::Cleanup {
2167 CallObjCRelease(llvm::Value *object) : object(object) {}
2168 llvm::Value *object;
2169
2170 void Emit(CodeGenFunction &CGF, Flags flags) override {
2171 // Releases at the end of the full-expression are imprecise.
2172 CGF.EmitARCRelease(value: object, precise: ARCImpreciseLifetime);
2173 }
2174 };
2175}
2176
2177/// Produce the code for a CK_ARCConsumeObject. Does a primitive
2178/// release at the end of the full-expression.
2179llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
2180 llvm::Value *object) {
2181 // If we're in a conditional branch, we need to make the cleanup
2182 // conditional.
2183 pushFullExprCleanup<CallObjCRelease>(kind: getARCCleanupKind(), A: object);
2184 return object;
2185}
2186
2187llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
2188 llvm::Value *value) {
2189 return EmitARCRetainAutorelease(type, value);
2190}
2191
2192/// Given a number of pointers, inform the optimizer that they're
2193/// being intrinsically used up until this point in the program.
2194void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
2195 llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use;
2196 if (!fn)
2197 fn = CGM.getIntrinsic(IID: llvm::Intrinsic::objc_clang_arc_use);
2198
2199 // This isn't really a "runtime" function, but as an intrinsic it
2200 // doesn't really matter as long as we align things up.
2201 EmitNounwindRuntimeCall(callee: fn, args: values);
2202}
2203
2204/// Emit a call to "clang.arc.noop.use", which consumes the result of a call
2205/// that has operand bundle "clang.arc.attachedcall".
2206void CodeGenFunction::EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values) {
2207 llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_noop_use;
2208 if (!fn)
2209 fn = CGM.getIntrinsic(IID: llvm::Intrinsic::objc_clang_arc_noop_use);
2210 EmitNounwindRuntimeCall(callee: fn, args: values);
2211}
2212
2213static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) {
2214 if (auto *F = dyn_cast<llvm::Function>(Val: RTF)) {
2215 // If the target runtime doesn't naturally support ARC, emit weak
2216 // references to the runtime support library. We don't really
2217 // permit this to fail, but we need a particular relocation style.
2218 if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
2219 !CGM.getTriple().isOSBinFormatCOFF()) {
2220 F->setLinkage(llvm::Function::ExternalWeakLinkage);
2221 }
2222 }
2223}
2224
2225static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
2226 llvm::FunctionCallee RTF) {
2227 setARCRuntimeFunctionLinkage(CGM, RTF: RTF.getCallee());
2228}
2229
2230static llvm::Function *getARCIntrinsic(llvm::Intrinsic::ID IntID,
2231 CodeGenModule &CGM) {
2232 llvm::Function *fn = CGM.getIntrinsic(IID: IntID);
2233 setARCRuntimeFunctionLinkage(CGM, RTF: fn);
2234 return fn;
2235}
2236
2237/// Perform an operation having the signature
2238/// i8* (i8*)
2239/// where a null input causes a no-op and returns null.
2240static llvm::Value *emitARCValueOperation(
2241 CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType,
2242 llvm::Function *&fn, llvm::Intrinsic::ID IntID,
2243 llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) {
2244 if (isa<llvm::ConstantPointerNull>(Val: value))
2245 return value;
2246
2247 if (!fn)
2248 fn = getARCIntrinsic(IntID, CGM&: CGF.CGM);
2249
2250 // Cast the argument to 'id'.
2251 llvm::Type *origType = returnType ? returnType : value->getType();
2252 value = CGF.Builder.CreateBitCast(V: value, DestTy: CGF.Int8PtrTy);
2253
2254 // Call the function.
2255 llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(callee: fn, args: value);
2256 call->setTailCallKind(tailKind);
2257
2258 // Cast the result back to the original type.
2259 return CGF.Builder.CreateBitCast(V: call, DestTy: origType);
2260}
2261
2262/// Perform an operation having the following signature:
2263/// i8* (i8**)
2264static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
2265 llvm::Function *&fn,
2266 llvm::Intrinsic::ID IntID) {
2267 if (!fn)
2268 fn = getARCIntrinsic(IntID, CGM&: CGF.CGM);
2269
2270 return CGF.EmitNounwindRuntimeCall(callee: fn, args: addr.emitRawPointer(CGF));
2271}
2272
/// Perform an operation having the following signature:
///   i8* (i8**, i8*)
///
/// \param addr - the slot being stored to; its element type must match
///   \p value's type (checked by the assert below)
/// \param value - the new value
/// \param fn - cached declaration of the intrinsic, lazily initialized
/// \param ignored - if true, the caller does not use the result, so the
///   result bitcast is skipped and null is returned
static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr,
                                          llvm::Value *value,
                                          llvm::Function *&fn,
                                          llvm::Intrinsic::ID IntID,
                                          bool ignored) {
  assert(addr.getElementType() == value->getType());

  if (!fn)
    fn = getARCIntrinsic(IntID, CGM&: CGF.CGM);

  // Remember the value's original type so the result can be cast back.
  llvm::Type *origType = value->getType();

  // The runtime function takes (i8**, i8*).
  llvm::Value *args[] = {
      CGF.Builder.CreateBitCast(V: addr.emitRawPointer(CGF), DestTy: CGF.Int8PtrPtrTy),
      CGF.Builder.CreateBitCast(V: value, DestTy: CGF.Int8PtrTy)};
  llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(callee: fn, args);

  if (ignored) return nullptr;

  return CGF.Builder.CreateBitCast(V: result, DestTy: origType);
}
2296
2297/// Perform an operation having the following signature:
2298/// void (i8**, i8**)
2299static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src,
2300 llvm::Function *&fn,
2301 llvm::Intrinsic::ID IntID) {
2302 assert(dst.getType() == src.getType());
2303
2304 if (!fn)
2305 fn = getARCIntrinsic(IntID, CGM&: CGF.CGM);
2306
2307 llvm::Value *args[] = {
2308 CGF.Builder.CreateBitCast(V: dst.emitRawPointer(CGF), DestTy: CGF.Int8PtrPtrTy),
2309 CGF.Builder.CreateBitCast(V: src.emitRawPointer(CGF), DestTy: CGF.Int8PtrPtrTy)};
2310 CGF.EmitNounwindRuntimeCall(callee: fn, args);
2311}
2312
/// Perform an operation having the signature
///   i8* (i8*)
/// where a null input causes a no-op and returns null.
///
/// Unlike emitARCValueOperation, this resolves the runtime function by
/// name rather than via an LLVM intrinsic, and uses EmitCallOrInvoke,
/// so it may produce either a call or an invoke.
static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
                                           llvm::Value *value,
                                           llvm::Type *returnType,
                                           llvm::FunctionCallee &fn,
                                           StringRef fnName) {
  // The runtime treats null as a no-op, so skip the call entirely.
  if (isa<llvm::ConstantPointerNull>(Val: value))
    return value;

  if (!fn) {
    // All of these entrypoints share the i8* (i8*) signature.
    llvm::FunctionType *fnType =
      llvm::FunctionType::get(Result: CGF.Int8PtrTy, Params: CGF.Int8PtrTy, isVarArg: false);
    fn = CGF.CGM.CreateRuntimeFunction(Ty: fnType, Name: fnName);

    // We have Native ARC, so set nonlazybind attribute for performance
    if (llvm::Function *f = dyn_cast<llvm::Function>(Val: fn.getCallee()))
      if (fnName == "objc_retain")
        f->addFnAttr(Kind: llvm::Attribute::NonLazyBind);
  }

  // Cast the argument to 'id'.
  llvm::Type *origType = returnType ? returnType : value->getType();
  value = CGF.Builder.CreateBitCast(V: value, DestTy: CGF.Int8PtrTy);

  // Call the function.
  llvm::CallBase *Inst = CGF.EmitCallOrInvoke(Callee: fn, Args: value);

  // Mark calls to objc_autorelease as tail on the assumption that methods
  // overriding autorelease do not touch anything on the stack.
  if (fnName == "objc_autorelease")
    if (auto *Call = dyn_cast<llvm::CallInst>(Val: Inst))
      Call->setTailCall();

  // Cast the result back to the original type.
  return CGF.Builder.CreateBitCast(V: Inst, DestTy: origType);
}
2351
2352/// Produce the code to do a retain. Based on the type, calls one of:
2353/// call i8* \@objc_retain(i8* %value)
2354/// call i8* \@objc_retainBlock(i8* %value)
2355llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
2356 if (type->isBlockPointerType())
2357 return EmitARCRetainBlock(value, /*mandatory*/ false);
2358 else
2359 return EmitARCRetainNonBlock(value);
2360}
2361
2362/// Retain the given object, with normal retain semantics.
2363/// call i8* \@objc_retain(i8* %value)
2364llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
2365 return emitARCValueOperation(CGF&: *this, value, returnType: nullptr,
2366 fn&: CGM.getObjCEntrypoints().objc_retain,
2367 IntID: llvm::Intrinsic::objc_retain);
2368}
2369
/// Retain the given block, with _Block_copy semantics.
/// call i8* \@objc_retainBlock(i8* %value)
///
/// \param mandatory - If false, emit the call with metadata
/// indicating that it's okay for the optimizer to eliminate this call
/// if it can prove that the block never escapes except down the stack.
llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
                                                 bool mandatory) {
  llvm::Value *result
    = emitARCValueOperation(CGF&: *this, value, returnType: nullptr,
                            fn&: CGM.getObjCEntrypoints().objc_retainBlock,
                            IntID: llvm::Intrinsic::objc_retainBlock);

  // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
  // tell the optimizer that it doesn't need to do this copy if the
  // block doesn't escape, where being passed as an argument doesn't
  // count as escaping.
  if (!mandatory && isa<llvm::Instruction>(Val: result)) {
    // emitARCValueOperation may have wrapped the call in a bitcast;
    // strip casts to recover the call instruction itself.
    llvm::CallInst *call
      = cast<llvm::CallInst>(Val: result->stripPointerCasts());
    assert(call->getCalledOperand() ==
           CGM.getObjCEntrypoints().objc_retainBlock);

    // An empty metadata node is enough; the optimizer only checks for
    // the kind's presence.
    call->setMetadata(Kind: "clang.arc.copy_on_escape",
                      Node: llvm::MDNode::get(Context&: Builder.getContext(), MDs: {}));
  }

  return result;
}
2399
/// Emit (or record) the target-specific marker that must precede a
/// retainAutoreleasedReturnValue-style call so the ObjC runtime / ARC
/// optimizer can recognize the pattern. At -O0 the marker asm is called
/// directly; at higher levels only a module flag is recorded for the
/// ARC optimizer to consume later.
static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
  // Fetch the void(void) inline asm which marks that we're going to
  // do something with the autoreleased return value.
  llvm::InlineAsm *&marker
    = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
  if (!marker) {
    StringRef assembly
      = CGF.CGM.getTargetCodeGenInfo()
           .getARCRetainAutoreleasedReturnValueMarker();

    // If we have an empty assembly string, there's nothing to do.
    if (assembly.empty()) {

    // Otherwise, at -O0, build an inline asm that we're going to call
    // in a moment.
    } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
      llvm::FunctionType *type =
        llvm::FunctionType::get(Result: CGF.VoidTy, /*variadic*/isVarArg: false);

      marker = llvm::InlineAsm::get(Ty: type, AsmString: assembly, Constraints: "", /*sideeffects*/ hasSideEffects: true);

    // If we're at -O1 and above, we don't want to litter the code
    // with this marker yet, so leave a breadcrumb for the ARC
    // optimizer to pick up.
    } else {
      const char *retainRVMarkerKey = llvm::objcarc::getRVMarkerModuleFlagStr();
      // Only add the module flag once; addModuleFlag with Error behavior
      // would otherwise conflict with an existing flag.
      if (!CGF.CGM.getModule().getModuleFlag(Key: retainRVMarkerKey)) {
        auto *str = llvm::MDString::get(Context&: CGF.getLLVMContext(), Str: assembly);
        CGF.CGM.getModule().addModuleFlag(Behavior: llvm::Module::Error,
                                          Key: retainRVMarkerKey, Val: str);
      }
    }
  }

  // Call the marker asm if we made one, which we do only at -O0.
  if (marker)
    CGF.Builder.CreateCall(Callee: marker, Args: {}, OpBundles: CGF.getBundlesForFunclet(Callee: marker));
}
2438
/// Emit either objc_retainAutoreleasedReturnValue (\p IsRetainRV true)
/// or objc_unsafeClaimAutoreleasedReturnValue on \p value, which should
/// be the result of a call. On supported targets at -O1+, the runtime
/// call is replaced by a "clang.arc.attachedcall" operand bundle on the
/// call itself; otherwise an explicit runtime call is emitted.
static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value,
                                               bool IsRetainRV,
                                               CodeGenFunction &CGF) {
  emitAutoreleasedReturnValueMarker(CGF);

  // Add operand bundle "clang.arc.attachedcall" to the call instead of emitting
  // retainRV or claimRV calls in the IR. We currently do this only when the
  // optimization level isn't -O0 since global-isel, which is currently run at
  // -O0, doesn't know about the operand bundle.
  ObjCEntrypoints &EPs = CGF.CGM.getObjCEntrypoints();
  llvm::Function *&EP = IsRetainRV
                            ? EPs.objc_retainAutoreleasedReturnValue
                            : EPs.objc_unsafeClaimAutoreleasedReturnValue;
  llvm::Intrinsic::ID IID =
      IsRetainRV ? llvm::Intrinsic::objc_retainAutoreleasedReturnValue
                 : llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue;
  EP = getARCIntrinsic(IntID: IID, CGM&: CGF.CGM);

  llvm::Triple::ArchType Arch = CGF.CGM.getTriple().getArch();

  // FIXME: Do this on all targets and at -O0 too. This can be enabled only if
  // the target backend knows how to handle the operand bundle.
  if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
       Arch == llvm::Triple::x86_64)) {
    // Rebuild the call with the bundle attached (bundles cannot be added
    // in place), transfer metadata and uses, then delete the original.
    llvm::Value *bundleArgs[] = {EP};
    llvm::OperandBundleDef OB("clang.arc.attachedcall", bundleArgs);
    auto *oldCall = cast<llvm::CallBase>(Val: value);
    llvm::CallBase *newCall = llvm::CallBase::addOperandBundle(
        CB: oldCall, ID: llvm::LLVMContext::OB_clang_arc_attachedcall, OB,
        InsertPt: oldCall->getIterator());
    newCall->copyMetadata(SrcInst: *oldCall);
    oldCall->replaceAllUsesWith(V: newCall);
    oldCall->eraseFromParent();
    CGF.EmitARCNoopIntrinsicUse(values: newCall);
    return newCall;
  }

  // Some targets require these calls to not be tail calls.
  bool isNoTail =
      CGF.CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail();
  llvm::CallInst::TailCallKind tailKind =
      isNoTail ? llvm::CallInst::TCK_NoTail : llvm::CallInst::TCK_None;
  return emitARCValueOperation(CGF, value, returnType: nullptr, fn&: EP, IntID: IID, tailKind);
}
2483
2484/// Retain the given object which is the result of a function call.
2485/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
2486///
2487/// Yes, this function name is one character away from a different
2488/// call with completely different semantics.
2489llvm::Value *
2490CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
2491 return emitOptimizedARCReturnCall(value, IsRetainRV: true, CGF&: *this);
2492}
2493
2494/// Claim a possibly-autoreleased return value at +0. This is only
2495/// valid to do in contexts which do not rely on the retain to keep
2496/// the object valid for all of its uses; for example, when
2497/// the value is ignored, or when it is being assigned to an
2498/// __unsafe_unretained variable.
2499///
2500/// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value)
2501llvm::Value *
2502CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
2503 return emitOptimizedARCReturnCall(value, IsRetainRV: false, CGF&: *this);
2504}
2505
/// Release the given object.
/// call void \@objc_release(i8* %value)
///
/// \param precise - if ARCImpreciseLifetime, the call is tagged with
///   !clang.imprecise_release so the optimizer may move it earlier.
void CodeGenFunction::EmitARCRelease(llvm::Value *value,
                                     ARCPreciseLifetime_t precise) {
  // Releasing a constant null is a no-op.
  if (isa<llvm::ConstantPointerNull>(Val: value)) return;

  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release;
  if (!fn)
    fn = getARCIntrinsic(IntID: llvm::Intrinsic::objc_release, CGM);

  // Cast the argument to 'id'.
  value = Builder.CreateBitCast(V: value, DestTy: Int8PtrTy);

  // Call objc_release.
  llvm::CallInst *call = EmitNounwindRuntimeCall(callee: fn, args: value);

  if (precise == ARCImpreciseLifetime) {
    // An empty node suffices; only the metadata kind is checked.
    call->setMetadata(Kind: "clang.imprecise_release",
                      Node: llvm::MDNode::get(Context&: Builder.getContext(), MDs: {}));
  }
}
2527
2528/// Destroy a __strong variable.
2529///
2530/// At -O0, emit a call to store 'null' into the address;
2531/// instrumenting tools prefer this because the address is exposed,
2532/// but it's relatively cumbersome to optimize.
2533///
2534/// At -O1 and above, just load and call objc_release.
2535///
2536/// call void \@objc_storeStrong(i8** %addr, i8* null)
2537void CodeGenFunction::EmitARCDestroyStrong(Address addr,
2538 ARCPreciseLifetime_t precise) {
2539 if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2540 llvm::Value *null = getNullForVariable(addr);
2541 EmitARCStoreStrongCall(addr, value: null, /*ignored*/ resultIgnored: true);
2542 return;
2543 }
2544
2545 llvm::Value *value = Builder.CreateLoad(Addr: addr);
2546 EmitARCRelease(value, precise);
2547}
2548
2549/// Store into a strong object. Always calls this:
2550/// call void \@objc_storeStrong(i8** %addr, i8* %value)
2551llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
2552 llvm::Value *value,
2553 bool ignored) {
2554 assert(addr.getElementType() == value->getType());
2555
2556 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
2557 if (!fn)
2558 fn = getARCIntrinsic(IntID: llvm::Intrinsic::objc_storeStrong, CGM);
2559
2560 llvm::Value *args[] = {
2561 Builder.CreateBitCast(V: addr.emitRawPointer(CGF&: *this), DestTy: Int8PtrPtrTy),
2562 Builder.CreateBitCast(V: value, DestTy: Int8PtrTy)};
2563 EmitNounwindRuntimeCall(callee: fn, args);
2564
2565 if (ignored) return nullptr;
2566 return value;
2567}
2568
/// Store into a strong object. Sometimes calls this:
///   call void \@objc_storeStrong(i8** %addr, i8* %value)
/// Other times, breaks it down into components.
///
/// \param dst - the destination lvalue
/// \param newValue - the value being stored
/// \param ignored - whether the caller uses the returned value
llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
                                                 llvm::Value *newValue,
                                                 bool ignored) {
  QualType type = dst.getType();
  bool isBlock = type->isBlockPointerType();

  // Use a store barrier at -O0 unless this is a block type or the
  // lvalue is inadequately aligned.
  if (shouldUseFusedARCCalls() &&
      !isBlock &&
      (dst.getAlignment().isZero() ||
       dst.getAlignment() >= CharUnits::fromQuantity(Quantity: PointerAlignInBytes))) {
    return EmitARCStoreStrongCall(addr: dst.getAddress(), value: newValue, ignored);
  }

  // Otherwise, split it out.

  // Retain the new value.
  newValue = EmitARCRetain(type, value: newValue);

  // Read the old value.
  llvm::Value *oldValue = EmitLoadOfScalar(lvalue: dst, Loc: SourceLocation());

  // Store. We do this before the release so that any deallocs won't
  // see the old value.
  EmitStoreOfScalar(value: newValue, lvalue: dst);

  // Finally, release the old value.
  EmitARCRelease(value: oldValue, precise: dst.isARCPreciseLifetime());

  return newValue;
}
2604
2605/// Autorelease the given object.
2606/// call i8* \@objc_autorelease(i8* %value)
2607llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
2608 return emitARCValueOperation(CGF&: *this, value, returnType: nullptr,
2609 fn&: CGM.getObjCEntrypoints().objc_autorelease,
2610 IntID: llvm::Intrinsic::objc_autorelease);
2611}
2612
2613/// Autorelease the given object.
2614/// call i8* \@objc_autoreleaseReturnValue(i8* %value)
2615llvm::Value *
2616CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
2617 return emitARCValueOperation(CGF&: *this, value, returnType: nullptr,
2618 fn&: CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
2619 IntID: llvm::Intrinsic::objc_autoreleaseReturnValue,
2620 tailKind: llvm::CallInst::TCK_Tail);
2621}
2622
2623/// Do a fused retain/autorelease of the given object.
2624/// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
2625llvm::Value *
2626CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
2627 return emitARCValueOperation(CGF&: *this, value, returnType: nullptr,
2628 fn&: CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
2629 IntID: llvm::Intrinsic::objc_retainAutoreleaseReturnValue,
2630 tailKind: llvm::CallInst::TCK_Tail);
2631}
2632
2633/// Do a fused retain/autorelease of the given object.
2634/// call i8* \@objc_retainAutorelease(i8* %value)
2635/// or
2636/// %retain = call i8* \@objc_retainBlock(i8* %value)
2637/// call i8* \@objc_autorelease(i8* %retain)
2638llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
2639 llvm::Value *value) {
2640 if (!type->isBlockPointerType())
2641 return EmitARCRetainAutoreleaseNonBlock(value);
2642
2643 if (isa<llvm::ConstantPointerNull>(Val: value)) return value;
2644
2645 llvm::Type *origType = value->getType();
2646 value = Builder.CreateBitCast(V: value, DestTy: Int8PtrTy);
2647 value = EmitARCRetainBlock(value, /*mandatory*/ true);
2648 value = EmitARCAutorelease(value);
2649 return Builder.CreateBitCast(V: value, DestTy: origType);
2650}
2651
2652/// Do a fused retain/autorelease of the given object.
2653/// call i8* \@objc_retainAutorelease(i8* %value)
2654llvm::Value *
2655CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
2656 return emitARCValueOperation(CGF&: *this, value, returnType: nullptr,
2657 fn&: CGM.getObjCEntrypoints().objc_retainAutorelease,
2658 IntID: llvm::Intrinsic::objc_retainAutorelease);
2659}
2660
2661/// i8* \@objc_loadWeak(i8** %addr)
2662/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
2663llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
2664 return emitARCLoadOperation(CGF&: *this, addr,
2665 fn&: CGM.getObjCEntrypoints().objc_loadWeak,
2666 IntID: llvm::Intrinsic::objc_loadWeak);
2667}
2668
2669/// i8* \@objc_loadWeakRetained(i8** %addr)
2670llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
2671 return emitARCLoadOperation(CGF&: *this, addr,
2672 fn&: CGM.getObjCEntrypoints().objc_loadWeakRetained,
2673 IntID: llvm::Intrinsic::objc_loadWeakRetained);
2674}
2675
2676/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
2677/// Returns %value.
2678llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
2679 llvm::Value *value,
2680 bool ignored) {
2681 return emitARCStoreOperation(CGF&: *this, addr, value,
2682 fn&: CGM.getObjCEntrypoints().objc_storeWeak,
2683 IntID: llvm::Intrinsic::objc_storeWeak, ignored);
2684}
2685
2686/// i8* \@objc_initWeak(i8** %addr, i8* %value)
2687/// Returns %value. %addr is known to not have a current weak entry.
2688/// Essentially equivalent to:
2689/// *addr = nil; objc_storeWeak(addr, value);
2690void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
2691 // If we're initializing to null, just write null to memory; no need
2692 // to get the runtime involved. But don't do this if optimization
2693 // is enabled, because accounting for this would make the optimizer
2694 // much more complicated.
2695 if (isa<llvm::ConstantPointerNull>(Val: value) &&
2696 CGM.getCodeGenOpts().OptimizationLevel == 0) {
2697 Builder.CreateStore(Val: value, Addr: addr);
2698 return;
2699 }
2700
2701 emitARCStoreOperation(CGF&: *this, addr, value,
2702 fn&: CGM.getObjCEntrypoints().objc_initWeak,
2703 IntID: llvm::Intrinsic::objc_initWeak, /*ignored*/ true);
2704}
2705
2706/// void \@objc_destroyWeak(i8** %addr)
2707/// Essentially objc_storeWeak(addr, nil).
2708void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
2709 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
2710 if (!fn)
2711 fn = getARCIntrinsic(IntID: llvm::Intrinsic::objc_destroyWeak, CGM);
2712
2713 EmitNounwindRuntimeCall(callee: fn, args: addr.emitRawPointer(CGF&: *this));
2714}
2715
2716/// void \@objc_moveWeak(i8** %dest, i8** %src)
2717/// Disregards the current value in %dest. Leaves %src pointing to nothing.
2718/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
2719void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
2720 emitARCCopyOperation(CGF&: *this, dst, src,
2721 fn&: CGM.getObjCEntrypoints().objc_moveWeak,
2722 IntID: llvm::Intrinsic::objc_moveWeak);
2723}
2724
2725/// void \@objc_copyWeak(i8** %dest, i8** %src)
2726/// Disregards the current value in %dest. Essentially
2727/// objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
2728void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
2729 emitARCCopyOperation(CGF&: *this, dst, src,
2730 fn&: CGM.getObjCEntrypoints().objc_copyWeak,
2731 IntID: llvm::Intrinsic::objc_copyWeak);
2732}
2733
2734void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr,
2735 Address SrcAddr) {
2736 llvm::Value *Object = EmitARCLoadWeakRetained(addr: SrcAddr);
2737 Object = EmitObjCConsumeObject(type: Ty, object: Object);
2738 EmitARCStoreWeak(addr: DstAddr, value: Object, ignored: false);
2739}
2740
2741void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
2742 Address SrcAddr) {
2743 llvm::Value *Object = EmitARCLoadWeakRetained(addr: SrcAddr);
2744 Object = EmitObjCConsumeObject(type: Ty, object: Object);
2745 EmitARCStoreWeak(addr: DstAddr, value: Object, ignored: false);
2746 EmitARCDestroyWeak(addr: SrcAddr);
2747}
2748
2749/// Produce the code to do a objc_autoreleasepool_push.
2750/// call i8* \@objc_autoreleasePoolPush(void)
2751llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
2752 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
2753 if (!fn)
2754 fn = getARCIntrinsic(IntID: llvm::Intrinsic::objc_autoreleasePoolPush, CGM);
2755
2756 return EmitNounwindRuntimeCall(callee: fn);
2757}
2758
/// Produce the code to do a primitive release.
/// call void \@objc_autoreleasePoolPop(i8* %ptr)
///
/// \param value - the token returned by the matching pool push; must
///   already have i8* type.
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
  assert(value->getType() == Int8PtrTy);

  if (getInvokeDest()) {
    // Call the runtime method not the intrinsic if we are handling exceptions
    llvm::FunctionCallee &fn =
        CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
    if (!fn) {
      // void objc_autoreleasePoolPop(i8*)
      llvm::FunctionType *fnType =
        llvm::FunctionType::get(Result: Builder.getVoidTy(), Params: Int8PtrTy, isVarArg: false);
      fn = CGM.CreateRuntimeFunction(Ty: fnType, Name: "objc_autoreleasePoolPop");
      setARCRuntimeFunctionLinkage(CGM, RTF: fn);
    }

    // objc_autoreleasePoolPop can throw.
    EmitRuntimeCallOrInvoke(callee: fn, args: value);
  } else {
    // No landing pad needed: use the intrinsic form and a plain call.
    llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
    if (!fn)
      fn = getARCIntrinsic(IntID: llvm::Intrinsic::objc_autoreleasePoolPop, CGM);

    EmitRuntimeCall(callee: fn, args: value);
  }
}
2785
/// Produce the code to do an MRR version objc_autoreleasepool_push.
/// Which is: [[NSAutoreleasePool alloc] init];
/// Where alloc is declared as: + (id) alloc; in NSAutoreleasePool class.
/// init is declared as: - (id) init; in its NSObject super class.
///
/// \return the initialized pool object, used later as the drain token.
llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(CGF&: *this);
  // [NSAutoreleasePool alloc]
  const IdentifierInfo *II = &CGM.getContext().Idents.get(Name: "alloc");
  Selector AllocSel = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II);
  CallArgList Args;
  RValue AllocRV =
    Runtime.GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(),
                                ResultType: getContext().getObjCIdType(),
                                Sel: AllocSel, Receiver, CallArgs: Args);

  // [Receiver init]
  Receiver = AllocRV.getScalarVal();
  II = &CGM.getContext().Idents.get(Name: "init");
  Selector InitSel = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II);
  RValue InitRV =
    Runtime.GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(),
                                ResultType: getContext().getObjCIdType(),
                                Sel: InitSel, Receiver, CallArgs: Args);
  return InitRV.getScalarVal();
}
2813
2814/// Allocate the given objc object.
2815/// call i8* \@objc_alloc(i8* %value)
2816llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value,
2817 llvm::Type *resultType) {
2818 return emitObjCValueOperation(CGF&: *this, value, returnType: resultType,
2819 fn&: CGM.getObjCEntrypoints().objc_alloc,
2820 fnName: "objc_alloc");
2821}
2822
2823/// Allocate the given objc object.
2824/// call i8* \@objc_allocWithZone(i8* %value)
2825llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
2826 llvm::Type *resultType) {
2827 return emitObjCValueOperation(CGF&: *this, value, returnType: resultType,
2828 fn&: CGM.getObjCEntrypoints().objc_allocWithZone,
2829 fnName: "objc_allocWithZone");
2830}
2831
2832llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value,
2833 llvm::Type *resultType) {
2834 return emitObjCValueOperation(CGF&: *this, value, returnType: resultType,
2835 fn&: CGM.getObjCEntrypoints().objc_alloc_init,
2836 fnName: "objc_alloc_init");
2837}
2838
2839/// Produce the code to do a primitive release.
2840/// [tmp drain];
2841void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
2842 const IdentifierInfo *II = &CGM.getContext().Idents.get(Name: "drain");
2843 Selector DrainSel = getContext().Selectors.getSelector(NumArgs: 0, IIV: &II);
2844 CallArgList Args;
2845 CGM.getObjCRuntime().GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(),
2846 ResultType: getContext().VoidTy, Sel: DrainSel, Receiver: Arg, CallArgs: Args);
2847}
2848
2849void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
2850 Address addr,
2851 QualType type) {
2852 CGF.EmitARCDestroyStrong(addr, precise: ARCPreciseLifetime);
2853}
2854
2855void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
2856 Address addr,
2857 QualType type) {
2858 CGF.EmitARCDestroyStrong(addr, precise: ARCImpreciseLifetime);
2859}
2860
2861void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
2862 Address addr,
2863 QualType type) {
2864 CGF.EmitARCDestroyWeak(addr);
2865}
2866
2867void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr,
2868 QualType type) {
2869 llvm::Value *value = CGF.Builder.CreateLoad(Addr: addr);
2870 CGF.EmitARCIntrinsicUse(values: value);
2871}
2872
2873/// Autorelease the given object.
2874/// call i8* \@objc_autorelease(i8* %value)
2875llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value,
2876 llvm::Type *returnType) {
2877 return emitObjCValueOperation(
2878 CGF&: *this, value, returnType,
2879 fn&: CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
2880 fnName: "objc_autorelease");
2881}
2882
2883/// Retain the given object, with normal retain semantics.
2884/// call i8* \@objc_retain(i8* %value)
2885llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value,
2886 llvm::Type *returnType) {
2887 return emitObjCValueOperation(
2888 CGF&: *this, value, returnType,
2889 fn&: CGM.getObjCEntrypoints().objc_retainRuntimeFunction, fnName: "objc_retain");
2890}
2891
/// Release the given object.
/// call void \@objc_release(i8* %value)
///
/// Unlike EmitARCRelease, this resolves objc_release by name and may
/// emit an invoke (via EmitCallOrInvoke) when needed.
void CodeGenFunction::EmitObjCRelease(llvm::Value *value,
                                      ARCPreciseLifetime_t precise) {
  // Releasing a constant null is a no-op.
  if (isa<llvm::ConstantPointerNull>(Val: value)) return;

  llvm::FunctionCallee &fn =
      CGM.getObjCEntrypoints().objc_releaseRuntimeFunction;
  if (!fn) {
    // void objc_release(i8*)
    llvm::FunctionType *fnType =
        llvm::FunctionType::get(Result: Builder.getVoidTy(), Params: Int8PtrTy, isVarArg: false);
    fn = CGM.CreateRuntimeFunction(Ty: fnType, Name: "objc_release");
    setARCRuntimeFunctionLinkage(CGM, RTF: fn);
    // We have Native ARC, so set nonlazybind attribute for performance
    if (llvm::Function *f = dyn_cast<llvm::Function>(Val: fn.getCallee()))
      f->addFnAttr(Kind: llvm::Attribute::NonLazyBind);
  }

  // Cast the argument to 'id'.
  value = Builder.CreateBitCast(V: value, DestTy: Int8PtrTy);

  // Call objc_release.
  llvm::CallBase *call = EmitCallOrInvoke(Callee: fn, Args: value);

  if (precise == ARCImpreciseLifetime) {
    // An empty node suffices; only the metadata kind is checked.
    call->setMetadata(Kind: "clang.imprecise_release",
                      Node: llvm::MDNode::get(Context&: Builder.getContext(), MDs: {}));
  }
}
2921
namespace {
  /// EH cleanup that pops an ARC autorelease pool, passing the token
  /// produced by the matching push.
  struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
    llvm::Value *Token;  // result of EmitObjCAutoreleasePoolPush

    CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitObjCAutoreleasePoolPop(value: Token);
    }
  };
  /// EH cleanup that drains an MRR NSAutoreleasePool by sending it
  /// the -drain message.
  struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup {
    llvm::Value *Token;  // the pool object from EmitObjCMRRAutoreleasePoolPush

    CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitObjCMRRAutoreleasePoolPop(Arg: Token);
    }
  };
}
2942
2943void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
2944 if (CGM.getLangOpts().ObjCAutoRefCount)
2945 EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(Kind: NormalCleanup, A: Ptr);
2946 else
2947 EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(Kind: NormalCleanup, A: Ptr);
2948}
2949
2950static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) {
2951 switch (lifetime) {
2952 case Qualifiers::OCL_None:
2953 case Qualifiers::OCL_ExplicitNone:
2954 case Qualifiers::OCL_Strong:
2955 case Qualifiers::OCL_Autoreleasing:
2956 return true;
2957
2958 case Qualifiers::OCL_Weak:
2959 return false;
2960 }
2961
2962 llvm_unreachable("impossible lifetime!");
2963}
2964
2965static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2966 LValue lvalue,
2967 QualType type) {
2968 llvm::Value *result;
2969 bool shouldRetain = shouldRetainObjCLifetime(lifetime: type.getObjCLifetime());
2970 if (shouldRetain) {
2971 result = CGF.EmitLoadOfLValue(V: lvalue, Loc: SourceLocation()).getScalarVal();
2972 } else {
2973 assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
2974 result = CGF.EmitARCLoadWeakRetained(addr: lvalue.getAddress());
2975 }
2976 return TryEmitResult(result, !shouldRetain);
2977}
2978
/// Emit a load of a retainable scalar from the given expression,
/// applying several ARC-specific shortcuts before falling back to the
/// lvalue-based overload. The returned TryEmitResult records whether
/// the value is already retained.
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  const Expr *e) {
  e = e->IgnoreParens();
  QualType type = e->getType();

  // If we're loading retained from a __strong xvalue, we can avoid
  // an extra retain/release pair by zeroing out the source of this
  // "move" operation.
  if (e->isXValue() &&
      !type.isConstQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Strong) {
    // Emit the lvalue.
    LValue lv = CGF.EmitLValue(E: e);

    // Load the object pointer.
    llvm::Value *result = CGF.EmitLoadOfLValue(V: lv,
                                               Loc: SourceLocation()).getScalarVal();

    // Set the source pointer to NULL.
    CGF.EmitStoreOfScalar(value: getNullForVariable(addr: lv.getAddress()), lvalue: lv);

    // The ownership transferred out of the source, so the result is +1.
    return TryEmitResult(result, true);
  }

  // As a very special optimization, in ARC++, if the l-value is the
  // result of a non-volatile assignment, do a simple retain of the
  // result of the call to objc_storeWeak instead of reloading.
  if (CGF.getLangOpts().CPlusPlus &&
      !type.isVolatileQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Weak &&
      isa<BinaryOperator>(Val: e) &&
      cast<BinaryOperator>(Val: e)->getOpcode() == BO_Assign)
    return TryEmitResult(CGF.EmitScalarExpr(E: e), false);

  // Try to emit code for scalar constant instead of emitting LValue and
  // loading it because we are not guaranteed to have an l-value. One of such
  // cases is DeclRefExpr referencing non-odr-used constant-evaluated variable.
  if (const auto *decl_expr = dyn_cast<DeclRefExpr>(Val: e)) {
    auto *DRE = const_cast<DeclRefExpr *>(decl_expr);
    if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(RefExpr: DRE))
      return TryEmitResult(CGF.emitScalarConstant(Constant: constant, E: DRE),
                           !shouldRetainObjCLifetime(lifetime: type.getObjCLifetime()));
  }

  // General case: emit the lvalue and load from it.
  return tryEmitARCRetainLoadOfScalar(CGF, lvalue: CGF.EmitLValue(E: e), type);
}
3025
3026typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
3027 llvm::Value *value)>
3028 ValueTransform;
3029
/// Insert code immediately after a call.
///
/// Walks the given value back to the underlying call/invoke (through
/// bitcasts and null-check phis), moves the insert point to just after
/// it, and applies \p doAfterCall there; otherwise applies
/// \p doFallback at the current insert point. The caller's insert
/// point is restored before returning.

// FIXME: We should find a way to emit the runtime call immediately
// after the call is emitted to eliminate the need for this function.
static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
                                              llvm::Value *value,
                                              ValueTransform doAfterCall,
                                              ValueTransform doFallback) {
  // Save the caller's insert point; it is restored at the bottom.
  CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
  auto *callBase = dyn_cast<llvm::CallBase>(Val: value);

  if (callBase && llvm::objcarc::hasAttachedCallOpBundle(CB: callBase)) {
    // Fall back if the call base has operand bundle "clang.arc.attachedcall".
    value = doFallback(CGF, value);
  } else if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(Val: value)) {
    // Place the retain immediately following the call.
    CGF.Builder.SetInsertPoint(TheBB: call->getParent(),
                               IP: ++llvm::BasicBlock::iterator(call));
    value = doAfterCall(CGF, value);
  } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(Val: value)) {
    // Place the retain at the beginning of the normal destination block.
    llvm::BasicBlock *BB = invoke->getNormalDest();
    CGF.Builder.SetInsertPoint(TheBB: BB, IP: BB->begin());
    value = doAfterCall(CGF, value);

  // Bitcasts can arise because of related-result returns. Rewrite
  // the operand.
  } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(Val: value)) {
    // Change the insert point to avoid emitting the fall-back call after the
    // bitcast.
    CGF.Builder.SetInsertPoint(TheBB: bitcast->getParent(), IP: bitcast->getIterator());
    llvm::Value *operand = bitcast->getOperand(i_nocapture: 0);
    // Recurse on the bitcast's operand and splice the result back in.
    operand = emitARCOperationAfterCall(CGF, value: operand, doAfterCall, doFallback);
    bitcast->setOperand(i_nocapture: 0, Val_nocapture: operand);
    value = bitcast;
  } else {
    auto *phi = dyn_cast<llvm::PHINode>(Val: value);
    if (phi && phi->getNumIncomingValues() == 2 &&
        isa<llvm::ConstantPointerNull>(Val: phi->getIncomingValue(i: 1)) &&
        isa<llvm::CallBase>(Val: phi->getIncomingValue(i: 0))) {
      // Handle phi instructions that are generated when it's necessary to check
      // whether the receiver of a message is null.
      llvm::Value *inVal = phi->getIncomingValue(i: 0);
      inVal = emitARCOperationAfterCall(CGF, value: inVal, doAfterCall, doFallback);
      phi->setIncomingValue(i: 0, V: inVal);
      value = phi;
    } else {
      // Generic fall-back case.
      // Retain using the non-block variant: we never need to do a copy
      // of a block that's been returned to us.
      value = doFallback(CGF, value);
    }
  }

  CGF.Builder.restoreIP(IP: ip);
  return value;
}
3087
3088/// Given that the given expression is some sort of call (which does
3089/// not return retained), emit a retain following it.
3090static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF,
3091 const Expr *e) {
3092 llvm::Value *value = CGF.EmitScalarExpr(E: e);
3093 return emitARCOperationAfterCall(CGF, value,
3094 doAfterCall: [](CodeGenFunction &CGF, llvm::Value *value) {
3095 return CGF.EmitARCRetainAutoreleasedReturnValue(value);
3096 },
3097 doFallback: [](CodeGenFunction &CGF, llvm::Value *value) {
3098 return CGF.EmitARCRetainNonBlock(value);
3099 });
3100}
3101
3102/// Given that the given expression is some sort of call (which does
3103/// not return retained), perform an unsafeClaim following it.
3104static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF,
3105 const Expr *e) {
3106 llvm::Value *value = CGF.EmitScalarExpr(E: e);
3107 return emitARCOperationAfterCall(CGF, value,
3108 doAfterCall: [](CodeGenFunction &CGF, llvm::Value *value) {
3109 return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value);
3110 },
3111 doFallback: [](CodeGenFunction &CGF, llvm::Value *value) {
3112 return value;
3113 });
3114}
3115
3116llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E,
3117 bool allowUnsafeClaim) {
3118 if (allowUnsafeClaim &&
3119 CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) {
3120 return emitARCUnsafeClaimCallResult(CGF&: *this, e: E);
3121 } else {
3122 llvm::Value *value = emitARCRetainCallResult(CGF&: *this, e: E);
3123 return EmitObjCConsumeObject(type: E->getType(), object: value);
3124 }
3125}
3126
3127/// Determine whether it might be important to emit a separate
3128/// objc_retain_block on the result of the given expression, or
3129/// whether it's okay to just emit it in a +1 context.
3130static bool shouldEmitSeparateBlockRetain(const Expr *e) {
3131 assert(e->getType()->isBlockPointerType());
3132 e = e->IgnoreParens();
3133
3134 // For future goodness, emit block expressions directly in +1
3135 // contexts if we can.
3136 if (isa<BlockExpr>(Val: e))
3137 return false;
3138
3139 if (const CastExpr *cast = dyn_cast<CastExpr>(Val: e)) {
3140 switch (cast->getCastKind()) {
3141 // Emitting these operations in +1 contexts is goodness.
3142 case CK_LValueToRValue:
3143 case CK_ARCReclaimReturnedObject:
3144 case CK_ARCConsumeObject:
3145 case CK_ARCProduceObject:
3146 return false;
3147
3148 // These operations preserve a block type.
3149 case CK_NoOp:
3150 case CK_BitCast:
3151 return shouldEmitSeparateBlockRetain(e: cast->getSubExpr());
3152
3153 // These operations are known to be bad (or haven't been considered).
3154 case CK_AnyPointerToBlockPointerCast:
3155 default:
3156 return true;
3157 }
3158 }
3159
3160 return true;
3161}
3162
namespace {
/// A CRTP base class for emitting expressions of retainable object
/// pointer type in ARC.
///
/// The derived class (Impl) decides what "Result" means (e.g. a raw
/// value, or a value plus a was-it-retained flag) and supplies the
/// minimal-implementation hooks listed at the bottom; this base class
/// provides the shared traversal logic.
template <typename Impl, typename Result> class ARCExprEmitter {
protected:
  CodeGenFunction &CGF;
  // CRTP downcast to the concrete emitter.
  Impl &asImpl() { return *static_cast<Impl*>(this); }

  ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {}

public:
  // Main entry point: dispatches on the expression's form.
  Result visit(const Expr *e);
  Result visitCastExpr(const CastExpr *e);
  Result visitPseudoObjectExpr(const PseudoObjectExpr *e);
  Result visitBlockExpr(const BlockExpr *e);
  Result visitBinaryOperator(const BinaryOperator *e);
  Result visitBinAssign(const BinaryOperator *e);
  Result visitBinAssignUnsafeUnretained(const BinaryOperator *e);
  Result visitBinAssignAutoreleasing(const BinaryOperator *e);
  Result visitBinAssignWeak(const BinaryOperator *e);
  Result visitBinAssignStrong(const BinaryOperator *e);

  // Minimal implementation:
  //   Result visitLValueToRValue(const Expr *e)
  //   Result visitConsumeObject(const Expr *e)
  //   Result visitExtendBlockObject(const Expr *e)
  //   Result visitReclaimReturnedObject(const Expr *e)
  //   Result visitCall(const Expr *e)
  //   Result visitExpr(const Expr *e)
  //
  //   Result emitBitCast(Result result, llvm::Type *resultType)
  //   llvm::Value *getValueOfResult(Result result)
};
}
3197
/// Try to emit a PseudoObjectExpr under special ARC rules.
///
/// Walks the semantic expressions in order, binding each opaque value
/// to its source; the expression that constitutes the result is
/// evaluated through the derived emitter (so it may be emitted at +1)
/// while the rest are emitted in an ignored context.
///
/// This massively duplicates emitPseudoObjectRValue.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression.
  const Expr *resultExpr = E->getResultExpr();
  assert(resultExpr);
  Result result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(Val: semantic)) {
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;

      // If this semantic is the result of the pseudo-object
      // expression, try to evaluate the source as +1.
      if (ov == resultExpr) {
        assert(!OVMA::shouldBindAsLValue(ov));
        result = asImpl().visit(ov->getSourceExpr());
        opaqueData = OVMA::bind(CGF, ov,
                            RValue::get(asImpl().getValueOfResult(result)));

      // Otherwise, just bind it.
      } else {
        opaqueData = OVMA::bind(CGF, ov, e: ov->getSourceExpr());
      }
      opaques.push_back(Elt: opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      result = asImpl().visit(semantic);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(E: semantic);
    }
  }

  // Unbind all the opaques now.
  for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques)
    opaque.unbind(CGF);

  return result;
}
3252
/// Default handling for block literals: no special treatment.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) {
  // The default implementation just forwards the expression to visitExpr.
  return asImpl().visitExpr(e);
}
3258
/// Dispatch on the kind of cast: look through no-ops, rewrite
/// pointer-representation casts as bitcasts, and route the ARC
/// operation casts to their dedicated hooks.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
  switch (e->getCastKind()) {

  // No-op casts don't change the type, so we just ignore them.
  case CK_NoOp:
    return asImpl().visit(e->getSubExpr());

  // These casts can change the type.
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    llvm::Type *resultType = CGF.ConvertType(T: e->getType());
    assert(e->getSubExpr()->getType()->hasPointerRepresentation());
    Result result = asImpl().visit(e->getSubExpr());
    return asImpl().emitBitCast(result, resultType);
  }

  // Handle some casts specially.
  case CK_LValueToRValue:
    return asImpl().visitLValueToRValue(e->getSubExpr());
  case CK_ARCConsumeObject:
    return asImpl().visitConsumeObject(e->getSubExpr());
  case CK_ARCExtendBlockObject:
    return asImpl().visitExtendBlockObject(e->getSubExpr());
  case CK_ARCReclaimReturnedObject:
    return asImpl().visitReclaimReturnedObject(e->getSubExpr());

  // Otherwise, use the default logic.
  default:
    return asImpl().visitExpr(e);
  }
}
3293
/// Handle binary operators: the comma operator evaluates its LHS for
/// side effects only, assignment has per-qualifier handling, and
/// everything else falls through to the generic case.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) {
  switch (e->getOpcode()) {
  case BO_Comma:
    // LHS is evaluated for effect only; the RHS carries the value.
    CGF.EmitIgnoredExpr(E: e->getLHS());
    CGF.EnsureInsertPoint();
    return asImpl().visit(e->getRHS());

  case BO_Assign:
    return asImpl().visitBinAssign(e);

  default:
    return asImpl().visitExpr(e);
  }
}
3310
/// Dispatch an assignment on the ObjC ownership qualifier of the LHS.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) {
  switch (e->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_ExplicitNone:
    return asImpl().visitBinAssignUnsafeUnretained(e);

  case Qualifiers::OCL_Weak:
    return asImpl().visitBinAssignWeak(e);

  case Qualifiers::OCL_Autoreleasing:
    return asImpl().visitBinAssignAutoreleasing(e);

  case Qualifiers::OCL_Strong:
    return asImpl().visitBinAssignStrong(e);

  // No lifetime qualifier: nothing special to do.
  case Qualifiers::OCL_None:
    return asImpl().visitExpr(e);
  }
  llvm_unreachable("bad ObjC ownership qualifier");
}
3331
/// The default rule for __unsafe_unretained emits the RHS recursively,
/// stores into the unsafe variable, and propagates the result outward.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::
                    visitBinAssignUnsafeUnretained(const BinaryOperator *e) {
  // Recursively emit the RHS.
  // For __block safety, do this before emitting the LHS.
  Result result = asImpl().visit(e->getRHS());

  // Perform the store.
  LValue lvalue =
    CGF.EmitCheckedLValue(E: e->getLHS(), TCK: CodeGenFunction::TCK_Store);
  CGF.EmitStoreThroughLValue(Src: RValue::get(asImpl().getValueOfResult(result)),
                             Dst: lvalue);

  // Propagate the RHS value as the result of the assignment.
  return result;
}
3349
/// __autoreleasing assignments get no special handling by default.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}
3355
/// __weak assignments get no special handling by default.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}
3361
/// __strong assignments get no special handling by default.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}
3367
/// The general expression-emission logic.
///
/// Looks through parens, then dispatches to the cast, binary-operator,
/// call, pseudo-object, or block handlers; anything unrecognized goes
/// to the derived emitter's visitExpr.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
  // We should *never* see a nested full-expression here, because if
  // we fail to emit at +1, our caller must not retain after we close
  // out the full-expression.  This isn't as important in the unsafe
  // emitter.
  assert(!isa<ExprWithCleanups>(e));

  // Look through parens, __extension__, generic selection, etc.
  e = e->IgnoreParens();

  // Handle certain kinds of casts.
  if (const CastExpr *ce = dyn_cast<CastExpr>(Val: e)) {
    return asImpl().visitCastExpr(ce);

  // Handle the comma operator.
  } else if (auto op = dyn_cast<BinaryOperator>(Val: e)) {
    return asImpl().visitBinaryOperator(op);

  // TODO: handle conditional operators here

  // For calls and message sends, use the retained-call logic.
  // Delegate inits are a special case in that they're the only
  // returns-retained expression that *isn't* surrounded by
  // a consume.
  } else if (isa<CallExpr>(Val: e) ||
             (isa<ObjCMessageExpr>(Val: e) &&
              !cast<ObjCMessageExpr>(Val: e)->isDelegateInitCall())) {
    return asImpl().visitCall(e);

  // Look through pseudo-object expressions.
  } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(Val: e)) {
    return asImpl().visitPseudoObjectExpr(pseudo);
  } else if (auto *be = dyn_cast<BlockExpr>(Val: e))
    return asImpl().visitBlockExpr(be);

  // Fallback for everything else.
  return asImpl().visitExpr(e);
}
3407
namespace {

/// An emitter for +1 results.
///
/// The TryEmitResult's int flag records whether the produced value is
/// already retained (+1); callers retain afterwards when it is false.
struct ARCRetainExprEmitter :
  public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> {

  ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(TryEmitResult result) {
    return result.getPointer();
  }

  // Bitcast the payload, preserving the retained flag.
  TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) {
    llvm::Value *value = result.getPointer();
    value = CGF.Builder.CreateBitCast(V: value, DestTy: resultType);
    result.setPointer(value);
    return result;
  }

  TryEmitResult visitLValueToRValue(const Expr *e) {
    return tryEmitARCRetainLoadOfScalar(CGF, e);
  }

  /// For consumptions, just emit the subexpression and thus elide
  /// the retain/release pair.
  TryEmitResult visitConsumeObject(const Expr *e) {
    llvm::Value *result = CGF.EmitScalarExpr(E: e);
    return TryEmitResult(result, true);
  }

  TryEmitResult visitBlockExpr(const BlockExpr *e) {
    TryEmitResult result = visitExpr(e);
    // Avoid the block-retain if this is a block literal that doesn't need to be
    // copied to the heap.
    if (CGF.CGM.getCodeGenOpts().ObjCAvoidHeapifyLocalBlocks &&
        e->getBlockDecl()->canAvoidCopyToHeap())
      result.setInt(true);
    return result;
  }

  /// Block extends are net +0.  Naively, we could just recurse on
  /// the subexpression, but actually we need to ensure that the
  /// value is copied as a block, so there's a little filter here.
  TryEmitResult visitExtendBlockObject(const Expr *e) {
    llvm::Value *result; // will be a +0 value

    // If we can't safely assume the sub-expression will produce a
    // block-copied value, emit the sub-expression at +0.
    if (shouldEmitSeparateBlockRetain(e)) {
      result = CGF.EmitScalarExpr(E: e);

    // Otherwise, try to emit the sub-expression at +1 recursively.
    } else {
      TryEmitResult subresult = asImpl().visit(e);

      // If that produced a retained value, just use that.
      if (subresult.getInt()) {
        return subresult;
      }

      // Otherwise it's +0.
      result = subresult.getPointer();
    }

    // Retain the object as a block.
    result = CGF.EmitARCRetainBlock(value: result, /*mandatory*/ true);
    return TryEmitResult(result, true);
  }

  /// For reclaims, emit the subexpression as a retained call and
  /// skip the consumption.
  TryEmitResult visitReclaimReturnedObject(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  /// When we have an undecorated call, retroactively do a claim.
  TryEmitResult visitCall(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  // TODO: maybe special-case visitBinAssignWeak?

  TryEmitResult visitExpr(const Expr *e) {
    // We didn't find an obvious production, so emit what we've got and
    // tell the caller that we didn't manage to retain.
    llvm::Value *result = CGF.EmitScalarExpr(E: e);
    return TryEmitResult(result, false);
  }
};
}
3500
/// Try to emit \p e at +1; the result's flag says whether a retain
/// actually happened.
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
  return ARCRetainExprEmitter(CGF).visit(e);
}
3505
3506static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
3507 LValue lvalue,
3508 QualType type) {
3509 TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
3510 llvm::Value *value = result.getPointer();
3511 if (!result.getInt())
3512 value = CGF.EmitARCRetain(type, value);
3513 return value;
3514}
3515
3516/// EmitARCRetainScalarExpr - Semantically equivalent to
3517/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
3518/// best-effort attempt to peephole expressions that naturally produce
3519/// retained objects.
3520llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
3521 // The retain needs to happen within the full-expression.
3522 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(Val: e)) {
3523 RunCleanupsScope scope(*this);
3524 return EmitARCRetainScalarExpr(e: cleanups->getSubExpr());
3525 }
3526
3527 TryEmitResult result = tryEmitARCRetainScalarExpr(CGF&: *this, e);
3528 llvm::Value *value = result.getPointer();
3529 if (!result.getInt())
3530 value = EmitARCRetain(type: e->getType(), value);
3531 return value;
3532}
3533
3534llvm::Value *
3535CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
3536 // The retain needs to happen within the full-expression.
3537 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(Val: e)) {
3538 RunCleanupsScope scope(*this);
3539 return EmitARCRetainAutoreleaseScalarExpr(e: cleanups->getSubExpr());
3540 }
3541
3542 TryEmitResult result = tryEmitARCRetainScalarExpr(CGF&: *this, e);
3543 llvm::Value *value = result.getPointer();
3544 if (result.getInt())
3545 value = EmitARCAutorelease(value);
3546 else
3547 value = EmitARCRetainAutorelease(type: e->getType(), value);
3548 return value;
3549}
3550
3551llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
3552 llvm::Value *result;
3553 bool doRetain;
3554
3555 if (shouldEmitSeparateBlockRetain(e)) {
3556 result = EmitScalarExpr(E: e);
3557 doRetain = true;
3558 } else {
3559 TryEmitResult subresult = tryEmitARCRetainScalarExpr(CGF&: *this, e);
3560 result = subresult.getPointer();
3561 doRetain = !subresult.getInt();
3562 }
3563
3564 if (doRetain)
3565 result = EmitARCRetainBlock(value: result, /*mandatory*/ true);
3566 return EmitObjCConsumeObject(type: e->getType(), object: result);
3567}
3568
3569llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
3570 // In ARC, retain and autorelease the expression.
3571 if (getLangOpts().ObjCAutoRefCount) {
3572 // Do so before running any cleanups for the full-expression.
3573 // EmitARCRetainAutoreleaseScalarExpr does this for us.
3574 return EmitARCRetainAutoreleaseScalarExpr(e: expr);
3575 }
3576
3577 // Otherwise, use the normal scalar-expression emission. The
3578 // exception machinery doesn't do anything special with the
3579 // exception like retaining it, so there's no safety associated with
3580 // only running cleanups after the throw has started, and when it
3581 // matters it tends to be substantially inferior code.
3582 return EmitScalarExpr(E: expr);
3583}
3584
namespace {

/// An emitter for assigning into an __unsafe_unretained context.
///
/// Results here are plain values (no retained flag): this emitter
/// never leaves an extra +1 on the result.
struct ARCUnsafeUnretainedExprEmitter :
  public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {

  ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(llvm::Value *value) {
    return value;
  }

  llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
    return CGF.Builder.CreateBitCast(V: value, DestTy: resultType);
  }

  llvm::Value *visitLValueToRValue(const Expr *e) {
    return CGF.EmitScalarExpr(E: e);
  }

  /// For consumptions, just emit the subexpression and perform the
  /// consumption like normal.
  llvm::Value *visitConsumeObject(const Expr *e) {
    llvm::Value *value = CGF.EmitScalarExpr(E: e);
    return CGF.EmitObjCConsumeObject(type: e->getType(), object: value);
  }

  /// No special logic for block extensions.  (This probably can't
  /// actually happen in this emitter, though.)
  llvm::Value *visitExtendBlockObject(const Expr *e) {
    return CGF.EmitARCExtendBlockObject(e);
  }

  /// For reclaims, perform an unsafeClaim if that's enabled.
  llvm::Value *visitReclaimReturnedObject(const Expr *e) {
    return CGF.EmitARCReclaimReturnedObject(E: e, /*unsafe*/ allowUnsafeClaim: true);
  }

  /// When we have an undecorated call, just emit it without adding
  /// the unsafeClaim.
  llvm::Value *visitCall(const Expr *e) {
    return CGF.EmitScalarExpr(E: e);
  }

  /// Just do normal scalar emission in the default case.
  llvm::Value *visitExpr(const Expr *e) {
    return CGF.EmitScalarExpr(E: e);
  }
};
}
3635
/// Emit \p e without leaving any extra retain on the result.
static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
                                                      const Expr *e) {
  return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
}
3640
3641/// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
3642/// immediately releasing the resut of EmitARCRetainScalarExpr, but
3643/// avoiding any spurious retains, including by performing reclaims
3644/// with objc_unsafeClaimAutoreleasedReturnValue.
3645llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
3646 // Look through full-expressions.
3647 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(Val: e)) {
3648 RunCleanupsScope scope(*this);
3649 return emitARCUnsafeUnretainedScalarExpr(CGF&: *this, e: cleanups->getSubExpr());
3650 }
3651
3652 return emitARCUnsafeUnretainedScalarExpr(CGF&: *this, e);
3653}
3654
3655std::pair<LValue,llvm::Value*>
3656CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
3657 bool ignored) {
3658 // Evaluate the RHS first. If we're ignoring the result, assume
3659 // that we can emit at an unsafe +0.
3660 llvm::Value *value;
3661 if (ignored) {
3662 value = EmitARCUnsafeUnretainedScalarExpr(e: e->getRHS());
3663 } else {
3664 value = EmitScalarExpr(E: e->getRHS());
3665 }
3666
3667 // Emit the LHS and perform the store.
3668 LValue lvalue = EmitLValue(E: e->getLHS());
3669 EmitStoreOfScalar(value, lvalue);
3670
3671 return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
3672}
3673
/// Emit an assignment into a __strong l-value: evaluate the RHS
/// (preferably at +1), store it, and release the old value.  Returns
/// the l-value and the new value.
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(CGF&: *this, e: e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(E: e->getLHS());

  // If the RHS was emitted retained, expand this: load the old value,
  // store the new one, then release the old value.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, Loc: SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(value: oldValue, precise: lvalue.isARCPreciseLifetime());
  } else {
    // Otherwise let the runtime's objc_storeStrong do the work.
    value = EmitARCStoreStrong(dst: lvalue, newValue: value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}
3704
3705std::pair<LValue,llvm::Value*>
3706CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
3707 llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e: e->getRHS());
3708 LValue lvalue = EmitLValue(E: e->getLHS());
3709
3710 EmitStoreOfScalar(value, lvalue);
3711
3712 return std::pair<LValue,llvm::Value*>(lvalue, value);
3713}
3714
/// Emit an @autoreleasepool statement: push a pool (native ARC or MRR
/// flavor), emit the body, and pop the pool via a normal cleanup.
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
                                             const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(Val: *subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, Loc: S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    // Native ARC runtime: objc_autoreleasePoolPush/Pop.
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(Kind: NormalCleanup, A: token);
  } else {
    // Fragile/MRR runtime: NSAutoreleasePool-based push/pop.
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(Kind: NormalCleanup, A: token);
  }

  for (const auto *I : S.body())
    EmitStmt(S: I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, Loc: S.getRBracLoc());
}
3740
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
  // We just use an inline assembly: an empty asm with an "r" input
  // and side effects forces the object pointer to be kept alive here.
  // NOTE(review): RequiredArgs::All is passed where FunctionType::get
  // expects a bool isVarArg — presumably it converts to true; confirm
  // this is intentional.
  llvm::FunctionType *extenderType
    = llvm::FunctionType::get(Result: VoidTy, Params: VoidPtrTy, isVarArg: RequiredArgs::All);
  llvm::InlineAsm *extender = llvm::InlineAsm::get(Ty: extenderType,
                                                   /* assembly */ AsmString: "",
                                                   /* constraints */ Constraints: "r",
                                                   /* side effects */ hasSideEffects: true);

  EmitNounwindRuntimeCall(callee: extender, args: object);
}
3754
/// GenerateObjCAtomicSetterCopyHelperFunction - Given a c++ object type with
/// non-trivial copy assignment function, produce following helper function.
/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
///
/// Returns nullptr when no helper is needed (non-atomic property,
/// trivial setter, non-record type, or runtime without atomic-copy
/// support); caches the generated function per type in CGM.
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
                                        const ObjCPropertyImplDecl *PID) {
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  // Only atomic properties need a copy helper.
  if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic)))
    return nullptr;

  QualType Ty = PID->getPropertyIvarDecl()->getType();
  ASTContext &C = getContext();

  if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    // Call the move assignment operator instead of calling the copy assignment
    // operator and destructor.
    CharUnits Alignment = C.getTypeAlignInChars(T: Ty);
    llvm::Constant *Fn = getNonTrivialCStructMoveAssignmentOperator(
        CGM, DstAlignment: Alignment, SrcAlignment: Alignment, IsVolatile: Ty.isVolatileQualified(), QT: Ty);
    return Fn;
  }

  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  if (!Ty->isRecordType())
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  // Reuse a previously generated helper for this type, if any.
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  const IdentifierInfo *II =
      &CGM.getContext().Idents.get(Name: "__assign_helper_atomic_property_");

  // Build the helper's signature: void (Ty *dest, const Ty *source).
  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(T: Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(T: SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(Elt: DestTy);
  ArgTys.push_back(Elt: SrcTy);
  QualType FunctionTy = C.getFunctionType(ResultTy: ReturnTy, Args: ArgTys, EPI: {});

  // Synthesize a FunctionDecl and its two parameters so the body can
  // be built and emitted as ordinary AST.
  FunctionDecl *FD = FunctionDecl::Create(
      C, DC: C.getTranslationUnitDecl(), StartLoc: SourceLocation(), NLoc: SourceLocation(), N: II,
      T: FunctionTy, TInfo: nullptr, SC: SC_Static, UsesFPIntrin: false, isInlineSpecified: false, hasWrittenPrototype: false);

  FunctionArgList args;
  ParmVarDecl *Params[2];
  ParmVarDecl *DstDecl = ParmVarDecl::Create(
      C, DC: FD, StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: nullptr, T: DestTy,
      TInfo: C.getTrivialTypeSourceInfo(T: DestTy, Loc: SourceLocation()), S: SC_None,
      /*DefArg=*/nullptr);
  args.push_back(Elt: Params[0] = DstDecl);
  ParmVarDecl *SrcDecl = ParmVarDecl::Create(
      C, DC: FD, StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: nullptr, T: SrcTy,
      TInfo: C.getTrivialTypeSourceInfo(T: SrcTy, Loc: SourceLocation()), S: SC_None,
      /*DefArg=*/nullptr);
  args.push_back(Elt: Params[1] = SrcDecl);
  FD->setParams(Params);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(Info: FI);

  llvm::Function *Fn =
    llvm::Function::Create(Ty: LTy, Linkage: llvm::GlobalValue::InternalLinkage,
                           N: "__assign_helper_atomic_property_",
                           M: &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GD: GlobalDecl(), F: Fn, FI);

  StartFunction(GD: FD, RetTy: ReturnTy, Fn, FnInfo: FI, Args: args);

  // Build '*dest' and '*source' as l-values over the parameters.
  DeclRefExpr DstExpr(C, DstDecl, false, DestTy, VK_PRValue, SourceLocation());
  UnaryOperator *DST = UnaryOperator::Create(
      C, input: &DstExpr, opc: UO_Deref, type: DestTy->getPointeeType(), VK: VK_LValue, OK: OK_Ordinary,
      l: SourceLocation(), CanOverflow: false, FPFeatures: FPOptionsOverride());

  DeclRefExpr SrcExpr(C, SrcDecl, false, SrcTy, VK_PRValue, SourceLocation());
  UnaryOperator *SRC = UnaryOperator::Create(
      C, input: &SrcExpr, opc: UO_Deref, type: SrcTy->getPointeeType(), VK: VK_LValue, OK: OK_Ordinary,
      l: SourceLocation(), CanOverflow: false, FPFeatures: FPOptionsOverride());

  // Emit '*dest = *source' using the property's setter assignment as
  // the callee for operator=.
  Expr *Args[2] = {DST, SRC};
  CallExpr *CalleeExp = cast<CallExpr>(Val: PID->getSetterCXXAssignment());
  CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
      Ctx: C, OpKind: OO_Equal, Fn: CalleeExp->getCallee(), Args, Ty: DestTy->getPointeeType(),
      VK: VK_LValue, OperatorLoc: SourceLocation(), FPFeatures: FPOptionsOverride());

  EmitStmt(S: TheCall);

  FinishFunction();
  // Cache the helper so it is generated at most once per type.
  HelperFn = Fn;
  CGM.setAtomicSetterHelperFnMap(Ty, Fn: HelperFn);
  return HelperFn;
}
3859
/// Emit (or return a cached copy of) the "__copy_helper_atomic_property_"
/// function used by the synthesized getter of an atomic property whose type
/// is a non-trivial C++ record (or a non-trivial C struct).
///
/// The helper has the effective signature
///   void __copy_helper_atomic_property_(Ty *dst, const Ty *src)
/// and runs the property's getter copy-construct expression with *src
/// substituted as the first constructor argument, constructing directly
/// into *dst.  Returns null whenever no helper is required.
llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
    const ObjCPropertyImplDecl *PID) {
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  // Only atomic properties need a copy helper.
  if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic)))
    return nullptr;

  QualType Ty = PD->getType();
  ASTContext &C = getContext();

  // Non-trivial C structs are handled by the generic non-trivial-struct
  // copy-constructor machinery rather than a bespoke helper.
  if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    CharUnits Alignment = C.getTypeAlignInChars(T: Ty);
    llvm::Constant *Fn = getNonTrivialCStructCopyConstructor(
        CGM, DstAlignment: Alignment, SrcAlignment: Alignment, IsVolatile: Ty.isVolatileQualified(), QT: Ty);
    return Fn;
  }

  // A C++ helper is emitted only in C++ mode, and only when the targeted
  // ObjC runtime supports atomic copy helpers.
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  if (!Ty->isRecordType())
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  // A trivial getter expression needs no helper at all.
  if (hasTrivialGetExpr(propImpl: PID))
    return nullptr;
  assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
  // Reuse a previously-emitted helper for this property type.
  if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
    return HelperFn;

  const IdentifierInfo *II =
      &CGM.getContext().Idents.get(Name: "__copy_helper_atomic_property_");

  // Build the helper's type: void (Ty *dst, const Ty *src).
  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(T: Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(T: SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(Elt: DestTy);
  ArgTys.push_back(Elt: SrcTy);
  QualType FunctionTy = C.getFunctionType(ResultTy: ReturnTy, Args: ArgTys, EPI: {});

  // Create a FunctionDecl (plus its two parameters) so the body can be
  // emitted through the normal StartFunction/FinishFunction machinery.
  FunctionDecl *FD = FunctionDecl::Create(
      C, DC: C.getTranslationUnitDecl(), StartLoc: SourceLocation(), NLoc: SourceLocation(), N: II,
      T: FunctionTy, TInfo: nullptr, SC: SC_Static, UsesFPIntrin: false, isInlineSpecified: false, hasWrittenPrototype: false);

  FunctionArgList args;
  ParmVarDecl *Params[2];
  ParmVarDecl *DstDecl = ParmVarDecl::Create(
      C, DC: FD, StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: nullptr, T: DestTy,
      TInfo: C.getTrivialTypeSourceInfo(T: DestTy, Loc: SourceLocation()), S: SC_None,
      /*DefArg=*/nullptr);
  args.push_back(Elt: Params[0] = DstDecl);
  ParmVarDecl *SrcDecl = ParmVarDecl::Create(
      C, DC: FD, StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: nullptr, T: SrcTy,
      TInfo: C.getTrivialTypeSourceInfo(T: SrcTy, Loc: SourceLocation()), S: SC_None,
      /*DefArg=*/nullptr);
  args.push_back(Elt: Params[1] = SrcDecl);
  FD->setParams(Params);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(Info: FI);

  llvm::Function *Fn = llvm::Function::Create(
      Ty: LTy, Linkage: llvm::GlobalValue::InternalLinkage, N: "__copy_helper_atomic_property_",
      M: &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GD: GlobalDecl(), F: Fn, FI);

  StartFunction(GD: FD, RetTy: ReturnTy, Fn, FnInfo: FI, Args: args);

  // Build an AST for "*src" to act as the first constructor argument.
  DeclRefExpr SrcExpr(getContext(), SrcDecl, false, SrcTy, VK_PRValue,
                      SourceLocation());

  UnaryOperator *SRC = UnaryOperator::Create(
      C, input: &SrcExpr, opc: UO_Deref, type: SrcTy->getPointeeType(), VK: VK_LValue, OK: OK_Ordinary,
      l: SourceLocation(), CanOverflow: false, FPFeatures: FPOptionsOverride());

  CXXConstructExpr *CXXConstExpr =
      cast<CXXConstructExpr>(Val: PID->getGetterCXXConstructor());

  // Reuse the getter's construct expression, substituting "*src" for its
  // first argument; the remaining arguments are carried over unchanged.
  SmallVector<Expr*, 4> ConstructorArgs;
  ConstructorArgs.push_back(Elt: SRC);
  ConstructorArgs.append(in_start: std::next(x: CXXConstExpr->arg_begin()),
                         in_end: CXXConstExpr->arg_end());

  CXXConstructExpr *TheCXXConstructExpr =
      CXXConstructExpr::Create(Ctx: C, Ty, Loc: SourceLocation(),
                               Ctor: CXXConstExpr->getConstructor(),
                               Elidable: CXXConstExpr->isElidable(),
                               Args: ConstructorArgs,
                               HadMultipleCandidates: CXXConstExpr->hadMultipleCandidates(),
                               ListInitialization: CXXConstExpr->isListInitialization(),
                               StdInitListInitialization: CXXConstExpr->isStdInitListInitialization(),
                               ZeroInitialization: CXXConstExpr->requiresZeroInitialization(),
                               ConstructKind: CXXConstExpr->getConstructionKind(),
                               ParenOrBraceRange: SourceRange());

  DeclRefExpr DstExpr(getContext(), DstDecl, false, DestTy, VK_PRValue,
                      SourceLocation());

  // Evaluate "dst" and aggregate-construct the copy directly into *dst.
  RValue DV = EmitAnyExpr(E: &DstExpr);
  CharUnits Alignment =
      getContext().getTypeAlignInChars(T: TheCXXConstructExpr->getType());
  EmitAggExpr(E: TheCXXConstructExpr,
              AS: AggValueSlot::forAddr(
                  addr: Address(DV.getScalarVal(), ConvertTypeForMem(T: Ty), Alignment),
                  quals: Qualifiers(), isDestructed: AggValueSlot::IsDestructed,
                  needsGC: AggValueSlot::DoesNotNeedGCBarriers,
                  isAliased: AggValueSlot::IsNotAliased, mayOverlap: AggValueSlot::DoesNotOverlap));

  FinishFunction();
  // Cache the emitted helper so later getters of the same type reuse it.
  HelperFn = Fn;
  CGM.setAtomicGetterHelperFnMap(Ty, Fn: HelperFn);
  return HelperFn;
}
3978
3979llvm::Value *
3980CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
3981 // Get selectors for retain/autorelease.
3982 const IdentifierInfo *CopyID = &getContext().Idents.get(Name: "copy");
3983 Selector CopySelector =
3984 getContext().Selectors.getNullarySelector(ID: CopyID);
3985 const IdentifierInfo *AutoreleaseID = &getContext().Idents.get(Name: "autorelease");
3986 Selector AutoreleaseSelector =
3987 getContext().Selectors.getNullarySelector(ID: AutoreleaseID);
3988
3989 // Emit calls to retain/autorelease.
3990 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
3991 llvm::Value *Val = Block;
3992 RValue Result;
3993 Result = Runtime.GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(),
3994 ResultType: Ty, Sel: CopySelector,
3995 Receiver: Val, CallArgs: CallArgList(), Class: nullptr, Method: nullptr);
3996 Val = Result.getScalarVal();
3997 Result = Runtime.GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(),
3998 ResultType: Ty, Sel: AutoreleaseSelector,
3999 Receiver: Val, CallArgs: CallArgList(), Class: nullptr, Method: nullptr);
4000 Val = Result.getScalarVal();
4001 return Val;
4002}
4003
4004static unsigned getBaseMachOPlatformID(const llvm::Triple &TT) {
4005 switch (TT.getOS()) {
4006 case llvm::Triple::Darwin:
4007 case llvm::Triple::MacOSX:
4008 return llvm::MachO::PLATFORM_MACOS;
4009 case llvm::Triple::IOS:
4010 return llvm::MachO::PLATFORM_IOS;
4011 case llvm::Triple::TvOS:
4012 return llvm::MachO::PLATFORM_TVOS;
4013 case llvm::Triple::WatchOS:
4014 return llvm::MachO::PLATFORM_WATCHOS;
4015 case llvm::Triple::XROS:
4016 return llvm::MachO::PLATFORM_XROS;
4017 case llvm::Triple::DriverKit:
4018 return llvm::MachO::PLATFORM_DRIVERKIT;
4019 default:
4020 return llvm::MachO::PLATFORM_UNKNOWN;
4021 }
4022}
4023
4024static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF,
4025 const VersionTuple &Version) {
4026 CodeGenModule &CGM = CGF.CGM;
4027 // Note: we intend to support multi-platform version checks, so reserve
4028 // the room for a dual platform checking invocation that will be
4029 // implemented in the future.
4030 llvm::SmallVector<llvm::Value *, 8> Args;
4031
4032 auto EmitArgs = [&](const VersionTuple &Version, const llvm::Triple &TT) {
4033 std::optional<unsigned> Min = Version.getMinor(),
4034 SMin = Version.getSubminor();
4035 Args.push_back(
4036 Elt: llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: getBaseMachOPlatformID(TT)));
4037 Args.push_back(Elt: llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: Version.getMajor()));
4038 Args.push_back(Elt: llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: Min.value_or(u: 0)));
4039 Args.push_back(Elt: llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: SMin.value_or(u: 0)));
4040 };
4041
4042 assert(!Version.empty() && "unexpected empty version");
4043 EmitArgs(Version, CGM.getTarget().getTriple());
4044
4045 if (!CGM.IsPlatformVersionAtLeastFn) {
4046 llvm::FunctionType *FTy = llvm::FunctionType::get(
4047 Result: CGM.Int32Ty, Params: {CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty},
4048 isVarArg: false);
4049 CGM.IsPlatformVersionAtLeastFn =
4050 CGM.CreateRuntimeFunction(Ty: FTy, Name: "__isPlatformVersionAtLeast");
4051 }
4052
4053 llvm::Value *Check =
4054 CGF.EmitNounwindRuntimeCall(callee: CGM.IsPlatformVersionAtLeastFn, args: Args);
4055 return CGF.Builder.CreateICmpNE(LHS: Check,
4056 RHS: llvm::Constant::getNullValue(Ty: CGM.Int32Ty));
4057}
4058
4059llvm::Value *
4060CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) {
4061 // Darwin uses the new __isPlatformVersionAtLeast family of routines.
4062 if (CGM.getTarget().getTriple().isOSDarwin())
4063 return emitIsPlatformVersionAtLeast(CGF&: *this, Version);
4064
4065 if (!CGM.IsOSVersionAtLeastFn) {
4066 llvm::FunctionType *FTy =
4067 llvm::FunctionType::get(Result: Int32Ty, Params: {Int32Ty, Int32Ty, Int32Ty}, isVarArg: false);
4068 CGM.IsOSVersionAtLeastFn =
4069 CGM.CreateRuntimeFunction(Ty: FTy, Name: "__isOSVersionAtLeast");
4070 }
4071
4072 std::optional<unsigned> Min = Version.getMinor(),
4073 SMin = Version.getSubminor();
4074 llvm::Value *Args[] = {
4075 llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: Version.getMajor()),
4076 llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: Min.value_or(u: 0)),
4077 llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: SMin.value_or(u: 0))};
4078
4079 llvm::Value *CallRes =
4080 EmitNounwindRuntimeCall(callee: CGM.IsOSVersionAtLeastFn, args: Args);
4081
4082 return Builder.CreateICmpNE(LHS: CallRes, RHS: llvm::Constant::getNullValue(Ty: Int32Ty));
4083}
4084
4085static bool isFoundationNeededForDarwinAvailabilityCheck(
4086 const llvm::Triple &TT, const VersionTuple &TargetVersion) {
4087 VersionTuple FoundationDroppedInVersion;
4088 switch (TT.getOS()) {
4089 case llvm::Triple::IOS:
4090 case llvm::Triple::TvOS:
4091 FoundationDroppedInVersion = VersionTuple(/*Major=*/13);
4092 break;
4093 case llvm::Triple::WatchOS:
4094 FoundationDroppedInVersion = VersionTuple(/*Major=*/6);
4095 break;
4096 case llvm::Triple::Darwin:
4097 case llvm::Triple::MacOSX:
4098 FoundationDroppedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/15);
4099 break;
4100 case llvm::Triple::XROS:
4101 // XROS doesn't need Foundation.
4102 return false;
4103 case llvm::Triple::DriverKit:
4104 // DriverKit doesn't need Foundation.
4105 return false;
4106 default:
4107 llvm_unreachable("Unexpected OS");
4108 }
4109 return TargetVersion < FoundationDroppedInVersion;
4110}
4111
4112void CodeGenModule::emitAtAvailableLinkGuard() {
4113 if (!IsPlatformVersionAtLeastFn)
4114 return;
4115 // @available requires CoreFoundation only on Darwin.
4116 if (!Target.getTriple().isOSDarwin())
4117 return;
4118 // @available doesn't need Foundation on macOS 10.15+, iOS/tvOS 13+, or
4119 // watchOS 6+.
4120 if (!isFoundationNeededForDarwinAvailabilityCheck(
4121 TT: Target.getTriple(), TargetVersion: Target.getPlatformMinVersion()))
4122 return;
4123 // Add -framework CoreFoundation to the linker commands. We still want to
4124 // emit the core foundation reference down below because otherwise if
4125 // CoreFoundation is not used in the code, the linker won't link the
4126 // framework.
4127 auto &Context = getLLVMContext();
4128 llvm::Metadata *Args[2] = {llvm::MDString::get(Context, Str: "-framework"),
4129 llvm::MDString::get(Context, Str: "CoreFoundation")};
4130 LinkerOptionsMetadata.push_back(Elt: llvm::MDNode::get(Context, MDs: Args));
4131 // Emit a reference to a symbol from CoreFoundation to ensure that
4132 // CoreFoundation is linked into the final binary.
4133 llvm::FunctionType *FTy =
4134 llvm::FunctionType::get(Result: Int32Ty, Params: {VoidPtrTy}, isVarArg: false);
4135 llvm::FunctionCallee CFFunc =
4136 CreateRuntimeFunction(Ty: FTy, Name: "CFBundleGetVersionNumber");
4137
4138 llvm::FunctionType *CheckFTy = llvm::FunctionType::get(Result: VoidTy, Params: {}, isVarArg: false);
4139 llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction(
4140 Ty: CheckFTy, Name: "__clang_at_available_requires_core_foundation_framework",
4141 ExtraAttrs: llvm::AttributeList(), /*Local=*/true);
4142 llvm::Function *CFLinkCheckFunc =
4143 cast<llvm::Function>(Val: CFLinkCheckFuncRef.getCallee()->stripPointerCasts());
4144 if (CFLinkCheckFunc->empty()) {
4145 CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
4146 CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
4147 CodeGenFunction CGF(*this);
4148 CGF.Builder.SetInsertPoint(CGF.createBasicBlock(name: "", parent: CFLinkCheckFunc));
4149 CGF.EmitNounwindRuntimeCall(callee: CFFunc,
4150 args: llvm::Constant::getNullValue(Ty: VoidPtrTy));
4151 CGF.Builder.CreateUnreachable();
4152 addCompilerUsedGlobal(GV: CFLinkCheckFunc);
4153 }
4154}
4155
4156CGObjCRuntime::~CGObjCRuntime() {}
4157