//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This abstract class defines the interface for Objective-C runtime-specific
// code generation. It provides some concrete helper methods for functionality
// shared between all (or most) of the Objective-C runtimes supported by clang.
//
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

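// Compute the byte offset of an ivar within instances of the given interface
// (or implementation) by truncating its bit offset to a char boundary.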
uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCInterfaceDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return CGM.getContext().lookupFieldBitOffset(OID, Ivar) /
         CGM.getContext().getCharWidth();
}

uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCImplementationDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return CGM.getContext().lookupFieldBitOffset(OID->getClassInterface(), Ivar) /
         CGM.getContext().getCharWidth();
}

unsigned CGObjCRuntime::ComputeBitfieldBitOffset(
    CodeGen::CodeGenModule &CGM,
    const ObjCInterfaceDecl *ID,
    const ObjCIvarDecl *Ivar) {
  return CGM.getContext().lookupFieldBitOffset(ID, Ivar);
}

LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  QualType InterfaceTy{OID->getTypeForDecl(), 0};
  QualType ObjectPtrTy =
      CGF.CGM.getContext().getObjCObjectPointerType(InterfaceTy);
  QualType IvarTy =
      Ivar->getUsageType(ObjectPtrTy).withCVRQualifiers(CVRQualifiers);
  llvm::Value *V = BaseValue;
  V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, V, Offset, "add.ptr");

  if (!Ivar->isBitField()) {
    LValue LV = CGF.MakeNaturalAlignRawAddrLValue(V, IvarTy);
    return LV;
  }

  // We need to compute an access strategy for this bit-field. We are given the
  // offset to the first byte in the bit-field; the sub-byte offset is taken
  // from the original layout. We reuse the normal bit-field access strategy by
  // treating this as an access to a struct where the bit-field is in byte 0,
  // and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what alignment
  // guarantees the runtime makes for us, and (b) we don't have a way to specify
  // that the struct is at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: we can only perform this lookup on
  // non-synthesized ivars, but this routine may be called for synthesized
  // ivars. However, a synthesized ivar can never be a bit-field, so this is
  // safe.
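  //
  // For example, on a target with 8-bit chars, a bit-field ivar whose first
  // bit sits at overall bit offset 17 with a width of 9 bits has
  // BitOffset = 17 % 8 = 1 and needs a storage unit of
  // alignTo(1 + 9, 8) = 16 bits, i.e. two chars.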
  uint64_t FieldBitOffset =
      CGF.CGM.getContext().lookupFieldBitOffset(OID, Ivar);
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t AlignmentBits = CGF.CGM.getTarget().getCharAlign();
  uint64_t BitFieldSize = Ivar->getBitWidthValue();
  CharUnits StorageSize = CGF.CGM.getContext().toCharUnitsFromBits(
      llvm::alignTo(BitOffset + BitFieldSize, AlignmentBits));
  CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful; these should be uniqued or part of
  // some layout object. However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of these
  // objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
      CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
                               CGF.CGM.getContext().toBits(StorageSize),
                               CharUnits::fromQuantity(0)));

  Address Addr =
      Address(V, llvm::Type::getIntNTy(CGF.getLLVMContext(), Info->StorageSize),
              Alignment);

  return LValue::MakeBitfield(Addr, *Info, IvarTy,
                              LValueBaseInfo(AlignmentSource::Decl),
                              TBAAAccessInfo());
}

namespace {
  struct CatchHandler {
    const VarDecl *Variable;
    const Stmt *Body;
    llvm::BasicBlock *Block;
    llvm::Constant *TypeInfo;
    /// Flags used to differentiate cleanups and catchalls in Windows SEH
    unsigned Flags;
  };

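  // A cleanup that calls the given end-catch function when an @catch scope is
  // exited. MightThrow controls whether the call is emitted as a call/invoke
  // that may throw or as a nounwind call.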
  struct CallObjCEndCatch final : EHScopeStack::Cleanup {
    CallObjCEndCatch(bool MightThrow, llvm::FunctionCallee Fn)
        : MightThrow(MightThrow), Fn(Fn) {}
    bool MightThrow;
    llvm::FunctionCallee Fn;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      if (MightThrow)
        CGF.EmitRuntimeCallOrInvoke(Fn);
      else
        CGF.EmitNounwindRuntimeCall(Fn);
    }
  };
}

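// Emit an Objective-C @try/@catch/@finally statement using the given runtime
// entry points. beginCatchFn and endCatchFn may be null, in which case the
// thrown object is used directly from the exception slot.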
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
                                     const ObjCAtTryStmt &S,
                                     llvm::FunctionCallee beginCatchFn,
                                     llvm::FunctionCallee endCatchFn,
                                     llvm::FunctionCallee exceptionRethrowFn) {
  // Jump destination for falling out of catch bodies.
  CodeGenFunction::JumpDest Cont;
  if (S.getNumCatchStmts())
    Cont = CGF.getJumpDestInCurrentScope("eh.cont");

  bool useFunclets = EHPersonality::get(CGF).usesFuncletPads();

  CodeGenFunction::FinallyInfo FinallyInfo;
  if (!useFunclets)
    if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
      FinallyInfo.enter(CGF, Finally->getFinallyBody(),
                        beginCatchFn, endCatchFn, exceptionRethrowFn);

  SmallVector<CatchHandler, 8> Handlers;

  // Enter the catch, if there is one.
  if (S.getNumCatchStmts()) {
    for (const ObjCAtCatchStmt *CatchStmt : S.catch_stmts()) {
      const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();

      Handlers.push_back(CatchHandler());
      CatchHandler &Handler = Handlers.back();
      Handler.Variable = CatchDecl;
      Handler.Body = CatchStmt->getCatchBody();
      Handler.Block = CGF.createBasicBlock("catch");
      Handler.Flags = 0;

      // @catch(...) always matches.
      if (!CatchDecl) {
        auto catchAll = getCatchAllTypeInfo();
        Handler.TypeInfo = catchAll.RTTI;
        Handler.Flags = catchAll.Flags;
        // Don't consider any other catches.
        break;
      }

      Handler.TypeInfo = GetEHType(CatchDecl->getType());
    }

    EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
    for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
      Catch->setHandler(I, { Handlers[I].TypeInfo, Handlers[I].Flags },
                        Handlers[I].Block);
  }

  if (useFunclets)
    if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) {
      CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
      if (!CGF.CurSEHParent)
        CGF.CurSEHParent = cast<NamedDecl>(CGF.CurFuncDecl);
      // Outline the finally block into an SEH-style helper function.
      const Stmt *FinallyBlock = Finally->getFinallyBody();
      HelperCGF.startOutlinedSEHHelper(CGF, /*isFilter*/ false, FinallyBlock);

      // Emit the finally body into the outlined helper.
      HelperCGF.EmitStmt(FinallyBlock);

      HelperCGF.FinishFunction(FinallyBlock->getEndLoc());

      llvm::Function *FinallyFunc = HelperCGF.CurFn;

      // Push a cleanup for __finally blocks.
      CGF.pushSEHCleanup(NormalAndEHCleanup, FinallyFunc);
    }

  // Emit the try body.
  CGF.EmitStmt(S.getTryBody());

  // Leave the try.
  if (S.getNumCatchStmts())
    CGF.popCatchScope();

  // Remember where we were.
  CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();

  // Emit the handlers.
  for (CatchHandler &Handler : Handlers) {
    CGF.EmitBlock(Handler.Block);

    CodeGenFunction::LexicalScope Cleanups(CGF, Handler.Body->getSourceRange());
    SaveAndRestore RevertAfterScope(CGF.CurrentFuncletPad);
    if (useFunclets) {
      // Under funclet-based EH, make the handler's catchpad the current
      // funclet pad and arrange for a catchret when the handler is left.
      llvm::BasicBlock::iterator CPICandidate =
          Handler.Block->getFirstNonPHIIt();
      if (CPICandidate != Handler.Block->end()) {
        if (auto *CPI = dyn_cast_or_null<llvm::CatchPadInst>(CPICandidate)) {
          CGF.CurrentFuncletPad = CPI;
          CPI->setOperand(2, CGF.getExceptionSlot().emitRawPointer(CGF));
          CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
        }
      }
    }

    llvm::Value *RawExn = CGF.getExceptionFromSlot();

    // Enter the catch.
    llvm::Value *Exn = RawExn;
    if (beginCatchFn)
      Exn = CGF.EmitNounwindRuntimeCall(beginCatchFn, RawExn, "exn.adjusted");

    if (endCatchFn) {
      // Add a cleanup to leave the catch.
      bool EndCatchMightThrow = (Handler.Variable == nullptr);

      CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
                                                EndCatchMightThrow,
                                                endCatchFn);
    }

    // Bind the catch parameter if it exists.
    if (const VarDecl *CatchParam = Handler.Variable) {
      llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
      llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);

      CGF.EmitAutoVarDecl(*CatchParam);
      EmitInitOfCatchParam(CGF, CastExn, CatchParam);
    }

    CGF.ObjCEHValueStack.push_back(Exn);
    CGF.EmitStmt(Handler.Body);
    CGF.ObjCEHValueStack.pop_back();

    // Leave any cleanups associated with the catch.
    Cleanups.ForceCleanup();

    CGF.EmitBranchThroughCleanup(Cont);
  }

  // Go back to the try-statement fallthrough.
  CGF.Builder.restoreIP(SavedIP);

  // Pop out of the finally.
  if (!useFunclets && S.getFinallyStmt())
    FinallyInfo.exit(CGF);

  if (Cont.isValid())
    CGF.EmitBlock(Cont.getBlock());
}

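// Initialize the @catch parameter from the caught exception object, honoring
// the parameter's ARC ownership qualifier.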
void CGObjCRuntime::EmitInitOfCatchParam(CodeGenFunction &CGF,
                                         llvm::Value *exn,
                                         const VarDecl *paramDecl) {

  Address paramAddr = CGF.GetAddrOfLocalVar(paramDecl);

  switch (paramDecl->getType().getQualifiers().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    exn = CGF.EmitARCRetainNonBlock(exn);
    [[fallthrough]];

  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
  case Qualifiers::OCL_Autoreleasing:
    CGF.Builder.CreateStore(exn, paramAddr);
    return;

  case Qualifiers::OCL_Weak:
    CGF.EmitARCInitWeak(paramAddr, exn);
    return;
  }
  llvm_unreachable("invalid ownership qualifier");
}

namespace {
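  // Cleanup that releases an @synchronized lock by calling the runtime's
  // sync-exit function with the saved lock object.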
  struct CallSyncExit final : EHScopeStack::Cleanup {
    llvm::FunctionCallee SyncExitFn;
    llvm::Value *SyncArg;
    CallSyncExit(llvm::FunctionCallee SyncExitFn, llvm::Value *SyncArg)
        : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(SyncExitFn, SyncArg);
    }
  };
}

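// Emit an @synchronized statement: evaluate the lock operand, call the
// runtime's sync-enter function, and register a cleanup that releases the
// lock on every exit path before emitting the body.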
void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
                                           const ObjCAtSynchronizedStmt &S,
                                           llvm::FunctionCallee syncEnterFn,
                                           llvm::FunctionCallee syncExitFn) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);

  // Evaluate the lock operand. This is guaranteed to dominate the
  // ARC release and lock-release cleanups.
  const Expr *lockExpr = S.getSynchExpr();
  llvm::Value *lock;
  if (CGF.getLangOpts().ObjCAutoRefCount) {
    lock = CGF.EmitARCRetainScalarExpr(lockExpr);
    lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
  } else {
    lock = CGF.EmitScalarExpr(lockExpr);
  }
  lock = CGF.Builder.CreateBitCast(lock, CGF.VoidPtrTy);

  // Acquire the lock.
  CGF.Builder.CreateCall(syncEnterFn, lock)->setDoesNotThrow();

  // Register an all-paths cleanup to release the lock.
  CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn, lock);

  // Emit the body of the statement.
  CGF.EmitStmt(S.getSynchBody());
}

/// Compute the pointer-to-function type to which a message send
/// should be cast in order to correctly call the given method
/// with the given arguments.
///
/// \param method - may be null
/// \param resultType - the result type to use if there's no method
/// \param callArgs - the actual arguments, including implicit ones
CGObjCRuntime::MessageSendInfo
CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
                                  QualType resultType,
                                  CallArgList &callArgs) {
  unsigned ProgramAS = CGM.getDataLayout().getProgramAddressSpace();

  llvm::PointerType *signatureType =
      llvm::PointerType::get(CGM.getLLVMContext(), ProgramAS);

  // If there's a method, use information from that.
  if (method) {
    const CGFunctionInfo &signature =
        CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);

    const CGFunctionInfo &signatureForCall =
        CGM.getTypes().arrangeCall(signature, callArgs);

    return MessageSendInfo(signatureForCall, signatureType);
  }

  // There's no method; just use a default CC.
  const CGFunctionInfo &argsInfo =
      CGM.getTypes().arrangeUnprototypedObjCMessageSend(resultType, callArgs);

  return MessageSendInfo(argsInfo, signatureType);
}

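// Conservatively determine whether a message receiver might be nil at run
// time, so callers can decide whether to emit a nil check around the send.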
bool CGObjCRuntime::canMessageReceiverBeNull(CodeGenFunction &CGF,
                                             const ObjCMethodDecl *method,
                                             bool isSuper,
                                             const ObjCInterfaceDecl *classReceiver,
                                             llvm::Value *receiver) {
  // Super dispatch assumes that self is non-null; even the messenger
  // doesn't have a null check internally.
  if (isSuper)
    return false;

  // If this is a direct dispatch of a class method, check whether the class,
  // or anything in its hierarchy, was weak-linked.
  if (classReceiver && method && method->isClassMethod())
    return isWeakLinkedClass(classReceiver);

  // If we're emitting a method, and self is const (meaning just ARC, for now),
  // and the receiver is a load of self, then self is a valid object.
  if (auto curMethod =
        dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl)) {
    auto self = curMethod->getSelfDecl();
    if (self->getType().isConstQualified()) {
      if (auto LI = dyn_cast<llvm::LoadInst>(receiver->stripPointerCasts())) {
        llvm::Value *selfAddr = CGF.GetAddrOfLocalVar(self).emitRawPointer(CGF);
        if (selfAddr == LI->getPointerOperand()) {
          return false;
        }
      }
    }
  }

  // Otherwise, assume it can be null.
  return true;
}

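// Returns true if the given class, or any class in its superclass chain, is
// weak-imported and may therefore be absent at run time.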
bool CGObjCRuntime::isWeakLinkedClass(const ObjCInterfaceDecl *ID) {
  do {
    if (ID->isWeakImported())
      return true;
  } while ((ID = ID->getSuperClass()));

  return false;
}

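// Destroy arguments that the callee would normally be responsible for
// destroying: ns_consumed object arguments are released, and record arguments
// destroyed in the callee are destroyed here instead. This is used when the
// message send itself is skipped, e.g. for a nil receiver.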
void CGObjCRuntime::destroyCalleeDestroyedArguments(CodeGenFunction &CGF,
                                                    const ObjCMethodDecl *method,
                                                    const CallArgList &callArgs) {
  CallArgList::const_iterator I = callArgs.begin();
  for (auto i = method->param_begin(), e = method->param_end();
       i != e; ++i, ++I) {
    const ParmVarDecl *param = (*i);
    if (param->hasAttr<NSConsumedAttr>()) {
      RValue RV = I->getRValue(CGF);
      assert(RV.isScalar() &&
             "NullReturnState::complete - arg not on object");
      CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime);
    } else {
      QualType QT = param->getType();
      auto *RT = QT->getAs<RecordType>();
      if (RT && RT->getDecl()->isParamDestroyedInCallee()) {
        RValue RV = I->getRValue(CGF);
        QualType::DestructionKind DtorKind = QT.isDestructedType();
        switch (DtorKind) {
        case QualType::DK_cxx_destructor:
          CGF.destroyCXXObject(CGF, RV.getAggregateAddress(), QT);
          break;
        case QualType::DK_nontrivial_c_struct:
          CGF.destroyNonTrivialCStruct(CGF, RV.getAggregateAddress(), QT);
          break;
        default:
          llvm_unreachable("unexpected dtor kind");
          break;
        }
      }
    }
  }
}

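// Emit (or reuse a previously emitted) protocol object for the given protocol
// declaration, using the module's configured Objective-C runtime.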
llvm::Constant *
clang::CodeGen::emitObjCProtocolObject(CodeGenModule &CGM,
                                       const ObjCProtocolDecl *protocol) {
  return CGM.getObjCRuntime().GetOrEmitProtocol(protocol);
}

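// Mangle the symbol name for an Objective-C method, optionally including the
// category name in the mangling.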
std::string CGObjCRuntime::getSymbolNameForMethod(const ObjCMethodDecl *OMD,
                                                  bool includeCategoryName) {
  std::string buffer;
  llvm::raw_string_ostream out(buffer);
  CGM.getCXXABI().getMangleContext().mangleObjCMethodName(OMD, out,
                                       /*includePrefixByte=*/true,
                                       includeCategoryName);
  return buffer;
}