1 | //===- ObjCARCContract.cpp - ObjC ARC Optimization ------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | /// \file |
9 | /// This file defines late ObjC ARC optimizations. ARC stands for Automatic |
10 | /// Reference Counting and is a system for managing reference counts for objects |
/// in Objective-C.
12 | /// |
13 | /// This specific file mainly deals with ``contracting'' multiple lower level |
14 | /// operations into singular higher level operations through pattern matching. |
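///
/// For example (an illustrative summary; the exact pattern and its safety
/// conditions are documented above tryToContractReleaseIntoStoreStrong
/// below), a load/retain/release/store sequence over the same pointer can be
/// contracted into a single objc_storeStrong call.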
15 | /// |
16 | /// WARNING: This file knows about certain library functions. It recognizes them |
17 | /// by name, and hardwires knowledge of their semantics. |
18 | /// |
19 | /// WARNING: This file knows about how certain Objective-C library functions are |
20 | /// used. Naive LLVM IR transformations which would otherwise be |
21 | /// behavior-preserving may break these assumptions. |
22 | /// |
23 | //===----------------------------------------------------------------------===// |
24 | |
25 | // TODO: ObjCARCContract could insert PHI nodes when uses aren't |
26 | // dominated by single calls. |
27 | |
28 | #include "ARCRuntimeEntryPoints.h" |
29 | #include "DependencyAnalysis.h" |
30 | #include "ObjCARC.h" |
31 | #include "ProvenanceAnalysis.h" |
32 | #include "llvm/ADT/Statistic.h" |
33 | #include "llvm/Analysis/AliasAnalysis.h" |
34 | #include "llvm/Analysis/BasicAliasAnalysis.h" |
35 | #include "llvm/Analysis/ObjCARCUtil.h" |
36 | #include "llvm/IR/Dominators.h" |
37 | #include "llvm/IR/EHPersonalities.h" |
38 | #include "llvm/IR/InlineAsm.h" |
39 | #include "llvm/IR/InstIterator.h" |
40 | #include "llvm/IR/Operator.h" |
41 | #include "llvm/IR/PassManager.h" |
42 | #include "llvm/InitializePasses.h" |
43 | #include "llvm/Support/Debug.h" |
44 | #include "llvm/Support/raw_ostream.h" |
45 | #include "llvm/TargetParser/Triple.h" |
46 | #include "llvm/Transforms/ObjCARC.h" |
47 | |
48 | using namespace llvm; |
49 | using namespace llvm::objcarc; |
50 | |
51 | #define DEBUG_TYPE "objc-arc-contract" |
52 | |
STATISTIC(NumPeeps, "Number of calls peephole-optimized");
STATISTIC(NumStoreStrongs, "Number of objc_storeStrong calls formed");

static cl::opt<cl::boolOrDefault> UseObjCClaimRV(
"arc-contract-use-objc-claim-rv",
cl::desc(
"Enable generation of calls to objc_claimAutoreleasedReturnValue"));
60 | |
61 | //===----------------------------------------------------------------------===// |
62 | // Declarations |
63 | //===----------------------------------------------------------------------===// |
64 | |
65 | namespace { |
66 | /// Late ARC optimizations |
67 | /// |
/// These change the IR in a way that makes it difficult for ObjCARCOpt to
/// analyze, so they are run late.
70 | |
71 | class ObjCARCContract { |
72 | bool Changed; |
73 | bool CFGChanged = false; |
74 | AAResults *AA; |
75 | DominatorTree *DT; |
76 | ProvenanceAnalysis PA; |
77 | ARCRuntimeEntryPoints EP; |
78 | BundledRetainClaimRVs *BundledInsts = nullptr; |
79 | |
80 | /// A flag indicating whether this optimization pass should run. |
81 | bool Run; |
82 | |
83 | /// Whether objc_claimAutoreleasedReturnValue is available. |
84 | bool HasClaimRV = false; |
85 | |
86 | /// The inline asm string to insert between calls and RetainRV calls to make |
87 | /// the optimization work on targets which need it. |
88 | const MDString *RVInstMarker; |
89 | |
90 | /// The set of inserted objc_storeStrong calls. If at the end of walking the |
91 | /// function we have found no alloca instructions, these calls can be marked |
92 | /// "tail". |
93 | SmallPtrSet<CallInst *, 8> StoreStrongCalls; |
94 | |
95 | /// Returns true if we eliminated Inst. |
96 | bool tryToPeepholeInstruction( |
97 | Function &F, Instruction *Inst, inst_iterator &Iter, |
98 | bool &TailOkForStoreStrong, |
99 | const DenseMap<BasicBlock *, ColorVector> &BlockColors); |
100 | |
101 | bool optimizeRetainCall(Function &F, Instruction *Retain); |
102 | |
103 | bool contractAutorelease(Function &F, Instruction *Autorelease, |
104 | ARCInstKind Class); |
105 | |
106 | void tryToContractReleaseIntoStoreStrong( |
107 | Instruction *Release, inst_iterator &Iter, |
108 | const DenseMap<BasicBlock *, ColorVector> &BlockColors); |
109 | |
110 | public: |
111 | bool init(Module &M); |
112 | bool run(Function &F, AAResults *AA, DominatorTree *DT); |
113 | bool hasCFGChanged() const { return CFGChanged; } |
114 | }; |
115 | |
116 | class ObjCARCContractLegacyPass : public FunctionPass { |
117 | public: |
118 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
119 | bool runOnFunction(Function &F) override; |
120 | |
121 | static char ID; |
122 | ObjCARCContractLegacyPass() : FunctionPass(ID) { |
123 | initializeObjCARCContractLegacyPassPass(*PassRegistry::getPassRegistry()); |
124 | } |
125 | }; |
126 | } |
127 | |
128 | //===----------------------------------------------------------------------===// |
129 | // Implementation |
130 | //===----------------------------------------------------------------------===// |
131 | |
132 | /// Turn objc_retain into objc_retainAutoreleasedReturnValue if the operand is a |
133 | /// return value. We do this late so we do not disrupt the dataflow analysis in |
134 | /// ObjCARCOpt. |
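///
/// For example (an illustrative sketch; value names are hypothetical):
///
///   %call = call i8* @foo()
///   %0 = call i8* @llvm.objc.retain(i8* %call)
///
/// becomes
///
///   %call = call i8* @foo()
///   %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call)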
135 | bool ObjCARCContract::optimizeRetainCall(Function &F, Instruction *Retain) { |
const auto *Call = dyn_cast<CallBase>(GetArgRCIdentityRoot(Retain));
137 | if (!Call) |
138 | return false; |
139 | if (Call->getParent() != Retain->getParent()) |
140 | return false; |
141 | |
142 | // Check that the call is next to the retain. |
143 | BasicBlock::const_iterator I = ++Call->getIterator(); |
while (IsNoopInstruction(&*I))
145 | ++I; |
146 | if (&*I != Retain) |
147 | return false; |
148 | |
149 | // Turn it to an objc_retainAutoreleasedReturnValue. |
150 | Changed = true; |
151 | ++NumPeeps; |
152 | |
153 | LLVM_DEBUG( |
154 | dbgs() << "Transforming objc_retain => " |
155 | "objc_retainAutoreleasedReturnValue since the operand is a " |
156 | "return value.\nOld: " |
<< *Retain << "\n");
158 | |
// We do not have to worry about the tail-call or does-not-throw properties
// here, since retain and retainRV have the same ones.
Function *Decl = EP.get(ARCRuntimeEntryPointKind::RetainRV);
cast<CallInst>(Retain)->setCalledFunction(Decl);

LLVM_DEBUG(dbgs() << "New: " << *Retain << "\n");
165 | return true; |
166 | } |
167 | |
168 | /// Merge an autorelease with a retain into a fused call. |
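///
/// For example (an illustrative sketch; value names are hypothetical):
///
///   %0 = call i8* @llvm.objc.retain(i8* %x)
///   ...
///   %1 = call i8* @llvm.objc.autorelease(i8* %x)
///
/// becomes a single call to @llvm.objc.retainAutorelease (or, when fusing
/// with an autoreleaseRV, to @llvm.objc.retainAutoreleaseReturnValue).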
169 | bool ObjCARCContract::contractAutorelease(Function &F, Instruction *Autorelease, |
170 | ARCInstKind Class) { |
const Value *Arg = GetArgRCIdentityRoot(Autorelease);
172 | |
173 | // Check that there are no instructions between the retain and the autorelease |
174 | // (such as an autorelease_pop) which may change the count. |
175 | DependenceKind DK = Class == ARCInstKind::AutoreleaseRV |
176 | ? RetainAutoreleaseRVDep |
177 | : RetainAutoreleaseDep; |
auto *Retain = dyn_cast_or_null<CallInst>(
    findSingleDependency(DK, Arg, Autorelease->getParent(), Autorelease, PA));

if (!Retain || GetBasicARCInstKind(Retain) != ARCInstKind::Retain ||
    GetArgRCIdentityRoot(Retain) != Arg)
183 | return false; |
184 | |
185 | Changed = true; |
186 | ++NumPeeps; |
187 | |
188 | LLVM_DEBUG(dbgs() << " Fusing retain/autorelease!\n" |
189 | " Autorelease:" |
190 | << *Autorelease |
191 | << "\n" |
192 | " Retain: " |
<< *Retain << "\n");

Function *Decl = EP.get(Class == ARCInstKind::AutoreleaseRV
                            ? ARCRuntimeEntryPointKind::RetainAutoreleaseRV
                            : ARCRuntimeEntryPointKind::RetainAutorelease);
Retain->setCalledFunction(Decl);

LLVM_DEBUG(dbgs() << " New RetainAutorelease: " << *Retain << "\n");

EraseInstruction(Autorelease);
203 | return true; |
204 | } |
205 | |
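/// Scan forward from \p Load within its basic block, looking for a simple
/// store back to the loaded pointer and for \p Release (in either order),
/// such that it is safe to contract the release into an objc_storeStrong at
/// the store. Returns the store on success, or nullptr if any intervening
/// instruction makes the contraction unsafe.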
206 | static StoreInst *findSafeStoreForStoreStrongContraction(LoadInst *Load, |
207 | Instruction *Release, |
208 | ProvenanceAnalysis &PA, |
209 | AAResults *AA) { |
210 | StoreInst *Store = nullptr; |
211 | bool SawRelease = false; |
212 | |
213 | // Get the location associated with Load. |
MemoryLocation Loc = MemoryLocation::get(Load);
215 | auto *LocPtr = Loc.Ptr->stripPointerCasts(); |
216 | |
217 | // Walk down to find the store and the release, which may be in either order. |
for (auto I = std::next(BasicBlock::iterator(Load)),
219 | E = Load->getParent()->end(); |
220 | I != E; ++I) { |
221 | // If we found the store we were looking for and saw the release, |
222 | // break. There is no more work to be done. |
223 | if (Store && SawRelease) |
224 | break; |
225 | |
226 | // Now we know that we have not seen either the store or the release. If I |
227 | // is the release, mark that we saw the release and continue. |
228 | Instruction *Inst = &*I; |
229 | if (Inst == Release) { |
230 | SawRelease = true; |
231 | continue; |
232 | } |
233 | |
234 | // Otherwise, we check if Inst is a "good" store. Grab the instruction class |
235 | // of Inst. |
ARCInstKind Class = GetBasicARCInstKind(Inst);
237 | |
238 | // If we have seen the store, but not the release... |
239 | if (Store) { |
// We need to make sure that it is safe to move the release from its
// current position to the store. This implies proving that any
// instruction in between Store and the Release conservatively can not use
// the RCIdentityRoot of Release. If we can prove that Inst can not use it,
// we can ignore Inst and continue...
if (!CanUse(Inst, Load, PA, Class)) {
continue;
}
248 | |
249 | // Otherwise, be conservative and return nullptr. |
250 | return nullptr; |
251 | } |
252 | |
253 | // Ok, now we know we have not seen a store yet. |
254 | |
255 | // If Inst is a retain, we don't care about it as it doesn't prevent moving |
256 | // the load to the store. |
257 | // |
258 | // TODO: This is one area where the optimization could be made more |
259 | // aggressive. |
260 | if (IsRetain(Class)) |
261 | continue; |
262 | |
263 | // See if Inst can write to our load location, if it can not, just ignore |
264 | // the instruction. |
if (!isModSet(AA->getModRefInfo(Inst, Loc)))
266 | continue; |
267 | |
Store = dyn_cast<StoreInst>(Inst);
269 | |
// If Inst can, then check if Inst is a simple store. If Inst is not a
// store, or is a store that is not simple, then something we do not
// understand is writing to this memory, implying we can not move the load
// over the write to any subsequent store that we may find.
274 | if (!Store || !Store->isSimple()) |
275 | return nullptr; |
276 | |
277 | // Then make sure that the pointer we are storing to is Ptr. If so, we |
278 | // found our Store! |
279 | if (Store->getPointerOperand()->stripPointerCasts() == LocPtr) |
280 | continue; |
281 | |
282 | // Otherwise, we have an unknown store to some other ptr that clobbers |
283 | // Loc.Ptr. Bail! |
284 | return nullptr; |
285 | } |
286 | |
287 | // If we did not find the store or did not see the release, fail. |
288 | if (!Store || !SawRelease) |
289 | return nullptr; |
290 | |
291 | // We succeeded! |
292 | return Store; |
293 | } |
294 | |
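/// Walk backwards from \p Store looking for a retain of \p New, giving up if
/// any intervening instruction other than \p Release could decrement a
/// reference count. Returns the retain on success, or nullptr on failure.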
295 | static Instruction * |
296 | findRetainForStoreStrongContraction(Value *New, StoreInst *Store, |
297 | Instruction *Release, |
298 | ProvenanceAnalysis &PA) { |
299 | // Walk up from the Store to find the retain. |
300 | BasicBlock::iterator I = Store->getIterator(); |
301 | BasicBlock::iterator Begin = Store->getParent()->begin(); |
while (I != Begin && GetBasicARCInstKind(&*I) != ARCInstKind::Retain) {
303 | Instruction *Inst = &*I; |
304 | |
305 | // It is only safe to move the retain to the store if we can prove |
306 | // conservatively that nothing besides the release can decrement reference |
307 | // counts in between the retain and the store. |
if (CanDecrementRefCount(Inst, New, PA) && Inst != Release)
309 | return nullptr; |
310 | --I; |
311 | } |
312 | Instruction *Retain = &*I; |
if (GetBasicARCInstKind(Retain) != ARCInstKind::Retain)
return nullptr;
if (GetArgRCIdentityRoot(Retain) != New)
316 | return nullptr; |
317 | return Retain; |
318 | } |
319 | |
320 | /// Attempt to merge an objc_release with a store, load, and objc_retain to form |
321 | /// an objc_storeStrong. An objc_storeStrong: |
322 | /// |
/// objc_storeStrong(i8** %old_ptr, i8* %new_value)
324 | /// |
325 | /// is equivalent to the following IR sequence: |
326 | /// |
327 | /// ; Load old value. |
328 | /// %old_value = load i8** %old_ptr (1) |
329 | /// |
330 | /// ; Increment the new value and then release the old value. This must occur |
331 | /// ; in order in case old_value releases new_value in its destructor causing |
332 | /// ; us to potentially have a dangling ptr. |
333 | /// tail call i8* @objc_retain(i8* %new_value) (2) |
334 | /// tail call void @objc_release(i8* %old_value) (3) |
335 | /// |
336 | /// ; Store the new_value into old_ptr |
337 | /// store i8* %new_value, i8** %old_ptr (4) |
338 | /// |
339 | /// The safety of this optimization is based around the following |
340 | /// considerations: |
341 | /// |
342 | /// 1. We are forming the store strong at the store. Thus to perform this |
343 | /// optimization it must be safe to move the retain, load, and release to |
344 | /// (4). |
345 | /// 2. We need to make sure that any re-orderings of (1), (2), (3), (4) are |
346 | /// safe. |
347 | void ObjCARCContract::tryToContractReleaseIntoStoreStrong( |
348 | Instruction *Release, inst_iterator &Iter, |
349 | const DenseMap<BasicBlock *, ColorVector> &BlockColors) { |
350 | // See if we are releasing something that we just loaded. |
auto *Load = dyn_cast<LoadInst>(GetArgRCIdentityRoot(Release));
352 | if (!Load || !Load->isSimple()) |
353 | return; |
354 | |
355 | // For now, require everything to be in one basic block. |
356 | BasicBlock *BB = Release->getParent(); |
357 | if (Load->getParent() != BB) |
358 | return; |
359 | |
// First scan down the BB from Load, looking for a store of Load's
// RCIdentityRoot.
362 | StoreInst *Store = |
363 | findSafeStoreForStoreStrongContraction(Load, Release, PA, AA); |
364 | // If we fail, bail. |
365 | if (!Store) |
366 | return; |
367 | |
368 | // Then find what new_value's RCIdentity Root is. |
Value *New = GetRCIdentityRoot(Store->getValueOperand());
370 | |
371 | // Then walk up the BB and look for a retain on New without any intervening |
372 | // instructions which conservatively might decrement ref counts. |
373 | Instruction *Retain = |
374 | findRetainForStoreStrongContraction(New, Store, Release, PA); |
375 | |
376 | // If we fail, bail. |
377 | if (!Retain) |
378 | return; |
379 | |
380 | Changed = true; |
381 | ++NumStoreStrongs; |
382 | |
383 | LLVM_DEBUG( |
384 | llvm::dbgs() << " Contracting retain, release into objc_storeStrong.\n" |
385 | << " Old:\n" |
386 | << " Store: " << *Store << "\n" |
387 | << " Release: " << *Release << "\n" |
388 | << " Retain: " << *Retain << "\n" |
389 | << " Load: " << *Load << "\n" ); |
390 | |
391 | Value *Args[] = {Load->getPointerOperand(), New}; |
Function *Decl = EP.get(ARCRuntimeEntryPointKind::StoreStrong);
CallInst *StoreStrong = objcarc::createCallInstWithColors(
    Decl, Args, "", Store->getIterator(), BlockColors);
395 | StoreStrong->setDoesNotThrow(); |
396 | StoreStrong->setDebugLoc(Store->getDebugLoc()); |
397 | |
398 | // We can't set the tail flag yet, because we haven't yet determined |
399 | // whether there are any escaping allocas. Remember this call, so that |
400 | // we can set the tail flag once we know it's safe. |
StoreStrongCalls.insert(StoreStrong);
402 | |
403 | LLVM_DEBUG(llvm::dbgs() << " New Store Strong: " << *StoreStrong |
404 | << "\n" ); |
405 | |
406 | if (&*Iter == Retain) ++Iter; |
407 | if (&*Iter == Store) ++Iter; |
408 | Store->eraseFromParent(); |
409 | Release->eraseFromParent(); |
EraseInstruction(Retain);
411 | if (Load->use_empty()) |
412 | Load->eraseFromParent(); |
413 | } |
414 | |
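/// Peephole-optimize the given instruction. Returns true if there is nothing
/// further to do for \p Inst in terms of undoing objc-arc-expand, so the
/// caller should simply move on to the next instruction.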
415 | bool ObjCARCContract::tryToPeepholeInstruction( |
416 | Function &F, Instruction *Inst, inst_iterator &Iter, |
417 | bool &TailOkForStoreStrongs, |
418 | const DenseMap<BasicBlock *, ColorVector> &BlockColors) { |
419 | // Only these library routines return their argument. In particular, |
420 | // objc_retainBlock does not necessarily return its argument. |
ARCInstKind Class = GetBasicARCInstKind(Inst);
422 | switch (Class) { |
423 | case ARCInstKind::FusedRetainAutorelease: |
424 | case ARCInstKind::FusedRetainAutoreleaseRV: |
425 | return false; |
426 | case ARCInstKind::Autorelease: |
427 | case ARCInstKind::AutoreleaseRV: |
return contractAutorelease(F, Inst, Class);
429 | case ARCInstKind::Retain: |
430 | // Attempt to convert retains to retainrvs if they are next to function |
431 | // calls. |
if (!optimizeRetainCall(F, Inst))
433 | return false; |
434 | // If we succeed in our optimization, fall through. |
435 | [[fallthrough]]; |
436 | case ARCInstKind::RetainRV: |
437 | case ARCInstKind::UnsafeClaimRV: { |
438 | // Return true if this is a bundled retainRV/claimRV call, which is always |
439 | // redundant with the attachedcall in the bundle, and is going to be erased |
440 | // at the end of this pass. This avoids undoing objc-arc-expand and |
441 | // replacing uses of the retainRV/claimRV call's argument with its result. |
if (BundledInsts->contains(Inst))
443 | return true; |
444 | |
445 | // If this isn't a bundled call, and the target doesn't need a special |
446 | // inline-asm marker, we're done: return now, and undo objc-arc-expand. |
447 | if (!RVInstMarker) |
448 | return false; |
449 | |
450 | // The target needs a special inline-asm marker. Insert it. |
451 | |
452 | BasicBlock::iterator BBI = Inst->getIterator(); |
453 | BasicBlock *InstParent = Inst->getParent(); |
454 | |
455 | // Step up to see if the call immediately precedes the RV call. |
456 | // If it's an invoke, we have to cross a block boundary. And we have |
457 | // to carefully dodge no-op instructions. |
458 | do { |
459 | if (BBI == InstParent->begin()) { |
460 | BasicBlock *Pred = InstParent->getSinglePredecessor(); |
461 | if (!Pred) |
462 | goto decline_rv_optimization; |
463 | BBI = Pred->getTerminator()->getIterator(); |
464 | break; |
465 | } |
466 | --BBI; |
} while (IsNoopInstruction(&*BBI));
468 | |
if (GetRCIdentityRoot(&*BBI) == GetArgRCIdentityRoot(Inst)) {
LLVM_DEBUG(dbgs() << "Adding inline asm marker for the return value "
                     "optimization.\n");
472 | Changed = true; |
InlineAsm *IA =
    InlineAsm::get(FunctionType::get(Type::getVoidTy(Inst->getContext()),
                                     /*isVarArg=*/false),
                   RVInstMarker->getString(),
                   /*Constraints=*/"", /*hasSideEffects=*/true);

objcarc::createCallInstWithColors(IA, {}, "", Inst->getIterator(),
                                  BlockColors);
481 | } |
482 | decline_rv_optimization: |
483 | return false; |
484 | } |
485 | case ARCInstKind::InitWeak: { |
486 | // objc_initWeak(p, null) => *p = null |
CallInst *CI = cast<CallInst>(Inst);
if (IsNullOrUndef(CI->getArgOperand(1))) {
Value *Null = ConstantPointerNull::get(cast<PointerType>(CI->getType()));
Changed = true;
new StoreInst(Null, CI->getArgOperand(0), CI->getIterator());

LLVM_DEBUG(dbgs() << "OBJCARCContract: Old = " << *CI << "\n"
                  << " New = " << *Null << "\n");

CI->replaceAllUsesWith(Null);
497 | CI->eraseFromParent(); |
498 | } |
499 | return true; |
500 | } |
501 | case ARCInstKind::Release: |
502 | // Try to form an objc store strong from our release. If we fail, there is |
503 | // nothing further to do below, so continue. |
tryToContractReleaseIntoStoreStrong(Inst, Iter, BlockColors);
505 | return true; |
506 | case ARCInstKind::User: |
507 | // Be conservative if the function has any alloca instructions. |
508 | // Technically we only care about escaping alloca instructions, |
509 | // but this is sufficient to handle some interesting cases. |
if (isa<AllocaInst>(Inst))
511 | TailOkForStoreStrongs = false; |
512 | return true; |
513 | case ARCInstKind::IntrinsicUser: |
514 | // Remove calls to @llvm.objc.clang.arc.use(...). |
515 | Changed = true; |
516 | Inst->eraseFromParent(); |
517 | return true; |
518 | default: |
if (auto *CI = dyn_cast<CallInst>(Inst))
520 | if (CI->getIntrinsicID() == Intrinsic::objc_clang_arc_noop_use) { |
521 | // Remove calls to @llvm.objc.clang.arc.noop.use(...). |
522 | Changed = true; |
523 | CI->eraseFromParent(); |
524 | } |
525 | return true; |
526 | } |
527 | } |
528 | |
529 | /// Should we use objc_claimAutoreleasedReturnValue? |
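///
/// objc_claimAutoreleasedReturnValue is only present in newer Objective-C
/// runtimes, hence the OS version checks below.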
530 | static bool useClaimRuntimeCall(Module &M) { |
531 | // Let the flag override our OS-based default. |
532 | if (UseObjCClaimRV != cl::BOU_UNSET) |
533 | return UseObjCClaimRV == cl::BOU_TRUE; |
534 | |
535 | Triple TT(M.getTargetTriple()); |
536 | |
537 | // On x86_64, claimARV doesn't make sense, as the marker isn't actually a nop |
538 | // there (it's needed by the calling convention). |
539 | if (!TT.isAArch64()) |
540 | return false; |
541 | |
542 | unsigned Major = TT.getOSMajorVersion(); |
543 | switch (TT.getOS()) { |
544 | default: |
545 | return false; |
546 | case Triple::IOS: |
547 | case Triple::TvOS: |
548 | return Major >= 16; |
549 | case Triple::WatchOS: |
550 | return Major >= 9; |
551 | case Triple::BridgeOS: |
552 | return Major >= 7; |
553 | case Triple::MacOSX: |
554 | return Major >= 13; |
555 | case Triple::Darwin: |
556 | return Major >= 21; |
557 | } |
560 | } |
561 | |
562 | //===----------------------------------------------------------------------===// |
563 | // Top Level Driver |
564 | //===----------------------------------------------------------------------===// |
565 | |
566 | bool ObjCARCContract::init(Module &M) { |
567 | Run = ModuleHasARC(M); |
568 | if (!Run) |
569 | return false; |
570 | |
EP.init(&M);
572 | |
573 | HasClaimRV = useClaimRuntimeCall(M); |
574 | |
575 | // Initialize RVInstMarker. |
576 | RVInstMarker = getRVInstMarker(M); |
577 | |
578 | return false; |
579 | } |
580 | |
581 | bool ObjCARCContract::run(Function &F, AAResults *A, DominatorTree *D) { |
582 | if (!Run) |
583 | return false; |
584 | |
585 | if (!EnableARCOpts) |
586 | return false; |
587 | |
588 | Changed = CFGChanged = false; |
589 | AA = A; |
590 | DT = D; |
591 | PA.setAA(A); |
592 | BundledRetainClaimRVs BRV(EP, /*ContractPass=*/true, HasClaimRV); |
593 | BundledInsts = &BRV; |
594 | |
595 | std::pair<bool, bool> R = BundledInsts->insertAfterInvokes(F, DT); |
596 | Changed |= R.first; |
597 | CFGChanged |= R.second; |
598 | |
599 | DenseMap<BasicBlock *, ColorVector> BlockColors; |
600 | if (F.hasPersonalityFn() && |
isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
602 | BlockColors = colorEHFunclets(F); |
603 | |
604 | LLVM_DEBUG(llvm::dbgs() << "**** ObjCARC Contract ****\n" ); |
605 | |
606 | // Track whether it's ok to mark objc_storeStrong calls with the "tail" |
607 | // keyword. Be conservative if the function has variadic arguments. |
// Calls to functions which "return twice" (such as setjmp) also seem
// unsafe for the "tail" argument, since they could need to return to an
// earlier stack state.
611 | bool TailOkForStoreStrongs = |
612 | !F.isVarArg() && !F.callsFunctionThatReturnsTwice(); |
613 | |
614 | // For ObjC library calls which return their argument, replace uses of the |
615 | // argument with uses of the call return value, if it dominates the use. This |
616 | // reduces register pressure. |
for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E;) {
618 | Instruction *Inst = &*I++; |
619 | |
620 | LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n" ); |
621 | |
if (auto *CI = dyn_cast<CallInst>(Inst))
if (objcarc::hasAttachedCallOpBundle(CI)) {
BundledInsts->insertRVCallWithColors(I->getIterator(), CI, BlockColors);
625 | --I; |
626 | Changed = true; |
627 | } |
628 | |
629 | // First try to peephole Inst. If there is nothing further we can do in |
630 | // terms of undoing objc-arc-expand, process the next inst. |
if (tryToPeepholeInstruction(F, Inst, I, TailOkForStoreStrongs,
632 | BlockColors)) |
633 | continue; |
634 | |
635 | // Otherwise, try to undo objc-arc-expand. |
636 | |
637 | // Don't use GetArgRCIdentityRoot because we don't want to look through bitcasts |
638 | // and such; to do the replacement, the argument must have type i8*. |
639 | |
640 | // Function for replacing uses of Arg dominated by Inst. |
641 | auto ReplaceArgUses = [Inst, this](Value *Arg) { |
642 | // If we're compiling bugpointed code, don't get in trouble. |
if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
644 | return; |
645 | |
646 | // Look through the uses of the pointer. |
647 | for (Value::use_iterator UI = Arg->use_begin(), UE = Arg->use_end(); |
648 | UI != UE; ) { |
649 | // Increment UI now, because we may unlink its element. |
650 | Use &U = *UI++; |
651 | unsigned OperandNo = U.getOperandNo(); |
652 | |
653 | // If the call's return value dominates a use of the call's argument |
654 | // value, rewrite the use to use the return value. We check for |
655 | // reachability here because an unreachable call is considered to |
656 | // trivially dominate itself, which would lead us to rewriting its |
657 | // argument in terms of its return value, which would lead to |
658 | // infinite loops in GetArgRCIdentityRoot. |
if (!DT->isReachableFromEntry(U) || !DT->dominates(Inst, U))
660 | continue; |
661 | |
662 | Changed = true; |
663 | Instruction *Replacement = Inst; |
664 | Type *UseTy = U.get()->getType(); |
if (PHINode *PHI = dyn_cast<PHINode>(U.getUser())) {
// For PHI nodes, insert the bitcast in the predecessor block.
unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
BasicBlock *IncomingBB = PHI->getIncomingBlock(ValNo);
669 | if (Replacement->getType() != UseTy) { |
670 | // A catchswitch is both a pad and a terminator, meaning a basic |
671 | // block with a catchswitch has no insertion point. Keep going up |
672 | // the dominator tree until we find a non-catchswitch. |
673 | BasicBlock *InsertBB = IncomingBB; |
while (isa<CatchSwitchInst>(InsertBB->getFirstNonPHIIt())) {
InsertBB = DT->getNode(InsertBB)->getIDom()->getBlock();
676 | } |
677 | |
assert(DT->dominates(Inst, &InsertBB->back()) &&
       "Invalid insertion point for bitcast");
Replacement = new BitCastInst(Replacement, UseTy, "",
                              InsertBB->back().getIterator());
682 | } |
683 | |
684 | // While we're here, rewrite all edges for this PHI, rather |
685 | // than just one use at a time, to minimize the number of |
686 | // bitcasts we emit. |
687 | for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) |
688 | if (PHI->getIncomingBlock(i) == IncomingBB) { |
689 | // Keep the UI iterator valid. |
690 | if (UI != UE && |
691 | &PHI->getOperandUse( |
PHINode::getOperandNumForIncomingValue(i)) == &*UI)
693 | ++UI; |
PHI->setIncomingValue(i, Replacement);
695 | } |
696 | } else { |
if (Replacement->getType() != UseTy)
Replacement =
    new BitCastInst(Replacement, UseTy, "",
                    cast<Instruction>(U.getUser())->getIterator());
701 | U.set(Replacement); |
702 | } |
703 | } |
704 | }; |
705 | |
Value *Arg = cast<CallInst>(Inst)->getArgOperand(0);
707 | |
708 | // TODO: Change this to a do-while. |
709 | for (;;) { |
710 | ReplaceArgUses(Arg); |
711 | |
712 | // If Arg is a no-op casted pointer, strip one level of casts and iterate. |
if (const BitCastInst *BI = dyn_cast<BitCastInst>(Arg))
Arg = BI->getOperand(0);
else if (isa<GEPOperator>(Arg) &&
         cast<GEPOperator>(Arg)->hasAllZeroIndices())
Arg = cast<GEPOperator>(Arg)->getPointerOperand();
else if (isa<GlobalAlias>(Arg) &&
         !cast<GlobalAlias>(Arg)->isInterposable())
Arg = cast<GlobalAlias>(Arg)->getAliasee();
721 | else { |
722 | // If Arg is a PHI node, get PHIs that are equivalent to it and replace |
723 | // their uses. |
if (PHINode *PN = dyn_cast<PHINode>(Arg)) {
725 | SmallVector<Value *, 1> PHIList; |
getEquivalentPHIs(*PN, PHIList);
727 | for (Value *PHI : PHIList) |
728 | ReplaceArgUses(PHI); |
729 | } |
730 | break; |
731 | } |
732 | } |
733 | } |
734 | |
735 | // If this function has no escaping allocas or suspicious vararg usage, |
736 | // objc_storeStrong calls can be marked with the "tail" keyword. |
737 | if (TailOkForStoreStrongs) |
738 | for (CallInst *CI : StoreStrongCalls) |
739 | CI->setTailCall(); |
740 | StoreStrongCalls.clear(); |
741 | |
742 | return Changed; |
743 | } |
744 | |
745 | //===----------------------------------------------------------------------===// |
746 | // Misc Pass Manager |
747 | //===----------------------------------------------------------------------===// |
748 | |
749 | char ObjCARCContractLegacyPass::ID = 0; |
INITIALIZE_PASS_BEGIN(ObjCARCContractLegacyPass, "objc-arc-contract",
                      "ObjC ARC contraction", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ObjCARCContractLegacyPass, "objc-arc-contract",
                    "ObjC ARC contraction", false, false)
756 | |
757 | void ObjCARCContractLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const { |
758 | AU.addRequired<AAResultsWrapperPass>(); |
759 | AU.addRequired<DominatorTreeWrapperPass>(); |
760 | AU.addPreserved<AAResultsWrapperPass>(); |
761 | AU.addPreserved<BasicAAWrapperPass>(); |
762 | AU.addPreserved<DominatorTreeWrapperPass>(); |
763 | } |
764 | |
765 | Pass *llvm::createObjCARCContractPass() { |
766 | return new ObjCARCContractLegacyPass(); |
767 | } |
768 | |
769 | bool ObjCARCContractLegacyPass::runOnFunction(Function &F) { |
770 | ObjCARCContract OCARCC; |
OCARCC.init(*F.getParent());
772 | auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); |
773 | auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); |
return OCARCC.run(F, AA, DT);
775 | } |
776 | |
777 | PreservedAnalyses ObjCARCContractPass::run(Function &F, |
778 | FunctionAnalysisManager &AM) { |
779 | ObjCARCContract OCAC; |
OCAC.init(*F.getParent());
781 | |
bool Changed = OCAC.run(F, &AM.getResult<AAManager>(F),
                        &AM.getResult<DominatorTreeAnalysis>(F));
784 | bool CFGChanged = OCAC.hasCFGChanged(); |
785 | if (Changed) { |
786 | PreservedAnalyses PA; |
787 | if (!CFGChanged) |
788 | PA.preserveSet<CFGAnalyses>(); |
789 | return PA; |
790 | } |
791 | return PreservedAnalyses::all(); |
792 | } |
793 | |