1 | //===- SpillUtils.cpp - Utilities for checking for spills ---------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "llvm/Transforms/Coroutines/SpillUtils.h" |
10 | #include "CoroInternal.h" |
11 | #include "llvm/Analysis/CFG.h" |
12 | #include "llvm/Analysis/PtrUseVisitor.h" |
13 | #include "llvm/IR/CFG.h" |
14 | #include "llvm/IR/DebugInfo.h" |
15 | #include "llvm/IR/Dominators.h" |
16 | #include "llvm/IR/InstIterator.h" |
17 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
18 | |
19 | namespace llvm { |
20 | |
21 | namespace coro { |
22 | |
23 | namespace { |
24 | |
25 | typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet; |
26 | |
27 | static bool isNonSpilledIntrinsic(Instruction &I) { |
28 | // Structural coroutine intrinsics that should not be spilled into the |
29 | // coroutine frame. |
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I);
31 | } |
32 | |
33 | /// Does control flow starting at the given block ever reach a suspend |
34 | /// instruction before reaching a block in VisitedOrFreeBBs? |
35 | static bool isSuspendReachableFrom(BasicBlock *From, |
36 | VisitedBlocksSet &VisitedOrFreeBBs) { |
37 | // Eagerly try to add this block to the visited set. If it's already |
38 | // there, stop recursing; this path doesn't reach a suspend before |
39 | // either looping or reaching a freeing block. |
  if (!VisitedOrFreeBBs.insert(From).second)
41 | return false; |
42 | |
43 | // We assume that we'll already have split suspends into their own blocks. |
  if (coro::isSuspendBlock(From))
45 | return true; |
46 | |
47 | // Recurse on the successors. |
  for (auto *Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
50 | return true; |
51 | } |
52 | |
53 | return false; |
54 | } |
55 | |
56 | /// Is the given alloca "local", i.e. bounded in lifetime to not cross a |
57 | /// suspend point? |
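/// Illustrative example (hypothetical CFG): if no suspend block is reachable
/// from the coro.alloca.alloc without first passing through a block that
/// contains a coro.alloca.free, the allocation never lives across a suspend
/// and can remain a plain stack allocation.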
58 | static bool isLocalAlloca(CoroAllocaAllocInst *AI) { |
59 | // Seed the visited set with all the basic blocks containing a free |
60 | // so that we won't pass them up. |
61 | VisitedBlocksSet VisitedOrFreeBBs; |
62 | for (auto *User : AI->users()) { |
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
65 | } |
66 | |
  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
68 | } |
69 | |
70 | /// Turn the given coro.alloca.alloc call into a dynamic allocation. |
71 | /// This happens during the all-instructions iteration, so it must not |
72 | /// delete the call. |
73 | static Instruction * |
74 | lowerNonLocalAlloca(CoroAllocaAllocInst *AI, const coro::Shape &Shape, |
75 | SmallVectorImpl<Instruction *> &DeadInsts) { |
76 | IRBuilder<> Builder(AI); |
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
78 | |
79 | for (User *U : AI->users()) { |
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
88 | } |
89 | |
90 | // Push this on last so that it gets deleted after all the others. |
  DeadInsts.push_back(AI);
92 | |
93 | // Return the new allocation value so that we can check for needed spills. |
  return cast<Instruction>(Alloc);
95 | } |
96 | |
// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI instruction in its block.
//
// Split the catchswitch off into a separate block and insert in its place:
//
//    cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as the insertion point for the spill.
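//
// For example (illustrative sketch; value names are hypothetical):
//
//   Before:
//     bb:
//       %val = phi i32 [ ... ], [ ... ]
//       %cs = catchswitch within %parent [label %handler] unwind to caller
//
//   After:
//     bb:
//       %val = phi i32 [ ... ], [ ... ]
//       %pad = cleanuppad within %parent []
//       ; spills are inserted here, before the cleanupret
//       cleanupret from %pad unwind label %bb.split
//     bb.split:
//       %cs = catchswitch within %parent [label %handler] unwind to caller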
106 | static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) { |
107 | BasicBlock *CurrentBlock = CatchSwitch->getParent(); |
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
109 | CurrentBlock->getTerminator()->eraseFromParent(); |
110 | |
  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
115 | return CleanupRet; |
116 | } |
117 | |
// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
// 1. Should this alloca be allocated on the frame instead?
// 2. Could the content of the alloca be modified prior to CoroBegin, which
//    would require copying the data from the alloca to the frame after
//    CoroBegin?
// 3. Are there any aliases created for this alloca prior to CoroBegin but
//    used after CoroBegin? If so, we will need to recreate the alias after
//    CoroBegin based off the frame.
//
// To answer question 1, we track two things:
// A. The list of all BasicBlocks that use this alloca or any alias of the
//    alloca. In the end, we check whether any two of these basic blocks
//    cross a suspension point. If so, this alloca must be put on the frame.
// B. Whether the alloca or any alias of the alloca escapes at some point,
//    either by having its address stored somewhere or by having its address
//    passed to a function call that might capture it. If it ever escapes,
//    this alloca must conservatively be put on the frame.
//
// To answer question 2, we track this through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call, or any of the memory intrinsics, we
// check whether the instruction comes before CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. std::optional is used
// to represent the case where the offset is unknown (e.g. when a PHINode
// takes in different offset values). We cannot handle unknown offsets and
// will report a fatal error. This is the remaining open issue; an ideal
// solution would likely require a significant redesign.
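//
// For example (illustrative IR sketch):
//
//   %a = alloca [16 x i8]
//   %alias = getelementptr i8, ptr %a, i64 4   ; alias made before CoroBegin
//   %hdl = call ptr @llvm.coro.begin(token %id, ptr %mem)
//   ...                                        ; suspend point
//   store i8 0, ptr %alias                     ; alias used after CoroBegin
//
// Here %alias is recorded with offset 4, so that after CoroBegin it can be
// recreated as the alloca's frame address plus 4.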
148 | |
149 | namespace { |
150 | struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> { |
151 | using Base = PtrUseVisitor<AllocaUseVisitor>; |
152 | AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT, |
153 | const coro::Shape &CoroShape, |
154 | const SuspendCrossingInfo &Checker, |
155 | bool ShouldUseLifetimeStartInfo) |
156 | : PtrUseVisitor(DL), DT(DT), CoroShape(CoroShape), Checker(Checker), |
157 | ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) { |
158 | for (AnyCoroSuspendInst *SuspendInst : CoroShape.CoroSuspends) |
      CoroSuspendBBs.insert(SuspendInst->getParent());
160 | } |
161 | |
162 | void visit(Instruction &I) { |
    Users.insert(&I);
164 | Base::visit(I); |
    // If the pointer escapes prior to CoroBegin, we have to assume it will
    // be written to before CoroBegin as well.
167 | if (PI.isEscaped() && |
        !DT.dominates(CoroShape.CoroBegin, PI.getEscapingInst())) {
169 | MayWriteBeforeCoroBegin = true; |
170 | } |
171 | } |
  // We need to provide this overload as PtrUseVisitor uses a pointer-based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }
175 | |
176 | void visitPHINode(PHINode &I) { |
177 | enqueueUsers(I); |
178 | handleAlias(I); |
179 | } |
180 | |
181 | void visitSelectInst(SelectInst &I) { |
182 | enqueueUsers(I); |
183 | handleAlias(I); |
184 | } |
185 | |
186 | void visitStoreInst(StoreInst &SI) { |
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we must assume the alloca has been written to.
    handleMayWrite(SI);
190 | |
191 | if (SI.getValueOperand() != U->get()) |
192 | return; |
193 | |
194 | // We are storing the pointer into a memory location, potentially escaping. |
195 | // As an optimization, we try to detect simple cases where it doesn't |
196 | // actually escape, for example: |
197 | // %ptr = alloca .. |
198 | // %addr = alloca .. |
199 | // store %ptr, %addr |
200 | // %x = load %addr |
201 | // .. |
    // If %addr is only used by loading from it, we can simply treat %x as
    // another alias of %ptr, and not consider %ptr escaped.
204 | auto IsSimpleStoreThenLoad = [&]() { |
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
206 | // If the memory location we are storing to is not an alloca, it |
207 | // could be an alias of some other memory locations, which is difficult |
208 | // to analyze. |
209 | if (!AI) |
210 | return false; |
211 | // StoreAliases contains aliases of the memory location stored into. |
212 | SmallVector<Instruction *, 4> StoreAliases = {AI}; |
213 | while (!StoreAliases.empty()) { |
214 | Instruction *I = StoreAliases.pop_back_val(); |
215 | for (User *U : I->users()) { |
216 | // If we are loading from the memory location, we are creating an |
217 | // alias of the original pointer. |
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
221 | continue; |
222 | } |
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (isa<LifetimeIntrinsic>(U))
            continue;
          // A BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
234 | continue; |
235 | } |
236 | return false; |
237 | } |
238 | } |
239 | |
240 | return true; |
241 | }; |
242 | |
243 | if (!IsSimpleStoreThenLoad()) |
244 | PI.setEscaped(&SI); |
245 | } |
246 | |
247 | // All mem intrinsics modify the data. |
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
249 | |
250 | void visitBitCastInst(BitCastInst &BC) { |
251 | Base::visitBitCastInst(BC); |
    handleAlias(BC);
253 | } |
254 | |
255 | void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { |
256 | Base::visitAddrSpaceCastInst(ASC); |
    handleAlias(ASC);
258 | } |
259 | |
260 | void visitGetElementPtrInst(GetElementPtrInst &GEPI) { |
261 | // The base visitor will adjust Offset accordingly. |
262 | Base::visitGetElementPtrInst(GEPI); |
    handleAlias(GEPI);
264 | } |
265 | |
266 | void visitIntrinsicInst(IntrinsicInst &II) { |
    // If the lifetime markers refer to a subrange of the original alloca,
    // ignore them to avoid misleading the analysis.
270 | if (!IsOffsetKnown || !Offset.isZero()) |
271 | return Base::visitIntrinsicInst(II); |
272 | switch (II.getIntrinsicID()) { |
273 | default: |
274 | return Base::visitIntrinsicInst(II); |
275 | case Intrinsic::lifetime_start: |
      LifetimeStarts.insert(&II);
      LifetimeStartBBs.push_back(II.getParent());
278 | break; |
279 | case Intrinsic::lifetime_end: |
      LifetimeEndBBs.insert(II.getParent());
281 | break; |
282 | } |
283 | } |
284 | |
285 | void visitCallBase(CallBase &CB) { |
286 | for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op) |
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
290 | } |
291 | |
292 | bool getShouldLiveOnFrame() const { |
293 | if (!ShouldLiveOnFrame) |
294 | ShouldLiveOnFrame = computeShouldLiveOnFrame(); |
295 | return *ShouldLiveOnFrame; |
296 | } |
297 | |
298 | bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; } |
299 | |
300 | DenseMap<Instruction *, std::optional<APInt>> getAliasesCopy() const { |
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
308 | } |
309 | |
310 | private: |
311 | const DominatorTree &DT; |
312 | const coro::Shape &CoroShape; |
313 | const SuspendCrossingInfo &Checker; |
  // All aliases of the original AllocaInst, created before CoroBegin and used
  // after CoroBegin. Each entry records the instruction and its offset into
  // the original alloca. These aliases need to be recreated after CoroBegin
  // based off the frame.
  DenseMap<Instruction *, std::optional<APInt>> AliasOffsetMap{};
318 | SmallPtrSet<Instruction *, 4> Users{}; |
319 | SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{}; |
320 | SmallVector<BasicBlock *> LifetimeStartBBs{}; |
321 | SmallPtrSet<BasicBlock *, 2> LifetimeEndBBs{}; |
322 | SmallPtrSet<const BasicBlock *, 2> CoroSuspendBBs{}; |
323 | bool MayWriteBeforeCoroBegin{false}; |
324 | bool ShouldUseLifetimeStartInfo{true}; |
325 | |
326 | mutable std::optional<bool> ShouldLiveOnFrame{}; |
327 | |
328 | bool computeShouldLiveOnFrame() const { |
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pairing of a lifetime.start intrinsic
    // and a basic block that uses the pointer to see whether they cross a
    // suspension point. The uses cover both direct and indirect uses.
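    // Illustrative example (hypothetical blocks): a lifetime.start in %bb.a
    // that can reach a coro.suspend in %bb.b without passing a lifetime.end
    // means the address is live across the suspend, so the alloca must be
    // placed on the frame.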
333 | if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) { |
334 | // If there is no explicit lifetime.end, then assume the address can |
335 | // cross suspension points. |
336 | if (LifetimeEndBBs.empty()) |
337 | return true; |
338 | |
339 | // If there is a path from a lifetime.start to a suspend without a |
340 | // corresponding lifetime.end, then the alloca's lifetime persists |
341 | // beyond that suspension point and the alloca must go on the frame. |
342 | llvm::SmallVector<BasicBlock *> Worklist(LifetimeStartBBs); |
      if (isManyPotentiallyReachableFromMany(Worklist, CoroSuspendBBs,
                                             &LifetimeEndBBs, &DT))
345 | return true; |
346 | |
      // Addresses are guaranteed to be identical after every lifetime.start,
      // so we cannot use the local stack if the address escaped and there is
      // a suspend point between lifetime markers. This also covers the case
      // of a single lifetime.start intrinsic in a loop with a suspend point.
351 | if (PI.isEscaped()) { |
352 | for (auto *A : LifetimeStarts) { |
353 | for (auto *B : LifetimeStarts) { |
          if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),
                                                        B->getParent()))
356 | return true; |
357 | } |
358 | } |
359 | } |
360 | return false; |
361 | } |
362 | // FIXME: Ideally the isEscaped check should come at the beginning. |
363 | // However there are a few loose ends that need to be fixed first before |
364 | // we can do that. We need to make sure we are not over-conservative, so |
365 | // that the data accessed in-between await_suspend and symmetric transfer |
366 | // is always put on the stack, and also data accessed after coro.end is |
367 | // always put on the stack (esp the return object). To fix that, we need |
368 | // to: |
369 | // 1) Potentially treat sret as nocapture in calls |
    // 2) Specially handle the return object and put it on the stack
    // 3) Utilize the lifetime.end intrinsic
372 | if (PI.isEscaped()) |
373 | return true; |
374 | |
375 | for (auto *U1 : Users) |
376 | for (auto *U2 : Users) |
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
378 | return true; |
379 | |
380 | return false; |
381 | } |
382 | |
383 | void handleMayWrite(const Instruction &I) { |
    if (!DT.dominates(CoroShape.CoroBegin, &I))
385 | MayWriteBeforeCoroBegin = true; |
386 | } |
387 | |
388 | bool usedAfterCoroBegin(Instruction &I) { |
389 | for (auto &U : I.uses()) |
      if (DT.dominates(CoroShape.CoroBegin, U))
391 | return true; |
392 | return false; |
393 | } |
394 | |
395 | void handleAlias(Instruction &I) { |
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(CoroShape.CoroBegin, &I) || !usedAfterCoroBegin(I))
400 | return; |
401 | |
402 | if (!IsOffsetKnown) { |
      AliasOffsetMap[&I].reset();
    } else {
      auto [Itr, Inserted] = AliasOffsetMap.try_emplace(&I, Offset);
      if (!Inserted && Itr->second && *Itr->second != Offset) {
        // If we have seen two different possible offset values for this
        // alias, reset it to unknown.
        Itr->second.reset();
410 | } |
411 | } |
412 | } |
413 | }; |
414 | } // namespace |
415 | |
416 | static void collectFrameAlloca(AllocaInst *AI, const coro::Shape &Shape, |
417 | const SuspendCrossingInfo &Checker, |
418 | SmallVectorImpl<AllocaInfo> &Allocas, |
419 | const DominatorTree &DT) { |
420 | if (Shape.CoroSuspends.empty()) |
421 | return; |
422 | |
423 | // The PromiseAlloca will be specially handled since it needs to be in a |
424 | // fixed position in the frame. |
425 | if (AI == Shape.SwitchLowering.PromiseAlloca) |
426 | return; |
427 | |
  // The __coro_gro alloca should outlive the promise, so make sure we
  // keep it outside the frame.
  if (AI->hasMetadata(LLVMContext::MD_coro_outside_frame))
431 | return; |
432 | |
  // The code that uses the lifetime.start intrinsic does not work for
  // functions with loops that never exit. Disable it for ABIs we know
  // generate such code.
436 | bool ShouldUseLifetimeStartInfo = |
437 | (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon && |
438 | Shape.ABI != coro::ABI::RetconOnce); |
439 | AllocaUseVisitor Visitor{AI->getDataLayout(), DT, Shape, Checker, |
440 | ShouldUseLifetimeStartInfo}; |
  Visitor.visitPtr(*AI);
442 | if (!Visitor.getShouldLiveOnFrame()) |
443 | return; |
  Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                       Visitor.getMayWriteBeforeCoroBegin());
446 | } |
447 | |
448 | } // namespace |
449 | |
450 | void collectSpillsFromArgs(SpillInfo &Spills, Function &F, |
451 | const SuspendCrossingInfo &Checker) { |
  // Collect the spills for arguments and other non-materializable values.
453 | for (Argument &A : F.args()) |
454 | for (User *U : A.users()) |
455 | if (Checker.isDefinitionAcrossSuspend(A, U)) |
        Spills[&A].push_back(cast<Instruction>(U));
457 | } |
458 | |
459 | void collectSpillsAndAllocasFromInsts( |
460 | SpillInfo &Spills, SmallVector<AllocaInfo, 8> &Allocas, |
461 | SmallVector<Instruction *, 4> &DeadInstructions, |
462 | SmallVector<CoroAllocaAllocInst *, 4> &LocalAllocas, Function &F, |
463 | const SuspendCrossingInfo &Checker, const DominatorTree &DT, |
464 | const coro::Shape &Shape) { |
465 | |
466 | for (Instruction &I : instructions(F)) { |
467 | // Values returned from coroutine structure intrinsics should not be part |
468 | // of the Coroutine Frame. |
469 | if (isNonSpilledIntrinsic(I) || &I == Shape.CoroBegin) |
470 | continue; |
471 | |
472 | // Handle alloca.alloc specially here. |
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
474 | // Check whether the alloca's lifetime is bounded by suspend points. |
475 | if (isLocalAlloca(AI)) { |
        LocalAllocas.push_back(AI);
477 | continue; |
478 | } |
479 | |
480 | // If not, do a quick rewrite of the alloca and then add spills of |
481 | // the rewritten value. The rewrite doesn't invalidate anything in |
482 | // Spills because the other alloca intrinsics have no other operands |
483 | // besides AI, and it doesn't invalidate the iteration because we delay |
484 | // erasing AI. |
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
486 | |
487 | for (User *U : Alloc->users()) { |
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          Spills[Alloc].push_back(cast<Instruction>(U));
490 | } |
491 | continue; |
492 | } |
493 | |
494 | // Ignore alloca.get; we process this as part of coro.alloca.alloc. |
    if (isa<CoroAllocaGetInst>(I))
496 | continue; |
497 | |
    if (auto *AI = dyn_cast<AllocaInst>(&I)) {
499 | collectFrameAlloca(AI, Shape, Checker, Allocas, DT); |
500 | continue; |
501 | } |
502 | |
503 | for (User *U : I.users()) |
504 | if (Checker.isDefinitionAcrossSuspend(I, U)) { |
505 | // We cannot spill a token. |
506 | if (I.getType()->isTokenTy()) |
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        Spills[&I].push_back(cast<Instruction>(U));
510 | } |
511 | } |
512 | } |
513 | |
514 | void collectSpillsFromDbgInfo(SpillInfo &Spills, Function &F, |
515 | const SuspendCrossingInfo &Checker) { |
  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only choose to salvage DbgValueInsts whose values are
  // already in the frame. The dbg.values for allocas are handled specially.
520 | for (auto &Iter : Spills) { |
521 | auto *V = Iter.first; |
522 | SmallVector<DbgValueInst *, 16> DVIs; |
523 | SmallVector<DbgVariableRecord *, 16> DVRs; |
    findDbgValues(DVIs, V, &DVRs);
525 | for (DbgValueInst *DVI : DVIs) |
      if (Checker.isDefinitionAcrossSuspend(*V, DVI))
        Spills[V].push_back(DVI);
528 | // Add the instructions which carry debug info that is in the frame. |
529 | for (DbgVariableRecord *DVR : DVRs) |
      if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
        Spills[V].push_back(DVR->Marker->MarkedInstr);
532 | } |
533 | } |
534 | |
535 | /// Async and Retcon{Once} conventions assume that all spill uses can be sunk |
536 | /// after the coro.begin intrinsic. |
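///
/// Illustrative sketch (hypothetical IR): given
///   %gep = getelementptr i8, ptr %buf, i64 8   ; computed before coro.begin
///   %hdl = call ptr @llvm.coro.begin(token %id, ptr %mem)
/// where %gep is spilled or feeds a spilled value, %gep is moved to just
/// after coro.begin so it is available once the frame pointer exists.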
537 | void sinkSpillUsesAfterCoroBegin(const DominatorTree &Dom, |
538 | CoroBeginInst *CoroBegin, |
539 | coro::SpillInfo &Spills, |
540 | SmallVectorImpl<coro::AllocaInfo> &Allocas) { |
541 | SmallSetVector<Instruction *, 32> ToMove; |
542 | SmallVector<Instruction *, 32> Worklist; |
543 | |
544 | // Collect all users that precede coro.begin. |
545 | auto collectUsers = [&](Value *Def) { |
546 | for (User *U : Def->users()) { |
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
550 | continue; |
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
553 | } |
554 | }; |
555 | for (auto &I : Spills) |
556 | collectUsers(I.first); |
557 | for (auto &I : Allocas) |
558 | collectUsers(I.Alloca); |
559 | |
560 | // Recursively collect users before coro.begin. |
561 | while (!Worklist.empty()) { |
562 | auto *Def = Worklist.pop_back_val(); |
563 | for (User *U : Def->users()) { |
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
569 | } |
570 | } |
571 | |
572 | // Sort by dominance. |
573 | SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end()); |
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
    return Dom.dominates(A, B);
577 | }); |
578 | |
579 | Instruction *InsertPt = CoroBegin->getNextNode(); |
580 | for (Instruction *Inst : InsertionList) |
    Inst->moveBefore(InsertPt->getIterator());
582 | } |
583 | |
584 | BasicBlock::iterator getSpillInsertionPt(const coro::Shape &Shape, Value *Def, |
585 | const DominatorTree &DT) { |
586 | BasicBlock::iterator InsertPt; |
  if (auto *Arg = dyn_cast<Argument>(Def)) {
588 | // For arguments, we will place the store instruction right after |
589 | // the coroutine frame pointer instruction, i.e. coro.begin. |
590 | InsertPt = Shape.getInsertPtAfterFramePtr(); |
591 | |
592 | // If we're spilling an Argument, make sure we clear 'captures' |
593 | // from the coroutine function. |
    Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::Captures);
  } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
596 | // Don't spill immediately after a suspend; splitting assumes |
597 | // that the suspend will be followed by a branch. |
598 | InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt(); |
599 | } else { |
    auto *I = cast<Instruction>(Def);
    if (!DT.dominates(Shape.CoroBegin, I)) {
      // If the def is not dominated by CoroBegin, then the spill should be
      // inserted immediately after the CoroFrame is computed.
      InsertPt = Shape.getInsertPtAfterFramePtr();
    } else if (auto *II = dyn_cast<InvokeInst>(I)) {
606 | // If we are spilling the result of the invoke instruction, split |
607 | // the normal edge and insert the spill in the new block. |
      auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
609 | InsertPt = NewBB->getTerminator()->getIterator(); |
    } else if (isa<PHINode>(I)) {
      // Skip over PHI nodes and EH pad instructions.
      BasicBlock *DefBlock = I->getParent();
      if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
        InsertPt = splitBeforeCatchSwitch(CSI)->getIterator();
615 | else |
616 | InsertPt = DefBlock->getFirstInsertionPt(); |
617 | } else { |
      assert(!I->isTerminator() && "unexpected terminator");
619 | // For all other values, the spill is placed immediately after |
620 | // the definition. |
621 | InsertPt = I->getNextNode()->getIterator(); |
622 | } |
623 | } |
624 | |
625 | return InsertPt; |
626 | } |
627 | |
628 | } // End namespace coro. |
629 | |
630 | } // End namespace llvm. |
631 | |