1//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This pass builds the coroutine frame and outlines resume and destroy parts
9// of the coroutine into separate functions.
10//
// We present a coroutine to LLVM as an ordinary function with suspension
12// points marked up with intrinsics. We let the optimizer party on the coroutine
13// as a single function for as long as possible. Shortly before the coroutine is
14// eligible to be inlined into its callers, we split up the coroutine into parts
15// corresponding to an initial, resume and destroy invocations of the coroutine,
16// add them to the current SCC and restart the IPO pipeline to optimize the
17// coroutine subfunctions we extracted before proceeding to the caller of the
18// coroutine.
19//===----------------------------------------------------------------------===//
20
21#include "llvm/Transforms/Coroutines/CoroSplit.h"
22#include "CoroCloner.h"
23#include "CoroInternal.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/PriorityWorklist.h"
26#include "llvm/ADT/STLExtras.h"
27#include "llvm/ADT/SmallPtrSet.h"
28#include "llvm/ADT/SmallVector.h"
29#include "llvm/ADT/StringExtras.h"
30#include "llvm/ADT/StringRef.h"
31#include "llvm/ADT/Twine.h"
32#include "llvm/Analysis/CFG.h"
33#include "llvm/Analysis/CallGraph.h"
34#include "llvm/Analysis/ConstantFolding.h"
35#include "llvm/Analysis/LazyCallGraph.h"
36#include "llvm/Analysis/OptimizationRemarkEmitter.h"
37#include "llvm/Analysis/TargetTransformInfo.h"
38#include "llvm/BinaryFormat/Dwarf.h"
39#include "llvm/IR/Argument.h"
40#include "llvm/IR/Attributes.h"
41#include "llvm/IR/BasicBlock.h"
42#include "llvm/IR/CFG.h"
43#include "llvm/IR/CallingConv.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DIBuilder.h"
46#include "llvm/IR/DataLayout.h"
47#include "llvm/IR/DebugInfo.h"
48#include "llvm/IR/DerivedTypes.h"
49#include "llvm/IR/Dominators.h"
50#include "llvm/IR/GlobalValue.h"
51#include "llvm/IR/GlobalVariable.h"
52#include "llvm/IR/InstIterator.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
55#include "llvm/IR/Instructions.h"
56#include "llvm/IR/IntrinsicInst.h"
57#include "llvm/IR/LLVMContext.h"
58#include "llvm/IR/Module.h"
59#include "llvm/IR/Type.h"
60#include "llvm/IR/Value.h"
61#include "llvm/IR/Verifier.h"
62#include "llvm/Support/Casting.h"
63#include "llvm/Support/Debug.h"
64#include "llvm/Support/PrettyStackTrace.h"
65#include "llvm/Support/raw_ostream.h"
66#include "llvm/Transforms/Coroutines/MaterializationUtils.h"
67#include "llvm/Transforms/Scalar.h"
68#include "llvm/Transforms/Utils/BasicBlockUtils.h"
69#include "llvm/Transforms/Utils/CallGraphUpdater.h"
70#include "llvm/Transforms/Utils/Cloning.h"
71#include "llvm/Transforms/Utils/Local.h"
72#include <cassert>
73#include <cstddef>
74#include <cstdint>
75#include <initializer_list>
76#include <iterator>
77
78using namespace llvm;
79
80#define DEBUG_TYPE "coro-split"
81
82// FIXME:
83// Lower the intrinisc in CoroEarly phase if coroutine frame doesn't escape
84// and it is known that other transformations, for example, sanitizers
85// won't lead to incorrect code.
86static void lowerAwaitSuspend(IRBuilder<> &Builder, CoroAwaitSuspendInst *CB,
87 coro::Shape &Shape) {
88 auto Wrapper = CB->getWrapperFunction();
89 auto Awaiter = CB->getAwaiter();
90 auto FramePtr = CB->getFrame();
91
92 Builder.SetInsertPoint(CB);
93
94 CallBase *NewCall = nullptr;
95 // await_suspend has only 2 parameters, awaiter and handle.
96 // Copy parameter attributes from the intrinsic call, but remove the last,
97 // because the last parameter now becomes the function that is being called.
98 AttributeList NewAttributes =
99 CB->getAttributes().removeParamAttributes(C&: CB->getContext(), ArgNo: 2);
100
101 if (auto Invoke = dyn_cast<InvokeInst>(Val: CB)) {
102 auto WrapperInvoke =
103 Builder.CreateInvoke(Callee: Wrapper, NormalDest: Invoke->getNormalDest(),
104 UnwindDest: Invoke->getUnwindDest(), Args: {Awaiter, FramePtr});
105
106 WrapperInvoke->setCallingConv(Invoke->getCallingConv());
107 std::copy(first: Invoke->bundle_op_info_begin(), last: Invoke->bundle_op_info_end(),
108 result: WrapperInvoke->bundle_op_info_begin());
109 WrapperInvoke->setAttributes(NewAttributes);
110 WrapperInvoke->setDebugLoc(Invoke->getDebugLoc());
111 NewCall = WrapperInvoke;
112 } else if (auto Call = dyn_cast<CallInst>(Val: CB)) {
113 auto WrapperCall = Builder.CreateCall(Callee: Wrapper, Args: {Awaiter, FramePtr});
114
115 WrapperCall->setAttributes(NewAttributes);
116 WrapperCall->setDebugLoc(Call->getDebugLoc());
117 NewCall = WrapperCall;
118 } else {
119 llvm_unreachable("Unexpected coro_await_suspend invocation method");
120 }
121
122 if (CB->getCalledFunction()->getIntrinsicID() ==
123 Intrinsic::coro_await_suspend_handle) {
124 // Follow the lowered await_suspend call above with a lowered resume call
125 // to the returned coroutine.
126 if (auto *Invoke = dyn_cast<InvokeInst>(Val: CB)) {
127 // If the await_suspend call is an invoke, we continue in the next block.
128 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstInsertionPt());
129 }
130
131 coro::LowererBase LB(*Wrapper->getParent());
132 auto *ResumeAddr = LB.makeSubFnCall(Arg: NewCall, Index: CoroSubFnInst::ResumeIndex,
133 InsertPt: &*Builder.GetInsertPoint());
134
135 LLVMContext &Ctx = Builder.getContext();
136 FunctionType *ResumeTy = FunctionType::get(
137 Result: Type::getVoidTy(C&: Ctx), Params: PointerType::getUnqual(C&: Ctx), isVarArg: false);
138 auto *ResumeCall = Builder.CreateCall(FTy: ResumeTy, Callee: ResumeAddr, Args: {NewCall});
139 ResumeCall->setCallingConv(CallingConv::Fast);
140
141 // We can't insert the 'ret' instruction and adjust the cc until the
142 // function has been split, so remember this for later.
143 Shape.SymmetricTransfers.push_back(Elt: ResumeCall);
144
145 NewCall = ResumeCall;
146 }
147
148 CB->replaceAllUsesWith(V: NewCall);
149 CB->eraseFromParent();
150}
151
152static void lowerAwaitSuspends(Function &F, coro::Shape &Shape) {
153 IRBuilder<> Builder(F.getContext());
154 for (auto *AWS : Shape.CoroAwaitSuspends)
155 lowerAwaitSuspend(Builder, CB: AWS, Shape);
156}
157
158static void maybeFreeRetconStorage(IRBuilder<> &Builder,
159 const coro::Shape &Shape, Value *FramePtr,
160 CallGraph *CG) {
161 assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce);
162 if (Shape.RetconLowering.IsFrameInlineInStorage)
163 return;
164
165 Shape.emitDealloc(Builder, Ptr: FramePtr, CG);
166}
167
168/// Replace an llvm.coro.end.async.
169/// Will inline the must tail call function call if there is one.
170/// \returns true if cleanup of the coro.end block is needed, false otherwise.
171static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
172 IRBuilder<> Builder(End);
173
174 auto *EndAsync = dyn_cast<CoroAsyncEndInst>(Val: End);
175 if (!EndAsync) {
176 Builder.CreateRetVoid();
177 return true /*needs cleanup of coro.end block*/;
178 }
179
180 auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
181 if (!MustTailCallFunc) {
182 Builder.CreateRetVoid();
183 return true /*needs cleanup of coro.end block*/;
184 }
185
186 // Move the must tail call from the predecessor block into the end block.
187 auto *CoroEndBlock = End->getParent();
188 auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
189 assert(MustTailCallFuncBlock && "Must have a single predecessor block");
190 auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
191 auto *MustTailCall = cast<CallInst>(Val: &*std::prev(x: It));
192 CoroEndBlock->splice(ToIt: End->getIterator(), FromBB: MustTailCallFuncBlock,
193 FromIt: MustTailCall->getIterator());
194
195 // Insert the return instruction.
196 Builder.SetInsertPoint(End);
197 Builder.CreateRetVoid();
198 InlineFunctionInfo FnInfo;
199
200 // Remove the rest of the block, by splitting it into an unreachable block.
201 auto *BB = End->getParent();
202 BB->splitBasicBlock(I: End);
203 BB->getTerminator()->eraseFromParent();
204
205 auto InlineRes = InlineFunction(CB&: *MustTailCall, IFI&: FnInfo);
206 assert(InlineRes.isSuccess() && "Expected inlining to succeed");
207 (void)InlineRes;
208
209 // We have cleaned up the coro.end block above.
210 return false;
211}
212
213/// Replace a non-unwind call to llvm.coro.end.
214static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
215 const coro::Shape &Shape, Value *FramePtr,
216 bool InRamp, CallGraph *CG) {
217 // Start inserting right before the coro.end.
218 IRBuilder<> Builder(End);
219
220 // Create the return instruction.
221 switch (Shape.ABI) {
222 // The cloned functions in switch-lowering always return void.
223 case coro::ABI::Switch:
224 assert(!cast<CoroEndInst>(End)->hasResults() &&
225 "switch coroutine should not return any values");
226 // coro.end doesn't immediately end the coroutine in the main function
227 // in this lowering, because we need to deallocate the coroutine.
228 if (InRamp)
229 return;
230 Builder.CreateRetVoid();
231 break;
232
233 // In async lowering this returns.
234 case coro::ABI::Async: {
235 bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
236 if (!CoroEndBlockNeedsCleanup)
237 return;
238 break;
239 }
240
241 // In unique continuation lowering, the continuations always return void.
242 // But we may have implicitly allocated storage.
243 case coro::ABI::RetconOnce: {
244 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
245 auto *CoroEnd = cast<CoroEndInst>(Val: End);
246 auto *RetTy = Shape.getResumeFunctionType()->getReturnType();
247
248 if (!CoroEnd->hasResults()) {
249 assert(RetTy->isVoidTy());
250 Builder.CreateRetVoid();
251 break;
252 }
253
254 auto *CoroResults = CoroEnd->getResults();
255 unsigned NumReturns = CoroResults->numReturns();
256
257 if (auto *RetStructTy = dyn_cast<StructType>(Val: RetTy)) {
258 assert(RetStructTy->getNumElements() == NumReturns &&
259 "numbers of returns should match resume function singature");
260 Value *ReturnValue = PoisonValue::get(T: RetStructTy);
261 unsigned Idx = 0;
262 for (Value *RetValEl : CoroResults->return_values())
263 ReturnValue = Builder.CreateInsertValue(Agg: ReturnValue, Val: RetValEl, Idxs: Idx++);
264 Builder.CreateRet(V: ReturnValue);
265 } else if (NumReturns == 0) {
266 assert(RetTy->isVoidTy());
267 Builder.CreateRetVoid();
268 } else {
269 assert(NumReturns == 1);
270 Builder.CreateRet(V: *CoroResults->retval_begin());
271 }
272 CoroResults->replaceAllUsesWith(
273 V: ConstantTokenNone::get(Context&: CoroResults->getContext()));
274 CoroResults->eraseFromParent();
275 break;
276 }
277
278 // In non-unique continuation lowering, we signal completion by returning
279 // a null continuation.
280 case coro::ABI::Retcon: {
281 assert(!cast<CoroEndInst>(End)->hasResults() &&
282 "retcon coroutine should not return any values");
283 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
284 auto RetTy = Shape.getResumeFunctionType()->getReturnType();
285 auto RetStructTy = dyn_cast<StructType>(Val: RetTy);
286 PointerType *ContinuationTy =
287 cast<PointerType>(Val: RetStructTy ? RetStructTy->getElementType(N: 0) : RetTy);
288
289 Value *ReturnValue = ConstantPointerNull::get(T: ContinuationTy);
290 if (RetStructTy) {
291 ReturnValue = Builder.CreateInsertValue(Agg: PoisonValue::get(T: RetStructTy),
292 Val: ReturnValue, Idxs: 0);
293 }
294 Builder.CreateRet(V: ReturnValue);
295 break;
296 }
297 }
298
299 // Remove the rest of the block, by splitting it into an unreachable block.
300 auto *BB = End->getParent();
301 BB->splitBasicBlock(I: End);
302 BB->getTerminator()->eraseFromParent();
303}
304
305/// Create a pointer to the switch index field in the coroutine frame.
306static Value *createSwitchIndexPtr(const coro::Shape &Shape,
307 IRBuilder<> &Builder, Value *FramePtr) {
308 auto *Offset = ConstantInt::get(Ty: Type::getInt64Ty(C&: FramePtr->getContext()),
309 V: Shape.SwitchLowering.IndexOffset);
310 return Builder.CreateInBoundsPtrAdd(Ptr: FramePtr, Offset, Name: "index.addr");
311}
312
313// Mark a coroutine as done, which implies that the coroutine is finished and
314// never gets resumed.
315//
316// In resume-switched ABI, the done state is represented by storing zero in
317// ResumeFnAddr.
318//
319// NOTE: We couldn't omit the argument `FramePtr`. It is necessary because the
320// pointer to the frame in splitted function is not stored in `Shape`.
321static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
322 Value *FramePtr) {
323 assert(
324 Shape.ABI == coro::ABI::Switch &&
325 "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
326 // Resume function pointer is always first
327 auto *NullPtr = ConstantPointerNull::get(T: Shape.getSwitchResumePointerType());
328 Builder.CreateStore(Val: NullPtr, Ptr: FramePtr);
329
330 // If the coroutine don't have unwind coro end, we could omit the store to
331 // the final suspend point since we could infer the coroutine is suspended
332 // at the final suspend point by the nullness of ResumeFnAddr.
333 // However, we can't skip it if the coroutine have unwind coro end. Since
334 // the coroutine reaches unwind coro end is considered suspended at the
335 // final suspend point (the ResumeFnAddr is null) but in fact the coroutine
336 // didn't complete yet. We need the IndexVal for the final suspend point
337 // to make the states clear.
338 if (Shape.SwitchLowering.HasUnwindCoroEnd &&
339 Shape.SwitchLowering.HasFinalSuspend) {
340 assert(cast<CoroSuspendInst>(Shape.CoroSuspends.back())->isFinal() &&
341 "The final suspend should only live in the last position of "
342 "CoroSuspends.");
343 ConstantInt *IndexVal = Shape.getIndex(Value: Shape.CoroSuspends.size() - 1);
344 Value *FinalIndex = createSwitchIndexPtr(Shape, Builder, FramePtr);
345 Builder.CreateStore(Val: IndexVal, Ptr: FinalIndex);
346 }
347}
348
349/// Replace an unwind call to llvm.coro.end.
350static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
351 Value *FramePtr, bool InRamp, CallGraph *CG) {
352 IRBuilder<> Builder(End);
353
354 switch (Shape.ABI) {
355 // In switch-lowering, this does nothing in the main function.
356 case coro::ABI::Switch: {
357 // In C++'s specification, the coroutine should be marked as done
358 // if promise.unhandled_exception() throws. The frontend will
359 // call coro.end(true) along this path.
360 //
361 // FIXME: We should refactor this once there is other language
362 // which uses Switch-Resumed style other than C++.
363 markCoroutineAsDone(Builder, Shape, FramePtr);
364 if (InRamp)
365 return;
366 break;
367 }
368 // In async lowering this does nothing.
369 case coro::ABI::Async:
370 break;
371 // In continuation-lowering, this frees the continuation storage.
372 case coro::ABI::Retcon:
373 case coro::ABI::RetconOnce:
374 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
375 break;
376 }
377
378 // If coro.end has an associated bundle, add cleanupret instruction.
379 if (auto Bundle = End->getOperandBundle(ID: LLVMContext::OB_funclet)) {
380 auto *FromPad = cast<CleanupPadInst>(Val: Bundle->Inputs[0]);
381 auto *CleanupRet = Builder.CreateCleanupRet(CleanupPad: FromPad, UnwindBB: nullptr);
382 End->getParent()->splitBasicBlock(I: End);
383 CleanupRet->getParent()->getTerminator()->eraseFromParent();
384 }
385}
386
387static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
388 Value *FramePtr, bool InRamp, CallGraph *CG) {
389 if (End->isUnwind())
390 replaceUnwindCoroEnd(End, Shape, FramePtr, InRamp, CG);
391 else
392 replaceFallthroughCoroEnd(End, Shape, FramePtr, InRamp, CG);
393 End->eraseFromParent();
394}
395
396// In the resume function, we remove the last case (when coro::Shape is built,
397// the final suspend point (if present) is always the last element of
398// CoroSuspends array) since it is an undefined behavior to resume a coroutine
399// suspended at the final suspend point.
400// In the destroy function, if it isn't possible that the ResumeFnAddr is NULL
401// and the coroutine doesn't suspend at the final suspend point actually (this
402// is possible since the coroutine is considered suspended at the final suspend
403// point if promise.unhandled_exception() exits via an exception), we can
404// remove the last case.
405void coro::BaseCloner::handleFinalSuspend() {
406 assert(Shape.ABI == coro::ABI::Switch &&
407 Shape.SwitchLowering.HasFinalSuspend);
408
409 if (isSwitchDestroyFunction() && Shape.SwitchLowering.HasUnwindCoroEnd)
410 return;
411
412 auto *Switch = cast<SwitchInst>(Val&: VMap[Shape.SwitchLowering.ResumeSwitch]);
413 auto FinalCaseIt = std::prev(x: Switch->case_end());
414 BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
415 Switch->removeCase(I: FinalCaseIt);
416 if (isSwitchDestroyFunction()) {
417 BasicBlock *OldSwitchBB = Switch->getParent();
418 auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(I: Switch, BBName: "Switch");
419 Builder.SetInsertPoint(OldSwitchBB->getTerminator());
420
421 if (NewF->isCoroOnlyDestroyWhenComplete()) {
422 // When the coroutine can only be destroyed when complete, we don't need
423 // to generate code for other cases.
424 Builder.CreateBr(Dest: ResumeBB);
425 } else {
426 // Resume function pointer is always first
427 auto *Load =
428 Builder.CreateLoad(Ty: Shape.getSwitchResumePointerType(), Ptr: NewFramePtr);
429 auto *Cond = Builder.CreateIsNull(Arg: Load);
430 Builder.CreateCondBr(Cond, True: ResumeBB, False: NewSwitchBB);
431 }
432 OldSwitchBB->getTerminator()->eraseFromParent();
433 }
434}
435
436static FunctionType *
437getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
438 auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Val: Suspend);
439 auto *StructTy = cast<StructType>(Val: AsyncSuspend->getType());
440 auto &Context = Suspend->getParent()->getParent()->getContext();
441 auto *VoidTy = Type::getVoidTy(C&: Context);
442 return FunctionType::get(Result: VoidTy, Params: StructTy->elements(), isVarArg: false);
443}
444
445static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
446 const Twine &Suffix,
447 Module::iterator InsertBefore,
448 AnyCoroSuspendInst *ActiveSuspend) {
449 Module *M = OrigF.getParent();
450 auto *FnTy = (Shape.ABI != coro::ABI::Async)
451 ? Shape.getResumeFunctionType()
452 : getFunctionTypeFromAsyncSuspend(Suspend: ActiveSuspend);
453
454 Function *NewF =
455 Function::Create(Ty: FnTy, Linkage: GlobalValue::LinkageTypes::InternalLinkage,
456 N: OrigF.getName() + Suffix);
457
458 M->getFunctionList().insert(where: InsertBefore, New: NewF);
459
460 return NewF;
461}
462
463/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
464/// arguments to the continuation function.
465///
466/// This assumes that the builder has a meaningful insertion point.
467void coro::BaseCloner::replaceRetconOrAsyncSuspendUses() {
468 assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
469 Shape.ABI == coro::ABI::Async);
470
471 auto NewS = VMap[ActiveSuspend];
472 if (NewS->use_empty())
473 return;
474
475 // Copy out all the continuation arguments after the buffer pointer into
476 // an easily-indexed data structure for convenience.
477 SmallVector<Value *, 8> Args;
478 // The async ABI includes all arguments -- including the first argument.
479 bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
480 for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(x: NewF->arg_begin()),
481 E = NewF->arg_end();
482 I != E; ++I)
483 Args.push_back(Elt: &*I);
484
485 // If the suspend returns a single scalar value, we can just do a simple
486 // replacement.
487 if (!isa<StructType>(Val: NewS->getType())) {
488 assert(Args.size() == 1);
489 NewS->replaceAllUsesWith(V: Args.front());
490 return;
491 }
492
493 // Try to peephole extracts of an aggregate return.
494 for (Use &U : llvm::make_early_inc_range(Range: NewS->uses())) {
495 auto *EVI = dyn_cast<ExtractValueInst>(Val: U.getUser());
496 if (!EVI || EVI->getNumIndices() != 1)
497 continue;
498
499 EVI->replaceAllUsesWith(V: Args[EVI->getIndices().front()]);
500 EVI->eraseFromParent();
501 }
502
503 // If we have no remaining uses, we're done.
504 if (NewS->use_empty())
505 return;
506
507 // Otherwise, we need to create an aggregate.
508 Value *Aggr = PoisonValue::get(T: NewS->getType());
509 for (auto [Idx, Arg] : llvm::enumerate(First&: Args))
510 Aggr = Builder.CreateInsertValue(Agg: Aggr, Val: Arg, Idxs: Idx);
511
512 NewS->replaceAllUsesWith(V: Aggr);
513}
514
515void coro::BaseCloner::replaceCoroSuspends() {
516 Value *SuspendResult;
517
518 switch (Shape.ABI) {
519 // In switch lowering, replace coro.suspend with the appropriate value
520 // for the type of function we're extracting.
521 // Replacing coro.suspend with (0) will result in control flow proceeding to
522 // a resume label associated with a suspend point, replacing it with (1) will
523 // result in control flow proceeding to a cleanup label associated with this
524 // suspend point.
525 case coro::ABI::Switch:
526 SuspendResult = Builder.getInt8(C: isSwitchDestroyFunction() ? 1 : 0);
527 break;
528
529 // In async lowering there are no uses of the result.
530 case coro::ABI::Async:
531 return;
532
533 // In returned-continuation lowering, the arguments from earlier
534 // continuations are theoretically arbitrary, and they should have been
535 // spilled.
536 case coro::ABI::RetconOnce:
537 case coro::ABI::Retcon:
538 return;
539 }
540
541 for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
542 // The active suspend was handled earlier.
543 if (CS == ActiveSuspend)
544 continue;
545
546 auto *MappedCS = cast<AnyCoroSuspendInst>(Val&: VMap[CS]);
547 MappedCS->replaceAllUsesWith(V: SuspendResult);
548 MappedCS->eraseFromParent();
549 }
550}
551
552void coro::BaseCloner::replaceCoroEnds() {
553 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
554 // We use a null call graph because there's no call graph node for
555 // the cloned function yet. We'll just be rebuilding that later.
556 auto *NewCE = cast<AnyCoroEndInst>(Val&: VMap[CE]);
557 replaceCoroEnd(End: NewCE, Shape, FramePtr: NewFramePtr, /*in ramp*/ InRamp: false, CG: nullptr);
558 }
559}
560
561void coro::BaseCloner::replaceCoroIsInRamp() {
562 auto &Ctx = OrigF.getContext();
563 for (auto *II : Shape.CoroIsInRampInsts) {
564 auto *NewII = cast<CoroIsInRampInst>(Val&: VMap[II]);
565 NewII->replaceAllUsesWith(V: ConstantInt::getFalse(Context&: Ctx));
566 NewII->eraseFromParent();
567 }
568}
569
570static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
571 ValueToValueMapTy *VMap) {
572 if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
573 return;
574 Value *CachedSlot = nullptr;
575 auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
576 if (CachedSlot)
577 return CachedSlot;
578
579 // Check if the function has a swifterror argument.
580 for (auto &Arg : F.args()) {
581 if (Arg.isSwiftError()) {
582 CachedSlot = &Arg;
583 return &Arg;
584 }
585 }
586
587 // Create a swifterror alloca.
588 IRBuilder<> Builder(&F.getEntryBlock(),
589 F.getEntryBlock().getFirstNonPHIOrDbg());
590 auto Alloca = Builder.CreateAlloca(Ty: ValueTy);
591 Alloca->setSwiftError(true);
592
593 CachedSlot = Alloca;
594 return Alloca;
595 };
596
597 for (CallInst *Op : Shape.SwiftErrorOps) {
598 auto MappedOp = VMap ? cast<CallInst>(Val&: (*VMap)[Op]) : Op;
599 IRBuilder<> Builder(MappedOp);
600
601 // If there are no arguments, this is a 'get' operation.
602 Value *MappedResult;
603 if (Op->arg_empty()) {
604 auto ValueTy = Op->getType();
605 auto Slot = getSwiftErrorSlot(ValueTy);
606 MappedResult = Builder.CreateLoad(Ty: ValueTy, Ptr: Slot);
607 } else {
608 assert(Op->arg_size() == 1);
609 auto Value = MappedOp->getArgOperand(i: 0);
610 auto ValueTy = Value->getType();
611 auto Slot = getSwiftErrorSlot(ValueTy);
612 Builder.CreateStore(Val: Value, Ptr: Slot);
613 MappedResult = Slot;
614 }
615
616 MappedOp->replaceAllUsesWith(V: MappedResult);
617 MappedOp->eraseFromParent();
618 }
619
620 // If we're updating the original function, we've invalidated SwiftErrorOps.
621 if (VMap == nullptr) {
622 Shape.SwiftErrorOps.clear();
623 }
624}
625
626/// Returns all debug records in F.
627static SmallVector<DbgVariableRecord *>
628collectDbgVariableRecords(Function &F) {
629 SmallVector<DbgVariableRecord *> DbgVariableRecords;
630 for (auto &I : instructions(F)) {
631 for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange()))
632 DbgVariableRecords.push_back(Elt: &DVR);
633 }
634 return DbgVariableRecords;
635}
636
637void coro::BaseCloner::replaceSwiftErrorOps() {
638 ::replaceSwiftErrorOps(F&: *NewF, Shape, VMap: &VMap);
639}
640
641void coro::BaseCloner::salvageDebugInfo() {
642 auto DbgVariableRecords = collectDbgVariableRecords(F&: *NewF);
643 SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
644
645 // Only 64-bit ABIs have a register we can refer to with the entry value.
646 bool UseEntryValue = OrigF.getParent()->getTargetTriple().isArch64Bit();
647 for (DbgVariableRecord *DVR : DbgVariableRecords)
648 coro::salvageDebugInfo(ArgToAllocaMap, DVR&: *DVR, UseEntryValue);
649
650 // Remove all salvaged dbg.declare intrinsics that became
651 // either unreachable or stale due to the CoroSplit transformation.
652 DominatorTree DomTree(*NewF);
653 auto IsUnreachableBlock = [&](BasicBlock *BB) {
654 return !isPotentiallyReachable(From: &NewF->getEntryBlock(), To: BB, ExclusionSet: nullptr,
655 DT: &DomTree);
656 };
657 auto RemoveOne = [&](DbgVariableRecord *DVI) {
658 if (IsUnreachableBlock(DVI->getParent()))
659 DVI->eraseFromParent();
660 else if (isa_and_nonnull<AllocaInst>(Val: DVI->getVariableLocationOp(OpIdx: 0))) {
661 // Count all non-debuginfo uses in reachable blocks.
662 unsigned Uses = 0;
663 for (auto *User : DVI->getVariableLocationOp(OpIdx: 0)->users())
664 if (auto *I = dyn_cast<Instruction>(Val: User))
665 if (!isa<AllocaInst>(Val: I) && !IsUnreachableBlock(I->getParent()))
666 ++Uses;
667 if (!Uses)
668 DVI->eraseFromParent();
669 }
670 };
671 for_each(Range&: DbgVariableRecords, F: RemoveOne);
672}
673
674void coro::BaseCloner::replaceEntryBlock() {
675 // In the original function, the AllocaSpillBlock is a block immediately
676 // following the allocation of the frame object which defines GEPs for
677 // all the allocas that have been moved into the frame, and it ends by
678 // branching to the original beginning of the coroutine. Make this
679 // the entry block of the cloned function.
680 auto *Entry = cast<BasicBlock>(Val&: VMap[Shape.AllocaSpillBlock]);
681 auto *OldEntry = &NewF->getEntryBlock();
682 Entry->setName("entry" + Suffix);
683 Entry->moveBefore(MovePos: OldEntry);
684 Entry->getTerminator()->eraseFromParent();
685
686 // Clear all predecessors of the new entry block. There should be
687 // exactly one predecessor, which we created when splitting out
688 // AllocaSpillBlock to begin with.
689 assert(Entry->hasOneUse());
690 auto BranchToEntry = cast<UncondBrInst>(Val: Entry->user_back());
691 Builder.SetInsertPoint(BranchToEntry);
692 Builder.CreateUnreachable();
693 BranchToEntry->eraseFromParent();
694
695 // Branch from the entry to the appropriate place.
696 Builder.SetInsertPoint(Entry);
697 switch (Shape.ABI) {
698 case coro::ABI::Switch: {
699 // In switch-lowering, we built a resume-entry block in the original
700 // function. Make the entry block branch to this.
701 auto *SwitchBB =
702 cast<BasicBlock>(Val&: VMap[Shape.SwitchLowering.ResumeEntryBlock]);
703 Builder.CreateBr(Dest: SwitchBB);
704 SwitchBB->moveAfter(MovePos: Entry);
705 break;
706 }
707 case coro::ABI::Async:
708 case coro::ABI::Retcon:
709 case coro::ABI::RetconOnce: {
710 // In continuation ABIs, we want to branch to immediately after the
711 // active suspend point. Earlier phases will have put the suspend in its
712 // own basic block, so just thread our jump directly to its successor.
713 assert((Shape.ABI == coro::ABI::Async &&
714 isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
715 ((Shape.ABI == coro::ABI::Retcon ||
716 Shape.ABI == coro::ABI::RetconOnce) &&
717 isa<CoroSuspendRetconInst>(ActiveSuspend)));
718 auto *MappedCS = cast<AnyCoroSuspendInst>(Val&: VMap[ActiveSuspend]);
719 auto Branch = cast<UncondBrInst>(Val: MappedCS->getNextNode());
720 Builder.CreateBr(Dest: Branch->getSuccessor(i: 0));
721 break;
722 }
723 }
724
725 // Any static alloca that's still being used but not reachable from the new
726 // entry needs to be moved to the new entry.
727 Function *F = OldEntry->getParent();
728 DominatorTree DT{*F};
729 for (Instruction &I : llvm::make_early_inc_range(Range: instructions(F))) {
730 auto *Alloca = dyn_cast<AllocaInst>(Val: &I);
731 if (!Alloca || I.use_empty())
732 continue;
733 if (DT.isReachableFromEntry(A: I.getParent()) ||
734 !isa<ConstantInt>(Val: Alloca->getArraySize()))
735 continue;
736 I.moveBefore(BB&: *Entry, I: Entry->getFirstInsertionPt());
737 }
738}
739
740/// Derive the value of the new frame pointer.
741Value *coro::BaseCloner::deriveNewFramePointer() {
742 // Builder should be inserting to the front of the new entry block.
743
744 switch (Shape.ABI) {
745 // In switch-lowering, the argument is the frame pointer.
746 case coro::ABI::Switch:
747 return &*NewF->arg_begin();
748 // In async-lowering, one of the arguments is an async context as determined
749 // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
750 // the resume function from the async context projection function associated
751 // with the active suspend. The frame is located as a tail to the async
752 // context header.
753 case coro::ABI::Async: {
754 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(Val: ActiveSuspend);
755 auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
756 auto *CalleeContext = NewF->getArg(i: ContextIdx);
757 auto *ProjectionFunc =
758 ActiveAsyncSuspend->getAsyncContextProjectionFunction();
759 auto DbgLoc =
760 cast<CoroSuspendAsyncInst>(Val&: VMap[ActiveSuspend])->getDebugLoc();
761 // Calling i8* (i8*)
762 auto *CallerContext = Builder.CreateCall(FTy: ProjectionFunc->getFunctionType(),
763 Callee: ProjectionFunc, Args: CalleeContext);
764 CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
765 CallerContext->setDebugLoc(DbgLoc);
766 // The frame is located after the async_context header.
767 auto &Context = Builder.getContext();
768 auto *FramePtrAddr = Builder.CreateInBoundsPtrAdd(
769 Ptr: CallerContext,
770 Offset: ConstantInt::get(Ty: Type::getInt64Ty(C&: Context),
771 V: Shape.AsyncLowering.FrameOffset),
772 Name: "async.ctx.frameptr");
773 // Inline the projection function.
774 InlineFunctionInfo InlineInfo;
775 auto InlineRes = InlineFunction(CB&: *CallerContext, IFI&: InlineInfo);
776 assert(InlineRes.isSuccess());
777 (void)InlineRes;
778 return FramePtrAddr;
779 }
780 // In continuation-lowering, the argument is the opaque storage.
781 case coro::ABI::Retcon:
782 case coro::ABI::RetconOnce: {
783 Argument *NewStorage = &*NewF->arg_begin();
784 auto FramePtrTy = PointerType::getUnqual(C&: Shape.FramePtr->getContext());
785
786 // If the storage is inline, just bitcast to the storage to the frame type.
787 if (Shape.RetconLowering.IsFrameInlineInStorage)
788 return NewStorage;
789
790 // Otherwise, load the real frame from the opaque storage.
791 return Builder.CreateLoad(Ty: FramePtrTy, Ptr: NewStorage);
792 }
793 }
794 llvm_unreachable("bad ABI");
795}
796
/// Adjust the scope line of the funclet to the first line number after the
/// suspend point. This avoids a jump in the line table from the function
/// declaration (where prologue instructions are attributed to) to the suspend
/// point.
/// Only adjust the scope line when the files are the same.
/// If no candidate line number is found, fallback to the line of ActiveSuspend.
static void updateScopeLine(Instruction *ActiveSuspend,
                            DISubprogram &SPToUpdate) {
  if (!ActiveSuspend)
    return;

  // No subsequent instruction -> fallback to the location of ActiveSuspend.
  if (!ActiveSuspend->getNextNode()) {
    if (auto DL = ActiveSuspend->getDebugLoc())
      if (SPToUpdate.getFile() == DL->getFile())
        SPToUpdate.setScopeLine(DL->getLine());
    return;
  }

  BasicBlock::iterator Successor = ActiveSuspend->getNextNode()->getIterator();
  // Corosplit splits the BB around ActiveSuspend, so the meaningful
  // instructions are not in the same BB.
  // FIXME: remove this hardcoded number of tries.
  // Hop over the unconditional branches introduced by the splitting so the
  // scan below starts at the first real instruction after the suspend.
  for (unsigned Repeat = 0; Repeat < 2; Repeat++) {
    auto *Branch = dyn_cast_or_null<UncondBrInst>(Val&: Successor);
    if (!Branch)
      break;
    Successor = Branch->getSuccessor()->getFirstNonPHIOrDbg();
  }

  // Find the first successor of ActiveSuspend with a non-zero line location.
  // If that matches the file of ActiveSuspend, use it.
  BasicBlock *PBB = Successor->getParent();
  for (; Successor != PBB->end(); Successor = std::next(x: Successor)) {
    Successor = skipDebugIntrinsics(It: Successor);
    auto DL = Successor->getDebugLoc();
    // Line 0 marks compiler-generated code; keep scanning for a real line.
    if (!DL || DL.getLine() == 0)
      continue;

    if (SPToUpdate.getFile() == DL->getFile()) {
      SPToUpdate.setScopeLine(DL.getLine());
      return;
    }

    // First real location is in a different file: stop and use the fallback.
    break;
  }

  // If the search above failed, fallback to the location of ActiveSuspend.
  if (auto DL = ActiveSuspend->getDebugLoc())
    if (SPToUpdate.getFile() == DL->getFile())
      SPToUpdate.setScopeLine(DL->getLine());
}
849
850static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
851 unsigned ParamIndex, uint64_t Size,
852 Align Alignment, bool NoAlias) {
853 AttrBuilder ParamAttrs(Context);
854 ParamAttrs.addAttribute(Val: Attribute::NonNull);
855 ParamAttrs.addAttribute(Val: Attribute::NoUndef);
856
857 if (NoAlias)
858 ParamAttrs.addAttribute(Val: Attribute::NoAlias);
859
860 ParamAttrs.addAlignmentAttr(Align: Alignment);
861 ParamAttrs.addDereferenceableAttr(Bytes: Size);
862 Attrs = Attrs.addParamAttributes(C&: Context, ArgNo: ParamIndex, B: ParamAttrs);
863}
864
865static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
866 unsigned ParamIndex) {
867 AttrBuilder ParamAttrs(Context);
868 ParamAttrs.addAttribute(Val: Attribute::SwiftAsync);
869 Attrs = Attrs.addParamAttributes(C&: Context, ArgNo: ParamIndex, B: ParamAttrs);
870}
871
872static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
873 unsigned ParamIndex) {
874 AttrBuilder ParamAttrs(Context);
875 ParamAttrs.addAttribute(Val: Attribute::SwiftSelf);
876 Attrs = Attrs.addParamAttributes(C&: Context, ArgNo: ParamIndex, B: ParamAttrs);
877}
878
/// Clone the body of the original function into a resume function of
/// some sort.
void coro::BaseCloner::create() {
  assert(NewF);

  // Replace all args with dummy instructions. If an argument is the old frame
  // pointer, the dummy will be replaced by the new frame pointer once it is
  // computed below. Uses of all other arguments should have already been
  // rewritten by buildCoroutineFrame() to use loads/stores on the coroutine
  // frame.
  SmallVector<Instruction *> DummyArgs;
  for (Argument &A : OrigF.args()) {
    DummyArgs.push_back(Elt: new FreezeInst(PoisonValue::get(T: A.getType())));
    VMap[&A] = DummyArgs.back();
  }

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewFunc: NewF, OldFunc: &OrigF, VMap,
                    Changes: CloneFunctionChangeType::LocalChangesOnly, Returns);

  auto &Context = NewF->getContext();

  // If the clone carries debug info, it received a distinct DISubprogram from
  // CloneFunctionInto; fix up its scope line and linkage name.
  if (DISubprogram *SP = NewF->getSubprogram()) {
    assert(SP != OrigF.getSubprogram() && SP->isDistinct());
    updateScopeLine(ActiveSuspend, SPToUpdate&: *SP);

    // Update the linkage name and the function name to reflect the modified
    // name.
    MDString *NewLinkageName = MDString::get(Context, Str: NewF->getName());
    SP->replaceLinkageName(LN: NewLinkageName);
    if (DISubprogram *Decl = SP->getDeclaration()) {
      TempDISubprogram NewDecl = Decl->clone();
      NewDecl->replaceLinkageName(LN: NewLinkageName);
      SP->replaceDeclaration(Decl: MDNode::replaceWithUniqued(N: std::move(NewDecl)));
    }
  }

  // Restore the attributes saved before cloning.
  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);
  // The function sanitizer metadata needs to match the signature of the
  // function it is being attached to. However this does not hold for split
  // functions here. Thus remove the metadata for split functions.
  if (Shape.ABI == coro::ABI::Switch &&
      NewF->hasMetadata(KindID: LLVMContext::MD_func_sanitize))
    NewF->eraseMetadata(KindID: LLVMContext::MD_func_sanitize);

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function. This should include optimization settings and so on.
    NewAttrs = NewAttrs.addFnAttributes(
        C&: Context, B: AttrBuilder(Context, OrigAttrs.getFnAttrs()));

    addFramePointerAttrs(Attrs&: NewAttrs, Context, ParamIndex: 0, Size: Shape.FrameSize,
                         Alignment: Shape.FrameAlign, /*NoAlias=*/false);
    break;
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(Val: ActiveSuspend);
    if (OrigF.hasParamAttribute(ArgNo: Shape.AsyncLowering.ContextArgNo,
                                Kind: Attribute::SwiftAsync)) {
      // The storage argument index packs two indices: low byte is the async
      // context argument, high bits the swiftself argument (if any).
      uint32_t ArgAttributeIndices =
          ActiveAsyncSuspend->getStorageArgumentIndex();
      auto ContextArgIndex = ArgAttributeIndices & 0xff;
      addAsyncContextAttrs(Attrs&: NewAttrs, Context, ParamIndex: ContextArgIndex);

      // `swiftasync` must preceed `swiftself` so 0 is not a valid index for
      // `swiftself`.
      auto SwiftSelfIndex = ArgAttributeIndices >> 8;
      if (SwiftSelfIndex)
        addSwiftSelfAttrs(Attrs&: NewAttrs, Context, ParamIndex: SwiftSelfIndex);
    }

    // Transfer the original function's attributes.
    auto FnAttrs = OrigF.getAttributes().getFnAttrs();
    NewAttrs = NewAttrs.addFnAttributes(C&: Context, B: AttrBuilder(Context, FnAttrs));
    break;
  }
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    /// FIXME: Is it really good to add the NoAlias attribute?
    addFramePointerAttrs(Attrs&: NewAttrs, Context, ParamIndex: 0,
                         Size: Shape.getRetconCoroId()->getStorageSize(),
                         Alignment: Shape.getRetconCoroId()->getStorageAlignment(),
                         /*NoAlias=*/true);

    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless. Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(I: Return);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail call functions at all suspend points
  // followed by a return.
  // Don't change returns to unreachable because that will trip up the verifier.
  // These returns should be unreachable from the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  // Turn symmetric transfers into musttail calls.
  for (CallInst *ResumeCall : Shape.SymmetricTransfers) {
    // Look up the cloned counterpart of the recorded call.
    ResumeCall = cast<CallInst>(Val&: VMap[ResumeCall]);
    if (TTI.supportsTailCallFor(CB: ResumeCall)) {
      // FIXME: Could we support symmetric transfer effectively without
      // musttail?
      ResumeCall->setTailCallKind(CallInst::TCK_MustTail);
    }

    // Put a 'ret void' after the call, and split any remaining instructions to
    // an unreachable block.
    BasicBlock *BB = ResumeCall->getParent();
    BB->splitBasicBlock(I: ResumeCall->getNextNode());
    Builder.SetInsertPoint(BB->getTerminator());
    Builder.CreateRetVoid();
    BB->getTerminator()->eraseFromParent();
  }

  // Compute the new frame pointer at the top of the entry block so that the
  // remappings below can substitute it for the old one.
  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(V: OldFramePtr);
  OldFramePtr->replaceAllUsesWith(V: NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      V: NewFramePtr, DestTy: PointerType::getUnqual(C&: Builder.getContext()), Name: "vFrame");
  Value *OldVFrame = cast<Value>(Val&: VMap[Shape.CoroBegin]);
  if (OldVFrame != NewVFrame)
    OldVFrame->replaceAllUsesWith(V: NewVFrame);

  // All uses of the arguments should have been resolved by this point,
  // so we can safely remove the dummy values.
  for (Instruction *DummyArg : DummyArgs) {
    DummyArg->replaceAllUsesWith(V: PoisonValue::get(T: DummyArg->getType()));
    DummyArg->deleteValue();
  }

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling as it is not done via switch (allows to
    // remove final case from the switch, since it is undefined behavior to
    // resume the coroutine suspended at the final suspend point.
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  replaceCoroIsInRamp();

  // Salvage debug info that points into the coroutine frame.
  salvageDebugInfo();
}
1094
1095void coro::SwitchCloner::create() {
1096 // Create a new function matching the original type
1097 NewF = createCloneDeclaration(OrigF, Shape, Suffix, InsertBefore: OrigF.getParent()->end(),
1098 ActiveSuspend);
1099
1100 // Clone the function
1101 coro::BaseCloner::create();
1102
1103 // Replacing coro.free with 'null' in cleanup to suppress deallocation code.
1104 if (FKind == coro::CloneKind::SwitchCleanup)
1105 coro::replaceCoroFree(CoroId: cast<CoroIdInst>(Val&: VMap[Shape.CoroBegin->getId()]),
1106 /*Elide=*/FKind == coro::CloneKind::SwitchCleanup);
1107}
1108
1109static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
1110 assert(Shape.ABI == coro::ABI::Async);
1111
1112 auto *FuncPtrStruct = cast<ConstantStruct>(
1113 Val: Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
1114 auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(i_nocapture: 0);
1115 auto *OrigContextSize = FuncPtrStruct->getOperand(i_nocapture: 1);
1116 auto *NewContextSize = ConstantInt::get(Ty: OrigContextSize->getType(),
1117 V: Shape.AsyncLowering.ContextSize);
1118 auto *NewFuncPtrStruct = ConstantStruct::get(
1119 T: FuncPtrStruct->getType(), Vs: OrigRelativeFunOffset, Vs: NewContextSize);
1120
1121 Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1122}
1123
1124static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
1125 if (Shape.ABI == coro::ABI::Async)
1126 updateAsyncFuncPointerContextSize(Shape);
1127
1128 for (CoroAlignInst *CA : Shape.CoroAligns) {
1129 CA->replaceAllUsesWith(
1130 V: ConstantInt::get(Ty: CA->getType(), V: Shape.FrameAlign.value()));
1131 CA->eraseFromParent();
1132 }
1133
1134 if (Shape.CoroSizes.empty())
1135 return;
1136
1137 // In the same function all coro.sizes should have the same result type.
1138 auto *SizeIntrin = Shape.CoroSizes.back();
1139 auto *SizeConstant = ConstantInt::get(Ty: SizeIntrin->getType(),
1140 V: TypeSize::getFixed(ExactSize: Shape.FrameSize));
1141
1142 for (CoroSizeInst *CS : Shape.CoroSizes) {
1143 CS->replaceAllUsesWith(V: SizeConstant);
1144 CS->eraseFromParent();
1145 }
1146}
1147
1148static void postSplitCleanup(Function &F) {
1149 removeUnreachableBlocks(F);
1150
1151#ifndef NDEBUG
1152 // For now, we do a mandatory verification step because we don't
1153 // entirely trust this pass. Note that we don't want to add a verifier
1154 // pass to FPM below because it will also verify all the global data.
1155 if (verifyFunction(F, &errs()))
1156 report_fatal_error("Broken function");
1157#endif
1158}
1159
// Coroutine has no suspend points. Remove heap allocation for the coroutine
// frame if possible.
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = Shape.getSwitchCoroId();
    auto *AllocInst = SwitchId->getCoroAlloc();
    // If coro.alloc exists we elide the heap allocation entirely, so
    // coro.free becomes dead as well (Elide == true).
    coro::replaceCoroFree(CoroId: SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      IRBuilder<> Builder(AllocInst);
      // Create an alloca for a byte array of the frame size
      auto *FrameTy = ArrayType::get(ElementType: Type::getInt8Ty(C&: Builder.getContext()),
                                     NumElements: Shape.FrameSize);
      auto *Frame = Builder.CreateAlloca(
          Ty: FrameTy, ArraySize: nullptr, Name: AllocInst->getFunction()->getName() + ".Frame");
      Frame->setAlignment(Shape.FrameAlign);
      // coro.alloc answering 'false' steers the ramp away from the heap
      // allocation path; the stack frame replaces coro.begin's result.
      AllocInst->replaceAllUsesWith(V: Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(V: Frame);
    } else {
      // No coro.alloc: the caller-provided memory is used directly.
      CoroBegin->replaceAllUsesWith(V: CoroBegin->getMem());
    }

    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // These ABIs never materialize a frame pointer here; any remaining use
    // of coro.begin is meaningless without suspend points.
    CoroBegin->replaceAllUsesWith(V: PoisonValue::get(T: CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
  Shape.CoroBegin = nullptr;
}
1196
1197// SimplifySuspendPoint needs to check that there is no calls between
1198// coro_save and coro_suspend, since any of the calls may potentially resume
1199// the coroutine and if that is the case we cannot eliminate the suspend point.
1200static bool hasCallsInBlockBetween(iterator_range<BasicBlock::iterator> R) {
1201 for (Instruction &I : R) {
1202 // Assume that no intrinsic can resume the coroutine.
1203 if (isa<IntrinsicInst>(Val: I))
1204 continue;
1205
1206 if (isa<CallBase>(Val: I))
1207 return true;
1208 }
1209 return false;
1210}
1211
1212static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1213 SmallPtrSet<BasicBlock *, 8> Set;
1214 SmallVector<BasicBlock *, 8> Worklist;
1215
1216 Set.insert(Ptr: SaveBB);
1217 Worklist.push_back(Elt: ResDesBB);
1218
1219 // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
1220 // returns a token consumed by suspend instruction, all blocks in between
1221 // will have to eventually hit SaveBB when going backwards from ResDesBB.
1222 while (!Worklist.empty()) {
1223 auto *BB = Worklist.pop_back_val();
1224 Set.insert(Ptr: BB);
1225 for (auto *Pred : predecessors(BB))
1226 if (!Set.contains(Ptr: Pred))
1227 Worklist.push_back(Elt: Pred);
1228 }
1229
1230 // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1231 Set.erase(Ptr: SaveBB);
1232 Set.erase(Ptr: ResDesBB);
1233
1234 for (auto *BB : Set)
1235 if (hasCallsInBlockBetween(R: {BB->getFirstNonPHIIt(), BB->end()}))
1236 return true;
1237
1238 return false;
1239}
1240
1241static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1242 auto *SaveBB = Save->getParent();
1243 auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1244 BasicBlock::iterator SaveIt = Save->getIterator();
1245 BasicBlock::iterator ResumeOrDestroyIt = ResumeOrDestroy->getIterator();
1246
1247 if (SaveBB == ResumeOrDestroyBB)
1248 return hasCallsInBlockBetween(R: {std::next(x: SaveIt), ResumeOrDestroyIt});
1249
1250 // Any calls from Save to the end of the block?
1251 if (hasCallsInBlockBetween(R: {std::next(x: SaveIt), SaveBB->end()}))
1252 return true;
1253
1254 // Any calls from begging of the block up to ResumeOrDestroy?
1255 if (hasCallsInBlockBetween(
1256 R: {ResumeOrDestroyBB->getFirstNonPHIIt(), ResumeOrDestroyIt}))
1257 return true;
1258
1259 // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1260 if (hasCallsInBlocksBetween(SaveBB, ResDesBB: ResumeOrDestroyBB))
1261 return true;
1262
1263 return false;
1264}
1265
// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with nornal control flow.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  // The candidate resume/destroy call is either the instruction immediately
  // before the suspend, or the terminator of the single predecessor block.
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Val: Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Val: Callee);
  if (!SubFn)
    return false;

  // Does not refer to the current coroutine, we cannot do anything with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potenitally resume the
  // coroutine rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, ResumeOrDestroy: CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(V: SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  // An invoke needs its normal-destination edge preserved when the call is
  // removed, so insert an explicit branch first.
  if (auto *Invoke = dyn_cast<InvokeInst>(Val: CB)) {
    UncondBrInst::Create(IfTrue: Invoke->getNormalDest(), InsertBefore: Invoke->getIterator());
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If no more users remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(Val: CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}
1326
// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;

  // Compact S in place: simplified suspends are swapped to the tail and the
  // vector is truncated afterwards, avoiding O(n^2) element removal.
  size_t ChangedFinalIndex = std::numeric_limits<size_t>::max();
  while (true) {
    auto SI = cast<CoroSuspendInst>(Val: S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(Suspend: SI, CoroBegin: Shape.CoroBegin)) {
      if (--N == I)
        break;

      std::swap(a&: S[I], b&: S[N]);

      // The swap may move the final suspend out of the last slot; remember
      // its position so we can restore the invariant below.
      if (cast<CoroSuspendInst>(Val: S[I])->isFinal()) {
        assert(Shape.SwitchLowering.HasFinalSuspend);
        ChangedFinalIndex = I;
      }

      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);

  // Maintain final.suspend in case final suspend was swapped.
  // Due to we requrie the final suspend to be the last element of CoroSuspends.
  if (ChangedFinalIndex < N) {
    assert(cast<CoroSuspendInst>(S[ChangedFinalIndex])->isFinal());
    std::swap(a&: S[ChangedFinalIndex], b&: S.back());
  }
}
1368
1369namespace {
1370
1371struct SwitchCoroutineSplitter {
  // Split a switch-ABI coroutine into resume, destroy, and cleanup clones,
  // wire their addresses into the coroutine frame, and record them in the
  // coro.begin info operand for CoroElide.
  static void split(Function &F, coro::Shape &Shape,
                    SmallVectorImpl<Function *> &Clones,
                    TargetTransformInfo &TTI) {
    assert(Shape.ABI == coro::ABI::Switch);

    // Create a resume clone by cloning the body of the original function,
    // setting new entry block and replacing coro.suspend an appropriate value
    // to force resume or cleanup pass for every suspend point.
    createResumeEntryBlock(F, Shape);
    auto *ResumeClone = coro::SwitchCloner::createClone(
        OrigF&: F, Suffix: ".resume", Shape, FKind: coro::CloneKind::SwitchResume, TTI);
    auto *DestroyClone = coro::SwitchCloner::createClone(
        OrigF&: F, Suffix: ".destroy", Shape, FKind: coro::CloneKind::SwitchUnwind, TTI);
    auto *CleanupClone = coro::SwitchCloner::createClone(
        OrigF&: F, Suffix: ".cleanup", Shape, FKind: coro::CloneKind::SwitchCleanup, TTI);

    postSplitCleanup(F&: *ResumeClone);
    postSplitCleanup(F&: *DestroyClone);
    postSplitCleanup(F&: *CleanupClone);

    // Store addresses resume/destroy/cleanup functions in the coroutine frame.
    updateCoroFrame(Shape, ResumeFn: ResumeClone, DestroyFn: DestroyClone, CleanupFn: CleanupClone);

    assert(Clones.empty());
    Clones.push_back(Elt: ResumeClone);
    Clones.push_back(Elt: DestroyClone);
    Clones.push_back(Elt: CleanupClone);

    // Create a constant array referring to resume/destroy/clone functions
    // pointed by the last argument of @llvm.coro.info, so that CoroElide pass
    // can determined correct function to call.
    setCoroInfo(F, Shape, Fns: Clones);
  }
1405
  // Create a variant of ramp function that does not perform heap allocation
  // for a switch ABI coroutine.
  //
  // The newly split `.noalloc` ramp function has the following differences:
  // - Has one additional frame pointer parameter in lieu of dynamic
  //   allocation.
  // - Suppressed allocations by replacing coro.alloc and coro.free.
  static Function *createNoAllocVariant(Function &F, coro::Shape &Shape,
                                        SmallVectorImpl<Function *> &Clones) {
    assert(Shape.ABI == coro::ABI::Switch);
    auto *OrigFnTy = F.getFunctionType();
    auto OldParams = OrigFnTy->params();

    // New signature = original parameters + a trailing frame pointer.
    SmallVector<Type *> NewParams;
    NewParams.reserve(N: OldParams.size() + 1);
    NewParams.append(in_start: OldParams.begin(), in_end: OldParams.end());
    NewParams.push_back(Elt: PointerType::getUnqual(C&: Shape.FramePtr->getContext()));

    auto *NewFnTy = FunctionType::get(Result: OrigFnTy->getReturnType(), Params: NewParams,
                                      isVarArg: OrigFnTy->isVarArg());
    Function *NoAllocF =
        Function::Create(Ty: NewFnTy, Linkage: F.getLinkage(), N: F.getName() + ".noalloc");

    // Map each original argument to its positional counterpart in the clone.
    ValueToValueMapTy VMap;
    unsigned int Idx = 0;
    for (const auto &I : F.args()) {
      VMap[&I] = NoAllocF->getArg(i: Idx++);
    }
    // We just appended the frame pointer as the last argument of the new
    // function.
    auto FrameIdx = NoAllocF->arg_size() - 1;
    SmallVector<ReturnInst *, 4> Returns;
    CloneFunctionInto(NewFunc: NoAllocF, OldFunc: &F, VMap,
                      Changes: CloneFunctionChangeType::LocalChangesOnly, Returns);

    if (Shape.CoroBegin) {
      // Suppress allocation/deallocation in the clone and use the passed-in
      // frame directly in place of coro.begin's result.
      auto *NewCoroBegin =
          cast_if_present<CoroBeginInst>(Val&: VMap[Shape.CoroBegin]);
      auto *NewCoroId = cast<CoroIdInst>(Val: NewCoroBegin->getId());
      coro::replaceCoroFree(CoroId: NewCoroId, /*Elide=*/true);
      coro::suppressCoroAllocs(CoroId: NewCoroId);
      NewCoroBegin->replaceAllUsesWith(V: NoAllocF->getArg(i: FrameIdx));
      NewCoroBegin->eraseFromParent();
    }

    Module *M = F.getParent();
    M->getFunctionList().insert(where: M->end(), New: NoAllocF);

    removeUnreachableBlocks(F&: *NoAllocF);
    auto NewAttrs = NoAllocF->getAttributes();
    // When we elide allocation, we read these attributes to determine the
    // frame size and alignment.
    addFramePointerAttrs(Attrs&: NewAttrs, Context&: NoAllocF->getContext(), ParamIndex: FrameIdx,
                         Size: Shape.FrameSize, Alignment: Shape.FrameAlign,
                         /*NoAlias=*/false);

    NoAllocF->setAttributes(NewAttrs);

    Clones.push_back(Elt: NoAllocF);
    // Reset the original function's coro info, make the new noalloc variant
    // connected to the original ramp function.
    setCoroInfo(F, Shape, Fns: Clones);
    // After copying, set the linkage to internal linkage. Original function
    // may have different linkage, but optimization dependent on this function
    // generally relies on LTO.
    NoAllocF->setLinkage(llvm::GlobalValue::InternalLinkage);
    return NoAllocF;
  }
1474
1475private:
  // Create an entry block for a resume function with a switch that will jump to
  // suspend points.
  static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
    LLVMContext &C = F.getContext();

    DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
    DISubprogram *DIS = F.getSubprogram();
    // If there is no DISubprogram for F, it implies the function is compiled
    // without debug info. So we also don't generate debug info for the
    // suspension points.
    bool AddDebugLabels = DIS && DIS->getUnit() &&
                          (DIS->getUnit()->getEmissionKind() ==
                           DICompileUnit::DebugEmissionKind::FullDebug);

    // resume.entry:
    //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32
    //  0, i32 2 % index = load i32, i32* %index.addr switch i32 %index, label
    //  %unreachable [
    //    i32 0, label %resume.0
    //    i32 1, label %resume.1
    //    ...
    //  ]

    auto *NewEntry = BasicBlock::Create(Context&: C, Name: "resume.entry", Parent: &F);
    auto *UnreachBB = BasicBlock::Create(Context&: C, Name: "unreachable", Parent: &F);

    // Load the suspend index out of the frame and dispatch on it.
    IRBuilder<> Builder(NewEntry);
    auto *FramePtr = Shape.FramePtr;
    Value *GepIndex = createSwitchIndexPtr(Shape, Builder, FramePtr);
    auto *Index = Builder.CreateLoad(Ty: Shape.getIndexType(), Ptr: GepIndex, Name: "index");
    auto *Switch =
        Builder.CreateSwitch(V: Index, Dest: UnreachBB, NumCases: Shape.CoroSuspends.size());
    Shape.SwitchLowering.ResumeSwitch = Switch;

    // Split all coro.suspend calls
    size_t SuspendIndex = 0;
    for (auto *AnyS : Shape.CoroSuspends) {
      auto *S = cast<CoroSuspendInst>(Val: AnyS);
      ConstantInt *IndexVal = Shape.getIndex(Value: SuspendIndex);

      // Replace CoroSave with a store to Index:
      //    %index.addr = getelementptr %f.frame... (index field number)
      //    store i32 %IndexVal, i32* %index.addr1
      auto *Save = S->getCoroSave();
      Builder.SetInsertPoint(Save);
      if (S->isFinal()) {
        // The coroutine should be marked done if it reaches the final suspend
        // point.
        markCoroutineAsDone(Builder, Shape, FramePtr);
      } else {
        Value *GepIndex = createSwitchIndexPtr(Shape, Builder, FramePtr);
        Builder.CreateStore(Val: IndexVal, Ptr: GepIndex);
      }

      Save->replaceAllUsesWith(V: ConstantTokenNone::get(Context&: C));
      Save->eraseFromParent();

      // Split block before and after coro.suspend and add a jump from an entry
      // switch:
      //
      //  whateverBB:
      //    whatever
      //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
      //    switch i8 %0, label %suspend[i8 0, label %resume
      //                                 i8 1, label %cleanup]
      // becomes:
      //
      //  whateverBB:
      //    whatever
      //    br label %resume.0.landing
      //
      //  resume.0: ; <--- jump from the switch in the resume.entry
      //    #dbg_label(...) ; <--- artificial label for debuggers
      //    %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
      //    br label %resume.0.landing
      //
      //  resume.0.landing:
      //    %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
      //    switch i8 % 1, label %suspend [i8 0, label %resume
      //                                   i8 1, label %cleanup]

      auto *SuspendBB = S->getParent();
      auto *ResumeBB =
          SuspendBB->splitBasicBlock(I: S, BBName: "resume." + Twine(SuspendIndex));
      auto *LandingBB = ResumeBB->splitBasicBlock(
          I: S->getNextNode(), BBName: ResumeBB->getName() + Twine(".landing"));
      Switch->addCase(OnVal: IndexVal, Dest: ResumeBB);

      // The fall-through path bypasses the suspend; the phi below merges the
      // suspend's result (-1 on fall-through).
      cast<UncondBrInst>(Val: SuspendBB->getTerminator())->setSuccessor(LandingBB);
      auto *PN = PHINode::Create(Ty: Builder.getInt8Ty(), NumReservedValues: 2, NameStr: "");
      PN->insertBefore(InsertPos: LandingBB->begin());
      S->replaceAllUsesWith(V: PN);
      PN->addIncoming(V: Builder.getInt8(C: -1), BB: SuspendBB);
      PN->addIncoming(V: S, BB: ResumeBB);

      if (AddDebugLabels) {
        if (DebugLoc SuspendLoc = S->getDebugLoc()) {
          std::string LabelName =
              ("__coro_resume_" + Twine(SuspendIndex)).str();
          // Take the "inlined at" location recursively, if present. This is
          // mandatory as the DILabel insertion checks that the scopes of label
          // and the attached location match. This is not the case when the
          // suspend location has been inlined due to pointing to the original
          // scope.
          DILocation *DILoc = SuspendLoc;
          while (DILocation *InlinedAt = DILoc->getInlinedAt())
            DILoc = InlinedAt;

          DILabel *ResumeLabel =
              DBuilder.createLabel(Scope: DIS, Name: LabelName, File: DILoc->getFile(),
                                   LineNo: SuspendLoc.getLine(), Column: SuspendLoc.getCol(),
                                   /*IsArtificial=*/true,
                                   /*CoroSuspendIdx=*/SuspendIndex,
                                   /*AlwaysPreserve=*/false);
          DBuilder.insertLabel(LabelInfo: ResumeLabel, DL: DILoc, InsertPt: ResumeBB->begin());
        }
      }

      ++SuspendIndex;
    }

    Builder.SetInsertPoint(UnreachBB);
    Builder.CreateUnreachable();
    DBuilder.finalize();

    Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
  }
1603
  // Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
  static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                              Function *DestroyFn, Function *CleanupFn) {
    IRBuilder<> Builder(&*Shape.getInsertPtAfterFramePtr());
    LLVMContext &C = ResumeFn->getContext();

    // Resume function pointer
    // (stored at offset 0, so the frame pointer itself is the address).
    Value *ResumeAddr = Shape.FramePtr;
    Builder.CreateStore(Val: ResumeFn, Ptr: ResumeAddr);

    Value *DestroyOrCleanupFn = DestroyFn;

    CoroIdInst *CoroId = Shape.getSwitchCoroId();
    if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
      // If there is a CoroAlloc and it returns false (meaning we elide the
      // allocation, use CleanupFn instead of DestroyFn).
      DestroyOrCleanupFn = Builder.CreateSelect(C: CA, True: DestroyFn, False: CleanupFn);
    }

    // Destroy function pointer
    Value *DestroyAddr = Builder.CreateInBoundsPtrAdd(
        Ptr: Shape.FramePtr,
        Offset: ConstantInt::get(Ty: Type::getInt64Ty(C),
                             V: Shape.SwitchLowering.DestroyOffset),
        Name: "destroy.addr");
    Builder.CreateStore(Val: DestroyOrCleanupFn, Ptr: DestroyAddr);
  }
1631
// Create a global constant array containing pointers to functions provided
// and set Info parameter of CoroBegin to point at this constant. Example:
//
//   @f.resumers = internal constant [2 x void(%f.frame*)*]
//     [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
//   define void @f() {
//     ...
//     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
//       i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
//
// Assumes that all the functions have the same signature.
static void setCoroInfo(Function &F, coro::Shape &Shape,
                        ArrayRef<Function *> Fns) {
  // This only works under the switch-lowering ABI because coro elision
  // only works on the switch-lowering ABI.
  SmallVector<Constant *, 4> Args(Fns);
  assert(!Args.empty());
  Function *Part = *Fns.begin();
  Module *M = Part->getParent();
  // All parts share one signature (see header comment), so the array element
  // type can be taken from any of them.
  auto *ArrTy = ArrayType::get(ElementType: Part->getType(), NumElements: Args.size());

  auto *ConstVal = ConstantArray::get(T: ArrTy, V: Args);
  // Private linkage: the resumers array is only referenced through
  // coro.begin's info operand within this module.
  auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                GlobalVariable::PrivateLinkage, ConstVal,
                                F.getName() + Twine(".resumers"));

  // Update coro.begin instruction to refer to this constant.
  LLVMContext &C = F.getContext();
  auto *BC = ConstantExpr::getPointerCast(C: GV, Ty: PointerType::getUnqual(C));
  Shape.getSwitchCoroId()->setInfo(BC);
}
1665};
1666
1667} // namespace
1668
1669static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1670 Value *Continuation) {
1671 auto *ResumeIntrinsic = Suspend->getResumeFunction();
1672 auto &Context = Suspend->getParent()->getParent()->getContext();
1673 auto *Int8PtrTy = PointerType::getUnqual(C&: Context);
1674
1675 IRBuilder<> Builder(ResumeIntrinsic);
1676 auto *Val = Builder.CreateBitOrPointerCast(V: Continuation, DestTy: Int8PtrTy);
1677 ResumeIntrinsic->replaceAllUsesWith(V: Val);
1678 ResumeIntrinsic->eraseFromParent();
1679 Suspend->setOperand(i_nocapture: CoroSuspendAsyncInst::ResumeFunctionArg,
1680 Val_nocapture: PoisonValue::get(T: Int8PtrTy));
1681}
1682
1683/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
1684static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1685 ArrayRef<Value *> FnArgs,
1686 SmallVectorImpl<Value *> &CallArgs) {
1687 size_t ArgIdx = 0;
1688 for (auto *paramTy : FnTy->params()) {
1689 assert(ArgIdx < FnArgs.size());
1690 if (paramTy != FnArgs[ArgIdx]->getType())
1691 CallArgs.push_back(
1692 Elt: Builder.CreateBitOrPointerCast(V: FnArgs[ArgIdx], DestTy: paramTy));
1693 else
1694 CallArgs.push_back(Elt: FnArgs[ArgIdx]);
1695 ++ArgIdx;
1696 }
1697}
1698
1699CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1700 TargetTransformInfo &TTI,
1701 ArrayRef<Value *> Arguments,
1702 IRBuilder<> &Builder) {
1703 auto *FnTy = MustTailCallFn->getFunctionType();
1704 // Coerce the arguments, llvm optimizations seem to ignore the types in
1705 // vaarg functions and throws away casts in optimized mode.
1706 SmallVector<Value *, 8> CallArgs;
1707 coerceArguments(Builder, FnTy, FnArgs: Arguments, CallArgs);
1708
1709 auto *TailCall = Builder.CreateCall(FTy: FnTy, Callee: MustTailCallFn, Args: CallArgs);
1710 // Skip targets which don't support tail call.
1711 if (TTI.supportsTailCallFor(CB: TailCall)) {
1712 TailCall->setTailCallKind(CallInst::TCK_MustTail);
1713 }
1714 TailCall->setDebugLoc(Loc);
1715 TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1716 return TailCall;
1717}
1718
// Split an async-ABI coroutine: derive the frame pointer from the
// caller-provided storage, create one continuation declaration per suspend
// point, terminate the ramp at each suspend with a (possibly musttail) call
// to the suspend's tail-call function, then clone the body into each
// continuation.
void coro::AsyncABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                    SmallVectorImpl<Function *> &Clones,
                                    TargetTransformInfo &TTI) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Kind: Attribute::NoReturn);
  F.removeRetAttr(Kind: Attribute::NoAlias);
  F.removeRetAttr(Kind: Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = PointerType::getUnqual(C&: Context);

  auto *Id = Shape.getAsyncCoroId();
  IRBuilder<> Builder(Id);

  // The frame lives at FrameOffset bytes into the async context storage.
  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(V: FramePtr, DestTy: Int8PtrTy);
  FramePtr = Builder.CreateInBoundsPtrAdd(
      Ptr: FramePtr,
      Offset: ConstantInt::get(Ty: Type::getInt64Ty(C&: Context),
                         V: Shape.AsyncLowering.FrameOffset),
      Name: "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(V: FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(x: F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(N: Shape.CoroSuspends.size());
  for (auto [Idx, CS] : llvm::enumerate(First&: Shape.CoroSuspends)) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(Val: CS);

    // Create the clone declaration. Use Swift-style mangled suffixes
    // ("TQ"/"TY") when the context projection function indicates a Swift
    // async resume.
    auto ResumeNameSuffix = ".resume.";
    auto ProjectionFunctionName =
        Suspend->getAsyncContextProjectionFunction()->getName();
    bool UseSwiftMangling = false;
    if (ProjectionFunctionName == "__swift_async_resume_project_context") {
      ResumeNameSuffix = "TQ";
      UseSwiftMangling = true;
    } else if (ProjectionFunctionName == "__swift_async_resume_get_context") {
      ResumeNameSuffix = "TY";
      UseSwiftMangling = true;
    }
    auto *Continuation = createCloneDeclaration(
        OrigF&: F, Shape,
        Suffix: UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
                        : ResumeNameSuffix + Twine(Idx),
        InsertBefore: NextF, ActiveSuspend: Suspend);
    Clones.push_back(Elt: Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(I: Suspend);
    auto *Branch = cast<UncondBrInst>(Val: SuspendBB->getTerminator());

    // Place it before the first suspend.
    auto *ReturnBB =
        BasicBlock::Create(Context&: F.getContext(), Name: "coro.return", Parent: &F, InsertBefore: NewSuspendBB);
    Branch->setSuccessor(idx: 0, NewSucc: ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail call function and inline it.
    auto *Fn = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->args());
    // The tail-call arguments follow the must-tail-call-function operand.
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(
        N: CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
    auto *TailCall = coro::createMustTailCall(Loc: Suspend->getDebugLoc(), MustTailCallFn: Fn, TTI,
                                              Arguments: FnArgs, Builder);
    Builder.CreateRetVoid();
    InlineFunctionInfo FnInfo;
    (void)InlineFunction(CB&: *TailCall, IFI&: FnInfo);

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());

  // Now that all declarations exist, clone the body into each continuation.
  for (auto [Idx, CS] : llvm::enumerate(First&: Shape.CoroSuspends)) {
    auto *Suspend = CS;
    auto *Clone = Clones[Idx];

    coro::BaseCloner::createClone(OrigF&: F, Suffix: "resume." + Twine(Idx), Shape, NewF: Clone,
                                  ActiveSuspend: Suspend, TTI);
  }
}
1817
// Split a retcon/retcon.once coroutine: allocate (or reuse) the frame,
// funnel every suspend into one unified return block that returns the next
// continuation plus any yielded values, then clone the body into a
// continuation function per suspend point.
void coro::AnyRetconABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                        SmallVectorImpl<Function *> &Clones,
                                        TargetTransformInfo &TTI) {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Kind: Attribute::NoReturn);
  F.removeRetAttr(Kind: Attribute::NoAlias);
  F.removeRetAttr(Kind: Attribute::NonNull);

  // Allocate the frame.
  auto *Id = Shape.getRetconCoroId();
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    auto FrameSize = Builder.getInt64(C: Shape.FrameSize);

    // Allocate. We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Size: FrameSize, CG: nullptr);
    RawFramePtr =
        Builder.CreateBitCast(V: RawFramePtr, DestTy: Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    Builder.CreateStore(Val: RawFramePtr, Ptr: Id->getStorage());
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(V: RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block; built lazily on the first suspend below.
  BasicBlock *ReturnBB = nullptr;
  PHINode *ContinuationPhi = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(x: F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(N: Shape.CoroSuspends.size());
  for (auto [Idx, CS] : llvm::enumerate(First&: Shape.CoroSuspends)) {
    auto Suspend = cast<CoroSuspendRetconInst>(Val: CS);

    // Create the clone declaration.
    auto Continuation = createCloneDeclaration(
        OrigF&: F, Shape, Suffix: ".resume." + Twine(Idx), InsertBefore: NextF, ActiveSuspend: nullptr);
    Clones.push_back(Elt: Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(I: Suspend);
    auto Branch = cast<UncondBrInst>(Val: SuspendBB->getTerminator());

    // Create the unified return block on the first suspend.
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB =
          BasicBlock::Create(Context&: F.getContext(), Name: "coro.return", Parent: &F, InsertBefore: NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // First, the continuation.
      ContinuationPhi =
          Builder.CreatePHI(Ty: Continuation->getType(), NumReservedValues: Shape.CoroSuspends.size());

      // Create PHIs for all other return values.
      assert(ReturnPHIs.empty());

      // Next, all the directly-yielded values.
      for (auto *ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(
            Elt: Builder.CreatePHI(Ty: ResultTy, NumReservedValues: Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.empty() ? RetTy : RetTy->getStructElementType(N: 0));
      auto *CastedContinuation =
          Builder.CreateBitCast(V: ContinuationPhi, DestTy: CastedContinuationTy);

      // With yielded values, pack the continuation and the values into the
      // aggregate return type; otherwise return the continuation directly.
      Value *RetV = CastedContinuation;
      if (!ReturnPHIs.empty()) {
        auto ValueIdx = 0;
        RetV = PoisonValue::get(T: RetTy);
        RetV = Builder.CreateInsertValue(Agg: RetV, Val: CastedContinuation, Idxs: ValueIdx++);

        for (auto Phi : ReturnPHIs)
          RetV = Builder.CreateInsertValue(Agg: RetV, Val: Phi, Idxs: ValueIdx++);
      }

      Builder.CreateRet(V: RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(idx: 0, NewSucc: ReturnBB);
    assert(ContinuationPhi);
    ContinuationPhi->addIncoming(V: Continuation, BB: SuspendBB);
    for (auto [Phi, VUse] :
         llvm::zip_equal(t&: ReturnPHIs, u: Suspend->value_operands()))
      Phi->addIncoming(V: VUse, BB: SuspendBB);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());

  // Now that all declarations exist, clone the body into each continuation.
  for (auto [Idx, CS] : llvm::enumerate(First&: Shape.CoroSuspends)) {
    auto Suspend = CS;
    auto Clone = Clones[Idx];

    coro::BaseCloner::createClone(OrigF&: F, Suffix: "resume." + Twine(Idx), Shape, NewF: Clone,
                                  ActiveSuspend: Suspend, TTI);
  }
}
1947
namespace {
/// Pretty-stack-trace entry: if the compiler crashes while splitting, the
/// crash report names the coroutine that was being processed.
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F;

public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(O&: OS, /*print type*/ PrintType: false, M: F.getParent());
    OS << "\n";
  }
};
} // namespace
1961
1962/// Remove calls to llvm.coro.end in the original function.
1963static void removeCoroEndsFromRampFunction(const coro::Shape &Shape) {
1964 if (Shape.ABI != coro::ABI::Switch) {
1965 for (auto *End : Shape.CoroEnds) {
1966 replaceCoroEnd(End, Shape, FramePtr: Shape.FramePtr, /*in ramp*/ InRamp: true, CG: nullptr);
1967 }
1968 } else {
1969 for (llvm::AnyCoroEndInst *End : Shape.CoroEnds)
1970 End->eraseFromParent();
1971 }
1972}
1973
1974static void removeCoroIsInRampFromRampFunction(const coro::Shape &Shape) {
1975 for (auto *II : Shape.CoroIsInRampInsts) {
1976 auto &Ctx = II->getContext();
1977 II->replaceAllUsesWith(V: ConstantInt::getTrue(Context&: Ctx));
1978 II->eraseFromParent();
1979 }
1980}
1981
1982static bool hasSafeElideCaller(Function &F) {
1983 for (auto *U : F.users()) {
1984 if (auto *CB = dyn_cast<CallBase>(Val: U)) {
1985 auto *Caller = CB->getFunction();
1986 if (Caller && Caller->isPresplitCoroutine() &&
1987 CB->hasFnAttr(Kind: llvm::Attribute::CoroElideSafe))
1988 return true;
1989 }
1990 }
1991 return false;
1992}
1993
// Switch lowering is implemented by a dedicated helper class; this entry
// point simply delegates to it.
void coro::SwitchABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                     SmallVectorImpl<Function *> &Clones,
                                     TargetTransformInfo &TTI) {
  SwitchCoroutineSplitter::split(F, Shape, Clones, TTI);
}
1999
// Orchestrates the full split of one coroutine: suspend-point normalization,
// frame construction, ABI-specific splitting into funclets (appended to
// Clones), and post-split cleanup of the ramp function.
static void doSplitCoroutine(Function &F, SmallVectorImpl<Function *> &Clones,
                             coro::BaseABI &ABI, TargetTransformInfo &TTI,
                             bool OptimizeFrame) {
  PrettyStackTraceFunction prettyStackTrace(F);

  auto &Shape = ABI.Shape;
  assert(Shape.CoroBegin);

  lowerAwaitSuspends(F, Shape);

  simplifySuspendPoints(Shape);

  normalizeCoroutine(F, Shape, TTI);
  ABI.buildCoroutineFrame(OptimizeFrame);
  replaceFrameSizeAndAlignment(Shape);

  bool isNoSuspendCoroutine = Shape.CoroSuspends.empty();

  // A no-allocation variant is only created for switch-ABI coroutines that
  // have a coro_elide_safe caller and are not marked noinline.
  bool shouldCreateNoAllocVariant =
      !isNoSuspendCoroutine && Shape.ABI == coro::ABI::Switch &&
      hasSafeElideCaller(F) && !F.hasFnAttribute(Kind: llvm::Attribute::NoInline);

  // If there are no suspend points, no split required, just remove
  // the allocation and deallocation blocks, they are not needed.
  if (isNoSuspendCoroutine) {
    handleNoSuspendCoroutine(Shape);
  } else {
    ABI.splitCoroutine(F, Shape, Clones, TTI);
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, VMap: nullptr);

  // Salvage debug intrinsics that point into the coroutine frame in the
  // original function. The Cloner has already salvaged debug info in the new
  // coroutine funclets.
  SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
  auto DbgVariableRecords = collectDbgVariableRecords(F);
  for (DbgVariableRecord *DVR : DbgVariableRecords)
    coro::salvageDebugInfo(ArgToAllocaMap, DVR&: *DVR, UseEntryValue: false /*UseEntryValue*/);

  removeCoroEndsFromRampFunction(Shape);
  removeCoroIsInRampFromRampFunction(Shape);

  if (shouldCreateNoAllocVariant)
    SwitchCoroutineSplitter::createNoAllocVariant(F, Shape, Clones);
}
2048
2049static LazyCallGraph::SCC &updateCallGraphAfterCoroutineSplit(
2050 LazyCallGraph::Node &N, const coro::Shape &Shape,
2051 const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
2052 LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
2053 FunctionAnalysisManager &FAM) {
2054
2055 auto *CurrentSCC = &C;
2056 if (!Clones.empty()) {
2057 switch (Shape.ABI) {
2058 case coro::ABI::Switch:
2059 // Each clone in the Switch lowering is independent of the other clones.
2060 // Let the LazyCallGraph know about each one separately.
2061 for (Function *Clone : Clones)
2062 CG.addSplitFunction(OriginalFunction&: N.getFunction(), NewFunction&: *Clone);
2063 break;
2064 case coro::ABI::Async:
2065 case coro::ABI::Retcon:
2066 case coro::ABI::RetconOnce:
2067 // Each clone in the Async/Retcon lowering references of the other clones.
2068 // Let the LazyCallGraph know about all of them at once.
2069 if (!Clones.empty())
2070 CG.addSplitRefRecursiveFunctions(OriginalFunction&: N.getFunction(), NewFunctions: Clones);
2071 break;
2072 }
2073
2074 // Let the CGSCC infra handle the changes to the original function.
2075 CurrentSCC = &updateCGAndAnalysisManagerForCGSCCPass(G&: CG, C&: *CurrentSCC, N, AM,
2076 UR, FAM);
2077 }
2078
2079 // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
2080 // to the split functions.
2081 postSplitCleanup(F&: N.getFunction());
2082 CurrentSCC = &updateCGAndAnalysisManagerForFunctionPass(G&: CG, C&: *CurrentSCC, N,
2083 AM, UR, FAM);
2084 return *CurrentSCC;
2085}
2086
/// Replace a call to llvm.coro.prepare.retcon (or .async) with the prepared
/// function itself, folding away the surrounding bitcasts where possible.
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(i: 0); // as an i8*
  auto Fn = CastFn->stripPointerCasts(); // as its original type

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (Use &U : llvm::make_early_inc_range(Range: Prepare->uses())) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>(Val: U.getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(V: Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(V: CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts: walk down the cast-operand chain, erasing each cast
  // whose last use was removed above.
  while (auto *Cast = dyn_cast<BitCastInst>(Val: CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(i_nocapture: 0);
    Cast->eraseFromParent();
  }
}
2123
2124static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2125 LazyCallGraph::SCC &C) {
2126 bool Changed = false;
2127 for (Use &P : llvm::make_early_inc_range(Range: PrepareFn->uses())) {
2128 // Intrinsics can only be used in calls.
2129 auto *Prepare = cast<CallInst>(Val: P.getUser());
2130 replacePrepare(Prepare, CG, C);
2131 Changed = true;
2132 }
2133
2134 return Changed;
2135}
2136
2137static void addPrepareFunction(const Module &M,
2138 SmallVectorImpl<Function *> &Fns,
2139 StringRef Name) {
2140 auto *PrepareFn = M.getFunction(Name);
2141 if (PrepareFn && !PrepareFn->use_empty())
2142 Fns.push_back(Elt: PrepareFn);
2143}
2144
2145static std::unique_ptr<coro::BaseABI>
2146CreateNewABI(Function &F, coro::Shape &S,
2147 std::function<bool(Instruction &)> IsMatCallback,
2148 const SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs) {
2149 if (S.CoroBegin->hasCustomABI()) {
2150 unsigned CustomABI = S.CoroBegin->getCustomABI();
2151 if (CustomABI >= GenCustomABIs.size())
2152 llvm_unreachable("Custom ABI not found amoung those specified");
2153 return GenCustomABIs[CustomABI](F, S);
2154 }
2155
2156 switch (S.ABI) {
2157 case coro::ABI::Switch:
2158 return std::make_unique<coro::SwitchABI>(args&: F, args&: S, args&: IsMatCallback);
2159 case coro::ABI::Async:
2160 return std::make_unique<coro::AsyncABI>(args&: F, args&: S, args&: IsMatCallback);
2161 case coro::ABI::Retcon:
2162 return std::make_unique<coro::AnyRetconABI>(args&: F, args&: S, args&: IsMatCallback);
2163 case coro::ABI::RetconOnce:
2164 return std::make_unique<coro::AnyRetconABI>(args&: F, args&: S, args&: IsMatCallback);
2165 }
2166 llvm_unreachable("Unknown ABI");
2167}
2168
// Default pass constructor: choose the ABI from the coroutine's Shape (no
// custom ABI generators) and use the default triviality check for
// rematerialization.
CoroSplitPass::CoroSplitPass(bool OptimizeFrame)
    : CreateAndInitABI([](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, IsMatCallback: coro::isTriviallyMaterializable, GenCustomABIs: {});
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}
2177
// Constructor taking generators for custom ABIs (selected by the custom ABI
// index on the coro.begin); uses the default materializability callback.
CoroSplitPass::CoroSplitPass(
    SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs, bool OptimizeFrame)
    : CreateAndInitABI([=](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, IsMatCallback: coro::isTriviallyMaterializable, GenCustomABIs);
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}
2187
// For back compatibility, constructor takes a materializable callback and
// creates a generator for an ABI with a modified materializable callback.
CoroSplitPass::CoroSplitPass(std::function<bool(Instruction &)> IsMatCallback,
                             bool OptimizeFrame)
    : CreateAndInitABI([=](Function &F, coro::Shape &S) {
        // No custom ABI generators: only the built-in ABIs can be selected.
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, IsMatCallback, GenCustomABIs: {});
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}
2199
// For back compatibility, constructor takes a materializable callback and
// creates a generator for an ABI with a modified materializable callback.
// This variant additionally accepts custom ABI generators.
CoroSplitPass::CoroSplitPass(
    std::function<bool(Instruction &)> IsMatCallback,
    SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs, bool OptimizeFrame)
    : CreateAndInitABI([=](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, IsMatCallback, GenCustomABIs);
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}
2212
PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
  // non-zero number of nodes, so we assume that here and grab the first
  // node's function's module.
  Module &M = *C.begin()->getFunction().getParent();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(IR&: C, ExtraArgs&: CG).getManager();

  // Check for uses of llvm.coro.prepare.retcon/async.
  SmallVector<Function *, 2> PrepareFns;
  addPrepareFunction(M, Fns&: PrepareFns, Name: "llvm.coro.prepare.retcon");
  addPrepareFunction(M, Fns&: PrepareFns, Name: "llvm.coro.prepare.async");

  // Find coroutines for processing.
  SmallVector<LazyCallGraph::Node *> Coroutines;
  for (LazyCallGraph::Node &N : C)
    if (N.getFunction().isPresplitCoroutine())
      Coroutines.push_back(Elt: &N);

  // Nothing to do in this SCC: all analyses remain valid.
  if (Coroutines.empty() && PrepareFns.empty())
    return PreservedAnalyses::all();

  auto *CurrentSCC = &C;
  // Split all the coroutines.
  for (LazyCallGraph::Node *N : Coroutines) {
    Function &F = N->getFunction();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "\n");

    // The suspend-crossing algorithm in buildCoroutineFrame gets tripped up
    // by unreachable blocks, so remove them as a first pass. Remove the
    // unreachable blocks before collecting intrinsics into Shape.
    removeUnreachableBlocks(F);

    coro::Shape Shape(F);
    if (!Shape.CoroBegin)
      continue;

    F.setSplittedCoroutine();

    std::unique_ptr<coro::BaseABI> ABI = CreateAndInitABI(F, Shape);

    SmallVector<Function *, 4> Clones;
    auto &TTI = FAM.getResult<TargetIRAnalysis>(IR&: F);
    doSplitCoroutine(F, Clones, ABI&: *ABI, TTI, OptimizeFrame);
    // Register the clones and refresh analyses for the modified ramp.
    CurrentSCC = &updateCallGraphAfterCoroutineSplit(
        N&: *N, Shape, Clones, C&: *CurrentSCC, CG, AM, UR, FAM);

    auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(IR&: F);
    ORE.emit(RemarkBuilder: [&]() {
      return OptimizationRemark(DEBUG_TYPE, "CoroSplit", &F)
             << "Split '" << ore::NV("function", F.getName())
             << "' (frame_size=" << ore::NV("frame_size", Shape.FrameSize)
             << ", align=" << ore::NV("align", Shape.FrameAlign.value()) << ")";
    });

    if (!Shape.CoroSuspends.empty()) {
      // Run the CGSCC pipeline on the original and newly split functions.
      UR.CWorklist.insert(X: CurrentSCC);
      for (Function *Clone : Clones)
        UR.CWorklist.insert(X: CG.lookupSCC(N&: CG.get(F&: *Clone)));
    } else if (Shape.ABI == coro::ABI::Async) {
      // Reprocess the function to inline the tail called return function of
      // coro.async.end.
      UR.CWorklist.insert(X: &C);
    }
  }

  // Lower any uses of the prepare intrinsics now that splitting is done.
  for (auto *PrepareFn : PrepareFns) {
    replaceAllPrepares(PrepareFn, CG, C&: *CurrentSCC);
  }

  return PreservedAnalyses::none();
}
2289