//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass builds the coroutine frame and outlines the resume and destroy
// parts of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the
// coroutine, add them to the current SCC and restart the IPO pipeline to
// optimize the coroutine subfunctions we extracted before proceeding to the
// caller of the coroutine.
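//
// For a switch-lowered coroutine `f`, the split produces, roughly:
//
//   f          - the ramp function; allocates and initializes the frame
//   f.resume   - invoked by llvm.coro.resume to continue after a suspend
//   f.destroy  - invoked by llvm.coro.destroy to unwind and free the frame
//   f.cleanup  - like f.destroy, but used when the allocation has been elided
//
// (Names are illustrative; see SwitchCoroutineSplitter below.)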
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "CoroCloner.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Coroutines/MaterializationUtils.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "coro-split"
// FIXME:
// Lower the intrinsic in the CoroEarly phase if the coroutine frame doesn't
// escape and it is known that other transformations, for example sanitizers,
// won't lead to incorrect code.
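//
// Illustrative IR for the handle case, with names abbreviated (the wrapper
// returns the coroutine handle to transfer to):
//
//   call void @llvm.coro.await.suspend.handle(ptr %awaiter, ptr %frame,
//                                             ptr @wrapper)
// becomes, roughly:
//   %hdl = call ptr @wrapper(ptr %awaiter, ptr %frame)
//   %resume = call ptr @llvm.coro.subfn.addr(ptr %hdl, i8 0)
//   call fastcc void %resume(ptr %hdl)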
static void lowerAwaitSuspend(IRBuilder<> &Builder, CoroAwaitSuspendInst *CB,
                              coro::Shape &Shape) {
  auto Wrapper = CB->getWrapperFunction();
  auto Awaiter = CB->getAwaiter();
  auto FramePtr = CB->getFrame();

  Builder.SetInsertPoint(CB);

  CallBase *NewCall = nullptr;
  // await_suspend has only 2 parameters, awaiter and handle.
  // Copy parameter attributes from the intrinsic call, but remove the last,
  // because the last parameter now becomes the function that is being called.
  AttributeList NewAttributes =
      CB->getAttributes().removeParamAttributes(CB->getContext(), 2);

  if (auto Invoke = dyn_cast<InvokeInst>(CB)) {
    auto WrapperInvoke =
        Builder.CreateInvoke(Wrapper, Invoke->getNormalDest(),
                             Invoke->getUnwindDest(), {Awaiter, FramePtr});

    WrapperInvoke->setCallingConv(Invoke->getCallingConv());
    std::copy(Invoke->bundle_op_info_begin(), Invoke->bundle_op_info_end(),
              WrapperInvoke->bundle_op_info_begin());
    WrapperInvoke->setAttributes(NewAttributes);
    WrapperInvoke->setDebugLoc(Invoke->getDebugLoc());
    NewCall = WrapperInvoke;
  } else if (auto Call = dyn_cast<CallInst>(CB)) {
    auto WrapperCall = Builder.CreateCall(Wrapper, {Awaiter, FramePtr});

    WrapperCall->setAttributes(NewAttributes);
    WrapperCall->setDebugLoc(Call->getDebugLoc());
    NewCall = WrapperCall;
  } else {
    llvm_unreachable("Unexpected coro_await_suspend invocation method");
  }

  if (CB->getCalledFunction()->getIntrinsicID() ==
      Intrinsic::coro_await_suspend_handle) {
    // Follow the lowered await_suspend call above with a lowered resume call
    // to the returned coroutine.
    if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
      // If the await_suspend call is an invoke, we continue in the next block.
      Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstInsertionPt());
    }

    coro::LowererBase LB(*Wrapper->getParent());
    auto *ResumeAddr = LB.makeSubFnCall(NewCall, CoroSubFnInst::ResumeIndex,
                                        &*Builder.GetInsertPoint());

    LLVMContext &Ctx = Builder.getContext();
    FunctionType *ResumeTy = FunctionType::get(
        Type::getVoidTy(Ctx), PointerType::getUnqual(Ctx), false);
    auto *ResumeCall = Builder.CreateCall(ResumeTy, ResumeAddr, {NewCall});
    ResumeCall->setCallingConv(CallingConv::Fast);

    // We can't insert the 'ret' instruction and adjust the cc until the
    // function has been split, so remember this for later.
    Shape.SymmetricTransfers.push_back(ResumeCall);

    NewCall = ResumeCall;
  }

  CB->replaceAllUsesWith(NewCall);
  CB->eraseFromParent();
}

static void lowerAwaitSuspends(Function &F, coro::Shape &Shape) {
  IRBuilder<> Builder(F.getContext());
  for (auto *AWS : Shape.CoroAwaitSuspends)
    lowerAwaitSuspend(Builder, AWS, Shape);
}

static void maybeFreeRetconStorage(IRBuilder<> &Builder,
                                   const coro::Shape &Shape, Value *FramePtr,
                                   CallGraph *CG) {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce);
  if (Shape.RetconLowering.IsFrameInlineInStorage)
    return;

  Shape.emitDealloc(Builder, FramePtr, CG);
}

/// Replace an llvm.coro.end.async.
/// Will inline the musttail call, if there is one.
/// \returns true if cleanup of the coro.end block is needed, false otherwise.
static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
  IRBuilder<> Builder(End);

  auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
  if (!EndAsync) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
  if (!MustTailCallFunc) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  // Move the must tail call from the predecessor block into the end block.
  auto *CoroEndBlock = End->getParent();
  auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
  assert(MustTailCallFuncBlock && "Must have a single predecessor block");
  auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
  auto *MustTailCall = cast<CallInst>(&*std::prev(It));
  CoroEndBlock->splice(End->getIterator(), MustTailCallFuncBlock,
                       MustTailCall->getIterator());

  // Insert the return instruction.
  Builder.SetInsertPoint(End);
  Builder.CreateRetVoid();
  InlineFunctionInfo FnInfo;

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();

  auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
  assert(InlineRes.isSuccess() && "Expected inlining to succeed");
  (void)InlineRes;

  // We have cleaned up the coro.end block above.
  return false;
}

/// Replace a non-unwind call to llvm.coro.end.
static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
                                      const coro::Shape &Shape, Value *FramePtr,
                                      bool InRamp, CallGraph *CG) {
  // Start inserting right before the coro.end.
  IRBuilder<> Builder(End);

  // Create the return instruction.
  switch (Shape.ABI) {
  // The cloned functions in switch-lowering always return void.
  case coro::ABI::Switch:
    assert(!cast<CoroEndInst>(End)->hasResults() &&
           "switch coroutine should not return any values");
    // coro.end doesn't immediately end the coroutine in the main function
    // in this lowering, because we need to deallocate the coroutine.
    if (InRamp)
      return;
    Builder.CreateRetVoid();
    break;

  // In async lowering this returns.
  case coro::ABI::Async: {
    bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
    if (!CoroEndBlockNeedsCleanup)
      return;
    break;
  }

  // In unique continuation lowering, the continuations always return void.
  // But we may have implicitly allocated storage.
  case coro::ABI::RetconOnce: {
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto *CoroEnd = cast<CoroEndInst>(End);
    auto *RetTy = Shape.getResumeFunctionType()->getReturnType();

    if (!CoroEnd->hasResults()) {
      assert(RetTy->isVoidTy());
      Builder.CreateRetVoid();
      break;
    }

    auto *CoroResults = CoroEnd->getResults();
    unsigned NumReturns = CoroResults->numReturns();

    if (auto *RetStructTy = dyn_cast<StructType>(RetTy)) {
      assert(RetStructTy->getNumElements() == NumReturns &&
             "number of returns should match the resume function signature");
      Value *ReturnValue = PoisonValue::get(RetStructTy);
      unsigned Idx = 0;
      for (Value *RetValEl : CoroResults->return_values())
        ReturnValue = Builder.CreateInsertValue(ReturnValue, RetValEl, Idx++);
      Builder.CreateRet(ReturnValue);
    } else if (NumReturns == 0) {
      assert(RetTy->isVoidTy());
      Builder.CreateRetVoid();
    } else {
      assert(NumReturns == 1);
      Builder.CreateRet(*CoroResults->retval_begin());
    }
    CoroResults->replaceAllUsesWith(
        ConstantTokenNone::get(CoroResults->getContext()));
    CoroResults->eraseFromParent();
    break;
  }

  // In non-unique continuation lowering, we signal completion by returning
  // a null continuation.
  case coro::ABI::Retcon: {
    assert(!cast<CoroEndInst>(End)->hasResults() &&
           "retcon coroutine should not return any values");
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto RetTy = Shape.getResumeFunctionType()->getReturnType();
    auto RetStructTy = dyn_cast<StructType>(RetTy);
    PointerType *ContinuationTy =
        cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);

    Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
    if (RetStructTy) {
      ReturnValue = Builder.CreateInsertValue(PoisonValue::get(RetStructTy),
                                              ReturnValue, 0);
    }
    Builder.CreateRet(ReturnValue);
    break;
  }
  }

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();
}

// Mark a coroutine as done, which implies that the coroutine is finished and
// never gets resumed.
//
// In the resume-switched ABI, the done state is represented by storing zero in
// ResumeFnAddr.
//
// NOTE: We cannot omit the `FramePtr` argument, because the pointer to the
// frame in a split function is not stored in `Shape`.
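//
// Illustrative IR for the switch ABI (frame type and field index abbreviated):
//
//   %rf.addr = getelementptr %f.Frame, ptr %frame, i32 0, i32 0 ; ResumeFn
//   store ptr null, ptr %rf.addr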
static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
                                Value *FramePtr) {
  assert(
      Shape.ABI == coro::ABI::Switch &&
      "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
  auto *GepIndex = Builder.CreateStructGEP(
      Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "ResumeFn.addr");
  auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
      Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
  Builder.CreateStore(NullPtr, GepIndex);

  // If the coroutine doesn't have an unwind coro.end, we can omit the store
  // to the final suspend point's index, since we can infer that the coroutine
  // is suspended at the final suspend point from the nullness of ResumeFnAddr.
  // However, we can't skip it if the coroutine has an unwind coro.end: a
  // coroutine that reaches an unwind coro.end is considered suspended at the
  // final suspend point (its ResumeFnAddr is null) even though it hasn't
  // actually completed yet. We need the IndexVal for the final suspend point
  // to keep the states distinct.
  if (Shape.SwitchLowering.HasUnwindCoroEnd &&
      Shape.SwitchLowering.HasFinalSuspend) {
    assert(cast<CoroSuspendInst>(Shape.CoroSuspends.back())->isFinal() &&
           "The final suspend should only live in the last position of "
           "CoroSuspends.");
    ConstantInt *IndexVal = Shape.getIndex(Shape.CoroSuspends.size() - 1);
    auto *FinalIndex = Builder.CreateStructGEP(
        Shape.FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");

    Builder.CreateStore(IndexVal, FinalIndex);
  }
}

/// Replace an unwind call to llvm.coro.end.
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                                 Value *FramePtr, bool InRamp, CallGraph *CG) {
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch: {
    // In C++'s specification, the coroutine should be marked as done
    // if promise.unhandled_exception() throws. The frontend will
    // call coro.end(true) along this path.
    //
    // FIXME: We should refactor this once another language uses the
    // Switch-Resumed style besides C++.
    markCoroutineAsDone(Builder, Shape, FramePtr);
    if (InRamp)
      return;
    break;
  }
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add a cleanupret instruction.
  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
    End->getParent()->splitBasicBlock(End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}

static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                           Value *FramePtr, bool InRamp, CallGraph *CG) {
  if (End->isUnwind())
    replaceUnwindCoroEnd(End, Shape, FramePtr, InRamp, CG);
  else
    replaceFallthroughCoroEnd(End, Shape, FramePtr, InRamp, CG);
  End->eraseFromParent();
}
// In the resume function, we remove the last case (when coro::Shape is built,
// the final suspend point, if present, is always the last element of the
// CoroSuspends array), since it is undefined behavior to resume a coroutine
// suspended at the final suspend point.
// In the destroy function, we can also remove the last case, as long as a
// null ResumeFnAddr guarantees that the coroutine really is suspended at the
// final suspend point. That guarantee does not hold when there is an unwind
// coro.end: a coroutine whose promise.unhandled_exception() exits via an
// exception is considered suspended at the final suspend point even though it
// hasn't actually completed.
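//
// Illustrative IR in the clones (indices and labels abbreviated):
//
//   switch i32 %index, label %unreachable [i32 0, label %resume.0
//                                          i32 1, label %final.suspend]
// becomes, in f.resume:
//   switch i32 %index, label %unreachable [i32 0, label %resume.0]
// and, in f.destroy (when there is no unwind coro.end):
//   %done = icmp eq ptr %ResumeFn, null
//   br i1 %done, label %final.suspend, label %Switch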
void coro::BaseCloner::handleFinalSuspend() {
  assert(Shape.ABI == coro::ABI::Switch &&
         Shape.SwitchLowering.HasFinalSuspend);

  if (isSwitchDestroyFunction() && Shape.SwitchLowering.HasUnwindCoroEnd)
    return;

  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
  auto FinalCaseIt = std::prev(Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(FinalCaseIt);
  if (isSwitchDestroyFunction()) {
    BasicBlock *OldSwitchBB = Switch->getParent();
    auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
    Builder.SetInsertPoint(OldSwitchBB->getTerminator());

    if (NewF->isCoroOnlyDestroyWhenComplete()) {
      // When the coroutine can only be destroyed when complete, we don't need
      // to generate code for the other cases.
      Builder.CreateBr(ResumeBB);
    } else {
      auto *GepIndex = Builder.CreateStructGEP(
          Shape.FrameTy, NewFramePtr, coro::Shape::SwitchFieldIndex::Resume,
          "ResumeFn.addr");
      auto *Load =
          Builder.CreateLoad(Shape.getSwitchResumePointerType(), GepIndex);
      auto *Cond = Builder.CreateIsNull(Load);
      Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
    }
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}

static FunctionType *
getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
  auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
  auto *StructTy = cast<StructType>(AsyncSuspend->getType());
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *VoidTy = Type::getVoidTy(Context);
  return FunctionType::get(VoidTy, StructTy->elements(), false);
}

static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
                                        const Twine &Suffix,
                                        Module::iterator InsertBefore,
                                        AnyCoroSuspendInst *ActiveSuspend) {
  Module *M = OrigF.getParent();
  auto *FnTy = (Shape.ABI != coro::ABI::Async)
                   ? Shape.getResumeFunctionType()
                   : getFunctionTypeFromAsyncSuspend(ActiveSuspend);

  Function *NewF =
      Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
                       OrigF.getName() + Suffix);

  M->getFunctionList().insert(InsertBefore, NewF);

  return NewF;
}

/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
/// arguments to the continuation function.
///
/// This assumes that the builder has a meaningful insertion point.
void coro::BaseCloner::replaceRetconOrAsyncSuspendUses() {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
         Shape.ABI == coro::ABI::Async);

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty())
    return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  SmallVector<Value *, 8> Args;
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(&*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
  for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
    auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty())
    return;

  // Otherwise, we need to create an aggregate.
  Value *Aggr = PoisonValue::get(NewS->getType());
  for (auto [Idx, Arg] : llvm::enumerate(Args))
    Aggr = Builder.CreateInsertValue(Aggr, Arg, Idx);

  NewS->replaceAllUsesWith(Aggr);
}

void coro::BaseCloner::replaceCoroSuspends() {
  Value *SuspendResult;

  switch (Shape.ABI) {
  // In switch lowering, replace coro.suspend with the appropriate value
  // for the kind of function we're extracting. Replacing coro.suspend with
  // 0 makes control flow proceed to the resume label associated with the
  // suspend point; replacing it with 1 makes control flow proceed to the
  // cleanup label associated with the suspend point.
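  //
  // Illustrative IR: the original
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //   switch i8 %s, label %suspend [i8 0, label %resume
  //                                 i8 1, label %cleanup]
  // folds to an unconditional branch to %resume (in f.resume) or %cleanup
  // (in the destroy/cleanup clones) once %s is replaced by the constant.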
  case coro::ABI::Switch:
    SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
    break;

  // In async lowering there are no uses of the result.
  case coro::ABI::Async:
    return;

  // In returned-continuation lowering, the arguments from earlier
  // continuations are theoretically arbitrary, and they should have been
  // spilled.
  case coro::ABI::RetconOnce:
  case coro::ABI::Retcon:
    return;
  }

  for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
    // The active suspend was handled earlier.
    if (CS == ActiveSuspend)
      continue;

    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
    MappedCS->replaceAllUsesWith(SuspendResult);
    MappedCS->eraseFromParent();
  }
}

void coro::BaseCloner::replaceCoroEnds() {
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    // We use a null call graph because there's no call graph node for
    // the cloned function yet. We'll just be rebuilding that later.
    auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
    replaceCoroEnd(NewCE, Shape, NewFramePtr, /*InRamp=*/false, nullptr);
  }
}

void coro::BaseCloner::replaceCoroIsInRamp() {
  auto &Ctx = OrigF.getContext();
  for (auto *II : Shape.CoroIsInRampInsts) {
    auto *NewII = cast<CoroIsInRampInst>(VMap[II]);
    NewII->replaceAllUsesWith(ConstantInt::getFalse(Ctx));
    NewII->eraseFromParent();
  }
}

static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
                                 ValueToValueMapTy *VMap) {
  if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
    return;
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot)
      return CachedSlot;

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(&F.getEntryBlock(),
                        F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
    Value *MappedResult;
    if (Op->arg_empty()) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(ValueTy, Slot);
    } else {
      assert(Op->arg_size() == 1);
      auto Value = MappedOp->getArgOperand(0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Value, Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}

/// Returns all debug records in F.
static SmallVector<DbgVariableRecord *>
collectDbgVariableRecords(Function &F) {
  SmallVector<DbgVariableRecord *> DbgVariableRecords;
  for (auto &I : instructions(F)) {
    for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
      DbgVariableRecords.push_back(&DVR);
  }
  return DbgVariableRecords;
}

void coro::BaseCloner::replaceSwiftErrorOps() {
  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
}

void coro::BaseCloner::salvageDebugInfo() {
  auto DbgVariableRecords = collectDbgVariableRecords(*NewF);
  SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;

  // Only 64-bit ABIs have a register we can refer to with the entry value.
  bool UseEntryValue = OrigF.getParent()->getTargetTriple().isArch64Bit();
  for (DbgVariableRecord *DVR : DbgVariableRecords)
    coro::salvageDebugInfo(ArgToAllocaMap, *DVR, UseEntryValue);

  // Remove all salvaged dbg.declare intrinsics that became
  // either unreachable or stale due to the CoroSplit transformation.
  DominatorTree DomTree(*NewF);
  auto IsUnreachableBlock = [&](BasicBlock *BB) {
    return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
                                   &DomTree);
  };
  auto RemoveOne = [&](DbgVariableRecord *DVI) {
    if (IsUnreachableBlock(DVI->getParent()))
      DVI->eraseFromParent();
    else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
      // Count all non-debuginfo uses in reachable blocks.
      unsigned Uses = 0;
      for (auto *User : DVI->getVariableLocationOp(0)->users())
        if (auto *I = dyn_cast<Instruction>(User))
          if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
            ++Uses;
      if (!Uses)
        DVI->eraseFromParent();
    }
  };
  for_each(DbgVariableRecords, RemoveOne);
}

void coro::BaseCloner::replaceEntryBlock() {
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine. Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block. There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function. Make the entry block branch to this.
    auto *SwitchBB =
        cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(SwitchBB);
    SwitchBB->moveAfter(Entry);
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point. Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    assert((Shape.ABI == coro::ABI::Async &&
            isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           ((Shape.ABI == coro::ABI::Retcon ||
             Shape.ABI == coro::ABI::RetconOnce) &&
            isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Branch->getSuccessor(0));
    break;
  }
  }

  // Any static alloca that's still being used but not reachable from the new
  // entry needs to be moved to the new entry.
  Function *F = OldEntry->getParent();
  DominatorTree DT{*F};
  for (Instruction &I : llvm::make_early_inc_range(instructions(F))) {
    auto *Alloca = dyn_cast<AllocaInst>(&I);
    if (!Alloca || I.use_empty())
      continue;
    if (DT.isReachableFromEntry(I.getParent()) ||
        !isa<ConstantInt>(Alloca->getArraySize()))
      continue;
    I.moveBefore(*Entry, Entry->getFirstInsertionPt());
  }
}

/// Derive the value of the new frame pointer.
Value *coro::BaseCloner::deriveNewFramePointer() {
  // Builder should be inserting to the front of the new entry block.

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
  case coro::ABI::Switch:
    return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context
  // of the resume function from the async context projection function
  // associated with the active suspend. The frame is located immediately
  // after the async context header.
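  //
  // Illustrative derivation, mirroring the code below (names abbreviated):
  //
  //   %caller.ctx = call ptr @ctx.proj.fn(ptr %callee.ctx)
  //   %frameptr   = getelementptr i8, ptr %caller.ctx, i32 FrameOffset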
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
    auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
    auto *CalleeContext = NewF->getArg(ContextIdx);
    auto *ProjectionFunc =
        ActiveAsyncSuspend->getAsyncContextProjectionFunction();
    auto DbgLoc =
        cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
    // Calling i8* (i8*)
    auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
                                             ProjectionFunc, CalleeContext);
    CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
    CallerContext->setDebugLoc(DbgLoc);
    // The frame is located after the async_context header.
    auto &Context = Builder.getContext();
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Type::getInt8Ty(Context), CallerContext,
        Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
    // Inline the projection function.
    InlineFunctionInfo InlineInfo;
    auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
    assert(InlineRes.isSuccess());
    (void)InlineRes;
    return FramePtrAddr;
  }
  // In continuation-lowering, the argument is the opaque storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = PointerType::getUnqual(Shape.FrameTy->getContext());

    // If the frame is inline in the storage, the storage pointer is already
    // the frame pointer.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return NewStorage;

    // Otherwise, load the real frame from the opaque storage.
    return Builder.CreateLoad(FramePtrTy, NewStorage);
  }
  }
  llvm_unreachable("bad ABI");
}

/// Adjust the scope line of the funclet to the first line number after the
/// suspend point. This avoids a jump in the line table from the function
/// declaration (where prologue instructions are attributed to) to the suspend
/// point.
/// Only adjust the scope line when the files are the same.
/// If no candidate line number is found, fall back to the line of
/// ActiveSuspend.
static void updateScopeLine(Instruction *ActiveSuspend,
                            DISubprogram &SPToUpdate) {
  if (!ActiveSuspend)
    return;

  // No subsequent instruction -> fall back to the location of ActiveSuspend.
  if (!ActiveSuspend->getNextNode()) {
    if (auto DL = ActiveSuspend->getDebugLoc())
      if (SPToUpdate.getFile() == DL->getFile())
        SPToUpdate.setScopeLine(DL->getLine());
    return;
  }

  BasicBlock::iterator Successor = ActiveSuspend->getNextNode()->getIterator();
  // CoroSplit splits the BB around ActiveSuspend, so the meaningful
  // instructions are not in the same BB.
  if (auto *Branch = dyn_cast_or_null<BranchInst>(Successor);
      Branch && Branch->isUnconditional())
    Successor = Branch->getSuccessor(0)->getFirstNonPHIOrDbg();

  // Find the first successor of ActiveSuspend with a non-zero line location.
  // If that matches the file of ActiveSuspend, use it.
  BasicBlock *PBB = Successor->getParent();
  for (; Successor != PBB->end(); Successor = std::next(Successor)) {
    Successor = skipDebugIntrinsics(Successor);
    auto DL = Successor->getDebugLoc();
    if (!DL || DL.getLine() == 0)
      continue;

    if (SPToUpdate.getFile() == DL->getFile()) {
      SPToUpdate.setScopeLine(DL.getLine());
      return;
    }

    break;
  }

  // If the search above failed, fall back to the location of ActiveSuspend.
  if (auto DL = ActiveSuspend->getDebugLoc())
    if (SPToUpdate.getFile() == DL->getFile())
      SPToUpdate.setScopeLine(DL->getLine());
}

static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex, uint64_t Size,
                                 Align Alignment, bool NoAlias) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::NonNull);
  ParamAttrs.addAttribute(Attribute::NoUndef);

  if (NoAlias)
    ParamAttrs.addAttribute(Attribute::NoAlias);

  ParamAttrs.addAlignmentAttr(Alignment);
  ParamAttrs.addDereferenceableAttr(Size);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::SwiftAsync);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
                              unsigned ParamIndex) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::SwiftSelf);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Clone the body of the original function into a resume function of
/// some sort.
void coro::BaseCloner::create() {
  assert(NewF);

  // Replace all args with dummy instructions. If an argument is the old frame
  // pointer, the dummy will be replaced by the new frame pointer once it is
  // computed below. Uses of all other arguments should have already been
  // rewritten by buildCoroutineFrame() to use loads/stores on the coroutine
  // frame.
  SmallVector<Instruction *> DummyArgs;
  for (Argument &A : OrigF.args()) {
    DummyArgs.push_back(new FreezeInst(PoisonValue::get(A.getType())));
    VMap[&A] = DummyArgs.back();
  }

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewF, &OrigF, VMap,
                    CloneFunctionChangeType::LocalChangesOnly, Returns);

  auto &Context = NewF->getContext();

  if (DISubprogram *SP = NewF->getSubprogram()) {
    assert(SP != OrigF.getSubprogram() && SP->isDistinct());
    updateScopeLine(ActiveSuspend, *SP);

    // Update the linkage name and the function name to reflect the modified
    // name.
    MDString *NewLinkageName = MDString::get(Context, NewF->getName());
    SP->replaceLinkageName(NewLinkageName);
    if (DISubprogram *Decl = SP->getDeclaration()) {
      TempDISubprogram NewDecl = Decl->clone();
      NewDecl->replaceLinkageName(NewLinkageName);
      SP->replaceDeclaration(MDNode::replaceWithUniqued(std::move(NewDecl)));
    }
  }

  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);
  // The function sanitizer metadata needs to match the signature of the
  // function it is being attached to. However this does not hold for split
  // functions here. Thus remove the metadata for split functions.
  if (Shape.ABI == coro::ABI::Switch &&
      NewF->hasMetadata(LLVMContext::MD_func_sanitize))
    NewF->eraseMetadata(LLVMContext::MD_func_sanitize);

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function. This should include optimization settings and so on.
    NewAttrs = NewAttrs.addFnAttributes(
        Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));

    addFramePointerAttrs(NewAttrs, Context, 0, Shape.FrameSize,
                         Shape.FrameAlign, /*NoAlias=*/false);
    break;
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
    if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
                                Attribute::SwiftAsync)) {
      uint32_t ArgAttributeIndices =
          ActiveAsyncSuspend->getStorageArgumentIndex();
      auto ContextArgIndex = ArgAttributeIndices & 0xff;
      addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);

      // `swiftasync` must precede `swiftself`, so 0 is not a valid index for
      // `swiftself`.
      auto SwiftSelfIndex = ArgAttributeIndices >> 8;
      if (SwiftSelfIndex)
        addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
    }

    // Transfer the original function's attributes.
    auto FnAttrs = OrigF.getAttributes().getFnAttrs();
    NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
    break;
  }
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    /// FIXME: Is it really good to add the NoAlias attribute?
    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.getRetconCoroId()->getStorageSize(),
                         Shape.getRetconCoroId()->getStorageAlignment(),
                         /*NoAlias=*/true);

    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless. Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(Return);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail calls at all suspend points, each
  // followed by a return. Don't change those returns to unreachable, because
  // that would trip up the verifier. These returns should be unreachable from
  // the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  // Turn symmetric transfers into musttail calls.
  for (CallInst *ResumeCall : Shape.SymmetricTransfers) {
    ResumeCall = cast<CallInst>(VMap[ResumeCall]);
    if (TTI.supportsTailCallFor(ResumeCall)) {
      // FIXME: Could we support symmetric transfer effectively without
      // musttail?
      ResumeCall->setTailCallKind(CallInst::TCK_MustTail);
    }

    // Put a 'ret void' after the call, and split any remaining instructions to
    // an unreachable block.
    BasicBlock *BB = ResumeCall->getParent();
    BB->splitBasicBlock(ResumeCall->getNextNode());
    Builder.SetInsertPoint(BB->getTerminator());
    Builder.CreateRetVoid();
    BB->getTerminator()->eraseFromParent();
  }

  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap the frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(OldFramePtr);
  OldFramePtr->replaceAllUsesWith(NewFramePtr);

  // Remap the vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      NewFramePtr, PointerType::getUnqual(Builder.getContext()), "vFrame");
  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
  if (OldVFrame != NewVFrame)
    OldVFrame->replaceAllUsesWith(NewVFrame);

  // All uses of the arguments should have been resolved by this point,
  // so we can safely remove the dummy values.
  for (Instruction *DummyArg : DummyArgs) {
    DummyArg->replaceAllUsesWith(PoisonValue::get(DummyArg->getType()));
    DummyArg->deleteValue();
  }

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling, as it is not done via the switch (this
    // allows us to remove the final case from the switch, since it is
    // undefined behavior to resume a coroutine suspended at the final suspend
    // point).
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  replaceCoroIsInRamp();

  // Salvage debug info that points into the coroutine frame.
  salvageDebugInfo();
}

void coro::SwitchCloner::create() {
  // Create a new function matching the original type.
  NewF = createCloneDeclaration(OrigF, Shape, Suffix, OrigF.getParent()->end(),
                                ActiveSuspend);

  // Clone the function.
  coro::BaseCloner::create();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
                        /*Elide=*/FKind == coro::CloneKind::SwitchCleanup);
}

static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Async);

  auto *FuncPtrStruct = cast<ConstantStruct>(
      Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
  auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
  auto *OrigContextSize = FuncPtrStruct->getOperand(1);
  auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
                                          Shape.AsyncLowering.ContextSize);
  auto *NewFuncPtrStruct = ConstantStruct::get(
      FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);

  Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
}

static TypeSize getFrameSizeForShape(coro::Shape &Shape) {
  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  Module *M = SizeIntrin->getModule();
  const DataLayout &DL = M->getDataLayout();
  return DL.getTypeAllocSize(Shape.FrameTy);
}

static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
  if (Shape.ABI == coro::ABI::Async)
    updateAsyncFuncPointerContextSize(Shape);

  for (CoroAlignInst *CA : Shape.CoroAligns) {
    CA->replaceAllUsesWith(
        ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
    CA->eraseFromParent();
  }

  if (Shape.CoroSizes.empty())
    return;

  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  auto *SizeConstant =
      ConstantInt::get(SizeIntrin->getType(), getFrameSizeForShape(Shape));

  for (CoroSizeInst *CS : Shape.CoroSizes) {
    CS->replaceAllUsesWith(SizeConstant);
    CS->eraseFromParent();
  }
}

static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

#ifndef NDEBUG
  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");
#endif
}

// Coroutine has no suspend points. Remove heap allocation for the coroutine
// frame if possible.
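//
// Illustrative IR for the switch ABI when coro.alloc is present:
//
//   %need.alloc = call i1 @llvm.coro.alloc(token %id)  ; replaced with false
//   ...
//   %hdl = call ptr @llvm.coro.begin(token %id, ptr %mem)
//
// with coro.begin's uses redirected to a stack allocation instead:
//
//   %frame = alloca %f.Frame, align <FrameAlign>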
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = Shape.getSwitchCoroId();
    auto *AllocInst = SwitchId->getCoroAlloc();
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(Frame);
    } else {
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }

    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(PoisonValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
  Shape.CoroBegin = nullptr;
}

// SimplifySuspendPoint needs to check that there are no calls between
// coro.save and coro.suspend, since any of the calls may potentially resume
// the coroutine, in which case we cannot eliminate the suspend point.
static bool hasCallsInBlockBetween(iterator_range<BasicBlock::iterator> R) {
  for (Instruction &I : R) {
    // Assume that no intrinsic can resume the coroutine.
    if (isa<IntrinsicInst>(I))
      continue;

    if (isa<CallBase>(I))
      return true;
  }
  return false;
}

static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
  SmallPtrSet<BasicBlock *, 8> Set;
  SmallVector<BasicBlock *, 8> Worklist;

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by the suspend instruction, all blocks in
  // between will have to eventually hit SaveBB when going backwards from
  // ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (!Set.contains(Pred))
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  for (auto *BB : Set)
    if (hasCallsInBlockBetween({BB->getFirstNonPHIIt(), BB->end()}))
      return true;

  return false;
}

static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
  auto *SaveBB = Save->getParent();
  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
  BasicBlock::iterator SaveIt = Save->getIterator();
  BasicBlock::iterator ResumeOrDestroyIt = ResumeOrDestroy->getIterator();

  if (SaveBB == ResumeOrDestroyBB)
    return hasCallsInBlockBetween({std::next(SaveIt), ResumeOrDestroyIt});

  // Any calls from Save to the end of the block?
  if (hasCallsInBlockBetween({std::next(SaveIt), SaveBB->end()}))
    return true;

  // Any calls from the beginning of the block up to ResumeOrDestroy?
  if (hasCallsInBlockBetween(
          {ResumeOrDestroyBB->getFirstNonPHIIt(), ResumeOrDestroyIt}))
    return true;

  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
    return true;

  return false;
}

// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
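//
// Illustrative IR, with names abbreviated:
//
//   %save = call token @llvm.coro.save(ptr %hdl)
//   %fn = call ptr @llvm.coro.subfn.addr(ptr %hdl, i8 0) ; 0 = resume
//   call fastcc void %fn(ptr %hdl)
//   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
//
// The resume call and the suspend are removed, and %s is replaced by the
// constant index (i8 0 here), so control flows straight to the resume label.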
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // Does not refer to the current coroutine; we cannot do anything with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine, rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
    BranchInst::Create(Invoke->getNormalDest(), Invoke->getIterator());
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If it has no more users, remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}

// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;

  size_t ChangedFinalIndex = std::numeric_limits<size_t>::max();
  while (true) {
    auto SI = cast<CoroSuspendInst>(S[I]);
    // Leave final.suspend to handleFinalSuspend, since it is undefined
    // behavior to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
      if (--N == I)
        break;

      std::swap(S[I], S[N]);

      if (cast<CoroSuspendInst>(S[I])->isFinal()) {
        assert(Shape.SwitchLowering.HasFinalSuspend);
        ChangedFinalIndex = I;
      }

      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);

  // Maintain final.suspend in case the final suspend was swapped, since we
  // require the final suspend to be the last element of CoroSuspends.
  if (ChangedFinalIndex < N) {
    assert(cast<CoroSuspendInst>(S[ChangedFinalIndex])->isFinal());
    std::swap(S[ChangedFinalIndex], S.back());
  }
}

namespace {

struct SwitchCoroutineSplitter {
  static void split(Function &F, coro::Shape &Shape,
                    SmallVectorImpl<Function *> &Clones,
                    TargetTransformInfo &TTI) {
    assert(Shape.ABI == coro::ABI::Switch);

    // Create a resume clone by cloning the body of the original function,
    // setting a new entry block and replacing coro.suspend with an
    // appropriate value to force resumption or cleanup at every suspend
    // point.
    createResumeEntryBlock(F, Shape);
    auto *ResumeClone = coro::SwitchCloner::createClone(
        F, ".resume", Shape, coro::CloneKind::SwitchResume, TTI);
    auto *DestroyClone = coro::SwitchCloner::createClone(
        F, ".destroy", Shape, coro::CloneKind::SwitchUnwind, TTI);
    auto *CleanupClone = coro::SwitchCloner::createClone(
        F, ".cleanup", Shape, coro::CloneKind::SwitchCleanup, TTI);

    postSplitCleanup(*ResumeClone);
    postSplitCleanup(*DestroyClone);
    postSplitCleanup(*CleanupClone);

    // Store the addresses of the resume/destroy/cleanup functions in the
    // coroutine frame.
    updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);

    assert(Clones.empty());
    Clones.push_back(ResumeClone);
    Clones.push_back(DestroyClone);
    Clones.push_back(CleanupClone);

    // Create a constant array referring to the resume/destroy/cleanup
    // functions, pointed to by the last argument of @llvm.coro.info, so that
    // the CoroElide pass can determine the correct function to call.
    setCoroInfo(F, Shape, Clones);
  }

  // Create a variant of the ramp function that does not perform heap
  // allocation for a switch ABI coroutine.
  //
  // The newly split `.noalloc` ramp function differs as follows:
  // - It has one additional frame pointer parameter in lieu of dynamic
  //   allocation.
  // - It suppresses allocations by replacing coro.alloc and coro.free.
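  //
  // Illustrative signature change (the `.noalloc` suffix as used below):
  //
  //   declare void @f(i32 %x)                      ; original ramp
  //   declare void @f.noalloc(i32 %x, ptr %frame)  ; noalloc variant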
1412 static Function *createNoAllocVariant(Function &F, coro::Shape &Shape,
1413 SmallVectorImpl<Function *> &Clones) {
1414 assert(Shape.ABI == coro::ABI::Switch);
1415 auto *OrigFnTy = F.getFunctionType();
1416 auto OldParams = OrigFnTy->params();
1417
1418 SmallVector<Type *> NewParams;
1419 NewParams.reserve(N: OldParams.size() + 1);
1420 NewParams.append(in_start: OldParams.begin(), in_end: OldParams.end());
1421 NewParams.push_back(Elt: PointerType::getUnqual(C&: Shape.FrameTy->getContext()));
1422
1423 auto *NewFnTy = FunctionType::get(Result: OrigFnTy->getReturnType(), Params: NewParams,
1424 isVarArg: OrigFnTy->isVarArg());
1425 Function *NoAllocF =
1426 Function::Create(Ty: NewFnTy, Linkage: F.getLinkage(), N: F.getName() + ".noalloc");
1427
1428 ValueToValueMapTy VMap;
1429 unsigned int Idx = 0;
1430 for (const auto &I : F.args()) {
1431 VMap[&I] = NoAllocF->getArg(i: Idx++);
1432 }
1433 // We just appended the frame pointer as the last argument of the new
1434 // function.
1435 auto FrameIdx = NoAllocF->arg_size() - 1;
1436 SmallVector<ReturnInst *, 4> Returns;
1437 CloneFunctionInto(NewFunc: NoAllocF, OldFunc: &F, VMap,
1438 Changes: CloneFunctionChangeType::LocalChangesOnly, Returns);
1439
1440 if (Shape.CoroBegin) {
1441 auto *NewCoroBegin =
1442 cast_if_present<CoroBeginInst>(Val&: VMap[Shape.CoroBegin]);
1443 auto *NewCoroId = cast<CoroIdInst>(Val: NewCoroBegin->getId());
1444 coro::replaceCoroFree(CoroId: NewCoroId, /*Elide=*/true);
1445 coro::suppressCoroAllocs(CoroId: NewCoroId);
1446 NewCoroBegin->replaceAllUsesWith(V: NoAllocF->getArg(i: FrameIdx));
1447 NewCoroBegin->eraseFromParent();
1448 }
1449
1450 Module *M = F.getParent();
1451 M->getFunctionList().insert(where: M->end(), New: NoAllocF);
1452
1453 removeUnreachableBlocks(F&: *NoAllocF);
1454 auto NewAttrs = NoAllocF->getAttributes();
1455 // When we elide allocation, we read these attributes to determine the
1456 // frame size and alignment.
1457 addFramePointerAttrs(Attrs&: NewAttrs, Context&: NoAllocF->getContext(), ParamIndex: FrameIdx,
1458 Size: Shape.FrameSize, Alignment: Shape.FrameAlign,
1459 /*NoAlias=*/false);
1460
1461 NoAllocF->setAttributes(NewAttrs);
1462
1463 Clones.push_back(Elt: NoAllocF);
1464 // Reset the original function's coro info, make the new noalloc variant
1465 // connected to the original ramp function.
1466 setCoroInfo(F, Shape, Fns: Clones);
1467 // After copying, set the linkage to internal linkage. Original function
1468 // may have different linkage, but optimization dependent on this function
1469 // generally relies on LTO.
1470 NoAllocF->setLinkage(llvm::GlobalValue::InternalLinkage);
1471 return NoAllocF;
1472 }
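
  // Illustrative sketch of the signature change performed above, for a
  // hypothetical ramp `ptr @f(i32)`: the caller provides storage for the
  // frame through the extra trailing parameter, and the clone neither
  // allocates nor frees it.
  //
  //   ; before
  //   define ptr @f(i32 %x)
  //   ; after
  //   define internal ptr @f.noalloc(i32 %x, ptr %frame)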

private:
  // Create an entry block for a resume function with a switch that will jump
  // to the suspend points.
  static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
    LLVMContext &C = F.getContext();

    DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
    DISubprogram *DIS = F.getSubprogram();
    // If there is no DISubprogram for F, it implies the function is compiled
    // without debug info. So we also don't generate debug info for the
    // suspension points.
    bool AddDebugLabels = DIS && DIS->getUnit() &&
                          (DIS->getUnit()->getEmissionKind() ==
                           DICompileUnit::DebugEmissionKind::FullDebug);

    // resume.entry:
    //   %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
    //                 i32 0, i32 2
    //   %index = load i32, i32* %index.addr
    //   switch i32 %index, label %unreachable [
    //     i32 0, label %resume.0
    //     i32 1, label %resume.1
    //     ...
    //   ]

    auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
    auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);

    IRBuilder<> Builder(NewEntry);
    auto *FramePtr = Shape.FramePtr;
    auto *FrameTy = Shape.FrameTy;
    auto *GepIndex = Builder.CreateStructGEP(
        FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
    auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
    auto *Switch =
        Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
    Shape.SwitchLowering.ResumeSwitch = Switch;

    // Split all coro.suspend calls.
    size_t SuspendIndex = 0;
    for (auto *AnyS : Shape.CoroSuspends) {
      auto *S = cast<CoroSuspendInst>(AnyS);
      ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);

      // Replace CoroSave with a store to Index:
      //   %index.addr = getelementptr %f.frame... (index field number)
      //   store i32 %IndexVal, i32* %index.addr
      auto *Save = S->getCoroSave();
      Builder.SetInsertPoint(Save);
      if (S->isFinal()) {
        // The coroutine should be marked done if it reaches the final
        // suspend point.
        markCoroutineAsDone(Builder, Shape, FramePtr);
      } else {
        auto *GepIndex = Builder.CreateStructGEP(
            FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
        Builder.CreateStore(IndexVal, GepIndex);
      }

      Save->replaceAllUsesWith(ConstantTokenNone::get(C));
      Save->eraseFromParent();

      // Split the block before and after coro.suspend and add a jump from
      // the entry switch:
      //
      //   whateverBB:
      //     whatever
      //     %0 = call i8 @llvm.coro.suspend(token none, i1 false)
      //     switch i8 %0, label %suspend [i8 0, label %resume
      //                                   i8 1, label %cleanup]
      //
      // becomes:
      //
      //   whateverBB:
      //     whatever
      //     br label %resume.0.landing
      //
      //   resume.0: ; <--- jump from the switch in resume.entry
      //     #dbg_label(...) ; <--- artificial label for debuggers
      //     %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
      //     br label %resume.0.landing
      //
      //   resume.0.landing:
      //     %1 = phi i8 [-1, %whateverBB], [%0, %resume.0]
      //     switch i8 %1, label %suspend [i8 0, label %resume
      //                                   i8 1, label %cleanup]

      auto *SuspendBB = S->getParent();
      auto *ResumeBB =
          SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
      auto *LandingBB = ResumeBB->splitBasicBlock(
          S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
      Switch->addCase(IndexVal, ResumeBB);

      cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
      auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "");
      PN->insertBefore(LandingBB->begin());
      S->replaceAllUsesWith(PN);
      PN->addIncoming(Builder.getInt8(-1), SuspendBB);
      PN->addIncoming(S, ResumeBB);

      if (AddDebugLabels) {
        if (DebugLoc SuspendLoc = S->getDebugLoc()) {
          std::string LabelName =
              ("__coro_resume_" + Twine(SuspendIndex)).str();
          // Take the "inlined at" location recursively, if present. This is
          // mandatory because the DILabel insertion checks that the scopes of
          // the label and the attached location match. This is not the case
          // when the suspend location has been inlined, since it then points
          // to the original scope.
          DILocation *DILoc = SuspendLoc;
          while (DILocation *InlinedAt = DILoc->getInlinedAt())
            DILoc = InlinedAt;

          DILabel *ResumeLabel =
              DBuilder.createLabel(DIS, LabelName, DILoc->getFile(),
                                   SuspendLoc.getLine(), SuspendLoc.getCol(),
                                   /*IsArtificial=*/true,
                                   /*CoroSuspendIdx=*/SuspendIndex,
                                   /*AlwaysPreserve=*/false);
          DBuilder.insertLabel(ResumeLabel, DILoc, ResumeBB->begin());
        }
      }

      ++SuspendIndex;
    }

    Builder.SetInsertPoint(UnreachBB);
    Builder.CreateUnreachable();
    DBuilder.finalize();

    Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
  }

  // Store the addresses of the Resume/Destroy/Cleanup functions in the
  // coroutine frame.
  static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                              Function *DestroyFn, Function *CleanupFn) {
    IRBuilder<> Builder(&*Shape.getInsertPtAfterFramePtr());

    auto *ResumeAddr = Builder.CreateStructGEP(
        Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
        "resume.addr");
    Builder.CreateStore(ResumeFn, ResumeAddr);

    Value *DestroyOrCleanupFn = DestroyFn;

    CoroIdInst *CoroId = Shape.getSwitchCoroId();
    if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
      // If there is a CoroAlloc and it returns false (meaning we elide the
      // allocation), use CleanupFn instead of DestroyFn.
      DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
    }

    auto *DestroyAddr = Builder.CreateStructGEP(
        Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
        "destroy.addr");
    Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
  }
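
  // Illustrative sketch of the stores emitted above, assuming a frame type
  // %f.Frame whose first two fields hold the resume and destroy pointers
  // (the layout shown is an assumption for illustration only):
  //
  //   %resume.addr = getelementptr %f.Frame, ptr %frame, i32 0, i32 0
  //   store ptr @f.resume, ptr %resume.addr
  //   %sel = select i1 %coro.alloc, ptr @f.destroy, ptr @f.cleanup
  //   %destroy.addr = getelementptr %f.Frame, ptr %frame, i32 0, i32 1
  //   store ptr %sel, ptr %destroy.addr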

  // Create a global constant array containing pointers to the functions
  // provided, and set the Info parameter of CoroBegin to point at this
  // constant. Example:
  //
  //   @f.resumers = internal constant [2 x void(%f.frame*)*]
  //                 [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
  //   define void @f() {
  //     ...
  //     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
  //         i8* bitcast([2 x void(%f.frame*)*]* @f.resumers to i8*))
  //
  // Assumes that all the functions have the same signature.
  static void setCoroInfo(Function &F, coro::Shape &Shape,
                          ArrayRef<Function *> Fns) {
    // This only works under the switch-lowering ABI because coro elision
    // only works on the switch-lowering ABI.
    SmallVector<Constant *, 4> Args(Fns);
    assert(!Args.empty());
    Function *Part = *Fns.begin();
    Module *M = Part->getParent();
    auto *ArrTy = ArrayType::get(Part->getType(), Args.size());

    auto *ConstVal = ConstantArray::get(ArrTy, Args);
    auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                  GlobalVariable::PrivateLinkage, ConstVal,
                                  F.getName() + Twine(".resumers"));

    // Update the coro.begin instruction to refer to this constant.
    LLVMContext &C = F.getContext();
    auto *BC = ConstantExpr::getPointerCast(GV, PointerType::getUnqual(C));
    Shape.getSwitchCoroId()->setInfo(BC);
  }
};

} // namespace

static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
                                       Value *Continuation) {
  auto *ResumeIntrinsic = Suspend->getResumeFunction();
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *Int8PtrTy = PointerType::getUnqual(Context);

  IRBuilder<> Builder(ResumeIntrinsic);
  auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
  ResumeIntrinsic->replaceAllUsesWith(Val);
  ResumeIntrinsic->eraseFromParent();
  Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
                      PoisonValue::get(Int8PtrTy));
}
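
// Illustrative sketch of the rewrite above (continuation name hypothetical):
// the llvm.coro.async.resume call disappears, its uses point directly at the
// continuation, and the suspend's resume-function operand becomes poison.
//
//   ; before
//   %resume = call ptr @llvm.coro.async.resume()
//   call {...} @llvm.coro.suspend.async(..., ptr %resume, ...)
//   ; after
//   call {...} @llvm.coro.suspend.async(..., ptr poison, ...)
//   ; other uses of %resume now reference @f.resume.0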

/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
                            ArrayRef<Value *> FnArgs,
                            SmallVectorImpl<Value *> &CallArgs) {
  size_t ArgIdx = 0;
  for (auto *paramTy : FnTy->params()) {
    assert(ArgIdx < FnArgs.size());
    if (paramTy != FnArgs[ArgIdx]->getType())
      CallArgs.push_back(
          Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
    else
      CallArgs.push_back(FnArgs[ArgIdx]);
    ++ArgIdx;
  }
}
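
// For example (illustrative, hypothetical types): if the callee type expects
// (ptr, i64) but the collected arguments are (ptr, ptr), the second argument
// is rewritten through CreateBitOrPointerCast as
//
//   %coerced = ptrtoint ptr %arg1 to i64
//
// while the first argument is passed through unchanged.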

CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
                                   TargetTransformInfo &TTI,
                                   ArrayRef<Value *> Arguments,
                                   IRBuilder<> &Builder) {
  auto *FnTy = MustTailCallFn->getFunctionType();
  // Coerce the arguments: LLVM optimizations seem to ignore the types in
  // vararg functions and throw away casts in optimized mode.
  SmallVector<Value *, 8> CallArgs;
  coerceArguments(Builder, FnTy, Arguments, CallArgs);

  auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
  // Skip targets which don't support tail calls.
  if (TTI.supportsTailCallFor(TailCall)) {
    TailCall->setTailCallKind(CallInst::TCK_MustTail);
  }
  TailCall->setDebugLoc(Loc);
  TailCall->setCallingConv(MustTailCallFn->getCallingConv());
  return TailCall;
}
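
// Illustrative result on a target that supports tail calls (callee and
// calling convention are hypothetical):
//
//   musttail call swifttailcc void @f.resume.0(ptr %ctx)
//   ret void
//
// The musttail marking guarantees that the current funclet's frame is torn
// down before the continuation runs.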

void coro::AsyncABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                    SmallVectorImpl<Function *> &Clones,
                                    TargetTransformInfo &TTI) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeRetAttr(Attribute::NoAlias);
  F.removeRetAttr(Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = PointerType::getUnqual(Context);

  auto *Id = Shape.getAsyncCoroId();
  IRBuilder<> Builder(Id);

  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
  FramePtr = Builder.CreateConstInBoundsGEP1_32(
      Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
      "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(CS);

    // Create the clone declaration.
    auto ResumeNameSuffix = ".resume.";
    auto ProjectionFunctionName =
        Suspend->getAsyncContextProjectionFunction()->getName();
    bool UseSwiftMangling = false;
    if (ProjectionFunctionName == "__swift_async_resume_project_context") {
      ResumeNameSuffix = "TQ";
      UseSwiftMangling = true;
    } else if (ProjectionFunctionName == "__swift_async_resume_get_context") {
      ResumeNameSuffix = "TY";
      UseSwiftMangling = true;
    }
    auto *Continuation = createCloneDeclaration(
        F, Shape,
        UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
                         : ResumeNameSuffix + Twine(Idx),
        NextF, Suspend);
    Clones.push_back(Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Place it before the first suspend.
    auto *ReturnBB =
        BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
    Branch->setSuccessor(0, ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail call function and inline it.
    auto *Fn = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->args());
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(
        CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
    auto *TailCall = coro::createMustTailCall(Suspend->getDebugLoc(), Fn, TTI,
                                              FnArgs, Builder);
    Builder.CreateRetVoid();
    InlineFunctionInfo FnInfo;
    (void)InlineFunction(*TailCall, FnInfo);

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());

  for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
    auto *Suspend = CS;
    auto *Clone = Clones[Idx];

    coro::BaseCloner::createClone(F, "resume." + Twine(Idx), Shape, Clone,
                                  Suspend, TTI);
  }
}
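
// Illustrative shape of the ramp after the loop above, for one suspend point
// (block and function names hypothetical): the block containing the suspend
// is split, and its predecessor now branches to a return block that
// musttail-calls the suspend's must-tail-call function and returns void.
//
//   coro.return:
//     musttail call swifttailcc void @mustTailFn(ptr %ctx, ...)
//     ret void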

void coro::AnyRetconABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                        SmallVectorImpl<Function *> &Clones,
                                        TargetTransformInfo &TTI) {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeRetAttr(Attribute::NoAlias);
  F.removeRetAttr(Attribute::NonNull);

  // Allocate the frame.
  auto *Id = Shape.getRetconCoroId();
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    // Determine the size of the frame.
    const DataLayout &DL = F.getDataLayout();
    auto Size = DL.getTypeAllocSize(Shape.FrameTy);

    // Allocate. We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
    RawFramePtr =
        Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    Builder.CreateStore(RawFramePtr, Id->getStorage());
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block.
  BasicBlock *ReturnBB = nullptr;
  PHINode *ContinuationPhi = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
    auto Suspend = cast<CoroSuspendRetconInst>(CS);

    // Create the clone declaration.
    auto Continuation = createCloneDeclaration(
        F, Shape, ".resume." + Twine(Idx), NextF, nullptr);
    Clones.push_back(Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Create the unified return block.
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB =
          BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // First, the continuation.
      ContinuationPhi =
          Builder.CreatePHI(Continuation->getType(), Shape.CoroSuspends.size());

      // Create PHIs for all other return values.
      assert(ReturnPHIs.empty());

      // Next, all the directly-yielded values.
      for (auto *ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(
            Builder.CreatePHI(ResultTy, Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.empty() ? RetTy : RetTy->getStructElementType(0));
      auto *CastedContinuation =
          Builder.CreateBitCast(ContinuationPhi, CastedContinuationTy);

      Value *RetV = CastedContinuation;
      if (!ReturnPHIs.empty()) {
        auto ValueIdx = 0;
        RetV = PoisonValue::get(RetTy);
        RetV = Builder.CreateInsertValue(RetV, CastedContinuation, ValueIdx++);

        for (auto Phi : ReturnPHIs)
          RetV = Builder.CreateInsertValue(RetV, Phi, ValueIdx++);
      }

      Builder.CreateRet(RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(0, ReturnBB);
    assert(ContinuationPhi);
    ContinuationPhi->addIncoming(Continuation, SuspendBB);
    for (auto [Phi, VUse] :
         llvm::zip_equal(ReturnPHIs, Suspend->value_operands()))
      Phi->addIncoming(VUse, SuspendBB);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());

  for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
    auto Suspend = CS;
    auto Clone = Clones[Idx];

    coro::BaseCloner::createClone(F, "resume." + Twine(Idx), Shape, Clone,
                                  Suspend, TTI);
  }
}
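
// Illustrative unified return block for a retcon coroutine yielding one i32
// (types and names hypothetical): every suspend branches here, returning the
// next continuation together with the yielded value.
//
//   coro.return:
//     %cont = phi ptr [ @f.resume.0, %bb0 ], [ @f.resume.1, %bb1 ]
//     %val = phi i32 [ %x, %bb0 ], [ %y, %bb1 ]
//     %agg0 = insertvalue { ptr, i32 } poison, ptr %cont, 0
//     %agg1 = insertvalue { ptr, i32 } %agg0, i32 %val, 1
//     ret { ptr, i32 } %agg1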

namespace {
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F;

public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(OS, /*print type*/ false, F.getParent());
    OS << "\n";
  }
};
} // namespace

/// Remove calls to llvm.coro.end in the original function.
static void removeCoroEndsFromRampFunction(const coro::Shape &Shape) {
  if (Shape.ABI != coro::ABI::Switch) {
    for (auto *End : Shape.CoroEnds) {
      replaceCoroEnd(End, Shape, Shape.FramePtr, /*in ramp*/ true, nullptr);
    }
  } else {
    for (llvm::AnyCoroEndInst *End : Shape.CoroEnds)
      End->eraseFromParent();
  }
}

static void removeCoroIsInRampFromRampFunction(const coro::Shape &Shape) {
  for (auto *II : Shape.CoroIsInRampInsts) {
    auto &Ctx = II->getContext();
    II->replaceAllUsesWith(ConstantInt::getTrue(Ctx));
    II->eraseFromParent();
  }
}

// Check whether F has at least one caller that is itself a pre-split
// coroutine and that marks the call site as safe for coroutine elision.
static bool hasSafeElideCaller(Function &F) {
  for (auto *U : F.users()) {
    if (auto *CB = dyn_cast<CallBase>(U)) {
      auto *Caller = CB->getFunction();
      if (Caller && Caller->isPresplitCoroutine() &&
          CB->hasFnAttr(llvm::Attribute::CoroElideSafe))
        return true;
    }
  }
  return false;
}
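
// Illustrative call site that satisfies hasSafeElideCaller, assuming the
// attribute's IR spelling below (the caller must itself be a pre-split
// coroutine):
//
//   %hdl = call ptr @f(i32 %n) #0
//   ...
//   attributes #0 = { coro_elide_safe }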

void coro::SwitchABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                     SmallVectorImpl<Function *> &Clones,
                                     TargetTransformInfo &TTI) {
  SwitchCoroutineSplitter::split(F, Shape, Clones, TTI);
}

static void doSplitCoroutine(Function &F, SmallVectorImpl<Function *> &Clones,
                             coro::BaseABI &ABI, TargetTransformInfo &TTI,
                             bool OptimizeFrame) {
  PrettyStackTraceFunction prettyStackTrace(F);

  auto &Shape = ABI.Shape;
  assert(Shape.CoroBegin);

  lowerAwaitSuspends(F, Shape);

  simplifySuspendPoints(Shape);

  normalizeCoroutine(F, Shape, TTI);
  ABI.buildCoroutineFrame(OptimizeFrame);
  replaceFrameSizeAndAlignment(Shape);

  bool isNoSuspendCoroutine = Shape.CoroSuspends.empty();

  bool shouldCreateNoAllocVariant =
      !isNoSuspendCoroutine && Shape.ABI == coro::ABI::Switch &&
      hasSafeElideCaller(F) && !F.hasFnAttribute(llvm::Attribute::NoInline);

  // If there are no suspend points, no split is required; just remove the
  // allocation and deallocation blocks, as they are not needed.
  if (isNoSuspendCoroutine) {
    handleNoSuspendCoroutine(Shape);
  } else {
    ABI.splitCoroutine(F, Shape, Clones, TTI);
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, nullptr);

  // Salvage debug intrinsics that point into the coroutine frame in the
  // original function. The Cloner has already salvaged debug info in the new
  // coroutine funclets.
  SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
  auto DbgVariableRecords = collectDbgVariableRecords(F);
  for (DbgVariableRecord *DVR : DbgVariableRecords)
    coro::salvageDebugInfo(ArgToAllocaMap, *DVR, /*UseEntryValue=*/false);

  removeCoroEndsFromRampFunction(Shape);
  removeCoroIsInRampFromRampFunction(Shape);

  if (shouldCreateNoAllocVariant)
    SwitchCoroutineSplitter::createNoAllocVariant(F, Shape, Clones);
}

static LazyCallGraph::SCC &updateCallGraphAfterCoroutineSplit(
    LazyCallGraph::Node &N, const coro::Shape &Shape,
    const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
    LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM) {

  auto *CurrentSCC = &C;
  if (!Clones.empty()) {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      // Each clone in the Switch lowering is independent of the other clones.
      // Let the LazyCallGraph know about each one separately.
      for (Function *Clone : Clones)
        CG.addSplitFunction(N.getFunction(), *Clone);
      break;
    case coro::ABI::Async:
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      // Each clone in the Async/Retcon lowering references each of the other
      // clones. Let the LazyCallGraph know about all of them at once.
      CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
      break;
    }

    // Let the CGSCC infra handle the changes to the original function.
    CurrentSCC = &updateCGAndAnalysisManagerForCGSCCPass(CG, *CurrentSCC, N,
                                                         AM, UR, FAM);
  }

  // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
  // to the split functions.
  postSplitCleanup(N.getFunction());
  CurrentSCC = &updateCGAndAnalysisManagerForFunctionPass(CG, *CurrentSCC, N,
                                                          AM, UR, FAM);
  return *CurrentSCC;
}

/// Replace a call to llvm.coro.prepare.retcon.
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>(U.getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
                               LazyCallGraph::SCC &C) {
  bool Changed = false;
  for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>(P.getUser());
    replacePrepare(Prepare, CG, C);
    Changed = true;
  }

  return Changed;
}

static void addPrepareFunction(const Module &M,
                               SmallVectorImpl<Function *> &Fns,
                               StringRef Name) {
  auto *PrepareFn = M.getFunction(Name);
  if (PrepareFn && !PrepareFn->use_empty())
    Fns.push_back(PrepareFn);
}

static std::unique_ptr<coro::BaseABI>
CreateNewABI(Function &F, coro::Shape &S,
             std::function<bool(Instruction &)> IsMatCallback,
             const SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs) {
  if (S.CoroBegin->hasCustomABI()) {
    unsigned CustomABI = S.CoroBegin->getCustomABI();
    if (CustomABI >= GenCustomABIs.size())
      llvm_unreachable("Custom ABI not found among those specified");
    return GenCustomABIs[CustomABI](F, S);
  }

  switch (S.ABI) {
  case coro::ABI::Switch:
    return std::make_unique<coro::SwitchABI>(F, S, IsMatCallback);
  case coro::ABI::Async:
    return std::make_unique<coro::AsyncABI>(F, S, IsMatCallback);
  case coro::ABI::Retcon:
    return std::make_unique<coro::AnyRetconABI>(F, S, IsMatCallback);
  case coro::ABI::RetconOnce:
    return std::make_unique<coro::AnyRetconABI>(F, S, IsMatCallback);
  }
  llvm_unreachable("Unknown ABI");
}
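
// Minimal sketch of registering a custom ABI generator consumed by
// CreateNewABI above. MyABI is a hypothetical subclass of coro::BaseABI, and
// the coro.begin in the coroutine must carry the matching custom ABI index;
// init() is invoked afterwards by the pass's CreateAndInitABI wrapper.
//
//   CoroSplitPass Pass({[](Function &F, coro::Shape &S) {
//     return std::unique_ptr<coro::BaseABI>(new MyABI(F, S));
//   }});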

CoroSplitPass::CoroSplitPass(bool OptimizeFrame)
    : CreateAndInitABI([](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, coro::isTriviallyMaterializable, {});
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}

CoroSplitPass::CoroSplitPass(
    SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs, bool OptimizeFrame)
    : CreateAndInitABI([=](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, coro::isTriviallyMaterializable, GenCustomABIs);
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}

// For backward compatibility, this constructor takes a materializable
// callback and creates a generator for an ABI with a modified materializable
// callback.
CoroSplitPass::CoroSplitPass(std::function<bool(Instruction &)> IsMatCallback,
                             bool OptimizeFrame)
    : CreateAndInitABI([=](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, IsMatCallback, {});
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}

// For backward compatibility, this constructor takes a materializable
// callback and creates a generator for an ABI with a modified materializable
// callback.
CoroSplitPass::CoroSplitPass(
    std::function<bool(Instruction &)> IsMatCallback,
    SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs, bool OptimizeFrame)
    : CreateAndInitABI([=](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, IsMatCallback, GenCustomABIs);
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}
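
// Typical scheduling (sketch): CoroSplit is a CGSCC pass, so it is normally
// added to a module pipeline through the CGSCC adaptor, e.g.
//
//   ModulePassManager MPM;
//   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(CoroSplitPass()));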

PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
  // non-zero number of nodes, so we assume that here and grab the first
  // node's function's module.
  Module &M = *C.begin()->getFunction().getParent();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  // Check for uses of llvm.coro.prepare.retcon/async.
  SmallVector<Function *, 2> PrepareFns;
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

  // Find coroutines for processing.
  SmallVector<LazyCallGraph::Node *> Coroutines;
  for (LazyCallGraph::Node &N : C)
    if (N.getFunction().isPresplitCoroutine())
      Coroutines.push_back(&N);

  if (Coroutines.empty() && PrepareFns.empty())
    return PreservedAnalyses::all();

  auto *CurrentSCC = &C;
  // Split all the coroutines.
  for (LazyCallGraph::Node *N : Coroutines) {
    Function &F = N->getFunction();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "'\n");

    // The suspend-crossing algorithm in buildCoroutineFrame gets tripped up
    // by unreachable blocks, so remove them as a first pass. Remove the
    // unreachable blocks before collecting intrinsics into Shape.
    removeUnreachableBlocks(F);

    coro::Shape Shape(F);
    if (!Shape.CoroBegin)
      continue;

    F.setSplittedCoroutine();

    std::unique_ptr<coro::BaseABI> ABI = CreateAndInitABI(F, Shape);

    SmallVector<Function *, 4> Clones;
    auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
    doSplitCoroutine(F, Clones, *ABI, TTI, OptimizeFrame);
    CurrentSCC = &updateCallGraphAfterCoroutineSplit(
        *N, Shape, Clones, *CurrentSCC, CG, AM, UR, FAM);

    auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "CoroSplit", &F)
             << "Split '" << ore::NV("function", F.getName())
             << "' (frame_size=" << ore::NV("frame_size", Shape.FrameSize)
             << ", align=" << ore::NV("align", Shape.FrameAlign.value())
             << ")";
    });

    if (!Shape.CoroSuspends.empty()) {
      // Run the CGSCC pipeline on the original and newly split functions.
      UR.CWorklist.insert(CurrentSCC);
      for (Function *Clone : Clones)
        UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
    } else if (Shape.ABI == coro::ABI::Async) {
      // Reprocess the function to inline the tail-called return function of
      // coro.async.end.
      UR.CWorklist.insert(&C);
    }
  }

  for (auto *PrepareFn : PrepareFns) {
    replaceAllPrepares(PrepareFn, CG, *CurrentSCC);
  }

  return PreservedAnalyses::none();
}