1//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This pass builds the coroutine frame and outlines resume and destroy parts
9// of the coroutine into separate functions.
10//
11// We present a coroutine to an LLVM as an ordinary function with suspension
12// points marked up with intrinsics. We let the optimizer party on the coroutine
13// as a single function for as long as possible. Shortly before the coroutine is
14// eligible to be inlined into its callers, we split up the coroutine into parts
15// corresponding to an initial, resume and destroy invocations of the coroutine,
16// add them to the current SCC and restart the IPO pipeline to optimize the
17// coroutine subfunctions we extracted before proceeding to the caller of the
18// coroutine.
19//===----------------------------------------------------------------------===//
20
21#include "llvm/Transforms/Coroutines/CoroSplit.h"
22#include "CoroCloner.h"
23#include "CoroInternal.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/PriorityWorklist.h"
26#include "llvm/ADT/STLExtras.h"
27#include "llvm/ADT/SmallPtrSet.h"
28#include "llvm/ADT/SmallVector.h"
29#include "llvm/ADT/StringExtras.h"
30#include "llvm/ADT/StringRef.h"
31#include "llvm/ADT/Twine.h"
32#include "llvm/Analysis/CFG.h"
33#include "llvm/Analysis/CallGraph.h"
34#include "llvm/Analysis/ConstantFolding.h"
35#include "llvm/Analysis/LazyCallGraph.h"
36#include "llvm/Analysis/OptimizationRemarkEmitter.h"
37#include "llvm/Analysis/TargetTransformInfo.h"
38#include "llvm/BinaryFormat/Dwarf.h"
39#include "llvm/IR/Argument.h"
40#include "llvm/IR/Attributes.h"
41#include "llvm/IR/BasicBlock.h"
42#include "llvm/IR/CFG.h"
43#include "llvm/IR/CallingConv.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DIBuilder.h"
46#include "llvm/IR/DataLayout.h"
47#include "llvm/IR/DebugInfo.h"
48#include "llvm/IR/DerivedTypes.h"
49#include "llvm/IR/Dominators.h"
50#include "llvm/IR/GlobalValue.h"
51#include "llvm/IR/GlobalVariable.h"
52#include "llvm/IR/InstIterator.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
55#include "llvm/IR/Instructions.h"
56#include "llvm/IR/IntrinsicInst.h"
57#include "llvm/IR/LLVMContext.h"
58#include "llvm/IR/Module.h"
59#include "llvm/IR/Type.h"
60#include "llvm/IR/Value.h"
61#include "llvm/IR/Verifier.h"
62#include "llvm/Support/Casting.h"
63#include "llvm/Support/Debug.h"
64#include "llvm/Support/PrettyStackTrace.h"
65#include "llvm/Support/raw_ostream.h"
66#include "llvm/Transforms/Coroutines/MaterializationUtils.h"
67#include "llvm/Transforms/Scalar.h"
68#include "llvm/Transforms/Utils/BasicBlockUtils.h"
69#include "llvm/Transforms/Utils/CallGraphUpdater.h"
70#include "llvm/Transforms/Utils/Cloning.h"
71#include "llvm/Transforms/Utils/Local.h"
72#include <cassert>
73#include <cstddef>
74#include <cstdint>
75#include <initializer_list>
76#include <iterator>
77
78using namespace llvm;
79
80#define DEBUG_TYPE "coro-split"
81
// FIXME:
// Lower the intrinsic in the CoroEarly phase if the coroutine frame doesn't
// escape and it is known that other transformations, for example, sanitizers,
// won't lead to incorrect code.
// Lower a single llvm.coro.await.suspend.{void,bool,handle} intrinsic into a
// direct call (or invoke) of its wrapper function, passing the awaiter and the
// coroutine frame. For the `handle` flavor, additionally emit the
// symmetric-transfer resume call on the returned handle; its tail-call and
// calling-convention fixup is deferred until after the function is split (the
// call is recorded in Shape.SymmetricTransfers).
static void lowerAwaitSuspend(IRBuilder<> &Builder, CoroAwaitSuspendInst *CB,
                              coro::Shape &Shape) {
  auto Wrapper = CB->getWrapperFunction();
  auto Awaiter = CB->getAwaiter();
  auto FramePtr = CB->getFrame();

  Builder.SetInsertPoint(CB);

  CallBase *NewCall = nullptr;
  // await_suspend has only 2 parameters, awaiter and handle.
  // Copy parameter attributes from the intrinsic call, but remove the last,
  // because the last parameter now becomes the function that is being called.
  AttributeList NewAttributes =
      CB->getAttributes().removeParamAttributes(C&: CB->getContext(), ArgNo: 2);

  if (auto Invoke = dyn_cast<InvokeInst>(Val: CB)) {
    // Preserve the exception edges and operand bundles of the original invoke.
    auto WrapperInvoke =
        Builder.CreateInvoke(Callee: Wrapper, NormalDest: Invoke->getNormalDest(),
                             UnwindDest: Invoke->getUnwindDest(), Args: {Awaiter, FramePtr});

    WrapperInvoke->setCallingConv(Invoke->getCallingConv());
    std::copy(first: Invoke->bundle_op_info_begin(), last: Invoke->bundle_op_info_end(),
              result: WrapperInvoke->bundle_op_info_begin());
    WrapperInvoke->setAttributes(NewAttributes);
    WrapperInvoke->setDebugLoc(Invoke->getDebugLoc());
    NewCall = WrapperInvoke;
  } else if (auto Call = dyn_cast<CallInst>(Val: CB)) {
    auto WrapperCall = Builder.CreateCall(Callee: Wrapper, Args: {Awaiter, FramePtr});

    WrapperCall->setAttributes(NewAttributes);
    WrapperCall->setDebugLoc(Call->getDebugLoc());
    NewCall = WrapperCall;
  } else {
    llvm_unreachable("Unexpected coro_await_suspend invocation method");
  }

  if (CB->getCalledFunction()->getIntrinsicID() ==
      Intrinsic::coro_await_suspend_handle) {
    // Follow the lowered await_suspend call above with a lowered resume call
    // to the returned coroutine.
    if (auto *Invoke = dyn_cast<InvokeInst>(Val: CB)) {
      // If the await_suspend call is an invoke, we continue in the next block.
      Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstInsertionPt());
    }

    coro::LowererBase LB(*Wrapper->getParent());
    auto *ResumeAddr = LB.makeSubFnCall(Arg: NewCall, Index: CoroSubFnInst::ResumeIndex,
                                        InsertPt: &*Builder.GetInsertPoint());

    // The resume function has signature `void(ptr)` and is called through the
    // pointer loaded from the target coroutine's frame.
    LLVMContext &Ctx = Builder.getContext();
    FunctionType *ResumeTy = FunctionType::get(
        Result: Type::getVoidTy(C&: Ctx), Params: PointerType::getUnqual(C&: Ctx), isVarArg: false);
    auto *ResumeCall = Builder.CreateCall(FTy: ResumeTy, Callee: ResumeAddr, Args: {NewCall});
    ResumeCall->setCallingConv(CallingConv::Fast);

    // We can't insert the 'ret' instruction and adjust the cc until the
    // function has been split, so remember this for later.
    Shape.SymmetricTransfers.push_back(Elt: ResumeCall);

    NewCall = ResumeCall;
  }

  CB->replaceAllUsesWith(V: NewCall);
  CB->eraseFromParent();
}
151
152static void lowerAwaitSuspends(Function &F, coro::Shape &Shape) {
153 IRBuilder<> Builder(F.getContext());
154 for (auto *AWS : Shape.CoroAwaitSuspends)
155 lowerAwaitSuspend(Builder, CB: AWS, Shape);
156}
157
158static void maybeFreeRetconStorage(IRBuilder<> &Builder,
159 const coro::Shape &Shape, Value *FramePtr,
160 CallGraph *CG) {
161 assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce);
162 if (Shape.RetconLowering.IsFrameInlineInStorage)
163 return;
164
165 Shape.emitDealloc(Builder, Ptr: FramePtr, CG);
166}
167
168/// Replace an llvm.coro.end.async.
169/// Will inline the must tail call function call if there is one.
170/// \returns true if cleanup of the coro.end block is needed, false otherwise.
static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
  IRBuilder<> Builder(End);

  // A plain (non-async) coro.end, or an async one without a must-tail call:
  // just return void and let the caller clean up the rest of the block.
  auto *EndAsync = dyn_cast<CoroAsyncEndInst>(Val: End);
  if (!EndAsync) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
  if (!MustTailCallFunc) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  // Move the must tail call from the predecessor block into the end block.
  // The call is expected to be the instruction immediately before the
  // predecessor's terminator.
  auto *CoroEndBlock = End->getParent();
  auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
  assert(MustTailCallFuncBlock && "Must have a single predecessor block");
  auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
  auto *MustTailCall = cast<CallInst>(Val: &*std::prev(x: It));
  CoroEndBlock->splice(ToIt: End->getIterator(), FromBB: MustTailCallFuncBlock,
                       FromIt: MustTailCall->getIterator());

  // Insert the return instruction.
  Builder.SetInsertPoint(End);
  Builder.CreateRetVoid();
  InlineFunctionInfo FnInfo;

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(I: End);
  BB->getTerminator()->eraseFromParent();

  // Inline the must-tail call target now that it sits right before the ret.
  auto InlineRes = InlineFunction(CB&: *MustTailCall, IFI&: FnInfo);
  assert(InlineRes.isSuccess() && "Expected inlining to succeed");
  (void)InlineRes;

  // We have cleaned up the coro.end block above.
  return false;
}
212
213/// Replace a non-unwind call to llvm.coro.end.
214static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
215 const coro::Shape &Shape, Value *FramePtr,
216 bool InRamp, CallGraph *CG) {
217 // Start inserting right before the coro.end.
218 IRBuilder<> Builder(End);
219
220 // Create the return instruction.
221 switch (Shape.ABI) {
222 // The cloned functions in switch-lowering always return void.
223 case coro::ABI::Switch:
224 assert(!cast<CoroEndInst>(End)->hasResults() &&
225 "switch coroutine should not return any values");
226 // coro.end doesn't immediately end the coroutine in the main function
227 // in this lowering, because we need to deallocate the coroutine.
228 if (InRamp)
229 return;
230 Builder.CreateRetVoid();
231 break;
232
233 // In async lowering this returns.
234 case coro::ABI::Async: {
235 bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
236 if (!CoroEndBlockNeedsCleanup)
237 return;
238 break;
239 }
240
241 // In unique continuation lowering, the continuations always return void.
242 // But we may have implicitly allocated storage.
243 case coro::ABI::RetconOnce: {
244 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
245 auto *CoroEnd = cast<CoroEndInst>(Val: End);
246 auto *RetTy = Shape.getResumeFunctionType()->getReturnType();
247
248 if (!CoroEnd->hasResults()) {
249 assert(RetTy->isVoidTy());
250 Builder.CreateRetVoid();
251 break;
252 }
253
254 auto *CoroResults = CoroEnd->getResults();
255 unsigned NumReturns = CoroResults->numReturns();
256
257 if (auto *RetStructTy = dyn_cast<StructType>(Val: RetTy)) {
258 assert(RetStructTy->getNumElements() == NumReturns &&
259 "numbers of returns should match resume function singature");
260 Value *ReturnValue = PoisonValue::get(T: RetStructTy);
261 unsigned Idx = 0;
262 for (Value *RetValEl : CoroResults->return_values())
263 ReturnValue = Builder.CreateInsertValue(Agg: ReturnValue, Val: RetValEl, Idxs: Idx++);
264 Builder.CreateRet(V: ReturnValue);
265 } else if (NumReturns == 0) {
266 assert(RetTy->isVoidTy());
267 Builder.CreateRetVoid();
268 } else {
269 assert(NumReturns == 1);
270 Builder.CreateRet(V: *CoroResults->retval_begin());
271 }
272 CoroResults->replaceAllUsesWith(
273 V: ConstantTokenNone::get(Context&: CoroResults->getContext()));
274 CoroResults->eraseFromParent();
275 break;
276 }
277
278 // In non-unique continuation lowering, we signal completion by returning
279 // a null continuation.
280 case coro::ABI::Retcon: {
281 assert(!cast<CoroEndInst>(End)->hasResults() &&
282 "retcon coroutine should not return any values");
283 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
284 auto RetTy = Shape.getResumeFunctionType()->getReturnType();
285 auto RetStructTy = dyn_cast<StructType>(Val: RetTy);
286 PointerType *ContinuationTy =
287 cast<PointerType>(Val: RetStructTy ? RetStructTy->getElementType(N: 0) : RetTy);
288
289 Value *ReturnValue = ConstantPointerNull::get(T: ContinuationTy);
290 if (RetStructTy) {
291 ReturnValue = Builder.CreateInsertValue(Agg: PoisonValue::get(T: RetStructTy),
292 Val: ReturnValue, Idxs: 0);
293 }
294 Builder.CreateRet(V: ReturnValue);
295 break;
296 }
297 }
298
299 // Remove the rest of the block, by splitting it into an unreachable block.
300 auto *BB = End->getParent();
301 BB->splitBasicBlock(I: End);
302 BB->getTerminator()->eraseFromParent();
303}
304
305// Mark a coroutine as done, which implies that the coroutine is finished and
306// never gets resumed.
307//
308// In resume-switched ABI, the done state is represented by storing zero in
309// ResumeFnAddr.
310//
// NOTE: We can't omit the `FramePtr` argument: it is necessary because the
// pointer to the frame in the split function is not stored in `Shape`.
static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
                                Value *FramePtr) {
  assert(
      Shape.ABI == coro::ABI::Switch &&
      "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
  // A null ResumeFnAddr in the frame is the "done" marker in switch lowering.
  auto *GepIndex = Builder.CreateStructGEP(
      Ty: Shape.FrameTy, Ptr: FramePtr, Idx: coro::Shape::SwitchFieldIndex::Resume,
      Name: "ResumeFn.addr");
  auto *NullPtr = ConstantPointerNull::get(T: cast<PointerType>(
      Val: Shape.FrameTy->getTypeAtIndex(N: coro::Shape::SwitchFieldIndex::Resume)));
  Builder.CreateStore(Val: NullPtr, Ptr: GepIndex);

  // If the coroutine doesn't have an unwind coro.end, we can omit the store
  // of the final-suspend index, since "suspended at the final suspend point"
  // can be inferred from the nullness of ResumeFnAddr alone.
  // However, we can't skip it if the coroutine has an unwind coro.end: a
  // coroutine that reaches an unwind coro.end is considered suspended at the
  // final suspend point (its ResumeFnAddr is null) even though it hasn't
  // actually completed yet. We need the IndexVal for the final suspend point
  // to keep the states distinguishable.
  if (Shape.SwitchLowering.HasUnwindCoroEnd &&
      Shape.SwitchLowering.HasFinalSuspend) {
    assert(cast<CoroSuspendInst>(Shape.CoroSuspends.back())->isFinal() &&
           "The final suspend should only live in the last position of "
           "CoroSuspends.");
    ConstantInt *IndexVal = Shape.getIndex(Value: Shape.CoroSuspends.size() - 1);
    auto *FinalIndex = Builder.CreateStructGEP(
        Ty: Shape.FrameTy, Ptr: FramePtr, Idx: Shape.getSwitchIndexField(), Name: "index.addr");

    Builder.CreateStore(Val: IndexVal, Ptr: FinalIndex);
  }
}
345
346/// Replace an unwind call to llvm.coro.end.
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                                 Value *FramePtr, bool InRamp, CallGraph *CG) {
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch: {
    // In C++'s specification, the coroutine should be marked as done
    // if promise.unhandled_exception() throws. The frontend will
    // call coro.end(true) along this path.
    //
    // FIXME: We should refactor this once there is other language
    // which uses Switch-Resumed style other than C++.
    markCoroutineAsDone(Builder, Shape, FramePtr);
    if (InRamp)
      return;
    break;
  }
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add cleanupret instruction.
  // The cleanupret unwinds to caller; everything after the coro.end in its
  // block becomes unreachable and is detached here.
  if (auto Bundle = End->getOperandBundle(ID: LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Val: Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(CleanupPad: FromPad, UnwindBB: nullptr);
    End->getParent()->splitBasicBlock(I: End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}
383
384static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
385 Value *FramePtr, bool InRamp, CallGraph *CG) {
386 if (End->isUnwind())
387 replaceUnwindCoroEnd(End, Shape, FramePtr, InRamp, CG);
388 else
389 replaceFallthroughCoroEnd(End, Shape, FramePtr, InRamp, CG);
390 End->eraseFromParent();
391}
392
393// In the resume function, we remove the last case (when coro::Shape is built,
394// the final suspend point (if present) is always the last element of
395// CoroSuspends array) since it is an undefined behavior to resume a coroutine
396// suspended at the final suspend point.
397// In the destroy function, if it isn't possible that the ResumeFnAddr is NULL
398// and the coroutine doesn't suspend at the final suspend point actually (this
399// is possible since the coroutine is considered suspended at the final suspend
400// point if promise.unhandled_exception() exits via an exception), we can
401// remove the last case.
void coro::BaseCloner::handleFinalSuspend() {
  assert(Shape.ABI == coro::ABI::Switch &&
         Shape.SwitchLowering.HasFinalSuspend);

  // With an unwind coro.end, the destroy function may legitimately observe a
  // null ResumeFnAddr without the coroutine being at the final suspend point,
  // so the final case must be kept there (see the comment above).
  if (isSwitchDestroyFunction() && Shape.SwitchLowering.HasUnwindCoroEnd)
    return;

  // The final suspend point is always the last case of the resume switch.
  auto *Switch = cast<SwitchInst>(Val&: VMap[Shape.SwitchLowering.ResumeSwitch]);
  auto FinalCaseIt = std::prev(x: Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(I: FinalCaseIt);
  if (isSwitchDestroyFunction()) {
    // In the destroy function, re-route to the final-suspend block via an
    // explicit null-check of ResumeFnAddr instead of the removed switch case.
    BasicBlock *OldSwitchBB = Switch->getParent();
    auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(I: Switch, BBName: "Switch");
    Builder.SetInsertPoint(OldSwitchBB->getTerminator());

    if (NewF->isCoroOnlyDestroyWhenComplete()) {
      // When the coroutine can only be destroyed when complete, we don't need
      // to generate code for other cases.
      Builder.CreateBr(Dest: ResumeBB);
    } else {
      auto *GepIndex = Builder.CreateStructGEP(
          Ty: Shape.FrameTy, Ptr: NewFramePtr, Idx: coro::Shape::SwitchFieldIndex::Resume,
          Name: "ResumeFn.addr");
      auto *Load =
          Builder.CreateLoad(Ty: Shape.getSwitchResumePointerType(), Ptr: GepIndex);
      // Null ResumeFnAddr means "at the final suspend point".
      auto *Cond = Builder.CreateIsNull(Arg: Load);
      Builder.CreateCondBr(Cond, True: ResumeBB, False: NewSwitchBB);
    }
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}
434
435static FunctionType *
436getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
437 auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Val: Suspend);
438 auto *StructTy = cast<StructType>(Val: AsyncSuspend->getType());
439 auto &Context = Suspend->getParent()->getParent()->getContext();
440 auto *VoidTy = Type::getVoidTy(C&: Context);
441 return FunctionType::get(Result: VoidTy, Params: StructTy->elements(), isVarArg: false);
442}
443
444static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
445 const Twine &Suffix,
446 Module::iterator InsertBefore,
447 AnyCoroSuspendInst *ActiveSuspend) {
448 Module *M = OrigF.getParent();
449 auto *FnTy = (Shape.ABI != coro::ABI::Async)
450 ? Shape.getResumeFunctionType()
451 : getFunctionTypeFromAsyncSuspend(Suspend: ActiveSuspend);
452
453 Function *NewF =
454 Function::Create(Ty: FnTy, Linkage: GlobalValue::LinkageTypes::InternalLinkage,
455 N: OrigF.getName() + Suffix);
456
457 M->getFunctionList().insert(where: InsertBefore, New: NewF);
458
459 return NewF;
460}
461
462/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
463/// arguments to the continuation function.
464///
465/// This assumes that the builder has a meaningful insertion point.
void coro::BaseCloner::replaceRetconOrAsyncSuspendUses() {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
         Shape.ABI == coro::ABI::Async);

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty())
    return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  SmallVector<Value *, 8> Args;
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(x: NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(Elt: &*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(Val: NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(V: Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
  // Each single-index extractvalue maps directly to one argument, so the
  // extract can be replaced without materializing the aggregate at all.
  for (Use &U : llvm::make_early_inc_range(Range: NewS->uses())) {
    auto *EVI = dyn_cast<ExtractValueInst>(Val: U.getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(V: Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty())
    return;

  // Otherwise, we need to create an aggregate.
  // Build it field by field from the continuation arguments, then substitute
  // it for the suspend's remaining (non-extractvalue) uses.
  Value *Aggr = PoisonValue::get(T: NewS->getType());
  for (auto [Idx, Arg] : llvm::enumerate(First&: Args))
    Aggr = Builder.CreateInsertValue(Agg: Aggr, Val: Arg, Idxs: Idx);

  NewS->replaceAllUsesWith(V: Aggr);
}
513
514void coro::BaseCloner::replaceCoroSuspends() {
515 Value *SuspendResult;
516
517 switch (Shape.ABI) {
518 // In switch lowering, replace coro.suspend with the appropriate value
519 // for the type of function we're extracting.
520 // Replacing coro.suspend with (0) will result in control flow proceeding to
521 // a resume label associated with a suspend point, replacing it with (1) will
522 // result in control flow proceeding to a cleanup label associated with this
523 // suspend point.
524 case coro::ABI::Switch:
525 SuspendResult = Builder.getInt8(C: isSwitchDestroyFunction() ? 1 : 0);
526 break;
527
528 // In async lowering there are no uses of the result.
529 case coro::ABI::Async:
530 return;
531
532 // In returned-continuation lowering, the arguments from earlier
533 // continuations are theoretically arbitrary, and they should have been
534 // spilled.
535 case coro::ABI::RetconOnce:
536 case coro::ABI::Retcon:
537 return;
538 }
539
540 for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
541 // The active suspend was handled earlier.
542 if (CS == ActiveSuspend)
543 continue;
544
545 auto *MappedCS = cast<AnyCoroSuspendInst>(Val&: VMap[CS]);
546 MappedCS->replaceAllUsesWith(V: SuspendResult);
547 MappedCS->eraseFromParent();
548 }
549}
550
551void coro::BaseCloner::replaceCoroEnds() {
552 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
553 // We use a null call graph because there's no call graph node for
554 // the cloned function yet. We'll just be rebuilding that later.
555 auto *NewCE = cast<AnyCoroEndInst>(Val&: VMap[CE]);
556 replaceCoroEnd(End: NewCE, Shape, FramePtr: NewFramePtr, /*in ramp*/ InRamp: false, CG: nullptr);
557 }
558}
559
560void coro::BaseCloner::replaceCoroIsInRamp() {
561 auto &Ctx = OrigF.getContext();
562 for (auto *II : Shape.CoroIsInRampInsts) {
563 auto *NewII = cast<CoroIsInRampInst>(Val&: VMap[II]);
564 NewII->replaceAllUsesWith(V: ConstantInt::getFalse(Context&: Ctx));
565 NewII->eraseFromParent();
566 }
567}
568
// Lower the swifterror get/set intrinsic calls recorded in Shape into loads
// and stores of a swifterror slot: either the function's swifterror argument
// or, failing that, a lazily created swifterror alloca in the entry block.
// When VMap is non-null the (cloned) instructions it maps to are rewritten;
// when it is null, F's own instructions are rewritten in place.
static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
                                 ValueToValueMapTy *VMap) {
  if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
    return;
  // Lazily resolved and cached: all ops in this function share one slot.
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot)
      return CachedSlot;

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(&F.getEntryBlock(),
                        F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(Ty: ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    auto MappedOp = VMap ? cast<CallInst>(Val&: (*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
    Value *MappedResult;
    if (Op->arg_empty()) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(Ty: ValueTy, Ptr: Slot);
    } else {
      // Otherwise it is a 'set': store the value and yield the slot itself.
      assert(Op->arg_size() == 1);
      auto Value = MappedOp->getArgOperand(i: 0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Val: Value, Ptr: Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(V: MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}
624
625/// Returns all debug records in F.
626static SmallVector<DbgVariableRecord *>
627collectDbgVariableRecords(Function &F) {
628 SmallVector<DbgVariableRecord *> DbgVariableRecords;
629 for (auto &I : instructions(F)) {
630 for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange()))
631 DbgVariableRecords.push_back(Elt: &DVR);
632 }
633 return DbgVariableRecords;
634}
635
void coro::BaseCloner::replaceSwiftErrorOps() {
  // Rewrite the swifterror ops in the clone: pass VMap so the file-local
  // helper operates on the cloned instructions inside NewF.
  ::replaceSwiftErrorOps(F&: *NewF, Shape, VMap: &VMap);
}
639
void coro::BaseCloner::salvageDebugInfo() {
  auto DbgVariableRecords = collectDbgVariableRecords(F&: *NewF);
  SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;

  // Only 64-bit ABIs have a register we can refer to with the entry value.
  bool UseEntryValue = OrigF.getParent()->getTargetTriple().isArch64Bit();
  for (DbgVariableRecord *DVR : DbgVariableRecords)
    coro::salvageDebugInfo(ArgToAllocaMap, DVR&: *DVR, UseEntryValue);

  // Remove all salvaged dbg.declare intrinsics that became
  // either unreachable or stale due to the CoroSplit transformation.
  DominatorTree DomTree(*NewF);
  auto IsUnreachableBlock = [&](BasicBlock *BB) {
    return !isPotentiallyReachable(From: &NewF->getEntryBlock(), To: BB, ExclusionSet: nullptr,
                                   DT: &DomTree);
  };
  auto RemoveOne = [&](DbgVariableRecord *DVI) {
    if (IsUnreachableBlock(DVI->getParent()))
      DVI->eraseFromParent();
    else if (isa_and_nonnull<AllocaInst>(Val: DVI->getVariableLocationOp(OpIdx: 0))) {
      // Count all non-debuginfo uses in reachable blocks.
      // If an alloca has no such uses left, its record is stale and the
      // record is dropped along with it.
      unsigned Uses = 0;
      for (auto *User : DVI->getVariableLocationOp(OpIdx: 0)->users())
        if (auto *I = dyn_cast<Instruction>(Val: User))
          if (!isa<AllocaInst>(Val: I) && !IsUnreachableBlock(I->getParent()))
            ++Uses;
      if (!Uses)
        DVI->eraseFromParent();
    }
  };
  for_each(Range&: DbgVariableRecords, F: RemoveOne);
}
672
void coro::BaseCloner::replaceEntryBlock() {
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine. Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(Val&: VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(MovePos: OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block. There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Val: Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function. Make the entry block branch to this.
    auto *SwitchBB =
        cast<BasicBlock>(Val&: VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(Dest: SwitchBB);
    SwitchBB->moveAfter(MovePos: Entry);
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point. Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    assert((Shape.ABI == coro::ABI::Async &&
            isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           ((Shape.ABI == coro::ABI::Retcon ||
             Shape.ABI == coro::ABI::RetconOnce) &&
            isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(Val&: VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(Val: MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Dest: Branch->getSuccessor(i: 0));
    break;
  }
  }

  // Any static alloca that's still being used but not reachable from the new
  // entry needs to be moved to the new entry.
  Function *F = OldEntry->getParent();
  DominatorTree DT{*F};
  for (Instruction &I : llvm::make_early_inc_range(Range: instructions(F))) {
    auto *Alloca = dyn_cast<AllocaInst>(Val: &I);
    if (!Alloca || I.use_empty())
      continue;
    // Only statically-sized allocas are safe to hoist; dynamically-sized
    // ones must stay where their size operand is available.
    if (DT.isReachableFromEntry(A: I.getParent()) ||
        !isa<ConstantInt>(Val: Alloca->getArraySize()))
      continue;
    I.moveBefore(BB&: *Entry, I: Entry->getFirstInsertionPt());
  }
}
740
741/// Derive the value of the new frame pointer.
Value *coro::BaseCloner::deriveNewFramePointer() {
  // Builder should be inserting to the front of the new entry block.

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
  case coro::ABI::Switch:
    return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
  // the resume function from the async context projection function associated
  // with the active suspend. The frame is located as a tail to the async
  // context header.
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(Val: ActiveSuspend);
    // The low byte of the storage-argument index encodes which argument
    // carries the callee's async context.
    auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
    auto *CalleeContext = NewF->getArg(i: ContextIdx);
    auto *ProjectionFunc =
        ActiveAsyncSuspend->getAsyncContextProjectionFunction();
    auto DbgLoc =
        cast<CoroSuspendAsyncInst>(Val&: VMap[ActiveSuspend])->getDebugLoc();
    // Calling i8* (i8*)
    auto *CallerContext = Builder.CreateCall(FTy: ProjectionFunc->getFunctionType(),
                                             Callee: ProjectionFunc, Args: CalleeContext);
    CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
    CallerContext->setDebugLoc(DbgLoc);
    // The frame is located after the async_context header.
    auto &Context = Builder.getContext();
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Ty: Type::getInt8Ty(C&: Context), Ptr: CallerContext,
        Idx0: Shape.AsyncLowering.FrameOffset, Name: "async.ctx.frameptr");
    // Inline the projection function.
    InlineFunctionInfo InlineInfo;
    auto InlineRes = InlineFunction(CB&: *CallerContext, IFI&: InlineInfo);
    assert(InlineRes.isSuccess());
    (void)InlineRes;
    return FramePtrAddr;
  }
  // In continuation-lowering, the argument is the opaque storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = PointerType::getUnqual(C&: Shape.FrameTy->getContext());

    // If the storage is inline, just bitcast to the storage to the frame type.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return NewStorage;

    // Otherwise, load the real frame from the opaque storage.
    return Builder.CreateLoad(Ty: FramePtrTy, Ptr: NewStorage);
  }
  }
  llvm_unreachable("bad ABI");
}
795
796/// Adjust the scope line of the funclet to the first line number after the
797/// suspend point. This avoids a jump in the line table from the function
798/// declaration (where prologue instructions are attributed to) to the suspend
799/// point.
800/// Only adjust the scope line when the files are the same.
801/// If no candidate line number is found, fallback to the line of ActiveSuspend.
static void updateScopeLine(Instruction *ActiveSuspend,
                            DISubprogram &SPToUpdate) {
  // Without a suspend point there is nothing to anchor the scope line on.
  if (!ActiveSuspend)
    return;

  // No subsequent instruction -> fallback to the location of ActiveSuspend.
  if (!ActiveSuspend->getNextNode()) {
    if (auto DL = ActiveSuspend->getDebugLoc())
      if (SPToUpdate.getFile() == DL->getFile())
        SPToUpdate.setScopeLine(DL->getLine());
    return;
  }

  BasicBlock::iterator Successor = ActiveSuspend->getNextNode()->getIterator();
  // Corosplit splits the BB around ActiveSuspend, so the meaningful
  // instructions are not in the same BB.
  while (auto *Branch = dyn_cast_or_null<BranchInst>(Val&: Successor)) {
    if (!Branch->isUnconditional())
      break;
    // Follow unconditional branches to the first real instruction of the
    // successor block.
    Successor = Branch->getSuccessor(i: 0)->getFirstNonPHIOrDbg();
  }

  // Find the first successor of ActiveSuspend with a non-zero line location.
  // If that matches the file of ActiveSuspend, use it.
  BasicBlock *PBB = Successor->getParent();
  for (; Successor != PBB->end(); Successor = std::next(x: Successor)) {
    Successor = skipDebugIntrinsics(It: Successor);
    auto DL = Successor->getDebugLoc();
    // Skip instructions without a location, or with the artificial line 0.
    if (!DL || DL.getLine() == 0)
      continue;

    if (SPToUpdate.getFile() == DL->getFile()) {
      SPToUpdate.setScopeLine(DL.getLine());
      return;
    }

    // First meaningful location is in a different file: stop searching.
    break;
  }

  // If the search above failed, fallback to the location of ActiveSuspend.
  if (auto DL = ActiveSuspend->getDebugLoc())
    if (SPToUpdate.getFile() == DL->getFile())
      SPToUpdate.setScopeLine(DL->getLine());
}
846
847static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
848 unsigned ParamIndex, uint64_t Size,
849 Align Alignment, bool NoAlias) {
850 AttrBuilder ParamAttrs(Context);
851 ParamAttrs.addAttribute(Val: Attribute::NonNull);
852 ParamAttrs.addAttribute(Val: Attribute::NoUndef);
853
854 if (NoAlias)
855 ParamAttrs.addAttribute(Val: Attribute::NoAlias);
856
857 ParamAttrs.addAlignmentAttr(Align: Alignment);
858 ParamAttrs.addDereferenceableAttr(Bytes: Size);
859 Attrs = Attrs.addParamAttributes(C&: Context, ArgNo: ParamIndex, B: ParamAttrs);
860}
861
862static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
863 unsigned ParamIndex) {
864 AttrBuilder ParamAttrs(Context);
865 ParamAttrs.addAttribute(Val: Attribute::SwiftAsync);
866 Attrs = Attrs.addParamAttributes(C&: Context, ArgNo: ParamIndex, B: ParamAttrs);
867}
868
869static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
870 unsigned ParamIndex) {
871 AttrBuilder ParamAttrs(Context);
872 ParamAttrs.addAttribute(Val: Attribute::SwiftSelf);
873 Attrs = Attrs.addParamAttributes(C&: Context, ArgNo: ParamIndex, B: ParamAttrs);
874}
875
/// Clone the body of the original function into a resume function of
/// some sort.
///
/// Clones OrigF into the pre-created NewF, then rewrites the clone into a
/// proper resume/destroy/cleanup (or continuation) function: rebuilds the
/// entry block, derives the frame pointer from the clone's arguments,
/// rewrites suspends/ends, and fixes up attributes and debug info.
void coro::BaseCloner::create() {
  assert(NewF);

  // Replace all args with dummy instructions. If an argument is the old frame
  // pointer, the dummy will be replaced by the new frame pointer once it is
  // computed below. Uses of all other arguments should have already been
  // rewritten by buildCoroutineFrame() to use loads/stores on the coroutine
  // frame.
  SmallVector<Instruction *> DummyArgs;
  for (Argument &A : OrigF.args()) {
    DummyArgs.push_back(Elt: new FreezeInst(PoisonValue::get(T: A.getType())));
    VMap[&A] = DummyArgs.back();
  }

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  // Clone the whole body of OrigF into NewF; Returns receives the cloned
  // return instructions for the ABI-specific rewriting below.
  CloneFunctionInto(NewFunc: NewF, OldFunc: &OrigF, VMap,
                    Changes: CloneFunctionChangeType::LocalChangesOnly, Returns);

  auto &Context = NewF->getContext();

  // Fix up debug info: the clone got a distinct DISubprogram from cloning.
  if (DISubprogram *SP = NewF->getSubprogram()) {
    assert(SP != OrigF.getSubprogram() && SP->isDistinct());
    updateScopeLine(ActiveSuspend, SPToUpdate&: *SP);

    // Update the linkage name and the function name to reflect the modified
    // name.
    MDString *NewLinkageName = MDString::get(Context, Str: NewF->getName());
    SP->replaceLinkageName(LN: NewLinkageName);
    if (DISubprogram *Decl = SP->getDeclaration()) {
      TempDISubprogram NewDecl = Decl->clone();
      NewDecl->replaceLinkageName(LN: NewLinkageName);
      SP->replaceDeclaration(Decl: MDNode::replaceWithUniqued(N: std::move(NewDecl)));
    }
  }

  // Restore the attributes that cloning was not supposed to change.
  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);
  // The function sanitizer metadata needs to match the signature of the
  // function it is being attached to. However this does not hold for split
  // functions here. Thus remove the metadata for split functions.
  if (Shape.ABI == coro::ABI::Switch &&
      NewF->hasMetadata(KindID: LLVMContext::MD_func_sanitize))
    NewF->eraseMetadata(KindID: LLVMContext::MD_func_sanitize);

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function. This should include optimization settings and so on.
    NewAttrs = NewAttrs.addFnAttributes(
        C&: Context, B: AttrBuilder(Context, OrigAttrs.getFnAttrs()));

    addFramePointerAttrs(Attrs&: NewAttrs, Context, ParamIndex: 0, Size: Shape.FrameSize,
                         Alignment: Shape.FrameAlign, /*NoAlias=*/false);
    break;
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(Val: ActiveSuspend);
    if (OrigF.hasParamAttribute(ArgNo: Shape.AsyncLowering.ContextArgNo,
                                Kind: Attribute::SwiftAsync)) {
      // The suspend packs the context index in the low byte and the
      // swiftself index in the next byte (see uses below).
      uint32_t ArgAttributeIndices =
          ActiveAsyncSuspend->getStorageArgumentIndex();
      auto ContextArgIndex = ArgAttributeIndices & 0xff;
      addAsyncContextAttrs(Attrs&: NewAttrs, Context, ParamIndex: ContextArgIndex);

      // `swiftasync` must preceed `swiftself` so 0 is not a valid index for
      // `swiftself`.
      auto SwiftSelfIndex = ArgAttributeIndices >> 8;
      if (SwiftSelfIndex)
        addSwiftSelfAttrs(Attrs&: NewAttrs, Context, ParamIndex: SwiftSelfIndex);
    }

    // Transfer the original function's attributes.
    auto FnAttrs = OrigF.getAttributes().getFnAttrs();
    NewAttrs = NewAttrs.addFnAttributes(C&: Context, B: AttrBuilder(Context, FnAttrs));
    break;
  }
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    /// FIXME: Is it really good to add the NoAlias attribute?
    addFramePointerAttrs(Attrs&: NewAttrs, Context, ParamIndex: 0,
                         Size: Shape.getRetconCoroId()->getStorageSize(),
                         Alignment: Shape.getRetconCoroId()->getStorageAlignment(),
                         /*NoAlias=*/true);

    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless. Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(I: Return);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail call functions at all suspend points
  // followed by a return.
  // Don't change returns to unreachable because that will trip up the verifier.
  // These returns should be unreachable from the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  // Turn symmetric transfers into musttail calls.
  for (CallInst *ResumeCall : Shape.SymmetricTransfers) {
    ResumeCall = cast<CallInst>(Val&: VMap[ResumeCall]);
    if (TTI.supportsTailCallFor(CB: ResumeCall)) {
      // FIXME: Could we support symmetric transfer effectively without
      // musttail?
      ResumeCall->setTailCallKind(CallInst::TCK_MustTail);
    }

    // Put a 'ret void' after the call, and split any remaining instructions to
    // an unreachable block.
    BasicBlock *BB = ResumeCall->getParent();
    BB->splitBasicBlock(I: ResumeCall->getNextNode());
    Builder.SetInsertPoint(BB->getTerminator());
    Builder.CreateRetVoid();
    BB->getTerminator()->eraseFromParent();
  }

  // Derive the frame pointer from the clone's own (ABI-specific) argument.
  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(V: OldFramePtr);
  OldFramePtr->replaceAllUsesWith(V: NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      V: NewFramePtr, DestTy: PointerType::getUnqual(C&: Builder.getContext()), Name: "vFrame");
  Value *OldVFrame = cast<Value>(Val&: VMap[Shape.CoroBegin]);
  if (OldVFrame != NewVFrame)
    OldVFrame->replaceAllUsesWith(V: NewVFrame);

  // All uses of the arguments should have been resolved by this point,
  // so we can safely remove the dummy values.
  for (Instruction *DummyArg : DummyArgs) {
    DummyArg->replaceAllUsesWith(V: PoisonValue::get(T: DummyArg->getType()));
    DummyArg->deleteValue();
  }

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling as it is not done via switch (allows to
    // remove final case from the switch, since it is undefined behavior to
    // resume the coroutine suspended at the final suspend point.
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  replaceCoroIsInRamp();

  // Salvage debug info that points into the coroutine frame.
  salvageDebugInfo();
}
1091
/// Create one clone (resume/destroy/cleanup, per FKind) of a switch-lowered
/// coroutine: declare the clone, run the shared cloning logic, then rewrite
/// coro.free according to the clone kind.
void coro::SwitchCloner::create() {
  // Create a new function matching the original type
  NewF = createCloneDeclaration(OrigF, Shape, Suffix, InsertBefore: OrigF.getParent()->end(),
                                ActiveSuspend);

  // Clone the function
  coro::BaseCloner::create();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  coro::replaceCoroFree(CoroId: cast<CoroIdInst>(Val&: VMap[Shape.CoroBegin->getId()]),
                        /*Elide=*/FKind == coro::CloneKind::SwitchCleanup);
}
1105
1106static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
1107 assert(Shape.ABI == coro::ABI::Async);
1108
1109 auto *FuncPtrStruct = cast<ConstantStruct>(
1110 Val: Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
1111 auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(i_nocapture: 0);
1112 auto *OrigContextSize = FuncPtrStruct->getOperand(i_nocapture: 1);
1113 auto *NewContextSize = ConstantInt::get(Ty: OrigContextSize->getType(),
1114 V: Shape.AsyncLowering.ContextSize);
1115 auto *NewFuncPtrStruct = ConstantStruct::get(
1116 T: FuncPtrStruct->getType(), Vs: OrigRelativeFunOffset, Vs: NewContextSize);
1117
1118 Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1119}
1120
1121static TypeSize getFrameSizeForShape(coro::Shape &Shape) {
1122 // In the same function all coro.sizes should have the same result type.
1123 auto *SizeIntrin = Shape.CoroSizes.back();
1124 Module *M = SizeIntrin->getModule();
1125 const DataLayout &DL = M->getDataLayout();
1126 return DL.getTypeAllocSize(Ty: Shape.FrameTy);
1127}
1128
1129static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
1130 if (Shape.ABI == coro::ABI::Async)
1131 updateAsyncFuncPointerContextSize(Shape);
1132
1133 for (CoroAlignInst *CA : Shape.CoroAligns) {
1134 CA->replaceAllUsesWith(
1135 V: ConstantInt::get(Ty: CA->getType(), V: Shape.FrameAlign.value()));
1136 CA->eraseFromParent();
1137 }
1138
1139 if (Shape.CoroSizes.empty())
1140 return;
1141
1142 // In the same function all coro.sizes should have the same result type.
1143 auto *SizeIntrin = Shape.CoroSizes.back();
1144 auto *SizeConstant =
1145 ConstantInt::get(Ty: SizeIntrin->getType(), V: getFrameSizeForShape(Shape));
1146
1147 for (CoroSizeInst *CS : Shape.CoroSizes) {
1148 CS->replaceAllUsesWith(V: SizeConstant);
1149 CS->eraseFromParent();
1150 }
1151}
1152
// Cleanup run on each freshly split clone: prune unreachable blocks and, in
// asserts builds, verify the resulting function.
static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

#ifndef NDEBUG
  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");
#endif
}
1164
// Coroutine has no suspend points. Remove heap allocation for the coroutine
// frame if possible.
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = Shape.getSwitchCoroId();
    auto *AllocInst = SwitchId->getCoroAlloc();
    coro::replaceCoroFree(CoroId: SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      // The frame never escapes the function, so allocate it on the stack
      // and make coro.alloc answer "false" (no dynamic allocation needed).
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Ty: Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      AllocInst->replaceAllUsesWith(V: Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(V: Frame);
    } else {
      // No coro.alloc: the caller-provided memory is used directly.
      CoroBegin->replaceAllUsesWith(V: CoroBegin->getMem());
    }

    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(V: PoisonValue::get(T: CoroBegin->getType()));
    break;
  }

  // coro.begin itself is no longer needed once all its uses are rewritten.
  CoroBegin->eraseFromParent();
  Shape.CoroBegin = nullptr;
}
1197
1198// SimplifySuspendPoint needs to check that there is no calls between
1199// coro_save and coro_suspend, since any of the calls may potentially resume
1200// the coroutine and if that is the case we cannot eliminate the suspend point.
1201static bool hasCallsInBlockBetween(iterator_range<BasicBlock::iterator> R) {
1202 for (Instruction &I : R) {
1203 // Assume that no intrinsic can resume the coroutine.
1204 if (isa<IntrinsicInst>(Val: I))
1205 continue;
1206
1207 if (isa<CallBase>(Val: I))
1208 return true;
1209 }
1210 return false;
1211}
1212
1213static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1214 SmallPtrSet<BasicBlock *, 8> Set;
1215 SmallVector<BasicBlock *, 8> Worklist;
1216
1217 Set.insert(Ptr: SaveBB);
1218 Worklist.push_back(Elt: ResDesBB);
1219
1220 // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
1221 // returns a token consumed by suspend instruction, all blocks in between
1222 // will have to eventually hit SaveBB when going backwards from ResDesBB.
1223 while (!Worklist.empty()) {
1224 auto *BB = Worklist.pop_back_val();
1225 Set.insert(Ptr: BB);
1226 for (auto *Pred : predecessors(BB))
1227 if (!Set.contains(Ptr: Pred))
1228 Worklist.push_back(Elt: Pred);
1229 }
1230
1231 // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1232 Set.erase(Ptr: SaveBB);
1233 Set.erase(Ptr: ResDesBB);
1234
1235 for (auto *BB : Set)
1236 if (hasCallsInBlockBetween(R: {BB->getFirstNonPHIIt(), BB->end()}))
1237 return true;
1238
1239 return false;
1240}
1241
1242static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1243 auto *SaveBB = Save->getParent();
1244 auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1245 BasicBlock::iterator SaveIt = Save->getIterator();
1246 BasicBlock::iterator ResumeOrDestroyIt = ResumeOrDestroy->getIterator();
1247
1248 if (SaveBB == ResumeOrDestroyBB)
1249 return hasCallsInBlockBetween(R: {std::next(x: SaveIt), ResumeOrDestroyIt});
1250
1251 // Any calls from Save to the end of the block?
1252 if (hasCallsInBlockBetween(R: {std::next(x: SaveIt), SaveBB->end()}))
1253 return true;
1254
1255 // Any calls from begging of the block up to ResumeOrDestroy?
1256 if (hasCallsInBlockBetween(
1257 R: {ResumeOrDestroyBB->getFirstNonPHIIt(), ResumeOrDestroyIt}))
1258 return true;
1259
1260 // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1261 if (hasCallsInBlocksBetween(SaveBB, ResDesBB: ResumeOrDestroyBB))
1262 return true;
1263
1264 return false;
1265}
1266
// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  // Find the instruction immediately preceding the suspend; if the suspend is
  // first in its block, look at the single predecessor's terminator.
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Val: Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Val: Callee);
  if (!SubFn)
    return false;

  // Does not refer to the current coroutine, we cannot do anything with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, ResumeOrDestroy: CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(V: SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  // An invoke is replaced by a plain branch to its normal destination.
  if (auto *Invoke = dyn_cast<InvokeInst>(Val: CB)) {
    BranchInst::Create(IfTrue: Invoke->getNormalDest(), InsertBefore: Invoke->getIterator());
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If no more users remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(Val: CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}
1327
// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;

  // Compact simplified suspends out of S with a swap-with-last scheme;
  // remember if the final suspend gets swapped away from the last slot.
  size_t ChangedFinalIndex = std::numeric_limits<size_t>::max();
  while (true) {
    auto SI = cast<CoroSuspendInst>(Val: S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(Suspend: SI, CoroBegin: Shape.CoroBegin)) {
      if (--N == I)
        break;

      std::swap(a&: S[I], b&: S[N]);

      if (cast<CoroSuspendInst>(Val: S[I])->isFinal()) {
        assert(Shape.SwitchLowering.HasFinalSuspend);
        ChangedFinalIndex = I;
      }

      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);

  // Maintain final.suspend in case final suspend was swapped.
  // This is because we require the final suspend to be the last element of
  // CoroSuspends.
  if (ChangedFinalIndex < N) {
    assert(cast<CoroSuspendInst>(S[ChangedFinalIndex])->isFinal());
    std::swap(a&: S[ChangedFinalIndex], b&: S.back());
  }
}
1369
1370namespace {
1371
1372struct SwitchCoroutineSplitter {
  // Split a switch-ABI coroutine into resume/destroy/cleanup clones, wire
  // their addresses into the frame, and record them for CoroElide.
  static void split(Function &F, coro::Shape &Shape,
                    SmallVectorImpl<Function *> &Clones,
                    TargetTransformInfo &TTI) {
    assert(Shape.ABI == coro::ABI::Switch);

    // Create a resume clone by cloning the body of the original function,
    // setting new entry block and replacing coro.suspend an appropriate value
    // to force resume or cleanup pass for every suspend point.
    createResumeEntryBlock(F, Shape);
    auto *ResumeClone = coro::SwitchCloner::createClone(
        OrigF&: F, Suffix: ".resume", Shape, FKind: coro::CloneKind::SwitchResume, TTI);
    auto *DestroyClone = coro::SwitchCloner::createClone(
        OrigF&: F, Suffix: ".destroy", Shape, FKind: coro::CloneKind::SwitchUnwind, TTI);
    auto *CleanupClone = coro::SwitchCloner::createClone(
        OrigF&: F, Suffix: ".cleanup", Shape, FKind: coro::CloneKind::SwitchCleanup, TTI);

    postSplitCleanup(F&: *ResumeClone);
    postSplitCleanup(F&: *DestroyClone);
    postSplitCleanup(F&: *CleanupClone);

    // Store addresses resume/destroy/cleanup functions in the coroutine frame.
    updateCoroFrame(Shape, ResumeFn: ResumeClone, DestroyFn: DestroyClone, CleanupFn: CleanupClone);

    assert(Clones.empty());
    Clones.push_back(Elt: ResumeClone);
    Clones.push_back(Elt: DestroyClone);
    Clones.push_back(Elt: CleanupClone);

    // Create a constant array referring to resume/destroy/clone functions
    // pointed by the last argument of @llvm.coro.info, so that CoroElide pass
    // can determined correct function to call.
    setCoroInfo(F, Shape, Fns: Clones);
  }
1406
1407 // Create a variant of ramp function that does not perform heap allocation
1408 // for a switch ABI coroutine.
1409 //
1410 // The newly split `.noalloc` ramp function has the following differences:
1411 // - Has one additional frame pointer parameter in lieu of dynamic
1412 // allocation.
1413 // - Suppressed allocations by replacing coro.alloc and coro.free.
  static Function *createNoAllocVariant(Function &F, coro::Shape &Shape,
                                        SmallVectorImpl<Function *> &Clones) {
    assert(Shape.ABI == coro::ABI::Switch);
    auto *OrigFnTy = F.getFunctionType();
    auto OldParams = OrigFnTy->params();

    // Build the new signature: the original parameters plus a trailing
    // caller-provided frame pointer.
    SmallVector<Type *> NewParams;
    NewParams.reserve(N: OldParams.size() + 1);
    NewParams.append(in_start: OldParams.begin(), in_end: OldParams.end());
    NewParams.push_back(Elt: PointerType::getUnqual(C&: Shape.FrameTy->getContext()));

    auto *NewFnTy = FunctionType::get(Result: OrigFnTy->getReturnType(), Params: NewParams,
                                      isVarArg: OrigFnTy->isVarArg());
    Function *NoAllocF =
        Function::Create(Ty: NewFnTy, Linkage: F.getLinkage(), N: F.getName() + ".noalloc");

    // Map each original argument onto the same-position argument of NoAllocF.
    ValueToValueMapTy VMap;
    unsigned int Idx = 0;
    for (const auto &I : F.args()) {
      VMap[&I] = NoAllocF->getArg(i: Idx++);
    }
    // We just appended the frame pointer as the last argument of the new
    // function.
    auto FrameIdx = NoAllocF->arg_size() - 1;
    SmallVector<ReturnInst *, 4> Returns;
    CloneFunctionInto(NewFunc: NoAllocF, OldFunc: &F, VMap,
                      Changes: CloneFunctionChangeType::LocalChangesOnly, Returns);

    if (Shape.CoroBegin) {
      // Suppress all allocation in the clone: coro.free/coro.alloc are
      // rewritten, and coro.begin is replaced by the new frame argument.
      auto *NewCoroBegin =
          cast_if_present<CoroBeginInst>(Val&: VMap[Shape.CoroBegin]);
      auto *NewCoroId = cast<CoroIdInst>(Val: NewCoroBegin->getId());
      coro::replaceCoroFree(CoroId: NewCoroId, /*Elide=*/true);
      coro::suppressCoroAllocs(CoroId: NewCoroId);
      NewCoroBegin->replaceAllUsesWith(V: NoAllocF->getArg(i: FrameIdx));
      NewCoroBegin->eraseFromParent();
    }

    Module *M = F.getParent();
    M->getFunctionList().insert(where: M->end(), New: NoAllocF);

    removeUnreachableBlocks(F&: *NoAllocF);
    auto NewAttrs = NoAllocF->getAttributes();
    // When we elide allocation, we read these attributes to determine the
    // frame size and alignment.
    addFramePointerAttrs(Attrs&: NewAttrs, Context&: NoAllocF->getContext(), ParamIndex: FrameIdx,
                         Size: Shape.FrameSize, Alignment: Shape.FrameAlign,
                         /*NoAlias=*/false);

    NoAllocF->setAttributes(NewAttrs);

    Clones.push_back(Elt: NoAllocF);
    // Reset the original function's coro info, make the new noalloc variant
    // connected to the original ramp function.
    setCoroInfo(F, Shape, Fns: Clones);
    // After copying, set the linkage to internal linkage. Original function
    // may have different linkage, but optimization dependent on this function
    // generally relies on LTO.
    NoAllocF->setLinkage(llvm::GlobalValue::InternalLinkage);
    return NoAllocF;
  }
1475
1476private:
1477 // Create an entry block for a resume function with a switch that will jump to
1478 // suspend points.
  static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
    LLVMContext &C = F.getContext();

    DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
    DISubprogram *DIS = F.getSubprogram();
    // If there is no DISubprogram for F, it implies the function is compiled
    // without debug info. So we also don't generate debug info for the
    // suspension points.
    bool AddDebugLabels = DIS && DIS->getUnit() &&
                          (DIS->getUnit()->getEmissionKind() ==
                           DICompileUnit::DebugEmissionKind::FullDebug);

    // resume.entry:
    //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32
    //  0, i32 2 % index = load i32, i32* %index.addr switch i32 %index, label
    //  %unreachable [
    //    i32 0, label %resume.0
    //    i32 1, label %resume.1
    //    ...
    //  ]

    auto *NewEntry = BasicBlock::Create(Context&: C, Name: "resume.entry", Parent: &F);
    auto *UnreachBB = BasicBlock::Create(Context&: C, Name: "unreachable", Parent: &F);

    // Build the dispatch: load the stored suspend index from the frame and
    // switch on it to jump to the matching resume point.
    IRBuilder<> Builder(NewEntry);
    auto *FramePtr = Shape.FramePtr;
    auto *FrameTy = Shape.FrameTy;
    auto *GepIndex = Builder.CreateStructGEP(
        Ty: FrameTy, Ptr: FramePtr, Idx: Shape.getSwitchIndexField(), Name: "index.addr");
    auto *Index = Builder.CreateLoad(Ty: Shape.getIndexType(), Ptr: GepIndex, Name: "index");
    auto *Switch =
        Builder.CreateSwitch(V: Index, Dest: UnreachBB, NumCases: Shape.CoroSuspends.size());
    Shape.SwitchLowering.ResumeSwitch = Switch;

    // Split all coro.suspend calls
    size_t SuspendIndex = 0;
    for (auto *AnyS : Shape.CoroSuspends) {
      auto *S = cast<CoroSuspendInst>(Val: AnyS);
      ConstantInt *IndexVal = Shape.getIndex(Value: SuspendIndex);

      // Replace CoroSave with a store to Index:
      //    %index.addr = getelementptr %f.frame... (index field number)
      //    store i32 %IndexVal, i32* %index.addr1
      auto *Save = S->getCoroSave();
      Builder.SetInsertPoint(Save);
      if (S->isFinal()) {
        // The coroutine should be marked done if it reaches the final suspend
        // point.
        markCoroutineAsDone(Builder, Shape, FramePtr);
      } else {
        auto *GepIndex = Builder.CreateStructGEP(
            Ty: FrameTy, Ptr: FramePtr, Idx: Shape.getSwitchIndexField(), Name: "index.addr");
        Builder.CreateStore(Val: IndexVal, Ptr: GepIndex);
      }

      Save->replaceAllUsesWith(V: ConstantTokenNone::get(Context&: C));
      Save->eraseFromParent();

      // Split block before and after coro.suspend and add a jump from an entry
      // switch:
      //
      //  whateverBB:
      //    whatever
      //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
      //    switch i8 %0, label %suspend[i8 0, label %resume
      //                                 i8 1, label %cleanup]
      // becomes:
      //
      //  whateverBB:
      //    whatever
      //    br label %resume.0.landing
      //
      //  resume.0: ; <--- jump from the switch in the resume.entry
      //    #dbg_label(...) ; <--- artificial label for debuggers
      //    %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
      //    br label %resume.0.landing
      //
      //  resume.0.landing:
      //    %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
      //    switch i8 % 1, label %suspend [i8 0, label %resume
      //                                   i8 1, label %cleanup]

      auto *SuspendBB = S->getParent();
      auto *ResumeBB =
          SuspendBB->splitBasicBlock(I: S, BBName: "resume." + Twine(SuspendIndex));
      auto *LandingBB = ResumeBB->splitBasicBlock(
          I: S->getNextNode(), BBName: ResumeBB->getName() + Twine(".landing"));
      Switch->addCase(OnVal: IndexVal, Dest: ResumeBB);

      cast<BranchInst>(Val: SuspendBB->getTerminator())->setSuccessor(idx: 0, NewSucc: LandingBB);
      auto *PN = PHINode::Create(Ty: Builder.getInt8Ty(), NumReservedValues: 2, NameStr: "");
      PN->insertBefore(InsertPos: LandingBB->begin());
      S->replaceAllUsesWith(V: PN);
      PN->addIncoming(V: Builder.getInt8(C: -1), BB: SuspendBB);
      PN->addIncoming(V: S, BB: ResumeBB);

      if (AddDebugLabels) {
        if (DebugLoc SuspendLoc = S->getDebugLoc()) {
          std::string LabelName =
              ("__coro_resume_" + Twine(SuspendIndex)).str();
          // Take the "inlined at" location recursively, if present. This is
          // mandatory as the DILabel insertion checks that the scopes of label
          // and the attached location match. This is not the case when the
          // suspend location has been inlined due to pointing to the original
          // scope.
          DILocation *DILoc = SuspendLoc;
          while (DILocation *InlinedAt = DILoc->getInlinedAt())
            DILoc = InlinedAt;

          DILabel *ResumeLabel =
              DBuilder.createLabel(Scope: DIS, Name: LabelName, File: DILoc->getFile(),
                                   LineNo: SuspendLoc.getLine(), Column: SuspendLoc.getCol(),
                                   /*IsArtificial=*/true,
                                   /*CoroSuspendIdx=*/SuspendIndex,
                                   /*AlwaysPreserve=*/false);
          DBuilder.insertLabel(LabelInfo: ResumeLabel, DL: DILoc, InsertPt: ResumeBB->begin());
        }
      }

      ++SuspendIndex;
    }

    // Any index not covered by the switch is invalid to resume with.
    Builder.SetInsertPoint(UnreachBB);
    Builder.CreateUnreachable();
    DBuilder.finalize();

    Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
  }
1607
1608 // Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
  static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                              Function *DestroyFn, Function *CleanupFn) {
    // Insert the stores right after the frame pointer is established.
    IRBuilder<> Builder(&*Shape.getInsertPtAfterFramePtr());

    auto *ResumeAddr = Builder.CreateStructGEP(
        Ty: Shape.FrameTy, Ptr: Shape.FramePtr, Idx: coro::Shape::SwitchFieldIndex::Resume,
        Name: "resume.addr");
    Builder.CreateStore(Val: ResumeFn, Ptr: ResumeAddr);

    Value *DestroyOrCleanupFn = DestroyFn;

    CoroIdInst *CoroId = Shape.getSwitchCoroId();
    if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
      // If there is a CoroAlloc and it returns false (meaning we elide the
      // allocation, use CleanupFn instead of DestroyFn).
      DestroyOrCleanupFn = Builder.CreateSelect(C: CA, True: DestroyFn, False: CleanupFn);
    }

    auto *DestroyAddr = Builder.CreateStructGEP(
        Ty: Shape.FrameTy, Ptr: Shape.FramePtr, Idx: coro::Shape::SwitchFieldIndex::Destroy,
        Name: "destroy.addr");
    Builder.CreateStore(Val: DestroyOrCleanupFn, Ptr: DestroyAddr);
  }
1632
1633 // Create a global constant array containing pointers to functions provided
1634 // and set Info parameter of CoroBegin to point at this constant. Example:
1635 //
1636 // @f.resumers = internal constant [2 x void(%f.frame*)*]
1637 // [void(%f.frame*)* @f.resume, void(%f.frame*)*
1638 // @f.destroy]
1639 // define void @f() {
1640 // ...
1641 // call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
1642 // i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to
1643 // i8*))
1644 //
1645 // Assumes that all the functions have the same signature.
1646 static void setCoroInfo(Function &F, coro::Shape &Shape,
1647 ArrayRef<Function *> Fns) {
1648 // This only works under the switch-lowering ABI because coro elision
1649 // only works on the switch-lowering ABI.
1650 SmallVector<Constant *, 4> Args(Fns);
1651 assert(!Args.empty());
1652 Function *Part = *Fns.begin();
1653 Module *M = Part->getParent();
1654 auto *ArrTy = ArrayType::get(ElementType: Part->getType(), NumElements: Args.size());
1655
1656 auto *ConstVal = ConstantArray::get(T: ArrTy, V: Args);
1657 auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
1658 GlobalVariable::PrivateLinkage, ConstVal,
1659 F.getName() + Twine(".resumers"));
1660
1661 // Update coro.begin instruction to refer to this constant.
1662 LLVMContext &C = F.getContext();
1663 auto *BC = ConstantExpr::getPointerCast(C: GV, Ty: PointerType::getUnqual(C));
1664 Shape.getSwitchCoroId()->setInfo(BC);
1665 }
1666};
1667
1668} // namespace
1669
1670static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1671 Value *Continuation) {
1672 auto *ResumeIntrinsic = Suspend->getResumeFunction();
1673 auto &Context = Suspend->getParent()->getParent()->getContext();
1674 auto *Int8PtrTy = PointerType::getUnqual(C&: Context);
1675
1676 IRBuilder<> Builder(ResumeIntrinsic);
1677 auto *Val = Builder.CreateBitOrPointerCast(V: Continuation, DestTy: Int8PtrTy);
1678 ResumeIntrinsic->replaceAllUsesWith(V: Val);
1679 ResumeIntrinsic->eraseFromParent();
1680 Suspend->setOperand(i_nocapture: CoroSuspendAsyncInst::ResumeFunctionArg,
1681 Val_nocapture: PoisonValue::get(T: Int8PtrTy));
1682}
1683
1684/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
1685static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1686 ArrayRef<Value *> FnArgs,
1687 SmallVectorImpl<Value *> &CallArgs) {
1688 size_t ArgIdx = 0;
1689 for (auto *paramTy : FnTy->params()) {
1690 assert(ArgIdx < FnArgs.size());
1691 if (paramTy != FnArgs[ArgIdx]->getType())
1692 CallArgs.push_back(
1693 Elt: Builder.CreateBitOrPointerCast(V: FnArgs[ArgIdx], DestTy: paramTy));
1694 else
1695 CallArgs.push_back(Elt: FnArgs[ArgIdx]);
1696 ++ArgIdx;
1697 }
1698}
1699
1700CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1701 TargetTransformInfo &TTI,
1702 ArrayRef<Value *> Arguments,
1703 IRBuilder<> &Builder) {
1704 auto *FnTy = MustTailCallFn->getFunctionType();
1705 // Coerce the arguments, llvm optimizations seem to ignore the types in
1706 // vaarg functions and throws away casts in optimized mode.
1707 SmallVector<Value *, 8> CallArgs;
1708 coerceArguments(Builder, FnTy, FnArgs: Arguments, CallArgs);
1709
1710 auto *TailCall = Builder.CreateCall(FTy: FnTy, Callee: MustTailCallFn, Args: CallArgs);
1711 // Skip targets which don't support tail call.
1712 if (TTI.supportsTailCallFor(CB: TailCall)) {
1713 TailCall->setTailCallKind(CallInst::TCK_MustTail);
1714 }
1715 TailCall->setDebugLoc(Loc);
1716 TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1717 return TailCall;
1718}
1719
// Split an async-ABI coroutine: rebase the frame pointer into the async
// context, create one continuation function per suspend point, and terminate
// each suspend with an inlined musttail call to its "must tail call" function.
void coro::AsyncABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                    SmallVectorImpl<Function *> &Clones,
                                    TargetTransformInfo &TTI) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Kind: Attribute::NoReturn);
  F.removeRetAttr(Kind: Attribute::NoAlias);
  F.removeRetAttr(Kind: Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = PointerType::getUnqual(C&: Context);

  auto *Id = Shape.getAsyncCoroId();
  IRBuilder<> Builder(Id);

  // The frame sits inside the async-context storage at FrameOffset.
  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(V: FramePtr, DestTy: Int8PtrTy);
  FramePtr = Builder.CreateConstInBoundsGEP1_32(
      Ty: Type::getInt8Ty(C&: Context), Ptr: FramePtr, Idx0: Shape.AsyncLowering.FrameOffset,
      Name: "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(V: FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(x: F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(N: Shape.CoroSuspends.size());
  for (auto [Idx, CS] : llvm::enumerate(First&: Shape.CoroSuspends)) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(Val: CS);

    // Create the clone declaration. The "__swift_async_resume_*" projection
    // functions select Swift's "TQ"/"TY" mangled suffixes; everything else
    // gets a plain ".resume.N" suffix.
    auto ResumeNameSuffix = ".resume.";
    auto ProjectionFunctionName =
        Suspend->getAsyncContextProjectionFunction()->getName();
    bool UseSwiftMangling = false;
    if (ProjectionFunctionName == "__swift_async_resume_project_context") {
      ResumeNameSuffix = "TQ";
      UseSwiftMangling = true;
    } else if (ProjectionFunctionName == "__swift_async_resume_get_context") {
      ResumeNameSuffix = "TY";
      UseSwiftMangling = true;
    }
    auto *Continuation = createCloneDeclaration(
        OrigF&: F, Shape,
        Suffix: UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
                         : ResumeNameSuffix + Twine(Idx),
        InsertBefore: NextF, ActiveSuspend: Suspend);
    Clones.push_back(Elt: Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(I: Suspend);
    auto *Branch = cast<BranchInst>(Val: SuspendBB->getTerminator());

    // Place it before the first suspend.
    auto *ReturnBB =
        BasicBlock::Create(Context&: F.getContext(), Name: "coro.return", Parent: &F, InsertBefore: NewSuspendBB);
    Branch->setSuccessor(idx: 0, NewSucc: ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail call function and inline it. The call's
    // arguments are the suspend's operands past MustTailCallFuncArg.
    auto *Fn = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->args());
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(
        N: CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
    auto *TailCall = coro::createMustTailCall(Loc: Suspend->getDebugLoc(), MustTailCallFn: Fn, TTI,
                                              Arguments: FnArgs, Builder);
    Builder.CreateRetVoid();
    InlineFunctionInfo FnInfo;
    (void)InlineFunction(CB&: *TailCall, IFI&: FnInfo);

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());

  // Now clone the coroutine body into each continuation declared above.
  for (auto [Idx, CS] : llvm::enumerate(First&: Shape.CoroSuspends)) {
    auto *Suspend = CS;
    auto *Clone = Clones[Idx];

    coro::BaseCloner::createClone(OrigF&: F, Suffix: "resume." + Twine(Idx), Shape, NewF: Clone,
                                  ActiveSuspend: Suspend, TTI);
  }
}
1816
// Split a retcon/retcon.once coroutine: allocate the frame (or reuse the
// caller-provided storage), redirect llvm.coro.begin to it, build one shared
// return block that yields the next continuation plus any directly-yielded
// values, and create one continuation clone per suspend point.
void coro::AnyRetconABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                        SmallVectorImpl<Function *> &Clones,
                                        TargetTransformInfo &TTI) {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Kind: Attribute::NoReturn);
  F.removeRetAttr(Kind: Attribute::NoAlias);
  F.removeRetAttr(Kind: Attribute::NonNull);

  // Allocate the frame.
  auto *Id = Shape.getRetconCoroId();
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    // The frame fits in the caller-provided storage; use it directly.
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    // Determine the size of the frame.
    const DataLayout &DL = F.getDataLayout();
    auto Size = DL.getTypeAllocSize(Ty: Shape.FrameTy);

    // Allocate. We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Size: Builder.getInt64(C: Size), CG: nullptr);
    RawFramePtr =
        Builder.CreateBitCast(V: RawFramePtr, DestTy: Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    Builder.CreateStore(Val: RawFramePtr, Ptr: Id->getStorage());
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(V: RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block. Built lazily inside the loop below, on the
  // first suspend point encountered.
  BasicBlock *ReturnBB = nullptr;
  PHINode *ContinuationPhi = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(x: F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(N: Shape.CoroSuspends.size());
  for (auto [Idx, CS] : llvm::enumerate(First&: Shape.CoroSuspends)) {
    auto Suspend = cast<CoroSuspendRetconInst>(Val: CS);

    // Create the clone declaration.
    auto Continuation = createCloneDeclaration(
        OrigF&: F, Shape, Suffix: ".resume." + Twine(Idx), InsertBefore: NextF, ActiveSuspend: nullptr);
    Clones.push_back(Elt: Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(I: Suspend);
    auto Branch = cast<BranchInst>(Val: SuspendBB->getTerminator());

    // Create the unified return block.
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB =
          BasicBlock::Create(Context&: F.getContext(), Name: "coro.return", Parent: &F, InsertBefore: NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // First, the continuation.
      ContinuationPhi =
          Builder.CreatePHI(Ty: Continuation->getType(), NumReservedValues: Shape.CoroSuspends.size());

      // Create PHIs for all other return values.
      assert(ReturnPHIs.empty());

      // Next, all the directly-yielded values.
      for (auto *ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(
            Elt: Builder.CreatePHI(Ty: ResultTy, NumReservedValues: Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.empty() ? RetTy : RetTy->getStructElementType(N: 0));
      auto *CastedContinuation =
          Builder.CreateBitCast(V: ContinuationPhi, DestTy: CastedContinuationTy);

      // If there are yielded values, the return type is a struct: slot 0 is
      // the continuation and the remaining slots carry the yielded values.
      Value *RetV = CastedContinuation;
      if (!ReturnPHIs.empty()) {
        auto ValueIdx = 0;
        RetV = PoisonValue::get(T: RetTy);
        RetV = Builder.CreateInsertValue(Agg: RetV, Val: CastedContinuation, Idxs: ValueIdx++);

        for (auto Phi : ReturnPHIs)
          RetV = Builder.CreateInsertValue(Agg: RetV, Val: Phi, Idxs: ValueIdx++);
      }

      Builder.CreateRet(V: RetV);
    }

    // Branch to the return block and feed this suspend's continuation and
    // yielded values into the shared PHIs.
    Branch->setSuccessor(idx: 0, NewSucc: ReturnBB);
    assert(ContinuationPhi);
    ContinuationPhi->addIncoming(V: Continuation, BB: SuspendBB);
    for (auto [Phi, VUse] :
         llvm::zip_equal(t&: ReturnPHIs, u: Suspend->value_operands()))
      Phi->addIncoming(V: VUse, BB: SuspendBB);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());

  // Now clone the coroutine body into each continuation declared above.
  for (auto [Idx, CS] : llvm::enumerate(First&: Shape.CoroSuspends)) {
    auto Suspend = CS;
    auto Clone = Clones[Idx];

    coro::BaseCloner::createClone(OrigF&: F, Suffix: "resume." + Twine(Idx), Shape, NewF: Clone,
                                  ActiveSuspend: Suspend, TTI);
  }
}
1948
namespace {
/// Pretty-stack-trace entry that reports which coroutine was being split if
/// the compiler crashes while this entry is on the stack.
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F; // The coroutine currently being split.

public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(O&: OS, /*print type*/ PrintType: false, M: F.getParent());
    OS << "\n";
  }
};
} // namespace
1962
1963/// Remove calls to llvm.coro.end in the original function.
1964static void removeCoroEndsFromRampFunction(const coro::Shape &Shape) {
1965 if (Shape.ABI != coro::ABI::Switch) {
1966 for (auto *End : Shape.CoroEnds) {
1967 replaceCoroEnd(End, Shape, FramePtr: Shape.FramePtr, /*in ramp*/ InRamp: true, CG: nullptr);
1968 }
1969 } else {
1970 for (llvm::AnyCoroEndInst *End : Shape.CoroEnds)
1971 End->eraseFromParent();
1972 }
1973}
1974
1975static void removeCoroIsInRampFromRampFunction(const coro::Shape &Shape) {
1976 for (auto *II : Shape.CoroIsInRampInsts) {
1977 auto &Ctx = II->getContext();
1978 II->replaceAllUsesWith(V: ConstantInt::getTrue(Context&: Ctx));
1979 II->eraseFromParent();
1980 }
1981}
1982
1983static bool hasSafeElideCaller(Function &F) {
1984 for (auto *U : F.users()) {
1985 if (auto *CB = dyn_cast<CallBase>(Val: U)) {
1986 auto *Caller = CB->getFunction();
1987 if (Caller && Caller->isPresplitCoroutine() &&
1988 CB->hasFnAttr(Kind: llvm::Attribute::CoroElideSafe))
1989 return true;
1990 }
1991 }
1992 return false;
1993}
1994
// Split a switch-ABI coroutine by delegating to the switch-lowering
// splitter, which fills \p Clones with the generated funclets.
void coro::SwitchABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                     SmallVectorImpl<Function *> &Clones,
                                     TargetTransformInfo &TTI) {
  SwitchCoroutineSplitter::split(F, Shape, Clones, TTI);
}
2000
// Drive the full split of one coroutine: lower await-suspend intrinsics,
// normalize the function, build the frame, run the ABI-specific split, then
// clean the ramp function up. The phases below are order-dependent.
static void doSplitCoroutine(Function &F, SmallVectorImpl<Function *> &Clones,
                             coro::BaseABI &ABI, TargetTransformInfo &TTI,
                             bool OptimizeFrame) {
  PrettyStackTraceFunction prettyStackTrace(F);

  auto &Shape = ABI.Shape;
  assert(Shape.CoroBegin);

  lowerAwaitSuspends(F, Shape);

  simplifySuspendPoints(Shape);

  normalizeCoroutine(F, Shape, TTI);
  ABI.buildCoroutineFrame(OptimizeFrame);
  replaceFrameSizeAndAlignment(Shape);

  bool isNoSuspendCoroutine = Shape.CoroSuspends.empty();

  // A no-alloc variant is only created for switch-ABI coroutines that have a
  // CoroElideSafe caller and are not marked noinline. Decide before splitting,
  // since splitting changes the function.
  bool shouldCreateNoAllocVariant =
      !isNoSuspendCoroutine && Shape.ABI == coro::ABI::Switch &&
      hasSafeElideCaller(F) && !F.hasFnAttribute(Kind: llvm::Attribute::NoInline);

  // If there are no suspend points, no split required, just remove
  // the allocation and deallocation blocks, they are not needed.
  if (isNoSuspendCoroutine) {
    handleNoSuspendCoroutine(Shape);
  } else {
    ABI.splitCoroutine(F, Shape, Clones, TTI);
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, VMap: nullptr);

  // Salvage debug intrinsics that point into the coroutine frame in the
  // original function. The Cloner has already salvaged debug info in the new
  // coroutine funclets.
  SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
  auto DbgVariableRecords = collectDbgVariableRecords(F);
  for (DbgVariableRecord *DVR : DbgVariableRecords)
    coro::salvageDebugInfo(ArgToAllocaMap, DVR&: *DVR, UseEntryValue: false /*UseEntryValue*/);

  removeCoroEndsFromRampFunction(Shape);
  removeCoroIsInRampFromRampFunction(Shape);

  if (shouldCreateNoAllocVariant)
    SwitchCoroutineSplitter::createNoAllocVariant(F, Shape, Clones);
}
2049
2050static LazyCallGraph::SCC &updateCallGraphAfterCoroutineSplit(
2051 LazyCallGraph::Node &N, const coro::Shape &Shape,
2052 const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
2053 LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
2054 FunctionAnalysisManager &FAM) {
2055
2056 auto *CurrentSCC = &C;
2057 if (!Clones.empty()) {
2058 switch (Shape.ABI) {
2059 case coro::ABI::Switch:
2060 // Each clone in the Switch lowering is independent of the other clones.
2061 // Let the LazyCallGraph know about each one separately.
2062 for (Function *Clone : Clones)
2063 CG.addSplitFunction(OriginalFunction&: N.getFunction(), NewFunction&: *Clone);
2064 break;
2065 case coro::ABI::Async:
2066 case coro::ABI::Retcon:
2067 case coro::ABI::RetconOnce:
2068 // Each clone in the Async/Retcon lowering references of the other clones.
2069 // Let the LazyCallGraph know about all of them at once.
2070 if (!Clones.empty())
2071 CG.addSplitRefRecursiveFunctions(OriginalFunction&: N.getFunction(), NewFunctions: Clones);
2072 break;
2073 }
2074
2075 // Let the CGSCC infra handle the changes to the original function.
2076 CurrentSCC = &updateCGAndAnalysisManagerForCGSCCPass(G&: CG, C&: *CurrentSCC, N, AM,
2077 UR, FAM);
2078 }
2079
2080 // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
2081 // to the split functions.
2082 postSplitCleanup(F&: N.getFunction());
2083 CurrentSCC = &updateCGAndAnalysisManagerForFunctionPass(G&: CG, C&: *CurrentSCC, N,
2084 AM, UR, FAM);
2085 return *CurrentSCC;
2086}
2087
/// Replace a call to llvm.coro.prepare.retcon.
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(i: 0); // as an i8*
  auto Fn = CastFn->stripPointerCasts(); // as its original type

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  // early_inc_range keeps iteration valid while uses are erased below.
  for (Use &U : llvm::make_early_inc_range(Range: Prepare->uses())) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>(Val: U.getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(V: Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(V: CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts: walk back through the chain of casts that fed the
  // prepare call, erasing each one that no longer has uses.
  while (auto *Cast = dyn_cast<BitCastInst>(Val: CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(i_nocapture: 0);
    Cast->eraseFromParent();
  }
}
2124
2125static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2126 LazyCallGraph::SCC &C) {
2127 bool Changed = false;
2128 for (Use &P : llvm::make_early_inc_range(Range: PrepareFn->uses())) {
2129 // Intrinsics can only be used in calls.
2130 auto *Prepare = cast<CallInst>(Val: P.getUser());
2131 replacePrepare(Prepare, CG, C);
2132 Changed = true;
2133 }
2134
2135 return Changed;
2136}
2137
2138static void addPrepareFunction(const Module &M,
2139 SmallVectorImpl<Function *> &Fns,
2140 StringRef Name) {
2141 auto *PrepareFn = M.getFunction(Name);
2142 if (PrepareFn && !PrepareFn->use_empty())
2143 Fns.push_back(Elt: PrepareFn);
2144}
2145
2146static std::unique_ptr<coro::BaseABI>
2147CreateNewABI(Function &F, coro::Shape &S,
2148 std::function<bool(Instruction &)> IsMatCallback,
2149 const SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs) {
2150 if (S.CoroBegin->hasCustomABI()) {
2151 unsigned CustomABI = S.CoroBegin->getCustomABI();
2152 if (CustomABI >= GenCustomABIs.size())
2153 llvm_unreachable("Custom ABI not found amoung those specified");
2154 return GenCustomABIs[CustomABI](F, S);
2155 }
2156
2157 switch (S.ABI) {
2158 case coro::ABI::Switch:
2159 return std::make_unique<coro::SwitchABI>(args&: F, args&: S, args&: IsMatCallback);
2160 case coro::ABI::Async:
2161 return std::make_unique<coro::AsyncABI>(args&: F, args&: S, args&: IsMatCallback);
2162 case coro::ABI::Retcon:
2163 return std::make_unique<coro::AnyRetconABI>(args&: F, args&: S, args&: IsMatCallback);
2164 case coro::ABI::RetconOnce:
2165 return std::make_unique<coro::AnyRetconABI>(args&: F, args&: S, args&: IsMatCallback);
2166 }
2167 llvm_unreachable("Unknown ABI");
2168}
2169
2170CoroSplitPass::CoroSplitPass(bool OptimizeFrame)
2171 : CreateAndInitABI([](Function &F, coro::Shape &S) {
2172 std::unique_ptr<coro::BaseABI> ABI =
2173 CreateNewABI(F, S, IsMatCallback: coro::isTriviallyMaterializable, GenCustomABIs: {});
2174 ABI->init();
2175 return ABI;
2176 }),
2177 OptimizeFrame(OptimizeFrame) {}
2178
2179CoroSplitPass::CoroSplitPass(
2180 SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs, bool OptimizeFrame)
2181 : CreateAndInitABI([=](Function &F, coro::Shape &S) {
2182 std::unique_ptr<coro::BaseABI> ABI =
2183 CreateNewABI(F, S, IsMatCallback: coro::isTriviallyMaterializable, GenCustomABIs);
2184 ABI->init();
2185 return ABI;
2186 }),
2187 OptimizeFrame(OptimizeFrame) {}
2188
2189// For back compatibility, constructor takes a materializable callback and
2190// creates a generator for an ABI with a modified materializable callback.
2191CoroSplitPass::CoroSplitPass(std::function<bool(Instruction &)> IsMatCallback,
2192 bool OptimizeFrame)
2193 : CreateAndInitABI([=](Function &F, coro::Shape &S) {
2194 std::unique_ptr<coro::BaseABI> ABI =
2195 CreateNewABI(F, S, IsMatCallback, GenCustomABIs: {});
2196 ABI->init();
2197 return ABI;
2198 }),
2199 OptimizeFrame(OptimizeFrame) {}
2200
2201// For back compatibility, constructor takes a materializable callback and
2202// creates a generator for an ABI with a modified materializable callback.
2203CoroSplitPass::CoroSplitPass(
2204 std::function<bool(Instruction &)> IsMatCallback,
2205 SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs, bool OptimizeFrame)
2206 : CreateAndInitABI([=](Function &F, coro::Shape &S) {
2207 std::unique_ptr<coro::BaseABI> ABI =
2208 CreateNewABI(F, S, IsMatCallback, GenCustomABIs);
2209 ABI->init();
2210 return ABI;
2211 }),
2212 OptimizeFrame(OptimizeFrame) {}
2213
2214PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2215 CGSCCAnalysisManager &AM,
2216 LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2217 // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2218 // non-zero number of nodes, so we assume that here and grab the first
2219 // node's function's module.
2220 Module &M = *C.begin()->getFunction().getParent();
2221 auto &FAM =
2222 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(IR&: C, ExtraArgs&: CG).getManager();
2223
2224 // Check for uses of llvm.coro.prepare.retcon/async.
2225 SmallVector<Function *, 2> PrepareFns;
2226 addPrepareFunction(M, Fns&: PrepareFns, Name: "llvm.coro.prepare.retcon");
2227 addPrepareFunction(M, Fns&: PrepareFns, Name: "llvm.coro.prepare.async");
2228
2229 // Find coroutines for processing.
2230 SmallVector<LazyCallGraph::Node *> Coroutines;
2231 for (LazyCallGraph::Node &N : C)
2232 if (N.getFunction().isPresplitCoroutine())
2233 Coroutines.push_back(Elt: &N);
2234
2235 if (Coroutines.empty() && PrepareFns.empty())
2236 return PreservedAnalyses::all();
2237
2238 auto *CurrentSCC = &C;
2239 // Split all the coroutines.
2240 for (LazyCallGraph::Node *N : Coroutines) {
2241 Function &F = N->getFunction();
2242 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2243 << "\n");
2244
2245 // The suspend-crossing algorithm in buildCoroutineFrame gets tripped up
2246 // by unreachable blocks, so remove them as a first pass. Remove the
2247 // unreachable blocks before collecting intrinsics into Shape.
2248 removeUnreachableBlocks(F);
2249
2250 coro::Shape Shape(F);
2251 if (!Shape.CoroBegin)
2252 continue;
2253
2254 F.setSplittedCoroutine();
2255
2256 std::unique_ptr<coro::BaseABI> ABI = CreateAndInitABI(F, Shape);
2257
2258 SmallVector<Function *, 4> Clones;
2259 auto &TTI = FAM.getResult<TargetIRAnalysis>(IR&: F);
2260 doSplitCoroutine(F, Clones, ABI&: *ABI, TTI, OptimizeFrame);
2261 CurrentSCC = &updateCallGraphAfterCoroutineSplit(
2262 N&: *N, Shape, Clones, C&: *CurrentSCC, CG, AM, UR, FAM);
2263
2264 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(IR&: F);
2265 ORE.emit(RemarkBuilder: [&]() {
2266 return OptimizationRemark(DEBUG_TYPE, "CoroSplit", &F)
2267 << "Split '" << ore::NV("function", F.getName())
2268 << "' (frame_size=" << ore::NV("frame_size", Shape.FrameSize)
2269 << ", align=" << ore::NV("align", Shape.FrameAlign.value()) << ")";
2270 });
2271
2272 if (!Shape.CoroSuspends.empty()) {
2273 // Run the CGSCC pipeline on the original and newly split functions.
2274 UR.CWorklist.insert(X: CurrentSCC);
2275 for (Function *Clone : Clones)
2276 UR.CWorklist.insert(X: CG.lookupSCC(N&: CG.get(F&: *Clone)));
2277 } else if (Shape.ABI == coro::ABI::Async) {
2278 // Reprocess the function to inline the tail called return function of
2279 // coro.async.end.
2280 UR.CWorklist.insert(X: &C);
2281 }
2282 }
2283
2284 for (auto *PrepareFn : PrepareFns) {
2285 replaceAllPrepares(PrepareFn, CG, C&: *CurrentSCC);
2286 }
2287
2288 return PreservedAnalyses::none();
2289}
2290