1//===----- CGCoroutine.cpp - Emit LLVM Code for C++ coroutines ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code dealing with C++ code generation of coroutines.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCleanup.h"
14#include "CGDebugInfo.h"
15#include "CodeGenFunction.h"
16#include "clang/AST/StmtCXX.h"
17#include "clang/AST/StmtVisitor.h"
18#include "llvm/ADT/ScopeExit.h"
19
20using namespace clang;
21using namespace CodeGen;
22
23using llvm::Value;
24using llvm::BasicBlock;
25
namespace {
// Classifies a suspend point: the implicit initial/final suspends, a normal
// co_await, or a co_yield. Used for counters in CGCoroData and for naming
// the basic blocks emitted for each suspend point.
enum class AwaitKind { Init, Normal, Yield, Final };
// Label stem for each suspend point, indexed by static_cast<unsigned>(Kind).
static constexpr llvm::StringLiteral AwaitKindStr[] = {"init", "await", "yield",
                                                       "final"};
} // namespace
31
// Per-function state tracked while emitting a coroutine body. Created lazily
// by createCoroData() and owned by CodeGenFunction::CGCoroInfo.
struct clang::CodeGen::CGCoroData {
  // What is the current await expression kind and how many
  // await/yield expressions were encountered so far.
  // These are used to generate pretty labels for await expressions in LLVM IR.
  AwaitKind CurrentAwaitKind = AwaitKind::Init;
  unsigned AwaitNum = 0;
  unsigned YieldNum = 0;

  // How many co_return statements are in the coroutine. Used to decide whether
  // we need to add co_return; equivalent at the end of the user authored body.
  unsigned CoreturnCount = 0;

  // A branch to this block is emitted when coroutine needs to suspend.
  llvm::BasicBlock *SuspendBB = nullptr;
  // A branch to this block after final.cleanup or final.ready
  llvm::BasicBlock *FinalExit = nullptr;

  // The promise type's 'unhandled_exception' handler, if it defines one.
  Stmt *ExceptionHandler = nullptr;

  // A temporary i1 alloca that stores whether 'await_resume' threw an
  // exception. If it did, 'true' is stored in this variable, and the coroutine
  // body must be skipped. If the promise type does not define an exception
  // handler, this is null.
  llvm::Value *ResumeEHVar = nullptr;

  // Stores the jump destination just before the coroutine memory is freed.
  // This is the destination that every suspend point jumps to for the cleanup
  // branch.
  CodeGenFunction::JumpDest CleanupJD;

  // Stores the jump destination just before the final suspend. The co_return
  // statements jumps to this point after calling return_xxx promise member.
  CodeGenFunction::JumpDest FinalJD;

  // Stores the llvm.coro.id emitted in the function so that we can supply it
  // as the first argument to coro.begin, coro.alloc and coro.free intrinsics.
  // Note: llvm.coro.id returns a token that cannot be directly expressed in a
  // builtin.
  llvm::CallInst *CoroId = nullptr;

  // Stores the llvm.coro.begin emitted in the function so that we can replace
  // all coro.frame intrinsics with direct SSA value of coro.begin that returns
  // the address of the coroutine frame of the current coroutine.
  llvm::CallInst *CoroBegin = nullptr;

  // Stores the last emitted coro.free for the deallocate expressions, we use it
  // to wrap dealloc code with if(auto mem = coro.free) dealloc(mem).
  llvm::CallInst *LastCoroFree = nullptr;

  // If coro.id came from the builtin, remember the expression to give better
  // diagnostic. If CoroIdExpr is nullptr, the coro.id was created by
  // EmitCoroutineBody.
  CallExpr const *CoroIdExpr = nullptr;
};
87
88// Defining these here allows to keep CGCoroData private to this file.
89clang::CodeGen::CodeGenFunction::CGCoroInfo::CGCoroInfo() {}
90CodeGenFunction::CGCoroInfo::~CGCoroInfo() {}
91
// Lazily creates the per-function coroutine state (CGCoroData), recording the
// llvm.coro.id call and, when it originated from __builtin_coro_id, the call
// expression for diagnostics. Diagnoses a second attempt to create the data.
static void createCoroData(CodeGenFunction &CGF,
                           CodeGenFunction::CGCoroInfo &CurCoro,
                           llvm::CallInst *CoroId,
                           CallExpr const *CoroIdExpr = nullptr) {
  if (CurCoro.Data) {
    // NOTE(review): if the existing data came from __builtin_coro_id but this
    // call passes a null CoroIdExpr (the EmitCoroutineBody path), the
    // getBeginLoc() below dereferences a null pointer — looks unreachable in
    // practice, but worth confirming.
    if (CurCoro.Data->CoroIdExpr)
      CGF.CGM.Error(loc: CoroIdExpr->getBeginLoc(),
                    error: "only one __builtin_coro_id can be used in a function");
    else if (CoroIdExpr)
      CGF.CGM.Error(loc: CoroIdExpr->getBeginLoc(),
                    error: "__builtin_coro_id shall not be used in a C++ coroutine");
    else
      llvm_unreachable("EmitCoroutineBodyStatement called twice?");

    return;
  }

  CurCoro.Data = std::make_unique<CGCoroData>();
  CurCoro.Data->CoroId = CoroId;
  CurCoro.Data->CoroIdExpr = CoroIdExpr;
}
113
114// Synthesize a pretty name for a suspend point.
115static SmallString<32> buildSuspendPrefixStr(CGCoroData &Coro, AwaitKind Kind) {
116 unsigned No = 0;
117 switch (Kind) {
118 case AwaitKind::Init:
119 case AwaitKind::Final:
120 break;
121 case AwaitKind::Normal:
122 No = ++Coro.AwaitNum;
123 break;
124 case AwaitKind::Yield:
125 No = ++Coro.YieldNum;
126 break;
127 }
128 SmallString<32> Prefix(AwaitKindStr[static_cast<unsigned>(Kind)]);
129 if (No > 1) {
130 Twine(No).toVector(Out&: Prefix);
131 }
132 return Prefix;
133}
134
135// Check if function can throw based on prototype noexcept, also works for
136// destructors which are implicitly noexcept but can be marked noexcept(false).
137static bool FunctionCanThrow(const FunctionDecl *D) {
138 const auto *Proto = D->getType()->getAs<FunctionProtoType>();
139 if (!Proto) {
140 // Function proto is not found, we conservatively assume throwing.
141 return true;
142 }
143 return !isNoexceptExceptionSpec(ESpecType: Proto->getExceptionSpecType()) ||
144 Proto->canThrow() != CT_Cannot;
145}
146
147static bool StmtCanThrow(const Stmt *S) {
148 if (const auto *CE = dyn_cast<CallExpr>(Val: S)) {
149 const auto *Callee = CE->getDirectCallee();
150 if (!Callee)
151 // We don't have direct callee. Conservatively assume throwing.
152 return true;
153
154 if (FunctionCanThrow(D: Callee))
155 return true;
156
157 // Fall through to visit the children.
158 }
159
160 if (const auto *TE = dyn_cast<CXXBindTemporaryExpr>(Val: S)) {
161 // Special handling of CXXBindTemporaryExpr here as calling of Dtor of the
162 // temporary is not part of `children()` as covered in the fall through.
163 // We need to mark entire statement as throwing if the destructor of the
164 // temporary throws.
165 const auto *Dtor = TE->getTemporary()->getDestructor();
166 if (FunctionCanThrow(D: Dtor))
167 return true;
168
169 // Fall through to visit the children.
170 }
171
172 for (const auto *child : S->children())
173 if (StmtCanThrow(S: child))
174 return true;
175
176 return false;
177}
178
179// Emit suspend expression which roughly looks like:
180//
181// auto && x = CommonExpr();
182// if (!x.await_ready()) {
183// llvm_coro_save();
184// llvm_coro_await_suspend(&x, frame, wrapper) (*) (**)
185// llvm_coro_suspend(); (***)
186// }
187// x.await_resume();
188//
189// where the result of the entire expression is the result of x.await_resume()
190//
191// (*) llvm_coro_await_suspend_{void, bool, handle} is lowered to
192// wrapper(&x, frame) when it's certain not to interfere with
193// coroutine transform. await_suspend expression is
194// asynchronous to the coroutine body and not all analyses
195// and transformations can handle it correctly at the moment.
196//
197// Wrapper function encapsulates x.await_suspend(...) call and looks like:
198//
199// auto __await_suspend_wrapper(auto& awaiter, void* frame) {
200// std::coroutine_handle<> handle(frame);
201// return awaiter.await_suspend(handle);
202// }
203//
// (**) If the return type of x.await_suspend is bool, it allows vetoing the
//      suspend:
//      if (x.await_suspend(...))
//        llvm_coro_suspend();
207//
208// (***) llvm_coro_suspend() encodes three possible continuations as
209// a switch instruction:
210//
211// %where-to = call i8 @llvm.coro.suspend(...)
212// switch i8 %where-to, label %coro.ret [ ; jump to epilogue to suspend
213// i8 0, label %yield.ready ; go here when resumed
214// i8 1, label %yield.cleanup ; go here when destroyed
215// ]
216//
217// See llvm's docs/Coroutines.rst for more details.
218//
namespace {
  // Result of emitting a suspend expression: exactly one of the two members
  // is meaningful, selected by the forLValue flag passed to
  // emitSuspendExpression.
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
} // namespace
// Emit the full ready/suspend/resume sequence for a co_await/co_yield (see
// the large comment above for the IR shape). Returns the value produced by
// await_resume(), as an lvalue or rvalue depending on forLValue.
static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Coro,
                                            CoroutineSuspendExpr const &S,
                                            AwaitKind Kind, AggValueSlot aggSlot,
                                            bool ignoreResult, bool forLValue) {
  auto *E = S.getCommonExpr();

  // Bind the common (operand) expression so ready/suspend/resume sub-exprs
  // can all refer to the same materialized awaiter.
  auto CommonBinder =
      CodeGenFunction::OpaqueValueMappingData::bind(CGF, ov: S.getOpaqueValue(), e: E);
  llvm::scope_exit UnbindCommonOnExit([&] { CommonBinder.unbind(CGF); });

  auto Prefix = buildSuspendPrefixStr(Coro, Kind);
  BasicBlock *ReadyBlock = CGF.createBasicBlock(name: Prefix + Twine(".ready"));
  BasicBlock *SuspendBlock = CGF.createBasicBlock(name: Prefix + Twine(".suspend"));
  BasicBlock *CleanupBlock = CGF.createBasicBlock(name: Prefix + Twine(".cleanup"));

  // If expression is ready, no need to suspend.
  CGF.EmitBranchOnBoolExpr(Cond: S.getReadyExpr(), TrueBlock: ReadyBlock, FalseBlock: SuspendBlock, TrueCount: 0);

  // Otherwise, emit suspend logic.
  CGF.EmitBlock(BB: SuspendBlock);

  auto &Builder = CGF.Builder;
  llvm::Function *CoroSave = CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::coro_save);
  auto *NullPtr = llvm::ConstantPointerNull::get(T: CGF.CGM.Int8PtrTy);
  auto *SaveCall = Builder.CreateCall(Callee: CoroSave, Args: {NullPtr});

  // Outline x.await_suspend(...) into a separate wrapper function; it is
  // invoked indirectly through the llvm.coro.await.suspend.* intrinsic below.
  auto SuspendWrapper = CodeGenFunction(CGF.CGM).generateAwaitSuspendWrapper(
      CoroName: CGF.CurFn->getName(), SuspendPointName: Prefix, S);

  CGF.CurCoro.InSuspendBlock = true;

  assert(CGF.CurCoro.Data && CGF.CurCoro.Data->CoroBegin &&
         "expected to be called in coroutine context");

  // Arguments: awaiter address, coroutine frame (coro.begin), wrapper fn.
  SmallVector<llvm::Value *, 3> SuspendIntrinsicCallArgs;
  SuspendIntrinsicCallArgs.push_back(
      Elt: CGF.getOrCreateOpaqueLValueMapping(e: S.getOpaqueValue()).getPointer(CGF));

  SuspendIntrinsicCallArgs.push_back(Elt: CGF.CurCoro.Data->CoroBegin);
  SuspendIntrinsicCallArgs.push_back(Elt: SuspendWrapper);

  // Pick the intrinsic variant matching await_suspend's return type
  // (void / bool / coroutine_handle).
  const auto SuspendReturnType = S.getSuspendReturnType();
  llvm::Intrinsic::ID AwaitSuspendIID;

  switch (SuspendReturnType) {
  case CoroutineSuspendExpr::SuspendReturnType::SuspendVoid:
    AwaitSuspendIID = llvm::Intrinsic::coro_await_suspend_void;
    break;
  case CoroutineSuspendExpr::SuspendReturnType::SuspendBool:
    AwaitSuspendIID = llvm::Intrinsic::coro_await_suspend_bool;
    break;
  case CoroutineSuspendExpr::SuspendReturnType::SuspendHandle:
    AwaitSuspendIID = llvm::Intrinsic::coro_await_suspend_handle;
    break;
  }

  llvm::Function *AwaitSuspendIntrinsic = CGF.CGM.getIntrinsic(IID: AwaitSuspendIID);

  // SuspendHandle might throw since it also resumes the returned handle.
  const bool AwaitSuspendCanThrow =
      SuspendReturnType ==
          CoroutineSuspendExpr::SuspendReturnType::SuspendHandle ||
      StmtCanThrow(S: S.getSuspendExpr());

  llvm::CallBase *SuspendRet = nullptr;
  // FIXME: add call attributes?
  if (AwaitSuspendCanThrow)
    SuspendRet =
        CGF.EmitCallOrInvoke(Callee: AwaitSuspendIntrinsic, Args: SuspendIntrinsicCallArgs);
  else
    SuspendRet = CGF.EmitNounwindRuntimeCall(callee: AwaitSuspendIntrinsic,
                                             args: SuspendIntrinsicCallArgs);

  assert(SuspendRet);
  CGF.CurCoro.InSuspendBlock = false;

  switch (SuspendReturnType) {
  case CoroutineSuspendExpr::SuspendReturnType::SuspendVoid:
    assert(SuspendRet->getType()->isVoidTy());
    break;
  case CoroutineSuspendExpr::SuspendReturnType::SuspendBool: {
    assert(SuspendRet->getType()->isIntegerTy());

    // Veto suspension if requested by bool returning await_suspend.
    BasicBlock *RealSuspendBlock =
        CGF.createBasicBlock(name: Prefix + Twine(".suspend.bool"));
    CGF.Builder.CreateCondBr(Cond: SuspendRet, True: RealSuspendBlock, False: ReadyBlock);
    CGF.EmitBlock(BB: RealSuspendBlock);
    break;
  }
  case CoroutineSuspendExpr::SuspendReturnType::SuspendHandle: {
    assert(SuspendRet->getType()->isVoidTy());
    break;
  }
  }

  // Emit the suspend point.
  const bool IsFinalSuspend = (Kind == AwaitKind::Final);
  llvm::Function *CoroSuspend =
      CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::coro_suspend);
  auto *SuspendResult = Builder.CreateCall(
      Callee: CoroSuspend, Args: {SaveCall, Builder.getInt1(V: IsFinalSuspend)});

  // Create a switch capturing three possible continuations.
  auto *Switch = Builder.CreateSwitch(V: SuspendResult, Dest: Coro.SuspendBB, NumCases: 2);
  Switch->addCase(OnVal: Builder.getInt8(C: 0), Dest: ReadyBlock);
  Switch->addCase(OnVal: Builder.getInt8(C: 1), Dest: CleanupBlock);

  // Emit cleanup for this suspend point.
  CGF.EmitBlock(BB: CleanupBlock);
  CGF.EmitBranchThroughCleanup(Dest: Coro.CleanupJD);
  if (IsFinalSuspend)
    Coro.FinalExit = CleanupBlock->getSingleSuccessor();

  // Emit await_resume expression.
  CGF.EmitBlock(BB: ReadyBlock);

  // Exception handling requires additional IR. If the 'await_resume' function
  // is marked as 'noexcept', we avoid generating this additional IR.
  CXXTryStmt *TryStmt = nullptr;
  if (Coro.ExceptionHandler && Kind == AwaitKind::Init &&
      StmtCanThrow(S: S.getResumeExpr())) {
    // Wrap the initial await_resume() in a synthesized try/catch that routes
    // exceptions to promise.unhandled_exception(); ResumeEHVar records
    // whether the resume threw so the coroutine body can be skipped.
    Coro.ResumeEHVar =
        CGF.CreateTempAlloca(Ty: Builder.getInt1Ty(), Name: Prefix + Twine("resume.eh"));
    Builder.CreateFlagStore(Value: true, Addr: Coro.ResumeEHVar);

    auto Loc = S.getResumeExpr()->getExprLoc();
    auto *Catch = new (CGF.getContext())
        CXXCatchStmt(Loc, /*exDecl=*/nullptr, Coro.ExceptionHandler);
    auto *TryBody = CompoundStmt::Create(C: CGF.getContext(), Stmts: S.getResumeExpr(),
                                         FPFeatures: FPOptionsOverride(), LB: Loc, RB: Loc);
    TryStmt = CXXTryStmt::Create(C: CGF.getContext(), tryLoc: Loc, tryBlock: TryBody, handlers: Catch);
    CGF.EnterCXXTryStmt(S: *TryStmt);
    CGF.EmitStmt(S: TryBody);
    // We don't use EmitCXXTryStmt here. We need to store to ResumeEHVar that
    // doesn't exist in the body.
    Builder.CreateFlagStore(Value: false, Addr: Coro.ResumeEHVar);
    CGF.ExitCXXTryStmt(S: *TryStmt);
    LValueOrRValue Res;
    // We are not supposed to obtain the value from init suspend await_resume().
    Res.RV = RValue::getIgnored();
    return Res;
  }

  LValueOrRValue Res;
  if (forLValue)
    Res.LV = CGF.EmitLValue(E: S.getResumeExpr());
  else
    Res.RV = CGF.EmitAnyExpr(E: S.getResumeExpr(), aggSlot, ignoreResult);

  return Res;
}
377
378RValue CodeGenFunction::EmitCoawaitExpr(const CoawaitExpr &E,
379 AggValueSlot aggSlot,
380 bool ignoreResult) {
381 return emitSuspendExpression(CGF&: *this, Coro&: *CurCoro.Data, S: E,
382 Kind: CurCoro.Data->CurrentAwaitKind, aggSlot,
383 ignoreResult, /*forLValue*/false).RV;
384}
385RValue CodeGenFunction::EmitCoyieldExpr(const CoyieldExpr &E,
386 AggValueSlot aggSlot,
387 bool ignoreResult) {
388 return emitSuspendExpression(CGF&: *this, Coro&: *CurCoro.Data, S: E, Kind: AwaitKind::Yield,
389 aggSlot, ignoreResult, /*forLValue*/false).RV;
390}
391
392void CodeGenFunction::EmitCoreturnStmt(CoreturnStmt const &S) {
393 ++CurCoro.Data->CoreturnCount;
394 const Expr *RV = S.getOperand();
395 if (RV && RV->getType()->isVoidType() && !isa<InitListExpr>(Val: RV)) {
396 // Make sure to evaluate the non initlist expression of a co_return
397 // with a void expression for side effects.
398 RunCleanupsScope cleanupScope(*this);
399 EmitIgnoredExpr(E: RV);
400 }
401 EmitStmt(S: S.getPromiseCall());
402 EmitBranchThroughCleanup(Dest: CurCoro.Data->FinalJD);
403}
404
405
406#ifndef NDEBUG
// Assert-only helper (this region is guarded by NDEBUG): returns the return
// type of the await_resume() call of a suspend expression, used below to
// check that lvalue emission only happens for reference-returning resumes.
static QualType getCoroutineSuspendExprReturnType(const ASTContext &Ctx,
                                           const CoroutineSuspendExpr *E) {
  const auto *RE = E->getResumeExpr();
  // Is it possible for RE to be a CXXBindTemporaryExpr wrapping
  // a MemberCallExpr?
  assert(isa<CallExpr>(RE) && "unexpected suspend expression type");
  return cast<CallExpr>(RE)->getCallReturnType(Ctx);
}
415#endif
416
// Build the out-of-line wrapper that the llvm.coro.await.suspend.* intrinsics
// call: auto wrapper(void *awaiter, void *frame) { return
// awaiter->await_suspend(handle-from-frame); }. The wrapper is internal,
// always-inline, and named after the coroutine and suspend point.
llvm::Function *
CodeGenFunction::generateAwaitSuspendWrapper(Twine const &CoroName,
                                             Twine const &SuspendPointName,
                                             CoroutineSuspendExpr const &S) {
  std::string FuncName =
      (CoroName + ".__await_suspend_wrapper__" + SuspendPointName).str();

  ASTContext &C = getContext();

  FunctionArgList args;

  // Two opaque pointer parameters: the awaiter object and the coroutine frame.
  ImplicitParamDecl AwaiterDecl(C, C.VoidPtrTy, ImplicitParamKind::Other);
  ImplicitParamDecl FrameDecl(C, C.VoidPtrTy, ImplicitParamKind::Other);
  QualType ReturnTy = S.getSuspendExpr()->getType();

  args.push_back(Elt: &AwaiterDecl);
  args.push_back(Elt: &FrameDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(Info: FI);

  llvm::Function *Fn = llvm::Function::Create(
      Ty: LTy, Linkage: llvm::GlobalValue::InternalLinkage, N: FuncName, M: &CGM.getModule());

  // The awaiter pointer (arg 0) is non-null and defined; the frame pointer
  // (arg 1) is defined.
  Fn->addParamAttr(ArgNo: 0, Kind: llvm::Attribute::AttrKind::NonNull);
  Fn->addParamAttr(ArgNo: 0, Kind: llvm::Attribute::AttrKind::NoUndef);

  Fn->addParamAttr(ArgNo: 1, Kind: llvm::Attribute::AttrKind::NoUndef);

  Fn->setMustProgress();
  Fn->addFnAttr(Kind: llvm::Attribute::AttrKind::AlwaysInline);
  Fn->addFnAttr(Kind: "sample-profile-suffix-elision-policy", Val: "selected");

  StartFunction(GD: GlobalDecl(), RetTy: ReturnTy, Fn, FnInfo: FI, Args: args);

  // FIXME: add TBAA metadata to the loads
  llvm::Value *AwaiterPtr = Builder.CreateLoad(Addr: GetAddrOfLocalVar(VD: &AwaiterDecl));
  auto AwaiterLValue =
      MakeNaturalAlignAddrLValue(V: AwaiterPtr, T: AwaiterDecl.getType());

  // Make the frame pointer visible while emitting the suspend expression so
  // coroutine_handle construction can pick it up.
  CurAwaitSuspendWrapper.FramePtr =
      Builder.CreateLoad(Addr: GetAddrOfLocalVar(VD: &FrameDecl));

  // Rebind the awaiter opaque value to the loaded pointer so the suspend
  // expression refers to the caller's awaiter object.
  auto AwaiterBinder = CodeGenFunction::OpaqueValueMappingData::bind(
      CGF&: *this, ov: S.getOpaqueValue(), lv: AwaiterLValue);

  auto *SuspendRet = EmitScalarExpr(E: S.getSuspendExpr());

  llvm::scope_exit UnbindCommonOnExit([&] { AwaiterBinder.unbind(CGF&: *this); });
  if (SuspendRet != nullptr) {
    Fn->addRetAttr(Kind: llvm::Attribute::AttrKind::NoUndef);
    Builder.CreateStore(Val: SuspendRet, Addr: ReturnValue);
  }

  CurAwaitSuspendWrapper.FramePtr = nullptr;
  FinishFunction();
  return Fn;
}
477
478LValue
479CodeGenFunction::EmitCoawaitLValue(const CoawaitExpr *E) {
480 assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() &&
481 "Can't have a scalar return unless the return type is a "
482 "reference type!");
483 return emitSuspendExpression(CGF&: *this, Coro&: *CurCoro.Data, S: *E,
484 Kind: CurCoro.Data->CurrentAwaitKind, aggSlot: AggValueSlot::ignored(),
485 /*ignoreResult*/false, /*forLValue*/true).LV;
486}
487
488LValue
489CodeGenFunction::EmitCoyieldLValue(const CoyieldExpr *E) {
490 assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() &&
491 "Can't have a scalar return unless the return type is a "
492 "reference type!");
493 return emitSuspendExpression(CGF&: *this, Coro&: *CurCoro.Data, S: *E,
494 Kind: AwaitKind::Yield, aggSlot: AggValueSlot::ignored(),
495 /*ignoreResult*/false, /*forLValue*/true).LV;
496}
497
498// Hunts for the parameter reference in the parameter copy/move declaration.
499namespace {
500struct GetParamRef : public StmtVisitor<GetParamRef> {
501public:
502 DeclRefExpr *Expr = nullptr;
503 GetParamRef() {}
504 void VisitDeclRefExpr(DeclRefExpr *E) {
505 assert(Expr == nullptr && "multilple declref in param move");
506 Expr = E;
507 }
508 void VisitStmt(Stmt *S) {
509 for (auto *C : S->children()) {
510 if (C)
511 Visit(S: C);
512 }
513 }
514};
515}
516
// This class replaces references to parameters with references to their
// copies by changing the addresses in CGF.LocalDeclMap, and restores the
// original values in its destructor.
520
namespace {
  // RAII helper: while alive, rewrites CGF.LocalDeclMap entries for coroutine
  // parameters so that references resolve to the parameter copies; the
  // destructor attempts to restore the saved original addresses.
  struct ParamReferenceReplacerRAII {
    CodeGenFunction::DeclMapTy SavedLocals;
    CodeGenFunction::DeclMapTy& LocalDeclMap;

    ParamReferenceReplacerRAII(CodeGenFunction::DeclMapTy &LocalDeclMap)
        : LocalDeclMap(LocalDeclMap) {}

    // Redirect the parameter referenced by PM's initializer to the copy
    // declared by PM, remembering the original address in SavedLocals.
    void addCopy(DeclStmt const *PM) {
      // Figure out what param it refers to.

      assert(PM->isSingleDecl());
      VarDecl const*VD = static_cast<VarDecl const*>(PM->getSingleDecl());
      Expr const *InitExpr = VD->getInit();
      GetParamRef Visitor;
      Visitor.Visit(S: const_cast<Expr*>(InitExpr));
      assert(Visitor.Expr);
      DeclRefExpr *DREOrig = Visitor.Expr;
      auto *PD = DREOrig->getDecl();

      auto it = LocalDeclMap.find(Val: PD);
      assert(it != LocalDeclMap.end() && "parameter is not found");
      SavedLocals.insert(KV: { PD, it->second });

      auto copyIt = LocalDeclMap.find(Val: VD);
      assert(copyIt != LocalDeclMap.end() && "parameter copy is not found");
      it->second = copyIt->getSecond();
    }

    ~ParamReferenceReplacerRAII() {
      // NOTE(review): DenseMap::insert does not overwrite an existing entry,
      // and addCopy only changed the mapped value in place (the key is still
      // present) — confirm whether this actually restores the original
      // address, or whether overwrite semantics were intended here.
      for (auto&& SavedLocal : SavedLocals) {
        LocalDeclMap.insert(KV: {SavedLocal.first, SavedLocal.second});
      }
    }
  };
} // namespace
557
558// For WinEH exception representation backend needs to know what funclet coro.end
559// belongs to. That information is passed in a funclet bundle.
560static SmallVector<llvm::OperandBundleDef, 1>
561getBundlesForCoroEnd(CodeGenFunction &CGF) {
562 SmallVector<llvm::OperandBundleDef, 1> BundleList;
563
564 if (llvm::Instruction *EHPad = CGF.CurrentFuncletPad)
565 BundleList.emplace_back(Args: "funclet", Args&: EHPad);
566
567 return BundleList;
568}
569
namespace {
// We will insert coro.end to cut any of the destructors for objects that
// do not need to be destroyed once the coroutine is resumed.
// See llvm/docs/Coroutines.rst for more details about coro.end.
struct CallCoroEnd final : public EHScopeStack::Cleanup {
  // Emits coro.end with the unwind flag set (second argument true); per
  // Coroutines.rst this marks the end of the coroutine on the exceptional
  // path.
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    auto &CGM = CGF.CGM;
    auto *NullPtr = llvm::ConstantPointerNull::get(T: CGF.Int8PtrTy);
    llvm::Function *CoroEndFn = CGM.getIntrinsic(IID: llvm::Intrinsic::coro_end);
    // See if we have a funclet bundle to associate coro.end with. (WinEH)
    auto Bundles = getBundlesForCoroEnd(CGF);
    CGF.Builder.CreateCall(
        Callee: CoroEndFn,
        Args: {NullPtr, CGF.Builder.getTrue(),
         llvm::ConstantTokenNone::get(Context&: CoroEndFn->getContext())},
        OpBundles: Bundles);
    if (Bundles.empty()) {
      // Otherwise, (landingpad model), create a conditional branch that leads
      // either to a cleanup block or a block with EH resume instruction.
      auto *ResumeBB = CGF.getEHResumeBlock(/*isCleanup=*/true);
      auto *CleanupContBB = CGF.createBasicBlock(name: "cleanup.cont");
      auto *CoroIsInRampFn = CGM.getIntrinsic(IID: llvm::Intrinsic::coro_is_in_ramp);
      auto *CoroIsInRamp = CGF.Builder.CreateCall(Callee: CoroIsInRampFn);
      CGF.Builder.CreateCondBr(Cond: CoroIsInRamp, True: CleanupContBB, False: ResumeBB);
      CGF.EmitBlock(BB: CleanupContBB);
    }
  }
};
} // namespace
599
600namespace {
601// Make sure to call coro.delete on scope exit.
602struct CallCoroDelete final : public EHScopeStack::Cleanup {
603 Stmt *Deallocate;
604
605 // Emit "if (coro.free(CoroId, CoroBegin)) Deallocate;"
606
607 // Note: That deallocation will be emitted twice: once for a normal exit and
608 // once for exceptional exit. This usage is safe because Deallocate does not
609 // contain any declarations. The SubStmtBuilder::makeNewAndDeleteExpr()
610 // builds a single call to a deallocation function which is safe to emit
611 // multiple times.
612 void Emit(CodeGenFunction &CGF, Flags) override {
613 // Remember the current point, as we are going to emit deallocation code
614 // first to get to coro.free instruction that is an argument to a delete
615 // call.
616 BasicBlock *SaveInsertBlock = CGF.Builder.GetInsertBlock();
617
618 auto *FreeBB = CGF.createBasicBlock(name: "coro.free");
619 CGF.EmitBlock(BB: FreeBB);
620 CGF.EmitStmt(S: Deallocate);
621
622 auto *AfterFreeBB = CGF.createBasicBlock(name: "after.coro.free");
623 CGF.EmitBlock(BB: AfterFreeBB);
624
625 // We should have captured coro.free from the emission of deallocate.
626 auto *CoroFree = CGF.CurCoro.Data->LastCoroFree;
627 if (!CoroFree) {
628 CGF.CGM.Error(loc: Deallocate->getBeginLoc(),
629 error: "Deallocation expressoin does not refer to coro.free");
630 return;
631 }
632
633 // Get back to the block we were originally and move coro.free there.
634 auto *InsertPt = SaveInsertBlock->getTerminator();
635 CoroFree->moveBefore(InsertPos: InsertPt->getIterator());
636 CGF.Builder.SetInsertPoint(InsertPt);
637
638 // Add if (auto *mem = coro.free) Deallocate;
639 auto *NullPtr = llvm::ConstantPointerNull::get(T: CGF.Int8PtrTy);
640 auto *Cond = CGF.Builder.CreateICmpNE(LHS: CoroFree, RHS: NullPtr);
641 CGF.Builder.CreateCondBr(Cond, True: FreeBB, False: AfterFreeBB);
642
643 // No longer need old terminator.
644 InsertPt->eraseFromParent();
645 CGF.Builder.SetInsertPoint(AfterFreeBB);
646
647 auto *CoroDeadFn = CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::coro_dead);
648 CGF.Builder.CreateCall(Callee: CoroDeadFn, Args: {CGF.CurCoro.Data->CoroBegin});
649 }
650 explicit CallCoroDelete(Stmt *DeallocStmt) : Deallocate(DeallocStmt) {}
651};
652}
653
namespace {
// Manages emission of the get_return_object (GRO) result: either directly in
// the caller's return slot, or via a temporary converted later (delayed
// emission), with a conditional cleanup guarded by gro.active.
struct GetReturnObjectManager {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  const CoroutineBodyStmt &S;
  // When true, performs RVO for the return object.
  bool DirectEmit = false;

  // i1 flag alloca guarding the GRO cleanup (delayed emission only).
  Address GroActiveFlag;
  // The emission record of the delayed GRO temporary.
  CodeGenFunction::AutoVarEmission GroEmission;

  GetReturnObjectManager(CodeGenFunction &CGF, const CoroutineBodyStmt &S)
      : CGF(CGF), Builder(CGF.Builder), S(S), GroActiveFlag(Address::invalid()),
        GroEmission(CodeGenFunction::AutoVarEmission::invalid()) {
    // The call to get_return_object is sequenced before the call to
    // initial_suspend and is invoked at most once, but there are caveats
    // regarding on whether the prvalue result object may be initialized
    // directly/eager or delayed, depending on the types involved.
    //
    // More info at https://github.com/cplusplus/papers/issues/1414
    //
    // The general cases:
    // 1. Same type of get_return_object and coroutine return type (direct
    // emission):
    //  - Constructed in the return slot.
    // 2. Different types (delayed emission):
    //  - Constructed temporary object prior to initial suspend initialized with
    //  a call to get_return_object()
    //  - When coroutine needs to return to the caller and needs to construct
    //  return value for the coroutine it is initialized with expiring value of
    //  the temporary obtained above.
    //
    // Direct emission for void returning coroutines or GROs.
    DirectEmit = [&]() {
      auto *RVI = S.getReturnValueInit();
      assert(RVI && "expected RVI");
      auto GroType = RVI->getType();
      return CGF.getContext().hasSameType(T1: GroType, T2: CGF.FnRetTy);
    }();
  }

  // The gro variable has to outlive coroutine frame and coroutine promise, but,
  // it can only be initialized after coroutine promise was created. Thus,
  // EmitGroActive emits a flag and sets it to false. Later when coroutine
  // promise is available we initialize the gro and set the flag indicating that
  // the cleanup is now active.
  void EmitGroActive() {
    if (DirectEmit)
      return;

    auto *GroDeclStmt = dyn_cast_or_null<DeclStmt>(Val: S.getResultDecl());
    if (!GroDeclStmt) {
      // If get_return_object returns void, no need to do an alloca.
      return;
    }

    // Set GRO flag that it is not initialized yet
    GroActiveFlag = CGF.CreateTempAlloca(Ty: Builder.getInt1Ty(), align: CharUnits::One(),
                                         Name: "gro.active");
    Builder.CreateStore(Val: Builder.getFalse(), Addr: GroActiveFlag);
  }

  // Allocates the delayed GRO temporary outside the coroutine frame and makes
  // its cleanup conditional on gro.active.
  void EmitGroAlloca() {
    if (DirectEmit)
      return;

    auto *GroDeclStmt = dyn_cast_or_null<DeclStmt>(Val: S.getResultDecl());
    if (!GroDeclStmt) {
      // If get_return_object returns void, no need to do an alloca.
      return;
    }

    auto *GroVarDecl = cast<VarDecl>(Val: GroDeclStmt->getSingleDecl());

    GroEmission = CGF.EmitAutoVarAlloca(var: *GroVarDecl);

    if (!GroVarDecl->isNRVOVariable()) {
      // NRVO variables don't have allocas and won't have the same issue.
      auto *GroAlloca = dyn_cast_or_null<llvm::AllocaInst>(
          Val: GroEmission.getOriginalAllocatedAddress().getPointer());
      assert(GroAlloca && "expected alloca to be emitted");
      // Tag the alloca so the coroutine frame builder keeps it on the stack.
      GroAlloca->setMetadata(KindID: llvm::LLVMContext::MD_coro_outside_frame,
                             Node: llvm::MDNode::get(Context&: CGF.CGM.getLLVMContext(), MDs: {}));
    }

    // Remember the top of EHStack before emitting the cleanup.
    auto old_top = CGF.EHStack.stable_begin();
    CGF.EmitAutoVarCleanups(emission: GroEmission);
    auto top = CGF.EHStack.stable_begin();

    // Make the cleanup conditional on gro.active
    for (auto b = CGF.EHStack.find(sp: top), e = CGF.EHStack.find(sp: old_top); b != e;
         b++) {
      if (auto *Cleanup = dyn_cast<EHCleanupScope>(Val: &*b)) {
        assert(!Cleanup->hasActiveFlag() && "cleanup already has active flag?");
        Cleanup->setActiveFlag(GroActiveFlag);
        Cleanup->setTestFlagInEHCleanup();
        Cleanup->setTestFlagInNormalCleanup();
      }
    }
  }

  // Emits the call to get_return_object: directly into the return slot (direct
  // emission), or into the delayed GRO temporary, activating its cleanup.
  void EmitGroInit() {
    if (DirectEmit) {
      // ReturnValue should be valid as long as the coroutine's return type
      // is not void. The assertion could help us to reduce the check later.
      assert(CGF.ReturnValue.isValid() == (bool)S.getReturnStmt());
      // Now we have the promise, initialize the GRO.
      // We need to emit `get_return_object` first. According to:
      // [dcl.fct.def.coroutine]p7
      // The call to get_return_object is sequenced before the call to
      // initial_suspend and is invoked at most once.
      //
      // So we couldn't emit return value when we emit return statement,
      // otherwise the call to get_return_object wouldn't be in front
      // of initial_suspend.
      if (CGF.ReturnValue.isValid()) {
        CGF.EmitAnyExprToMem(E: S.getReturnValue(), Location: CGF.ReturnValue,
                             Quals: S.getReturnValue()->getType().getQualifiers(),
                             /*IsInit*/ IsInitializer: true);
      }
      return;
    }

    if (!GroActiveFlag.isValid()) {
      // No Gro variable was allocated. Simply emit the call to
      // get_return_object.
      CGF.EmitStmt(S: S.getResultDecl());
      return;
    }

    CGF.EmitAutoVarInit(emission: GroEmission);
    Builder.CreateStore(Val: Builder.getTrue(), Addr: GroActiveFlag);
  }
  // The GRO returns either when it is first suspended or when it completes
  // without ever being suspended. The EmitGroConv function evaluates these
  // conditions and performs the conversion if needed.
  //
  // Before EmitGroConv():
  //   final.exit:
  //     switch i32 %cleanup.dest, label %destroy [
  //       i32 0, label %after.ready
  //     ]
  //
  //   after.ready:
  //     ; (empty)
  //
  // After EmitGroConv():
  //   final.exit:
  //     switch i32 %cleanup.dest, label %destroy [
  //       i32 0, label %pre.gro.conv
  //     ]
  //
  //   pre.gro.conv:
  //     %IsFinalExit = phi i1 [ false, %any.suspend ], [ true, %final.exit ]
  //     %InRamp = call i1 @llvm.coro.is_in_ramp()
  //     br i1 %InRamp, label %gro.conv, label %after.gro.conv
  //
  //   gro.conv:
  //     ; GRO conversion
  //     br label %after.gro.conv
  //
  //   after.gro.conv:
  //     br i1 %IsFinalExit, label %after.ready, label %coro.ret
  void EmitGroConv(BasicBlock *RetBB) {
    auto *AfterReadyBB = Builder.GetInsertBlock();
    Builder.ClearInsertionPoint();

    auto *PreConvBB = CGF.CurCoro.Data->SuspendBB;
    CGF.EmitBlock(BB: PreConvBB);
    // If final.exit exists, redirect it to PreConvBB
    llvm::PHINode *IsFinalExit = nullptr;
    if (BasicBlock *FinalExit = CGF.CurCoro.Data->FinalExit) {
      assert(AfterReadyBB &&
             AfterReadyBB->getSinglePredecessor() == FinalExit &&
             "Expect fallthrough from final.exit block");
      AfterReadyBB->replaceAllUsesWith(V: PreConvBB);
      PreConvBB->moveBefore(MovePos: AfterReadyBB);

      // If true, coroutine completes and should be destroyed after conversion
      IsFinalExit =
          Builder.CreatePHI(Ty: Builder.getInt1Ty(), NumReservedValues: llvm::pred_size(BB: PreConvBB));
      for (auto *Pred : llvm::predecessors(BB: PreConvBB)) {
        auto *V = (Pred == FinalExit) ? Builder.getTrue() : Builder.getFalse();
        IsFinalExit->addIncoming(V, BB: Pred);
      }
    }
    // Only convert in the ramp function; resumed invocations skip it.
    auto *InRampFn = CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::coro_is_in_ramp);
    auto *InRamp = Builder.CreateCall(Callee: InRampFn, Args: {}, Name: "InRamp");
    auto *ConvBB = CGF.createBasicBlock(name: "gro.conv");
    auto *AfterConvBB = CGF.createBasicBlock(name: "after.gro.conv");
    Builder.CreateCondBr(Cond: InRamp, True: ConvBB, False: AfterConvBB);

    CGF.EmitBlock(BB: ConvBB);
    CGF.EmitAnyExprToMem(E: S.getReturnValue(), Location: CGF.ReturnValue,
                         Quals: S.getReturnValue()->getType().getQualifiers(),
                         /*IsInit*/ IsInitializer: true);
    Builder.CreateBr(Dest: AfterConvBB);

    CGF.EmitBlock(BB: AfterConvBB);
    if (IsFinalExit)
      Builder.CreateCondBr(Cond: IsFinalExit, True: AfterReadyBB, False: RetBB);
    else
      Builder.CreateBr(Dest: RetBB);
    Builder.SetInsertPoint(AfterReadyBB);
  }
};
} // namespace
862
863static void emitBodyAndFallthrough(CodeGenFunction &CGF,
864 const CoroutineBodyStmt &S, Stmt *Body) {
865 CGF.EmitStmt(S: Body);
866 const bool CanFallthrough = CGF.Builder.GetInsertBlock();
867 if (CanFallthrough)
868 if (Stmt *OnFallthrough = S.getFallthroughHandler())
869 CGF.EmitStmt(S: OnFallthrough);
870}
871
// Emit LLVM IR for a complete coroutine: frame allocation (coro.id /
// coro.alloc / coro.begin), parameter copies, promise construction,
// get-return-object handling, initial/final suspends, the user body, and
// the coro.end + return epilogue.
void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
  auto *NullPtr = llvm::ConstantPointerNull::get(T: Builder.getPtrTy());
  auto &TI = CGM.getContext().getTargetInfo();
  // Over-alignment hint passed to coro.id, in bytes (new-expression default).
  unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth();

  auto *EntryBB = Builder.GetInsertBlock();
  auto *AllocBB = createBasicBlock(name: "coro.alloc");
  auto *InitBB = createBasicBlock(name: "coro.init");
  auto *FinalBB = createBasicBlock(name: "coro.final");
  auto *RetBB = createBasicBlock(name: "coro.ret");

  // The promise pointer (arg 1) is patched in later, once the promise local
  // variable has been emitted.
  auto *CoroId = Builder.CreateCall(
      Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::coro_id),
      Args: {Builder.getInt32(C: NewAlign), NullPtr, NullPtr, NullPtr});
  createCoroData(CGF&: *this, CurCoro, CoroId);

  GetReturnObjectManager GroManager(*this, S);
  // When a GRO conversion is needed, suspends funnel through a dedicated
  // pre-conversion block instead of going straight to coro.ret.
  // NOTE(review): the comment above EmitGroConv names this block
  // "pre.gro.conv"; confirm whether "pre.gvo.conv" is the intended spelling.
  CurCoro.Data->SuspendBB =
      GroManager.DirectEmit ? RetBB : createBasicBlock(name: "pre.gvo.conv");
  assert(ShouldEmitLifetimeMarkers &&
         "Must emit lifetime intrinsics for coroutines");

  // Backend is allowed to elide memory allocations, to help it, emit
  // auto mem = coro.alloc() ? 0 : ... allocation code ...;
  auto *CoroAlloc = Builder.CreateCall(
      Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::coro_alloc), Args: {CoroId});

  Builder.CreateCondBr(Cond: CoroAlloc, True: AllocBB, False: InitBB);

  EmitBlock(BB: AllocBB);
  auto *AllocateCall = EmitScalarExpr(E: S.getAllocate());
  // The allocate expression may itself branch (e.g. via invoke); record the
  // block we actually ended up in for the PHI below.
  auto *AllocOrInvokeContBB = Builder.GetInsertBlock();

  // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
  if (auto *RetOnAllocFailure = S.getReturnStmtOnAllocFailure()) {
    auto *RetOnFailureBB = createBasicBlock(name: "coro.ret.on.failure");

    // See if allocation was successful.
    auto *NullPtr = llvm::ConstantPointerNull::get(T: Int8PtrTy);
    auto *Cond = Builder.CreateICmpNE(LHS: AllocateCall, RHS: NullPtr);
    // Expect the allocation to be successful.
    emitCondLikelihoodViaExpectIntrinsic(Cond, LH: Stmt::LH_Likely);
    Builder.CreateCondBr(Cond, True: InitBB, False: RetOnFailureBB);

    // If not, return OnAllocFailure object.
    EmitBlock(BB: RetOnFailureBB);
    EmitStmt(S: RetOnAllocFailure);
  }
  else {
    Builder.CreateBr(Dest: InitBB);
  }

  EmitBlock(BB: InitBB);

  // Pass the result of the allocation to coro.begin.
  // Null on the elided (EntryBB) path, the real pointer otherwise.
  auto *Phi = Builder.CreatePHI(Ty: VoidPtrTy, NumReservedValues: 2);
  Phi->addIncoming(V: NullPtr, BB: EntryBB);
  Phi->addIncoming(V: AllocateCall, BB: AllocOrInvokeContBB);
  auto *CoroBegin = Builder.CreateCall(
      Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::coro_begin), Args: {CoroId, Phi});
  CurCoro.Data->CoroBegin = CoroBegin;

  CurCoro.Data->CleanupJD = getJumpDestInCurrentScope(Target: RetBB);
  {
    CGDebugInfo *DI = getDebugInfo();
    ParamReferenceReplacerRAII ParamReplacer(LocalDeclMap);
    // Cleanups pushed inside this scope (frame delete, coro.end) run before
    // control reaches RetBB.
    CodeGenFunction::RunCleanupsScope ResumeScope(*this);
    EHStack.pushCleanup<CallCoroDelete>(Kind: NormalAndEHCleanup, A: S.getDeallocate());

    // Create mapping between parameters and copy-params for coroutine function.
    llvm::ArrayRef<const Stmt *> ParamMoves = S.getParamMoves();
    assert(
        (ParamMoves.size() == 0 || (ParamMoves.size() == FnArgs.size())) &&
        "ParamMoves and FnArgs should be the same size for coroutine function");
    if (ParamMoves.size() == FnArgs.size() && DI)
      for (const auto Pair : llvm::zip(t&: FnArgs, u&: ParamMoves))
        DI->getCoroutineParameterMappings().insert(
            KV: {std::get<0>(t: Pair), std::get<1>(t: Pair)});

    // Create parameter copies. We do it before creating a promise, since an
    // evolution of coroutine TS may allow promise constructor to observe
    // parameter copies.
    for (const ParmVarDecl *Parm : FnArgs) {
      // If the original param is in an alloca, exclude it from the coroutine
      // frame. The parameter copy will be part of the frame, but the original
      // parameter memory should remain on the stack. This is necessary to
      // ensure that parameters destroyed in callees, as with `trivial_abi` or
      // in the MSVC C++ ABI, are appropriately destroyed after setting up the
      // coroutine.
      Address ParmAddr = GetAddrOfLocalVar(VD: Parm);
      if (auto *ParmAlloca =
              dyn_cast<llvm::AllocaInst>(Val: ParmAddr.getBasePointer())) {
        ParmAlloca->setMetadata(KindID: llvm::LLVMContext::MD_coro_outside_frame,
                                Node: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: {}));
      }
    }
    for (auto *PM : S.getParamMoves()) {
      EmitStmt(S: PM);
      ParamReplacer.addCopy(PM: cast<DeclStmt>(Val: PM));
      // TODO: if(CoroParam(...)) need to surround ctor and dtor
      // for the copy, so that llvm can elide it if the copy is
      // not needed.
    }

    GroManager.EmitGroActive();
    EmitStmt(S: S.getPromiseDeclStmt());

    Address PromiseAddr = GetAddrOfLocalVar(VD: S.getPromiseDecl());
    // Update CoroId to refer to the promise. We could not do it earlier because
    // promise local variable was not emitted yet.
    CoroId->setArgOperand(i: 1, v: PromiseAddr.emitRawPointer(CGF&: *this));

    // Now we have the promise, initialize the GRO
    GroManager.EmitGroAlloca();
    GroManager.EmitGroInit();

    EHStack.pushCleanup<CallCoroEnd>(Kind: EHCleanup);

    CurCoro.Data->CurrentAwaitKind = AwaitKind::Init;
    CurCoro.Data->ExceptionHandler = S.getExceptionHandler();
    EmitStmt(S: S.getInitSuspendStmt());
    CurCoro.Data->FinalJD = getJumpDestInCurrentScope(Target: FinalBB);

    CurCoro.Data->CurrentAwaitKind = AwaitKind::Normal;

    if (CurCoro.Data->ExceptionHandler) {
      // If we generated IR to record whether an exception was thrown from
      // 'await_resume', then use that IR to determine whether the coroutine
      // body should be skipped.
      // If we didn't generate the IR (perhaps because 'await_resume' was marked
      // as 'noexcept'), then we skip this check.
      BasicBlock *ContBB = nullptr;
      if (CurCoro.Data->ResumeEHVar) {
        BasicBlock *BodyBB = createBasicBlock(name: "coro.resumed.body");
        ContBB = createBasicBlock(name: "coro.resumed.cont");
        Value *SkipBody = Builder.CreateFlagLoad(Addr: CurCoro.Data->ResumeEHVar,
                                                 Name: "coro.resumed.eh");
        Builder.CreateCondBr(Cond: SkipBody, True: ContBB, False: BodyBB);
        EmitBlock(BB: BodyBB);
      }

      // Wrap the body in a synthetic try/catch whose handler is the promise's
      // unhandled_exception() call. The catch statement lives on the stack;
      // it is only needed during emission.
      auto Loc = S.getBeginLoc();
      CXXCatchStmt Catch(Loc, /*exDecl=*/nullptr,
                         CurCoro.Data->ExceptionHandler);
      auto *TryStmt =
          CXXTryStmt::Create(C: getContext(), tryLoc: Loc, tryBlock: S.getBody(), handlers: &Catch);

      EnterCXXTryStmt(S: *TryStmt);
      emitBodyAndFallthrough(CGF&: *this, S, Body: TryStmt->getTryBlock());
      ExitCXXTryStmt(S: *TryStmt);

      if (ContBB)
        EmitBlock(BB: ContBB);
    }
    else {
      emitBodyAndFallthrough(CGF&: *this, S, Body: S.getBody());
    }

    // See if we need to generate final suspend.
    const bool CanFallthrough = Builder.GetInsertBlock();
    const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0;
    if (CanFallthrough || HasCoreturns) {
      EmitBlock(BB: FinalBB);
      CurCoro.Data->CurrentAwaitKind = AwaitKind::Final;
      EmitStmt(S: S.getFinalSuspendStmt());
    } else {
      // We don't need FinalBB. Emit it to make sure the block is deleted.
      EmitBlock(BB: FinalBB, /*IsFinished=*/true);
    }

    // We need conversion if get_return_object's type doesn't match the
    // coroutine return type.
    if (!GroManager.DirectEmit)
      GroManager.EmitGroConv(RetBB);
  }

  EmitBlock(BB: RetBB);
  // Emit coro.end before ret instruction, since resume and destroy parts of the
  // coroutine should return void.
  llvm::Function *CoroEnd = CGM.getIntrinsic(IID: llvm::Intrinsic::coro_end);
  Builder.CreateCall(Callee: CoroEnd,
                     Args: {NullPtr, Builder.getFalse(),
                     llvm::ConstantTokenNone::get(Context&: CoroEnd->getContext())});

  if (auto *Ret = cast_or_null<ReturnStmt>(Val: S.getReturnStmt())) {
    // Since we already emitted the return value above, we shouldn't
    // emit it again here.
    Expr *PreviousRetValue = Ret->getRetValue();
    Ret->setRetValue(nullptr);
    EmitStmt(S: Ret);
    // Set the return value back. The code generator, as the AST **Consumer**,
    // shouldn't change the AST.
    Ret->setRetValue(PreviousRetValue);
  }
  // LLVM requires the frontend to mark the coroutine.
  CurFn->setPresplitCoroutine();

  // Honor [[clang::coro_only_destroy_when_complete]] on the return type.
  if (CXXRecordDecl *RD = FnRetTy->getAsCXXRecordDecl();
      RD && RD->hasAttr<CoroOnlyDestroyWhenCompleteAttr>())
    CurFn->setCoroDestroyOnlyWhenComplete();
}
1073
1074// Emit coroutine intrinsic and patch up arguments of the token type.
1075RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
1076 unsigned int IID) {
1077 SmallVector<llvm::Value *, 8> Args;
1078 switch (IID) {
1079 default:
1080 break;
1081 // The coro.frame builtin is replaced with an SSA value of the coro.begin
1082 // intrinsic.
1083 case llvm::Intrinsic::coro_frame: {
1084 if (CurCoro.Data && CurCoro.Data->CoroBegin) {
1085 return RValue::get(V: CurCoro.Data->CoroBegin);
1086 }
1087
1088 if (CurAwaitSuspendWrapper.FramePtr) {
1089 return RValue::get(V: CurAwaitSuspendWrapper.FramePtr);
1090 }
1091
1092 CGM.Error(loc: E->getBeginLoc(), error: "this builtin expect that __builtin_coro_begin "
1093 "has been used earlier in this function");
1094 auto *NullPtr = llvm::ConstantPointerNull::get(T: Builder.getPtrTy());
1095 return RValue::get(V: NullPtr);
1096 }
1097 case llvm::Intrinsic::coro_size: {
1098 auto &Context = getContext();
1099 llvm::IntegerType *T =
1100 Builder.getIntNTy(N: Context.getTypeSize(T: Context.getSizeType()));
1101 llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::coro_size, Tys: T);
1102 return RValue::get(V: Builder.CreateCall(Callee: F));
1103 }
1104 case llvm::Intrinsic::coro_align: {
1105 auto &Context = getContext();
1106 llvm::IntegerType *T =
1107 Builder.getIntNTy(N: Context.getTypeSize(T: Context.getSizeType()));
1108 llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::coro_align, Tys: T);
1109 return RValue::get(V: Builder.CreateCall(Callee: F));
1110 }
1111 // The following three intrinsics take a token parameter referring to a token
1112 // returned by earlier call to @llvm.coro.id. Since we cannot represent it in
1113 // builtins, we patch it up here.
1114 case llvm::Intrinsic::coro_alloc:
1115 case llvm::Intrinsic::coro_begin:
1116 case llvm::Intrinsic::coro_free: {
1117 if (CurCoro.Data && CurCoro.Data->CoroId) {
1118 Args.push_back(Elt: CurCoro.Data->CoroId);
1119 break;
1120 }
1121 CGM.Error(loc: E->getBeginLoc(), error: "this builtin expect that __builtin_coro_id has"
1122 " been used earlier in this function");
1123 // Fallthrough to the next case to add TokenNone as the first argument.
1124 [[fallthrough]];
1125 }
1126 // @llvm.coro.suspend takes a token parameter. Add token 'none' as the first
1127 // argument.
1128 case llvm::Intrinsic::coro_suspend:
1129 Args.push_back(Elt: llvm::ConstantTokenNone::get(Context&: getLLVMContext()));
1130 break;
1131 }
1132 for (const Expr *Arg : E->arguments())
1133 Args.push_back(Elt: EmitScalarExpr(E: Arg));
1134 // @llvm.coro.end takes a token parameter. Add token 'none' as the last
1135 // argument.
1136 if (IID == llvm::Intrinsic::coro_end)
1137 Args.push_back(Elt: llvm::ConstantTokenNone::get(Context&: getLLVMContext()));
1138
1139 llvm::Function *F = CGM.getIntrinsic(IID);
1140 llvm::CallInst *Call = Builder.CreateCall(Callee: F, Args);
1141
1142 // Note: The following code is to enable to emit coro.id and coro.begin by
1143 // hand to experiment with coroutines in C.
1144 // If we see @llvm.coro.id remember it in the CoroData. We will update
1145 // coro.alloc, coro.begin and coro.free intrinsics to refer to it.
1146 if (IID == llvm::Intrinsic::coro_id) {
1147 createCoroData(CGF&: *this, CurCoro, CoroId: Call, CoroIdExpr: E);
1148 }
1149 else if (IID == llvm::Intrinsic::coro_begin) {
1150 if (CurCoro.Data)
1151 CurCoro.Data->CoroBegin = Call;
1152 }
1153 else if (IID == llvm::Intrinsic::coro_free) {
1154 // Remember the last coro_free as we need it to build the conditional
1155 // deletion of the coroutine frame.
1156 if (CurCoro.Data)
1157 CurCoro.Data->LastCoroFree = Call;
1158 }
1159 return RValue::get(V: Call);
1160}
1161