//===- CoroElide.cpp - Coroutine Frame Allocation Elision Pass ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
8
#include "llvm/Transforms/Coroutines/CoroElide.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include <optional>
21
22using namespace llvm;
23
24#define DEBUG_TYPE "coro-elide"
25
26STATISTIC(NumOfCoroElided, "The # of coroutine get elided.");
27
28#ifndef NDEBUG
29static cl::opt<std::string> CoroElideInfoOutputFilename(
30 "coro-elide-info-output-file", cl::value_desc("filename"),
31 cl::desc("File to record the coroutines got elided"), cl::Hidden);
32#endif
33
34namespace {
35// Created on demand if the coro-elide pass has work to do.
36class FunctionElideInfo {
37public:
38 FunctionElideInfo(Function *F) : ContainingFunction(F) {
39 this->collectPostSplitCoroIds();
40 }
41
42 bool hasCoroIds() const { return !CoroIds.empty(); }
43
44 const SmallVectorImpl<CoroIdInst *> &getCoroIds() const { return CoroIds; }
45
46private:
47 Function *ContainingFunction;
48 SmallVector<CoroIdInst *, 4> CoroIds;
49 // Used in canCoroBeginEscape to distinguish coro.suspend switchs.
50 SmallPtrSet<const SwitchInst *, 4> CoroSuspendSwitches;
51
52 void collectPostSplitCoroIds();
53 friend class CoroIdElider;
54};
55
56class CoroIdElider {
57public:
58 CoroIdElider(CoroIdInst *CoroId, FunctionElideInfo &FEI, AAResults &AA,
59 DominatorTree &DT, OptimizationRemarkEmitter &ORE);
60 void elideHeapAllocations(uint64_t FrameSize, Align FrameAlign);
61 bool lifetimeEligibleForElide() const;
62 bool attemptElide();
63 bool canCoroBeginEscape(const CoroBeginInst *,
64 const SmallPtrSetImpl<BasicBlock *> &) const;
65
66private:
67 CoroIdInst *CoroId;
68 FunctionElideInfo &FEI;
69 AAResults &AA;
70 DominatorTree &DT;
71 OptimizationRemarkEmitter &ORE;
72
73 SmallVector<CoroBeginInst *, 1> CoroBegins;
74 SmallVector<CoroAllocInst *, 1> CoroAllocs;
75 SmallVector<CoroSubFnInst *, 4> ResumeAddr;
76 SmallVector<CoroSubFnInst *, 4> DestroyAddr;
77 DenseMap<CoroBeginInst *, SmallVector<IntrinsicInst *, 4>> BeginDeadMap;
78};
79} // end anonymous namespace
80
81// Go through the list of coro.subfn.addr intrinsics and replace them with the
82// provided constant.
83static void replaceWithConstant(Constant *Value,
84 SmallVectorImpl<CoroSubFnInst *> &Users) {
85 for (CoroSubFnInst *I : Users)
86 replaceAndRecursivelySimplify(I, SimpleV: Value);
87}
88
89// See if any operand of the call instruction references the coroutine frame.
90static bool operandReferences(CallInst *CI, AllocaInst *Frame, AAResults &AA) {
91 for (Value *Op : CI->operand_values())
92 if (Op->getType()->isPointerTy() && !AA.isNoAlias(V1: Op, V2: Frame))
93 return true;
94 return false;
95}
96
97// Look for any tail calls referencing the coroutine frame and remove tail
98// attribute from them, since now coroutine frame resides on the stack and tail
99// call implies that the function does not references anything on the stack.
100// However if it's a musttail call, we cannot remove the tailcall attribute.
101// It's safe to keep it there as the musttail call is for symmetric transfer,
102// and by that point the frame should have been destroyed and hence not
103// interfering with operands.
104static void removeTailCallAttribute(AllocaInst *Frame, AAResults &AA) {
105 Function &F = *Frame->getFunction();
106 for (Instruction &I : instructions(F))
107 if (auto *Call = dyn_cast<CallInst>(Val: &I))
108 if (Call->isTailCall() && operandReferences(CI: Call, Frame, AA) &&
109 !Call->isMustTailCall())
110 Call->setTailCall(false);
111}
112
113// Given a resume function @f.resume(%f.frame* %frame), returns the size
114// and expected alignment of %f.frame type.
115static std::optional<std::pair<uint64_t, Align>>
116getFrameLayout(Function *Resume) {
117 // Pull information from the function attributes.
118 auto Size = Resume->getParamDereferenceableBytes(ArgNo: 0);
119 if (!Size)
120 return std::nullopt;
121 return std::make_pair(x&: Size, y: Resume->getParamAlign(ArgNo: 0).valueOrOne());
122}
123
124// Finds first non alloca instruction in the entry block of a function.
125static Instruction *getFirstNonAllocaInTheEntryBlock(Function *F) {
126 for (Instruction &I : F->getEntryBlock())
127 if (!isa<AllocaInst>(Val: &I))
128 return &I;
129 llvm_unreachable("no terminator in the entry block");
130}
131
132#ifndef NDEBUG
133static std::unique_ptr<raw_fd_ostream> getOrCreateLogFile() {
134 assert(!CoroElideInfoOutputFilename.empty() &&
135 "coro-elide-info-output-file shouldn't be empty");
136 std::error_code EC;
137 auto Result = std::make_unique<raw_fd_ostream>(CoroElideInfoOutputFilename,
138 EC, sys::fs::OF_Append);
139 if (!EC)
140 return Result;
141 llvm::errs() << "Error opening coro-elide-info-output-file '"
142 << CoroElideInfoOutputFilename << " for appending!\n";
143 return std::make_unique<raw_fd_ostream>(2, false); // stderr.
144}
145#endif
146
147void FunctionElideInfo::collectPostSplitCoroIds() {
148 for (auto &I : instructions(F: this->ContainingFunction)) {
149 if (auto *CII = dyn_cast<CoroIdInst>(Val: &I))
150 if (CII->getInfo().isPostSplit())
151 // If it is the coroutine itself, don't touch it.
152 if (CII->getCoroutine() != CII->getFunction())
153 CoroIds.push_back(Elt: CII);
154
155 // Consider case like:
156 // %0 = call i8 @llvm.coro.suspend(...)
157 // switch i8 %0, label %suspend [i8 0, label %resume
158 // i8 1, label %cleanup]
159 // and collect the SwitchInsts which are used by escape analysis later.
160 if (auto *CSI = dyn_cast<CoroSuspendInst>(Val: &I))
161 if (CSI->hasOneUse() && isa<SwitchInst>(Val: CSI->use_begin()->getUser())) {
162 SwitchInst *SWI = cast<SwitchInst>(Val: CSI->use_begin()->getUser());
163 if (SWI->getNumCases() == 2)
164 CoroSuspendSwitches.insert(Ptr: SWI);
165 }
166 }
167}
168
169CoroIdElider::CoroIdElider(CoroIdInst *CoroId, FunctionElideInfo &FEI,
170 AAResults &AA, DominatorTree &DT,
171 OptimizationRemarkEmitter &ORE)
172 : CoroId(CoroId), FEI(FEI), AA(AA), DT(DT), ORE(ORE) {
173 // Collect all coro.begin and coro.allocs associated with this coro.id.
174 for (User *U : CoroId->users()) {
175 if (auto *CB = dyn_cast<CoroBeginInst>(Val: U))
176 CoroBegins.push_back(Elt: CB);
177 else if (auto *CA = dyn_cast<CoroAllocInst>(Val: U))
178 CoroAllocs.push_back(Elt: CA);
179 }
180
181 for (CoroBeginInst *CB : CoroBegins) {
182 for (User *U : CB->users()) {
183 auto &CoroDeads = BeginDeadMap[CB];
184 // Collect all coro.subfn.addrs associated with coro.begin.
185 // Note, we only devirtualize the calls if their coro.subfn.addr refers to
186 // coro.begin directly. If we run into cases where this check is too
187 // conservative, we can consider relaxing the check.
188 if (auto *II = dyn_cast<CoroSubFnInst>(Val: U)) {
189 switch (II->getIndex()) {
190 case CoroSubFnInst::ResumeIndex:
191 ResumeAddr.push_back(Elt: II);
192 break;
193 case CoroSubFnInst::DestroyIndex:
194 CoroDeads.push_back(Elt: II); // coro.destroy implies coro.dead
195 DestroyAddr.push_back(Elt: II);
196 break;
197 default:
198 llvm_unreachable("unexpected coro.subfn.addr constant");
199 }
200 } else if (auto *II = dyn_cast<CoroDeadInst>(Val: U))
201 CoroDeads.push_back(Elt: II);
202 }
203 }
204}
205
206// To elide heap allocations we need to suppress code blocks guarded by
207// llvm.coro.alloc and llvm.coro.free instructions.
208void CoroIdElider::elideHeapAllocations(uint64_t FrameSize, Align FrameAlign) {
209 LLVMContext &C = FEI.ContainingFunction->getContext();
210 BasicBlock::iterator InsertPt =
211 getFirstNonAllocaInTheEntryBlock(F: FEI.ContainingFunction)->getIterator();
212
213 // Replacing llvm.coro.alloc with false will suppress dynamic
214 // allocation as it is expected for the frontend to generate the code that
215 // looks like:
216 // id = coro.id(...)
217 // mem = coro.alloc(id) ? malloc(coro.size()) : 0;
218 // coro.begin(id, mem)
219 auto *False = ConstantInt::getFalse(Context&: C);
220 for (auto *CA : CoroAllocs) {
221 CA->replaceAllUsesWith(V: False);
222 CA->eraseFromParent();
223 }
224
225 // FIXME: Design how to transmit alignment information for every alloca that
226 // is spilled into the coroutine frame and recreate the alignment information
227 // here. Possibly we will need to do a mini SROA here and break the coroutine
228 // frame into individual AllocaInst recreating the original alignment.
229 const DataLayout &DL = FEI.ContainingFunction->getDataLayout();
230 auto FrameTy = ArrayType::get(ElementType: Type::getInt8Ty(C), NumElements: FrameSize);
231 auto *Frame = new AllocaInst(FrameTy, DL.getAllocaAddrSpace(), "", InsertPt);
232 Frame->setAlignment(FrameAlign);
233 auto *FrameVoidPtr =
234 new BitCastInst(Frame, PointerType::getUnqual(C), "vFrame", InsertPt);
235
236 for (auto *CB : CoroBegins) {
237 coro::elideCoroFree(FramePtr: CB);
238 CB->replaceAllUsesWith(V: FrameVoidPtr);
239 CB->eraseFromParent();
240 }
241
242 // Since now coroutine frame lives on the stack we need to make sure that
243 // any tail call referencing it, must be made non-tail call.
244 removeTailCallAttribute(Frame, AA);
245}
246
247bool CoroIdElider::canCoroBeginEscape(
248 const CoroBeginInst *CB, const SmallPtrSetImpl<BasicBlock *> &TIs) const {
249 const auto &It = BeginDeadMap.find(Val: CB);
250 assert(It != BeginDeadMap.end());
251
252 // Limit the number of blocks we visit.
253 unsigned Limit = 32 * (1 + It->second.size());
254
255 SmallVector<const BasicBlock *, 32> Worklist;
256 Worklist.push_back(Elt: CB->getParent());
257
258 SmallPtrSet<const BasicBlock *, 32> Visited;
259 // Consider basicblock of coro.dead/destroy as visited one, so that we
260 // skip the path pass through it.
261 for (auto *DA : It->second)
262 Visited.insert(Ptr: DA->getParent());
263
264 SmallPtrSet<const BasicBlock *, 32> EscapingBBs;
265 for (auto *U : CB->users()) {
266 // The use from coroutine intrinsics are not a problem.
267 if (isa<CoroFreeInst, CoroSubFnInst, CoroSaveInst>(Val: U))
268 continue;
269
270 // Think all other usages may be an escaping candidate conservatively.
271 //
272 // Note that the major user of switch ABI coroutine (the C++) will store
273 // resume.fn, destroy.fn and the index to the coroutine frame immediately.
274 // So the parent of the coro.begin in C++ will be always escaping.
275 // Then we can't get any performance benefits for C++ by improving the
276 // precision of the method.
277 //
278 // The reason why we still judge it is we want to make LLVM Coroutine in
279 // switch ABIs to be self contained as much as possible instead of a
280 // by-product of C++20 Coroutines.
281 EscapingBBs.insert(Ptr: cast<Instruction>(Val: U)->getParent());
282 }
283
284 bool PotentiallyEscaped = false;
285
286 do {
287 const auto *BB = Worklist.pop_back_val();
288 if (!Visited.insert(Ptr: BB).second)
289 continue;
290
291 // A Path insensitive marker to test whether the coro.begin escapes.
292 // It is intentional to make it path insensitive while it may not be
293 // precise since we don't want the process to be too slow.
294 PotentiallyEscaped |= EscapingBBs.count(Ptr: BB);
295
296 if (TIs.count(Ptr: BB)) {
297 if (isa<ReturnInst>(Val: BB->getTerminator()) || PotentiallyEscaped)
298 return true;
299
300 // If the function ends with the exceptional terminator, the memory used
301 // by the coroutine frame can be released by stack unwinding
302 // automatically. So we can think the coro.begin doesn't escape if it
303 // exits the function by exceptional terminator.
304
305 continue;
306 }
307
308 // Conservatively say that there is potentially a path.
309 if (!--Limit)
310 return true;
311
312 auto TI = BB->getTerminator();
313 // Although the default dest of coro.suspend switches is suspend pointer
314 // which means a escape path to normal terminator, it is reasonable to skip
315 // it since coroutine frame doesn't change outside the coroutine body.
316 if (isa<SwitchInst>(Val: TI) &&
317 FEI.CoroSuspendSwitches.count(Ptr: cast<SwitchInst>(Val: TI))) {
318 Worklist.push_back(Elt: cast<SwitchInst>(Val: TI)->getSuccessor(idx: 1));
319 Worklist.push_back(Elt: cast<SwitchInst>(Val: TI)->getSuccessor(idx: 2));
320 } else
321 Worklist.append(in_start: succ_begin(BB), in_end: succ_end(BB));
322
323 } while (!Worklist.empty());
324
325 // We have exhausted all possible paths and are certain that coro.begin can
326 // not reach to any of terminators.
327 return false;
328}
329
330bool CoroIdElider::lifetimeEligibleForElide() const {
331 // If no CoroAllocs, we cannot suppress allocation, so elision is not
332 // possible.
333 if (CoroAllocs.empty())
334 return false;
335
336 // Check that for every coro.begin there is at least one coro.dead/destroy
337 // directly referencing the SSA value of that coro.begin along each
338 // non-exceptional path.
339 //
340 // If the value escaped, then coro.dead/destroy would have been referencing a
341 // memory location storing that value and not the virtual register.
342
343 SmallPtrSet<BasicBlock *, 8> Terminators;
344 // First gather all of the terminators for the function.
345 // Consider the final coro.suspend as the real terminator when the current
346 // function is a coroutine.
347 for (BasicBlock &B : *FEI.ContainingFunction) {
348 auto *TI = B.getTerminator();
349
350 if (TI->getNumSuccessors() != 0 || isa<UnreachableInst>(Val: TI))
351 continue;
352
353 Terminators.insert(Ptr: &B);
354 }
355
356 // Filter out the coro.dead/destroy that lie along exceptional paths.
357 for (const auto *CB : CoroBegins) {
358 auto It = BeginDeadMap.find(Val: CB);
359 if (It == BeginDeadMap.end())
360 return false;
361
362 // If every terminators is dominated by coro.dead/destroy, we could know the
363 // corresponding coro.begin wouldn't escape.
364 auto DominatesTerminator = [&](auto *TI) {
365 return llvm::any_of(It->second, [&](auto *Destroy) {
366 return DT.dominates(Destroy, TI->getTerminator());
367 });
368 };
369
370 if (llvm::all_of(Range&: Terminators, P: DominatesTerminator))
371 continue;
372
373 // Otherwise canCoroBeginEscape would decide whether there is any paths from
374 // coro.begin to Terminators which not pass through any of the
375 // coro.dead/destroy. This is a slower analysis.
376 //
377 // canCoroBeginEscape is relatively slow, so we avoid to run it as much as
378 // possible.
379 if (canCoroBeginEscape(CB, TIs: Terminators))
380 return false;
381 }
382
383 // We have checked all CoroBegins and their paths to the terminators without
384 // finding disqualifying code patterns, so we can perform heap allocations.
385 return true;
386}
387
388bool CoroIdElider::attemptElide() {
389 // PostSplit coro.id refers to an array of subfunctions in its Info
390 // argument.
391 ConstantArray *Resumers = CoroId->getInfo().Resumers;
392 assert(Resumers && "PostSplit coro.id Info argument must refer to an array"
393 "of coroutine subfunctions");
394 auto *ResumeAddrConstant =
395 Resumers->getAggregateElement(Elt: CoroSubFnInst::ResumeIndex);
396
397 replaceWithConstant(Value: ResumeAddrConstant, Users&: ResumeAddr);
398
399 bool EligibleForElide = lifetimeEligibleForElide();
400
401 auto *DestroyAddrConstant = Resumers->getAggregateElement(
402 Elt: EligibleForElide ? CoroSubFnInst::CleanupIndex
403 : CoroSubFnInst::DestroyIndex);
404
405 replaceWithConstant(Value: DestroyAddrConstant, Users&: DestroyAddr);
406
407 auto FrameSizeAndAlign = getFrameLayout(Resume: cast<Function>(Val: ResumeAddrConstant));
408
409 auto CallerFunctionName = FEI.ContainingFunction->getName();
410 auto CalleeCoroutineName = CoroId->getCoroutine()->getName();
411
412 if (EligibleForElide && FrameSizeAndAlign) {
413 elideHeapAllocations(FrameSize: FrameSizeAndAlign->first, FrameAlign: FrameSizeAndAlign->second);
414 NumOfCoroElided++;
415
416#ifndef NDEBUG
417 if (!CoroElideInfoOutputFilename.empty())
418 *getOrCreateLogFile() << "Elide " << CalleeCoroutineName << " in "
419 << FEI.ContainingFunction->getName() << "\n";
420#endif
421
422 ORE.emit(RemarkBuilder: [&]() {
423 return OptimizationRemark(DEBUG_TYPE, "CoroElide", CoroId)
424 << "'" << ore::NV("callee", CalleeCoroutineName)
425 << "' elided in '" << ore::NV("caller", CallerFunctionName)
426 << "' (frame_size="
427 << ore::NV("frame_size", FrameSizeAndAlign->first) << ", align="
428 << ore::NV("align", FrameSizeAndAlign->second.value()) << ")";
429 });
430 } else {
431 ORE.emit(RemarkBuilder: [&]() {
432 auto Remark = OptimizationRemarkMissed(DEBUG_TYPE, "CoroElide", CoroId)
433 << "'" << ore::NV("callee", CalleeCoroutineName)
434 << "' not elided in '"
435 << ore::NV("caller", CallerFunctionName);
436
437 if (FrameSizeAndAlign)
438 return Remark << "' (frame_size="
439 << ore::NV("frame_size", FrameSizeAndAlign->first)
440 << ", align="
441 << ore::NV("align", FrameSizeAndAlign->second.value())
442 << ")";
443 else
444 return Remark << "' (frame_size=unknown, align=unknown)";
445 });
446 }
447
448 return true;
449}
450
451PreservedAnalyses CoroElidePass::run(Function &F, FunctionAnalysisManager &AM) {
452 auto &M = *F.getParent();
453 if (!coro::declaresIntrinsics(M, List: Intrinsic::coro_id))
454 return PreservedAnalyses::all();
455
456 FunctionElideInfo FEI{&F};
457 // Elide is not necessary if there's no coro.id within the function.
458 if (!FEI.hasCoroIds())
459 return PreservedAnalyses::all();
460
461 AAResults &AA = AM.getResult<AAManager>(IR&: F);
462 DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(IR&: F);
463 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(IR&: F);
464
465 bool Changed = false;
466 for (auto *CII : FEI.getCoroIds()) {
467 CoroIdElider CIE(CII, FEI, AA, DT, ORE);
468 Changed |= CIE.attemptElide();
469 }
470
471 return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
472}
473