1//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular value,
// its definition precedes and its uses follow a suspend point. Such a value is
// referred to as a suspend crossing value.
11//
12// Using the information discovered we form a Coroutine Frame structure to
13// contain those values. All uses of those values are replaced with appropriate
14// GEP + load from the coroutine frame. At the point of the definition we spill
15// the value into the coroutine frame.
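//
// For illustration (simplified IR; names and types are illustrative):
//
//   %x = call i32 @foo()                                  ; def before suspend
//   %sp = call i8 @llvm.coro.suspend(token none, i1 false)
//   ...
//   call void @bar(i32 %x)                                ; use after suspend
//
// Here %x is a suspend crossing value: it is stored into the coroutine frame
// at its definition and reloaded from the frame (via a GEP + load) before the
// use in the resume part of the coroutine.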
16//===----------------------------------------------------------------------===//
17
18#include "CoroInternal.h"
19#include "llvm/ADT/ScopeExit.h"
20#include "llvm/ADT/SmallString.h"
21#include "llvm/Analysis/StackLifetime.h"
22#include "llvm/IR/DIBuilder.h"
23#include "llvm/IR/DebugInfo.h"
24#include "llvm/IR/Dominators.h"
25#include "llvm/IR/IRBuilder.h"
26#include "llvm/IR/InstIterator.h"
27#include "llvm/IR/IntrinsicInst.h"
28#include "llvm/IR/MDBuilder.h"
29#include "llvm/IR/Module.h"
30#include "llvm/Support/Compiler.h"
31#include "llvm/Support/Debug.h"
32#include "llvm/Support/OptimizedStructLayout.h"
33#include "llvm/Transforms/Coroutines/ABI.h"
34#include "llvm/Transforms/Coroutines/CoroInstr.h"
35#include "llvm/Transforms/Coroutines/MaterializationUtils.h"
36#include "llvm/Transforms/Coroutines/SpillUtils.h"
37#include "llvm/Transforms/Coroutines/SuspendCrossingInfo.h"
38#include "llvm/Transforms/Utils/BasicBlockUtils.h"
39#include "llvm/Transforms/Utils/Local.h"
40#include "llvm/Transforms/Utils/PromoteMemToReg.h"
41#include <algorithm>
42#include <optional>
43
44using namespace llvm;
45
46#define DEBUG_TYPE "coro-frame"
47
48namespace {
49class FrameTypeBuilder;
50// Mapping from the to-be-spilled value to all the users that need reload.
51struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
54 coro::SpillInfo &Spills;
55 // Allocas contains all values defined as allocas that need to live in the
56 // frame.
57 SmallVectorImpl<coro::AllocaInfo> &Allocas;
58
59 FrameDataInfo(coro::SpillInfo &Spills,
60 SmallVectorImpl<coro::AllocaInfo> &Allocas)
61 : Spills(Spills), Allocas(Allocas) {}
62
63 SmallVector<Value *, 8> getAllDefs() const {
64 SmallVector<Value *, 8> Defs;
65 for (const auto &P : Spills)
66 Defs.push_back(Elt: P.first);
67 for (const auto &A : Allocas)
68 Defs.push_back(Elt: A.Alloca);
69 return Defs;
70 }
71
72 uint32_t getFieldIndex(Value *V) const {
73 auto Itr = FieldIndexMap.find(Val: V);
74 assert(Itr != FieldIndexMap.end() &&
75 "Value does not have a frame field index");
76 return Itr->second;
77 }
78
79 void setFieldIndex(Value *V, uint32_t Index) {
80 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
81 "Cannot set the index for the same field twice.");
82 FieldIndexMap[V] = Index;
83 }
84
85 Align getAlign(Value *V) const {
86 auto Iter = FieldAlignMap.find(Val: V);
87 assert(Iter != FieldAlignMap.end());
88 return Iter->second;
89 }
90
91 void setAlign(Value *V, Align AL) {
92 assert(FieldAlignMap.count(V) == 0);
93 FieldAlignMap.insert(KV: {V, AL});
94 }
95
96 uint64_t getDynamicAlign(Value *V) const {
97 auto Iter = FieldDynamicAlignMap.find(Val: V);
98 assert(Iter != FieldDynamicAlignMap.end());
99 return Iter->second;
100 }
101
102 void setDynamicAlign(Value *V, uint64_t Align) {
103 assert(FieldDynamicAlignMap.count(V) == 0);
104 FieldDynamicAlignMap.insert(KV: {V, Align});
105 }
106
107 uint64_t getOffset(Value *V) const {
108 auto Iter = FieldOffsetMap.find(Val: V);
109 assert(Iter != FieldOffsetMap.end());
110 return Iter->second;
111 }
112
113 void setOffset(Value *V, uint64_t Offset) {
114 assert(FieldOffsetMap.count(V) == 0);
115 FieldOffsetMap.insert(KV: {V, Offset});
116 }
117
118 // Remap the index of every field in the frame, using the final layout index.
119 void updateLayoutIndex(FrameTypeBuilder &B);
120
121private:
122 // LayoutIndexUpdateStarted is used to avoid updating the index of any field
123 // twice by mistake.
124 bool LayoutIndexUpdateStarted = false;
125 // Map from values to their slot indexes on the frame. They will be first set
126 // with their original insertion field index. After the frame is built, their
127 // indexes will be updated into the final layout index.
128 DenseMap<Value *, uint32_t> FieldIndexMap;
129 // Map from values to their alignment on the frame. They would be set after
130 // the frame is built.
131 DenseMap<Value *, Align> FieldAlignMap;
132 DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
133 // Map from values to their offset on the frame. They would be set after
134 // the frame is built.
135 DenseMap<Value *, uint64_t> FieldOffsetMap;
136};
137} // namespace
138
139#ifndef NDEBUG
140static void dumpSpills(StringRef Title, const coro::SpillInfo &Spills) {
141 dbgs() << "------------- " << Title << " --------------\n";
142 for (const auto &E : Spills) {
143 E.first->dump();
144 dbgs() << " user: ";
145 for (auto *I : E.second)
146 I->dump();
147 }
148}
149
150static void dumpAllocas(const SmallVectorImpl<coro::AllocaInfo> &Allocas) {
151 dbgs() << "------------- Allocas --------------\n";
152 for (const auto &A : Allocas) {
153 A.Alloca->dump();
154 }
155}
156#endif
157
158namespace {
159using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the allocated type, we will need to
// insert padding.
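//
// For example (illustrative), an `alloca i64, align 16` cannot simply be laid
// out at whatever offset the preceding fields end at; if that offset is only
// 8-byte aligned, padding bytes must be inserted so the field honors its
// 16-byte alignment within the frame struct.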
164class FrameTypeBuilder {
165private:
166 struct Field {
167 uint64_t Size;
168 uint64_t Offset;
169 Type *Ty;
170 FieldIDType LayoutFieldIndex;
171 Align Alignment;
172 Align TyAlignment;
173 uint64_t DynamicAlignBuffer;
174 };
175
176 const DataLayout &DL;
177 LLVMContext &Context;
178 uint64_t StructSize = 0;
179 Align StructAlign;
180 bool IsFinished = false;
181
182 std::optional<Align> MaxFrameAlignment;
183
184 SmallVector<Field, 8> Fields;
185 DenseMap<Value*, unsigned> FieldIndexByKey;
186
187public:
188 FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
189 std::optional<Align> MaxFrameAlignment)
190 : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
191
192 /// Add a field to this structure for the storage of an `alloca`
193 /// instruction.
194 [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
195 bool IsHeader = false) {
196 Type *Ty = AI->getAllocatedType();
197
198 // Make an array type if this is a static array allocation.
199 if (AI->isArrayAllocation()) {
200 if (auto *CI = dyn_cast<ConstantInt>(Val: AI->getArraySize()))
201 Ty = ArrayType::get(ElementType: Ty, NumElements: CI->getValue().getZExtValue());
202 else
203 report_fatal_error(reason: "Coroutines cannot handle non static allocas yet");
204 }
205
206 return addField(Ty, MaybeFieldAlignment: AI->getAlign(), IsHeader);
207 }
208
  /// We want to put allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
212 ///
213 /// cppcoro::task<void> alternative_paths(bool cond) {
214 /// if (cond) {
215 /// big_structure a;
216 /// process(a);
217 /// co_await something();
218 /// } else {
219 /// big_structure b;
220 /// process2(b);
221 /// co_await something();
222 /// }
223 /// }
224 ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
227 ///
  /// This function uses the StackLifetime algorithm to partition the allocas
  /// in the frame data into non-overlapping sets, so that allocas in the same
  /// set can share one slot in the coroutine frame. For each set, a single
  /// field is added, using the largest member's type as the field type.
  ///
  /// Side Effects: Because we sort the allocas, the order of allocas in the
  /// frame may differ from their order in the source code.
236 void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
237 coro::Shape &Shape, bool OptimizeFrame);
238
239 /// Add a field to this structure.
240 [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
241 bool IsHeader = false,
242 bool IsSpillOfValue = false) {
243 assert(!IsFinished && "adding fields to a finished builder");
244 assert(Ty && "must provide a type for a field");
245
246 // The field size is always the alloc size of the type.
247 uint64_t FieldSize = DL.getTypeAllocSize(Ty);
248
    // For an alloca with size = 0, we don't need to add a field; it can just
    // point to any index in the frame. Use index 0.
251 if (FieldSize == 0) {
252 return 0;
253 }
254
255 // The field alignment might not be the type alignment, but we need
256 // to remember the type alignment anyway to build the type.
257 // If we are spilling values we don't need to worry about ABI alignment
258 // concerns.
259 Align ABIAlign = DL.getABITypeAlign(Ty);
260 Align TyAlignment = ABIAlign;
261 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
262 TyAlignment = *MaxFrameAlignment;
263 Align FieldAlignment = MaybeFieldAlignment.value_or(u&: TyAlignment);
264
    // The field alignment could be bigger than the maximum frame alignment. In
    // that case we request additional storage so that the pointer can be
    // dynamically aligned at runtime.
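    // For example (illustrative values): with MaxFrameAlignment = 16 and a
    // field that requires align 32, we keep FieldAlignment at 16 and reserve
    // DynamicAlignBuffer = 16 extra bytes, so the pointer can be rounded up to
    // a 32-byte boundary at runtime.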
268 uint64_t DynamicAlignBuffer = 0;
269 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
270 DynamicAlignBuffer =
271 offsetToAlignment(Value: MaxFrameAlignment->value(), Alignment: FieldAlignment);
272 FieldAlignment = *MaxFrameAlignment;
273 FieldSize = FieldSize + DynamicAlignBuffer;
274 }
275
276 // Lay out header fields immediately.
277 uint64_t Offset;
278 if (IsHeader) {
279 Offset = alignTo(Size: StructSize, A: FieldAlignment);
280 StructSize = Offset + FieldSize;
281
282 // Everything else has a flexible offset.
283 } else {
284 Offset = OptimizedStructLayoutField::FlexibleOffset;
285 }
286
287 Fields.push_back(Elt: {.Size: FieldSize, .Offset: Offset, .Ty: Ty, .LayoutFieldIndex: 0, .Alignment: FieldAlignment, .TyAlignment: TyAlignment,
288 .DynamicAlignBuffer: DynamicAlignBuffer});
289 return Fields.size() - 1;
290 }
291
292 /// Finish the layout and create the struct type with the given name.
293 StructType *finish(StringRef Name);
294
295 uint64_t getStructSize() const {
296 assert(IsFinished && "not yet finished!");
297 return StructSize;
298 }
299
300 Align getStructAlign() const {
301 assert(IsFinished && "not yet finished!");
302 return StructAlign;
303 }
304
305 FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
306 assert(IsFinished && "not yet finished!");
307 return Fields[Id].LayoutFieldIndex;
308 }
309
310 Field getLayoutField(FieldIDType Id) const {
311 assert(IsFinished && "not yet finished!");
312 return Fields[Id];
313 }
314};
315} // namespace
316
317void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
318 auto Updater = [&](Value *I) {
319 auto Field = B.getLayoutField(Id: getFieldIndex(V: I));
320 setFieldIndex(V: I, Index: Field.LayoutFieldIndex);
321 setAlign(V: I, AL: Field.Alignment);
322 uint64_t dynamicAlign =
323 Field.DynamicAlignBuffer
324 ? Field.DynamicAlignBuffer + Field.Alignment.value()
325 : 0;
326 setDynamicAlign(V: I, Align: dynamicAlign);
327 setOffset(V: I, Offset: Field.Offset);
328 };
329 LayoutIndexUpdateStarted = true;
330 for (auto &S : Spills)
331 Updater(S.first);
332 for (const auto &A : Allocas)
333 Updater(A.Alloca);
334 LayoutIndexUpdateStarted = false;
335}
336
337void FrameTypeBuilder::addFieldForAllocas(const Function &F,
338 FrameDataInfo &FrameData,
339 coro::Shape &Shape,
340 bool OptimizeFrame) {
341 using AllocaSetType = SmallVector<AllocaInst *, 4>;
342 SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
343
  // We need to add fields for the allocas at the end of this function.
345 llvm::scope_exit AddFieldForAllocasAtExit([&]() {
346 for (auto AllocaList : NonOverlapedAllocas) {
347 auto *LargestAI = *AllocaList.begin();
348 FieldIDType Id = addFieldForAlloca(AI: LargestAI);
349 for (auto *Alloca : AllocaList)
350 FrameData.setFieldIndex(V: Alloca, Index: Id);
351 }
352 });
353
354 if (!OptimizeFrame) {
355 for (const auto &A : FrameData.Allocas) {
356 AllocaInst *Alloca = A.Alloca;
357 NonOverlapedAllocas.emplace_back(Args: AllocaSetType(1, Alloca));
358 }
359 return;
360 }
361
  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all the allocas overlap
  // in the blocks that contain coro.end and their successor blocks.
  // So we choose to skip those blocks when we calculate the live range
  // for each alloca. This should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, that
  // case seems too complex to handle, and it is harmless to skip these
  // patterns, since doing so only prevents the affected allocas from sharing
  // the same slot.
374 DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
375 for (auto *CoroSuspendInst : Shape.CoroSuspends) {
376 for (auto *U : CoroSuspendInst->users()) {
377 if (auto *ConstSWI = dyn_cast<SwitchInst>(Val: U)) {
378 auto *SWI = const_cast<SwitchInst *>(ConstSWI);
379 DefaultSuspendDest[SWI] = SWI->getDefaultDest();
380 SWI->setDefaultDest(SWI->getSuccessor(idx: 1));
381 }
382 }
383 }
384
385 auto ExtractAllocas = [&]() {
386 AllocaSetType Allocas;
387 Allocas.reserve(N: FrameData.Allocas.size());
388 for (const auto &A : FrameData.Allocas)
389 Allocas.push_back(Elt: A.Alloca);
390 return Allocas;
391 };
392 StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
393 StackLifetime::LivenessType::May);
394 StackLifetimeAnalyzer.run();
395 auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
396 return StackLifetimeAnalyzer.getLiveRange(AI: AI1).overlaps(
397 Other: StackLifetimeAnalyzer.getLiveRange(AI: AI2));
398 };
399 auto GetAllocaSize = [&](const coro::AllocaInfo &A) {
400 std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
401 assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
402 assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
403 return RetSize->getFixedValue();
404 };
  // Put larger allocas in the front, so that larger allocas have higher
  // priority to merge, which can potentially save more space. This also keeps
  // each AllocaSet ordered, so we can easily get the largest alloca in a set
  // (it is the first element).
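  // For example (illustrative sizes), allocas of 8, 32 and 16 bytes are
  // visited in the order 32, 16, 8, so the 32-byte alloca becomes the first
  // (largest) member of whatever set it ends up in.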
409 sort(C&: FrameData.Allocas, Comp: [&](const auto &Iter1, const auto &Iter2) {
410 return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
411 });
412 for (const auto &A : FrameData.Allocas) {
413 AllocaInst *Alloca = A.Alloca;
414 bool Merged = false;
    // Try to find an existing NonOverlappedAllocaSet that this alloca does not
    // interfere with. If one is found, insert the alloca into that set.
418 for (auto &AllocaSet : NonOverlapedAllocas) {
419 assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
420 bool NoInterference = none_of(Range&: AllocaSet, P: [&](auto Iter) {
421 return DoAllocasInterfere(Alloca, Iter);
422 });
      // If the alignment of A is a multiple of the alignment of B, the address
      // of A satisfies the alignment requirement of B. For example, an alloca
      // requiring align 4 can share the slot of a leader requiring align 8.
      //
      // There may be more fine-grained strategies for handling alignment
      // information during the merging process, but they seem hard to
      // implement and would benefit little.
429 bool Alignable = [&]() -> bool {
430 auto *LargestAlloca = *AllocaSet.begin();
431 return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
432 0;
433 }();
434 bool CouldMerge = NoInterference && Alignable;
435 if (!CouldMerge)
436 continue;
437 AllocaSet.push_back(Elt: Alloca);
438 Merged = true;
439 break;
440 }
441 if (!Merged) {
442 NonOverlapedAllocas.emplace_back(Args: AllocaSetType(1, Alloca));
443 }
444 }
  // Restore the default destination we saved above for each switch
  // statement.
447 for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
448 SwitchInst *SWI = SwitchAndDefaultDest.first;
449 BasicBlock *DestBB = SwitchAndDefaultDest.second;
450 SWI->setDefaultDest(DestBB);
451 }
  // This debug output tells us which allocas were merged into one slot.
453 LLVM_DEBUG(for (auto &AllocaSet
454 : NonOverlapedAllocas) {
455 if (AllocaSet.size() > 1) {
456 dbgs() << "In Function:" << F.getName() << "\n";
457 dbgs() << "Find Union Set "
458 << "\n";
459 dbgs() << "\tAllocas are \n";
460 for (auto Alloca : AllocaSet)
461 dbgs() << "\t\t" << *Alloca << "\n";
462 }
463 });
464}
465
466StructType *FrameTypeBuilder::finish(StringRef Name) {
467 assert(!IsFinished && "already finished!");
468
469 // Prepare the optimal-layout field array.
470 // The Id in the layout field is a pointer to our Field for it.
471 SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
472 LayoutFields.reserve(N: Fields.size());
473 for (auto &Field : Fields) {
474 LayoutFields.emplace_back(Args: &Field, Args&: Field.Size, Args&: Field.Alignment,
475 Args&: Field.Offset);
476 }
477
478 // Perform layout.
479 auto SizeAndAlign = performOptimizedStructLayout(Fields: LayoutFields);
480 StructSize = SizeAndAlign.first;
481 StructAlign = SizeAndAlign.second;
482
483 auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
484 return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
485 };
486
487 // We need to produce a packed struct type if there's a field whose
488 // assigned offset isn't a multiple of its natural type alignment.
489 bool Packed = [&] {
490 for (auto &LayoutField : LayoutFields) {
491 auto &F = getField(LayoutField);
492 if (!isAligned(Lhs: F.TyAlignment, SizeInBytes: LayoutField.Offset))
493 return true;
494 }
495 return false;
496 }();
497
498 // Build the struct body.
499 SmallVector<Type*, 16> FieldTypes;
500 FieldTypes.reserve(N: LayoutFields.size() * 3 / 2);
501 uint64_t LastOffset = 0;
502 for (auto &LayoutField : LayoutFields) {
503 auto &F = getField(LayoutField);
504
505 auto Offset = LayoutField.Offset;
506
507 // Add a padding field if there's a padding gap and we're either
508 // building a packed struct or the padding gap is more than we'd
509 // get from aligning to the field type's natural alignment.
510 assert(Offset >= LastOffset);
511 if (Offset != LastOffset) {
512 if (Packed || alignTo(Size: LastOffset, A: F.TyAlignment) != Offset)
513 FieldTypes.push_back(Elt: ArrayType::get(ElementType: Type::getInt8Ty(C&: Context),
514 NumElements: Offset - LastOffset));
515 }
516
517 F.Offset = Offset;
518 F.LayoutFieldIndex = FieldTypes.size();
519
520 FieldTypes.push_back(Elt: F.Ty);
521 if (F.DynamicAlignBuffer) {
522 FieldTypes.push_back(
523 Elt: ArrayType::get(ElementType: Type::getInt8Ty(C&: Context), NumElements: F.DynamicAlignBuffer));
524 }
525 LastOffset = Offset + F.Size;
526 }
527
528 StructType *Ty = StructType::create(Context, Elements: FieldTypes, Name, isPacked: Packed);
529
530#ifndef NDEBUG
531 // Check that the IR layout matches the offsets we expect.
532 auto Layout = DL.getStructLayout(Ty);
533 for (auto &F : Fields) {
534 assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
535 assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
536 }
537#endif
538
539 IsFinished = true;
540
541 return Ty;
542}
543
544static void cacheDIVar(FrameDataInfo &FrameData,
545 DenseMap<Value *, DILocalVariable *> &DIVarCache) {
546 for (auto *V : FrameData.getAllDefs()) {
547 if (DIVarCache.contains(Val: V))
548 continue;
549
550 auto CacheIt = [&DIVarCache, V](const auto &Container) {
551 auto *I = llvm::find_if(Container, [](auto *DDI) {
552 return DDI->getExpression()->getNumElements() == 0;
553 });
554 if (I != Container.end())
555 DIVarCache.insert({V, (*I)->getVariable()});
556 };
557 CacheIt(findDVRDeclares(V));
558 CacheIt(findDVRDeclareValues(V));
559 }
560}
561
/// Create a name for Type. It uses MDString to store the newly created string
/// to avoid memory leaks.
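/// For example, an i32 maps to "__int_32" and a struct named "struct.Foo"
/// maps to "struct_Foo" ('.' and ':' are replaced with '_').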
564static StringRef solveTypeName(Type *Ty) {
565 if (Ty->isIntegerTy()) {
    // The longest common name may be '__int_128', which has 9 characters.
567 SmallString<16> Buffer;
568 raw_svector_ostream OS(Buffer);
569 OS << "__int_" << cast<IntegerType>(Val: Ty)->getBitWidth();
570 auto *MDName = MDString::get(Context&: Ty->getContext(), Str: OS.str());
571 return MDName->getString();
572 }
573
574 if (Ty->isFloatingPointTy()) {
575 if (Ty->isFloatTy())
576 return "__float_";
577 if (Ty->isDoubleTy())
578 return "__double_";
579 return "__floating_type_";
580 }
581
582 if (Ty->isPointerTy())
583 return "PointerType";
584
585 if (Ty->isStructTy()) {
586 if (!cast<StructType>(Val: Ty)->hasName())
587 return "__LiteralStructType_";
588
589 auto Name = Ty->getStructName();
590
591 SmallString<16> Buffer(Name);
592 for (auto &Iter : Buffer)
593 if (Iter == '.' || Iter == ':')
594 Iter = '_';
595 auto *MDName = MDString::get(Context&: Ty->getContext(), Str: Buffer.str());
596 return MDName->getString();
597 }
598
599 return "UnknownType";
600}
601
602static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
603 const DataLayout &Layout, DIScope *Scope,
604 unsigned LineNum,
605 DenseMap<Type *, DIType *> &DITypeCache) {
606 if (DIType *DT = DITypeCache.lookup(Val: Ty))
607 return DT;
608
609 StringRef Name = solveTypeName(Ty);
610
611 DIType *RetType = nullptr;
612
613 if (Ty->isIntegerTy()) {
614 auto BitWidth = cast<IntegerType>(Val: Ty)->getBitWidth();
615 RetType = Builder.createBasicType(Name, SizeInBits: BitWidth, Encoding: dwarf::DW_ATE_signed,
616 Flags: llvm::DINode::FlagArtificial);
617 } else if (Ty->isFloatingPointTy()) {
618 RetType = Builder.createBasicType(Name, SizeInBits: Layout.getTypeSizeInBits(Ty),
619 Encoding: dwarf::DW_ATE_float,
620 Flags: llvm::DINode::FlagArtificial);
621 } else if (Ty->isPointerTy()) {
    // Construct a PointerType that points to null (i.e. void *) instead of
    // exploring the pointee type, to avoid an infinite search. For example, we
    // would be in trouble if we traversed recursively:
625 //
626 // struct Node {
627 // Node* ptr;
628 // };
629 RetType =
630 Builder.createPointerType(PointeeTy: nullptr, SizeInBits: Layout.getTypeSizeInBits(Ty),
631 AlignInBits: Layout.getABITypeAlign(Ty).value() * CHAR_BIT,
632 /*DWARFAddressSpace=*/std::nullopt, Name);
633 } else if (Ty->isStructTy()) {
634 auto *DIStruct = Builder.createStructType(
635 Scope, Name, File: Scope->getFile(), LineNumber: LineNum, SizeInBits: Layout.getTypeSizeInBits(Ty),
636 AlignInBits: Layout.getPrefTypeAlign(Ty).value() * CHAR_BIT,
637 Flags: llvm::DINode::FlagArtificial, DerivedFrom: nullptr, Elements: llvm::DINodeArray());
638
639 auto *StructTy = cast<StructType>(Val: Ty);
640 SmallVector<Metadata *, 16> Elements;
641 for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
642 DIType *DITy = solveDIType(Builder, Ty: StructTy->getElementType(N: I), Layout,
643 Scope: DIStruct, LineNum, DITypeCache);
644 assert(DITy);
645 Elements.push_back(Elt: Builder.createMemberType(
646 Scope: DIStruct, Name: DITy->getName(), File: DIStruct->getFile(), LineNo: LineNum,
647 SizeInBits: DITy->getSizeInBits(), AlignInBits: DITy->getAlignInBits(),
648 OffsetInBits: Layout.getStructLayout(Ty: StructTy)->getElementOffsetInBits(Idx: I),
649 Flags: llvm::DINode::FlagArtificial, Ty: DITy));
650 }
651
652 Builder.replaceArrays(T&: DIStruct, Elements: Builder.getOrCreateArray(Elements));
653
654 RetType = DIStruct;
655 } else {
656 LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
657 TypeSize Size = Layout.getTypeSizeInBits(Ty);
658 auto *CharSizeType = Builder.createBasicType(
659 Name, SizeInBits: 8, Encoding: dwarf::DW_ATE_unsigned_char, Flags: llvm::DINode::FlagArtificial);
660
661 if (Size <= 8)
662 RetType = CharSizeType;
663 else {
664 if (Size % 8 != 0)
665 Size = TypeSize::getFixed(ExactSize: Size + 8 - (Size % 8));
666
667 RetType = Builder.createArrayType(
668 Size, AlignInBits: Layout.getPrefTypeAlign(Ty).value(), Ty: CharSizeType,
669 Subscripts: Builder.getOrCreateArray(Elements: Builder.getOrCreateSubrange(Lo: 0, Count: Size / 8)));
670 }
671 }
672
673 DITypeCache.insert(KV: {Ty, RetType});
674 return RetType;
675}
676
677/// Build artificial debug info for C++ coroutine frames to allow users to
678/// inspect the contents of the frame directly
679///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine frame
/// is constructed as follows:
/// 1. For every value in the frame, we search its dbg.declare uses to find the
///    corresponding debug variable. If we can find the debug variable, we get
///    full and accurate debug information.
/// 2. If step 1 fails, we can only try to build the DIType from the IR type.
///    We do this in solveDIType. We only handle integer, floating-point,
///    pointer, and struct types for now.
689static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
690 FrameDataInfo &FrameData) {
691 DISubprogram *DIS = F.getSubprogram();
692 // If there is no DISubprogram for F, it implies the function is compiled
693 // without debug info. So we also don't generate debug info for the frame.
694
695 if (!DIS || !DIS->getUnit())
696 return;
697
698 if (!dwarf::isCPlusPlus(S: static_cast<llvm::dwarf::SourceLanguage>(
699 DIS->getUnit()->getSourceLanguage().getUnversionedName())) ||
700 DIS->getUnit()->getEmissionKind() !=
701 DICompileUnit::DebugEmissionKind::FullDebug)
702 return;
703
  assert(Shape.ABI == coro::ABI::Switch &&
         "We can only build debug information for C++ coroutines now.\n");
706
707 DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
708
709 DIFile *DFile = DIS->getFile();
710 unsigned LineNum = DIS->getLine();
711
712 DICompositeType *FrameDITy = DBuilder.createStructType(
713 Scope: DIS->getUnit(), Name: Twine(F.getName() + ".coro_frame_ty").str(),
714 File: DFile, LineNumber: LineNum, SizeInBits: Shape.FrameSize * 8,
715 AlignInBits: Shape.FrameAlign.value() * 8, Flags: llvm::DINode::FlagArtificial, DerivedFrom: nullptr,
716 Elements: llvm::DINodeArray());
717 StructType *FrameTy = Shape.FrameTy;
718 SmallVector<Metadata *, 16> Elements;
719 DataLayout Layout = F.getDataLayout();
720
721 DenseMap<Value *, DILocalVariable *> DIVarCache;
722 cacheDIVar(FrameData, DIVarCache);
723
724 unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
725 unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
726 unsigned IndexIndex = Shape.SwitchLowering.IndexField;
727
728 DenseMap<unsigned, StringRef> NameCache;
729 NameCache.insert(KV: {ResumeIndex, "__resume_fn"});
730 NameCache.insert(KV: {DestroyIndex, "__destroy_fn"});
731 NameCache.insert(KV: {IndexIndex, "__coro_index"});
732
733 Type *ResumeFnTy = FrameTy->getElementType(N: ResumeIndex),
734 *DestroyFnTy = FrameTy->getElementType(N: DestroyIndex),
735 *IndexTy = FrameTy->getElementType(N: IndexIndex);
736
737 DenseMap<unsigned, DIType *> TyCache;
738 TyCache.insert(
739 KV: {ResumeIndex, DBuilder.createPointerType(
740 PointeeTy: nullptr, SizeInBits: Layout.getTypeSizeInBits(Ty: ResumeFnTy))});
741 TyCache.insert(
742 KV: {DestroyIndex, DBuilder.createPointerType(
743 PointeeTy: nullptr, SizeInBits: Layout.getTypeSizeInBits(Ty: DestroyFnTy))});
744
745 /// FIXME: If we fill the field `SizeInBits` with the actual size of
746 /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
747 TyCache.insert(KV: {IndexIndex, DBuilder.createBasicType(
748 Name: "__coro_index",
749 SizeInBits: (Layout.getTypeSizeInBits(Ty: IndexTy) < 8)
750 ? 8
751 : Layout.getTypeSizeInBits(Ty: IndexTy),
752 Encoding: dwarf::DW_ATE_unsigned_char)});
753
754 for (auto *V : FrameData.getAllDefs()) {
755 auto It = DIVarCache.find(Val: V);
756 if (It == DIVarCache.end())
757 continue;
758
759 auto Index = FrameData.getFieldIndex(V);
760
761 NameCache.insert(KV: {Index, It->second->getName()});
762 TyCache.insert(KV: {Index, It->second->getType()});
763 }
764
  // Cache from field index to an (Align, Offset) pair.
766 DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
767 // The Align and Offset of Resume function and Destroy function are fixed.
768 OffsetCache.insert(KV: {ResumeIndex, {8, 0}});
769 OffsetCache.insert(KV: {DestroyIndex, {8, 8}});
770 OffsetCache.insert(
771 KV: {IndexIndex,
772 {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});
773
774 for (auto *V : FrameData.getAllDefs()) {
775 auto Index = FrameData.getFieldIndex(V);
776
777 OffsetCache.insert(
778 KV: {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
779 }
780
781 DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names. E.g., there may be
  // many i32 and i64 fields in one coroutine, so we use names like i32_0 and
  // i32_1 to distinguish them, since field names must not conflict with each
  // other.
786 unsigned UnknownTypeNum = 0;
787 for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
788 auto OCIt = OffsetCache.find(Val: Index);
789 if (OCIt == OffsetCache.end())
790 continue;
791
792 std::string Name;
793 uint64_t SizeInBits;
794 uint32_t AlignInBits;
795 uint64_t OffsetInBits;
796 DIType *DITy = nullptr;
797
798 Type *Ty = FrameTy->getElementType(N: Index);
799 assert(Ty->isSized() && "We can't handle type which is not sized.\n");
800 SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedValue();
801 AlignInBits = OCIt->second.first * 8;
802 OffsetInBits = OCIt->second.second * 8;
803
804 if (auto It = NameCache.find(Val: Index); It != NameCache.end()) {
805 Name = It->second.str();
806 DITy = TyCache[Index];
807 } else {
808 DITy = solveDIType(Builder&: DBuilder, Ty, Layout, Scope: FrameDITy, LineNum, DITypeCache);
809 assert(DITy && "SolveDIType shouldn't return nullptr.\n");
810 Name = DITy->getName().str();
811 Name += "_" + std::to_string(val: UnknownTypeNum);
812 UnknownTypeNum++;
813 }
814
815 Elements.push_back(Elt: DBuilder.createMemberType(
816 Scope: FrameDITy, Name, File: DFile, LineNo: LineNum, SizeInBits, AlignInBits, OffsetInBits,
817 Flags: llvm::DINode::FlagArtificial, Ty: DITy));
818 }
819
820 DBuilder.replaceArrays(T&: FrameDITy, Elements: DBuilder.getOrCreateArray(Elements));
821
822 auto *FrameDIVar =
823 DBuilder.createAutoVariable(Scope: DIS, Name: "__coro_frame", File: DFile, LineNo: LineNum,
824 Ty: FrameDITy, AlwaysPreserve: true, Flags: DINode::FlagArtificial);
825
  // The subprogram has a RetainedNodes field which records the debug
  // variables it contains, so we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may see
  // `no symbol __coro_frame in context` rather than the more precise
  // `__coro_frame is optimized out`.
833 auto RetainedNodes = DIS->getRetainedNodes();
834 SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
835 RetainedNodes.end());
836 RetainedNodesVec.push_back(Elt: FrameDIVar);
837 DIS->replaceOperandWith(I: 7, New: (MDTuple::get(Context&: F.getContext(), MDs: RetainedNodesVec)));
838
839 // Construct the location for the frame debug variable. The column number
840 // is fake but it should be fine.
841 DILocation *DILoc =
842 DILocation::get(Context&: DIS->getContext(), Line: LineNum, /*Column=*/1, Scope: DIS);
843 assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));
844
845 DbgVariableRecord *NewDVR =
846 new DbgVariableRecord(ValueAsMetadata::get(V: Shape.FramePtr), FrameDIVar,
847 DBuilder.createExpression(), DILoc,
848 DbgVariableRecord::LocationType::Declare);
849 BasicBlock::iterator It = Shape.getInsertPtAfterFramePtr();
850 It->getParent()->insertDbgRecordBefore(DR: NewDVR, Here: It);
851}
852
853// Build a struct that will keep state for an active coroutine.
854// struct f.frame {
855// ResumeFnTy ResumeFnAddr;
856// ResumeFnTy DestroyFnAddr;
857// ... promise (if present) ...
858// int ResumeIndex;
859// ... spills ...
860// };
861static StructType *buildFrameType(Function &F, coro::Shape &Shape,
862 FrameDataInfo &FrameData,
863 bool OptimizeFrame) {
864 LLVMContext &C = F.getContext();
865 const DataLayout &DL = F.getDataLayout();
866
867 // We will use this value to cap the alignment of spilled values.
868 std::optional<Align> MaxFrameAlignment;
869 if (Shape.ABI == coro::ABI::Async)
870 MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
871 FrameTypeBuilder B(C, DL, MaxFrameAlignment);
872
873 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
874 std::optional<FieldIDType> SwitchIndexFieldId;
875
876 if (Shape.ABI == coro::ABI::Switch) {
877 auto *FnPtrTy = PointerType::getUnqual(C);
878
879 // Add header fields for the resume and destroy functions.
880 // We can rely on these being perfectly packed.
881 (void)B.addField(Ty: FnPtrTy, MaybeFieldAlignment: std::nullopt, /*header*/ IsHeader: true);
882 (void)B.addField(Ty: FnPtrTy, MaybeFieldAlignment: std::nullopt, /*header*/ IsHeader: true);
883
884 // PromiseAlloca field needs to be explicitly added here because it's
885 // a header field with a fixed offset based on its alignment. Hence it
886 // needs special handling and cannot be added to FrameData.Allocas.
887 if (PromiseAlloca)
888 FrameData.setFieldIndex(
889 V: PromiseAlloca, Index: B.addFieldForAlloca(AI: PromiseAlloca, /*header*/ IsHeader: true));
890
891 // Add a field to store the suspend index. This doesn't need to
892 // be in the header.
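    // For example, a coroutine with 3 suspend points needs
    // Log2_64_Ceil(3) == 2 index bits; a coroutine with a single suspend
    // point still gets 1 bit because of the std::max below.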
893 unsigned IndexBits = std::max(a: 1U, b: Log2_64_Ceil(Value: Shape.CoroSuspends.size()));
894 Type *IndexType = Type::getIntNTy(C, N: IndexBits);
895
896 SwitchIndexFieldId = B.addField(Ty: IndexType, MaybeFieldAlignment: std::nullopt);
897 } else {
898 assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
899 }
900
  // Because multiple allocas may share the same field slot,
  // we add the allocas to fields here.
903 B.addFieldForAllocas(F, FrameData, Shape, OptimizeFrame);
904 // Add PromiseAlloca to Allocas list so that
905 // 1. updateLayoutIndex could update its index after
906 // `performOptimizedStructLayout`
907 // 2. it is processed in insertSpills.
908 if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
911 FrameData.Allocas.emplace_back(
912 Args&: PromiseAlloca, Args: DenseMap<Instruction *, std::optional<APInt>>{}, Args: false);
913 // Create an entry for every spilled value.
914 for (auto &S : FrameData.Spills) {
915 Type *FieldType = S.first->getType();
916 MaybeAlign MA;
917 // For byval arguments, we need to store the pointed value in the frame,
918 // instead of the pointer itself.
919 if (const Argument *A = dyn_cast<Argument>(Val: S.first)) {
920 if (A->hasByValAttr()) {
921 FieldType = A->getParamByValType();
922 MA = A->getParamAlign();
923 }
924 }
925 FieldIDType Id =
926 B.addField(Ty: FieldType, MaybeFieldAlignment: MA, IsHeader: false /*header*/, IsSpillOfValue: true /*IsSpillOfValue*/);
927 FrameData.setFieldIndex(V: S.first, Index: Id);
928 }
929
930 StructType *FrameTy = [&] {
931 SmallString<32> Name(F.getName());
932 Name.append(RHS: ".Frame");
933 return B.finish(Name);
934 }();
935
936 FrameData.updateLayoutIndex(B);
937 Shape.FrameAlign = B.getStructAlign();
938 Shape.FrameSize = B.getStructSize();
939
940 switch (Shape.ABI) {
941 case coro::ABI::Switch: {
942 // In the switch ABI, remember the switch-index field.
943 auto IndexField = B.getLayoutField(Id: *SwitchIndexFieldId);
944 Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
945 Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
946 Shape.SwitchLowering.IndexOffset = IndexField.Offset;
947
948 // Also round the frame size up to a multiple of its alignment, as is
949 // generally expected in C/C++.
950 Shape.FrameSize = alignTo(Size: Shape.FrameSize, A: Shape.FrameAlign);
951 break;
952 }
953
954 // In the retcon ABI, remember whether the frame is inline in the storage.
955 case coro::ABI::Retcon:
956 case coro::ABI::RetconOnce: {
957 auto Id = Shape.getRetconCoroId();
958 Shape.RetconLowering.IsFrameInlineInStorage
959 = (B.getStructSize() <= Id->getStorageSize() &&
960 B.getStructAlign() <= Id->getStorageAlignment());
961 break;
962 }
963 case coro::ABI::Async: {
964 Shape.AsyncLowering.FrameOffset =
965 alignTo(Size: Shape.AsyncLowering.ContextHeaderSize, A: Shape.FrameAlign);
966 // Also make the final context size a multiple of the context alignment to
967 // make allocation easier for allocators.
968 Shape.AsyncLowering.ContextSize =
969 alignTo(Size: Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
970 A: Shape.AsyncLowering.getContextAlignment());
971 if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
975 }
976 break;
977 }
978 }
979
980 return FrameTy;
981}
982
983/// If MaybeArgument is a byval Argument, return its byval type. Also removes
984/// the captures attribute, so that the argument *value* may be stored directly
985/// on the coroutine frame.
986static Type *extractByvalIfArgument(Value *MaybeArgument) {
987 if (auto *Arg = dyn_cast<Argument>(Val: MaybeArgument)) {
988 Arg->getParent()->removeParamAttr(ArgNo: Arg->getArgNo(), Kind: Attribute::Captures);
989
990 if (Arg->hasByValAttr())
991 return Arg->getParamByValType();
992 }
993 return nullptr;
994}
995
996/// Store Def into the coroutine frame.
997static void createStoreIntoFrame(IRBuilder<> &Builder, Value *Def,
998 Type *ByValTy, const coro::Shape &Shape,
999 const FrameDataInfo &FrameData) {
1000 auto Index = FrameData.getFieldIndex(V: Def);
1001 auto *G = Builder.CreateConstInBoundsGEP2_32(
1002 Ty: Shape.FrameTy, Ptr: Shape.FramePtr, Idx0: 0, Idx1: Index,
1003 Name: Def->getName() + Twine(".spill.addr"));
1004 auto SpillAlignment = Align(FrameData.getAlign(V: Def));
1005
1006 // For byval arguments, store the pointed-to value in the frame.
1007 if (ByValTy)
1008 Builder.CreateAlignedStore(Val: Builder.CreateLoad(Ty: ByValTy, Ptr: Def), Ptr: G,
1009 Align: SpillAlignment);
1010 else
1011 Builder.CreateAlignedStore(Val: Def, Ptr: G, Align: SpillAlignment);
1012}
1013
1014/// Returns a GEP into the coroutine frame at the offset where Orig is located.
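/// For example (illustrative IR), for a value assigned field index 4 this
/// produces roughly:
///   %x.reload.addr = getelementptr inbounds %f.Frame, ptr %frame, i32 0, i32 4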
1015static Value *createGEPToFramePointer(const FrameDataInfo &FrameData,
1016 IRBuilder<> &Builder, coro::Shape &Shape,
1017 Value *Orig) {
1018 LLVMContext &Ctx = Shape.CoroBegin->getContext();
1019 FieldIDType Index = FrameData.getFieldIndex(V: Orig);
1020 SmallVector<Value *, 3> Indices = {
1021 ConstantInt::get(Ty: Type::getInt32Ty(C&: Ctx), V: 0),
1022 ConstantInt::get(Ty: Type::getInt32Ty(C&: Ctx), V: Index),
1023 };
1024
1025 // If Orig is an array alloca, preserve the original type by adding an extra
1026 // zero offset.
1027 if (auto *AI = dyn_cast<AllocaInst>(Val: Orig)) {
1028 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val: AI->getArraySize())) {
1029 auto Count = CI->getValue().getZExtValue();
1030 if (Count > 1)
1031 Indices.push_back(Elt: ConstantInt::get(Ty: Type::getInt32Ty(C&: Ctx), V: 0));
1032 } else {
1033 report_fatal_error(reason: "Coroutines cannot handle non static allocas yet");
1034 }
1035 }
1036
1037 auto *GEP = Builder.CreateInBoundsGEP(Ty: Shape.FrameTy, Ptr: Shape.FramePtr, IdxList: Indices);
1038 if (auto *AI = dyn_cast<AllocaInst>(Val: Orig)) {
1039 if (FrameData.getDynamicAlign(V: Orig) != 0) {
1040 assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
1041 auto *M = AI->getModule();
1042 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
1043 auto *PtrValue = Builder.CreatePtrToInt(V: GEP, DestTy: IntPtrTy);
1044 auto *AlignMask = ConstantInt::get(Ty: IntPtrTy, V: AI->getAlign().value() - 1);
1045 PtrValue = Builder.CreateAdd(LHS: PtrValue, RHS: AlignMask);
1046 PtrValue = Builder.CreateAnd(LHS: PtrValue, RHS: Builder.CreateNot(V: AlignMask));
1047 return Builder.CreateIntToPtr(V: PtrValue, DestTy: AI->getType());
1048 }
    // If the type of the GEP is not equal to the type of the AllocaInst, it
    // implies that the AllocaInst may be sharing its frame slot with another
    // AllocaInst.
    // Note: if the strategy for dealing with alignment changes, this cast must
    // be refined.
1053 if (GEP->getType() != Orig->getType())
1054 GEP = Builder.CreateAddrSpaceCast(V: GEP, DestTy: Orig->getType(),
1055 Name: Orig->getName() + Twine(".cast"));
1056 }
1057 return GEP;
1058}
1059
1060/// Find dbg.declare or dbg.declare_value records referencing `Def`. If none are
1061/// found, walk up the load chain to find one.
1062template <DbgVariableRecord::LocationType record_type>
1063static TinyPtrVector<DbgVariableRecord *>
1064findDbgRecordsThroughLoads(Function &F, Value *Def) {
1065 static_assert(record_type == DbgVariableRecord::LocationType::Declare ||
1066 record_type == DbgVariableRecord::LocationType::DeclareValue);
1067 constexpr auto FindFunc =
1068 record_type == DbgVariableRecord::LocationType::Declare
1069 ? findDVRDeclares
1070 : findDVRDeclareValues;
1071
1072 TinyPtrVector<DbgVariableRecord *> Records = FindFunc(Def);
1073
1074 if (!F.getSubprogram())
1075 return Records;
1076
1077 Value *CurDef = Def;
1078 while (Records.empty() && isa<LoadInst>(Val: CurDef)) {
1079 auto *LdInst = cast<LoadInst>(Val: CurDef);
1080 if (!LdInst->getType()->isPointerTy())
1081 break;
1082 CurDef = LdInst->getPointerOperand();
1083 if (!isa<AllocaInst, LoadInst>(Val: CurDef))
1084 break;
1085 Records = FindFunc(CurDef);
1086 }
1087
1088 return Records;
1089}
1090
1091// Replace all alloca and SSA values that are accessed across suspend points
1092// with GetElementPointer from coroutine frame + loads and stores. Create an
1093// AllocaSpillBB that will become the new entry block for the resume parts of
1094// the coroutine:
1095//
1096// %hdl = coro.begin(...)
1097// whatever
1098//
1099// becomes:
1100//
1101// %hdl = coro.begin(...)
1102// br label %AllocaSpillBB
1103//
1104// AllocaSpillBB:
1105// ; geps corresponding to allocas that were moved to coroutine frame
1106// br label PostSpill
1107//
1108// PostSpill:
1109// whatever
1110//
1111//
1112static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
1113 LLVMContext &C = Shape.CoroBegin->getContext();
1114 Function *F = Shape.CoroBegin->getFunction();
1115 IRBuilder<> Builder(C);
1116 StructType *FrameTy = Shape.FrameTy;
1117 DominatorTree DT(*F);
1118 SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
1119
1120 MDBuilder MDB(C);
1121 // Create a TBAA tag for accesses to certain coroutine frame slots, so that
1122 // subsequent alias analysis will understand they do not intersect with
1123 // user memory.
1124 // We do this only if a suitable TBAA root already exists in the module.
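  // For example, for a function `f`, frame-slot accesses get a TBAA tag whose
  // scalar type is "f.Frame Slot", parented under the existing
  // "Simple C++ TBAA" root, so they are considered distinct from accesses to
  // ordinary source-level types.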
1125 MDNode *TBAATag = nullptr;
1126 if (auto *CppTBAAStr = MDString::getIfExists(Context&: C, Str: "Simple C++ TBAA")) {
1127 auto *TBAARoot = MDNode::getIfExists(Context&: C, MDs: CppTBAAStr);
1128 // Create a "fake" scalar type; all other types defined in the source
1129 // language will be assumed non-aliasing with this type.
1130 MDNode *Scalar = MDB.createTBAAScalarTypeNode(
1131 Name: (F->getName() + ".Frame Slot").str(), Parent: TBAARoot);
1132 TBAATag = MDB.createTBAAStructTagNode(BaseType: Scalar, AccessType: Scalar, Offset: 0);
1133 }
1134 for (auto const &E : FrameData.Spills) {
1135 Value *Def = E.first;
1136 Type *ByValTy = extractByvalIfArgument(MaybeArgument: Def);
1137
1138 Builder.SetInsertPoint(coro::getSpillInsertionPt(Shape, Def, DT));
1139 createStoreIntoFrame(Builder, Def, ByValTy, Shape, FrameData);
1140
1141 BasicBlock *CurrentBlock = nullptr;
1142 Value *CurrentReload = nullptr;
1143 for (auto *U : E.second) {
      // If we have not seen this use block before, create a load instruction
      // to reload the spilled value from the coroutine frame. For byval
      // arguments, the frame GEP itself serves as the reloaded value.
1147 if (CurrentBlock != U->getParent()) {
1148 CurrentBlock = U->getParent();
1149 Builder.SetInsertPoint(TheBB: CurrentBlock,
1150 IP: CurrentBlock->getFirstInsertionPt());
1151
1152 auto *GEP = createGEPToFramePointer(FrameData, Builder, Shape, Orig: E.first);
1153 GEP->setName(E.first->getName() + Twine(".reload.addr"));
1154 if (ByValTy) {
1155 CurrentReload = GEP;
1156 } else {
1157 auto SpillAlignment = Align(FrameData.getAlign(V: Def));
1158 auto *LI = Builder.CreateAlignedLoad(
1159 Ty: FrameTy->getElementType(N: FrameData.getFieldIndex(V: E.first)), Ptr: GEP,
1160 Align: SpillAlignment, Name: E.first->getName() + Twine(".reload"));
1161 if (TBAATag)
1162 LI->setMetadata(KindID: LLVMContext::MD_tbaa, Node: TBAATag);
1163 CurrentReload = LI;
1164 }
1165
1166 TinyPtrVector<DbgVariableRecord *> DVRs = findDbgRecordsThroughLoads<
1167 DbgVariableRecord::LocationType::Declare>(F&: *F, Def);
1168
1169 auto SalvageOne = [&](DbgVariableRecord *DDI) {
1170 // This dbg.declare is preserved for all coro-split function
1171 // fragments. It will be unreachable in the main function, and
1172 // processed by coro::salvageDebugInfo() by the Cloner.
1173 DbgVariableRecord *NewDVR = new DbgVariableRecord(
1174 ValueAsMetadata::get(V: CurrentReload), DDI->getVariable(),
1175 DDI->getExpression(), DDI->getDebugLoc(),
1176 DbgVariableRecord::LocationType::Declare);
1177 Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
1178 DR: NewDVR, Here: Builder.GetInsertPoint());
1179 // This dbg.declare is for the main function entry point. It
1180 // will be deleted in all coro-split functions.
1181 coro::salvageDebugInfo(ArgToAllocaMap, DVR&: *DDI, UseEntryValue: false /*UseEntryValue*/);
1182 };
1183 for_each(Range&: DVRs, F: SalvageOne);
1184 }
1185
1186 TinyPtrVector<DbgVariableRecord *> DVRDeclareValues =
1187 findDbgRecordsThroughLoads<
1188 DbgVariableRecord::LocationType::DeclareValue>(F&: *F, Def);
1189
1190 auto SalvageOneCoro = [&](auto *DDI) {
1191 // This dbg.declare_value is preserved for all coro-split function
1192 // fragments. It will be unreachable in the main function, and
1193 // processed by coro::salvageDebugInfo() by the Cloner. However, convert
1194 // it to a dbg.declare to make sure future passes don't have to deal
1195 // with a dbg.declare_value.
1196 auto *VAM = ValueAsMetadata::get(V: CurrentReload);
1197 Type *Ty = VAM->getValue()->getType();
1198 // If the metadata type is not a pointer, emit a dbg.value instead.
1199 DbgVariableRecord *NewDVR = new DbgVariableRecord(
1200 ValueAsMetadata::get(V: CurrentReload), DDI->getVariable(),
1201 DDI->getExpression(), DDI->getDebugLoc(),
1202 Ty->isPointerTy() ? DbgVariableRecord::LocationType::Declare
1203 : DbgVariableRecord::LocationType::Value);
1204 Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
1205 DR: NewDVR, Here: Builder.GetInsertPoint());
1206 // This dbg.declare_value is for the main function entry point. It
1207 // will be deleted in all coro-split functions.
1208 coro::salvageDebugInfo(ArgToAllocaMap, DVR&: *DDI, UseEntryValue: false /*UseEntryValue*/);
1209 };
1210 for_each(Range&: DVRDeclareValues, F: SalvageOneCoro);
1211
      // If we have a single-edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi-edge
      // PHINodes by normalizing them in the rewritePHIs function.)
1215 if (auto *PN = dyn_cast<PHINode>(Val: U)) {
1216 assert(PN->getNumIncomingValues() == 1 &&
1217 "unexpected number of incoming "
1218 "values in the PHINode");
1219 PN->replaceAllUsesWith(V: CurrentReload);
1220 PN->eraseFromParent();
1221 continue;
1222 }
1223
1224 // Replace all uses of CurrentValue in the current instruction with
1225 // reload.
1226 U->replaceUsesOfWith(From: Def, To: CurrentReload);
1227 // Instructions are added to Def's user list if the attached
1228 // debug records use Def. Update those now.
1229 for (DbgVariableRecord &DVR : filterDbgVars(R: U->getDbgRecordRange()))
1230 DVR.replaceVariableLocationOp(OldValue: Def, NewValue: CurrentReload, AllowEmpty: true);
1231 }
1232 }
1233
1234 BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();
1235
1236 auto SpillBlock = FramePtrBB->splitBasicBlock(
1237 I: Shape.getInsertPtAfterFramePtr(), BBName: "AllocaSpillBB");
1238 SpillBlock->splitBasicBlock(I: &SpillBlock->front(), BBName: "PostSpill");
1239 Shape.AllocaSpillBlock = SpillBlock;
1240
1241 // retcon and retcon.once lowering assumes all uses have been sunk.
1242 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1243 Shape.ABI == coro::ABI::Async) {
1244 // If we found any allocas, replace all of their remaining uses with Geps.
1245 Builder.SetInsertPoint(TheBB: SpillBlock, IP: SpillBlock->begin());
1246 for (const auto &P : FrameData.Allocas) {
1247 AllocaInst *Alloca = P.Alloca;
1248 auto *G = createGEPToFramePointer(FrameData, Builder, Shape, Orig: Alloca);
1249
1250 // Remove any lifetime intrinsics, now that these are no longer allocas.
1251 for (User *U : make_early_inc_range(Range: Alloca->users())) {
1252 auto *I = cast<Instruction>(Val: U);
1253 if (I->isLifetimeStartOrEnd())
1254 I->eraseFromParent();
1255 }
1256
1257 // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1258 // here, as we are changing location of the instruction.
1259 G->takeName(V: Alloca);
1260 Alloca->replaceAllUsesWith(V: G);
1261 Alloca->eraseFromParent();
1262 }
1263 return;
1264 }
1265
  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To retain debuggability, we replace the uses of allocas in
  // dbg.declares and dbg.values with the reload from the frame.
  // Note: we cannot replace the allocas with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
1271 Builder.SetInsertPoint(TheBB: Shape.AllocaSpillBlock,
1272 IP: Shape.AllocaSpillBlock->begin());
1273 SmallVector<Instruction *, 4> UsersToUpdate;
1274 for (const auto &A : FrameData.Allocas) {
1275 AllocaInst *Alloca = A.Alloca;
1276 UsersToUpdate.clear();
1277 for (User *U : make_early_inc_range(Range: Alloca->users())) {
1278 auto *I = cast<Instruction>(Val: U);
      // It is meaningless to retain lifetime intrinsics that refer to members
      // of the coroutine frame, and keeping such lifetime intrinsics may block
      // further optimizations.
1282 if (I->isLifetimeStartOrEnd())
1283 I->eraseFromParent();
1284 else if (DT.dominates(Def: Shape.CoroBegin, User: I))
1285 UsersToUpdate.push_back(Elt: I);
1286 }
1287
1288 if (UsersToUpdate.empty())
1289 continue;
1290 auto *G = createGEPToFramePointer(FrameData, Builder, Shape, Orig: Alloca);
1291 G->setName(Alloca->getName() + Twine(".reload.addr"));
1292
1293 SmallVector<DbgVariableRecord *> DbgVariableRecords;
1294 findDbgUsers(V: Alloca, DbgVariableRecords);
1295 for (auto *DVR : DbgVariableRecords)
1296 DVR->replaceVariableLocationOp(OldValue: Alloca, NewValue: G);
1297
1298 for (Instruction *I : UsersToUpdate)
1299 I->replaceUsesOfWith(From: Alloca, To: G);
1300 }
1301 Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
1302 for (const auto &A : FrameData.Allocas) {
1303 AllocaInst *Alloca = A.Alloca;
1304 if (A.MayWriteBeforeCoroBegin) {
1305 // isEscaped really means potentially modified before CoroBegin.
1306 if (Alloca->isArrayAllocation())
1307 report_fatal_error(
1308 reason: "Coroutines cannot handle copying of array allocas yet");
1309
1310 auto *G = createGEPToFramePointer(FrameData, Builder, Shape, Orig: Alloca);
1311 auto *Value = Builder.CreateLoad(Ty: Alloca->getAllocatedType(), Ptr: Alloca);
1312 Builder.CreateStore(Val: Value, Ptr: G);
1313 }
1314 // For each alias to Alloca created before CoroBegin but used after
1315 // CoroBegin, we recreate them after CoroBegin by applying the offset
1316 // to the pointer in the frame.
1317 for (const auto &Alias : A.Aliases) {
1318 auto *FramePtr =
1319 createGEPToFramePointer(FrameData, Builder, Shape, Orig: Alloca);
1320 auto &Value = *Alias.second;
1321 auto ITy = IntegerType::get(C, NumBits: Value.getBitWidth());
1322 auto *AliasPtr =
1323 Builder.CreatePtrAdd(Ptr: FramePtr, Offset: ConstantInt::get(Ty: ITy, V: Value));
1324 Alias.first->replaceUsesWithIf(
1325 New: AliasPtr, ShouldReplace: [&](Use &U) { return DT.dominates(Def: Shape.CoroBegin, U); });
1326 }
1327 }
1328
  // PromiseAlloca is not collected in FrameData.Allocas, so the code above
  // doesn't handle the case where the PromiseAlloca is written before
  // CoroBegin. That may be problematic in edge cases. See
  // https://github.com/llvm/llvm-project/issues/57861 for an example.
1333 if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca) {
1334 AllocaInst *PA = Shape.SwitchLowering.PromiseAlloca;
    // Check whether there is any memory access to the promise alloca before
    // CoroBegin.
1336 bool HasAccessingPromiseBeforeCB = llvm::any_of(Range: PA->uses(), P: [&](Use &U) {
1337 auto *Inst = dyn_cast<Instruction>(Val: U.getUser());
1338 if (!Inst || DT.dominates(Def: Shape.CoroBegin, User: Inst))
1339 return false;
1340
1341 if (auto *CI = dyn_cast<CallInst>(Val: Inst)) {
1342 // It is fine if the call wouldn't write to the Promise.
1343 // This is possible for @llvm.coro.id intrinsics, which
1344 // would take the promise as the second argument as a
1345 // marker.
1346 if (CI->onlyReadsMemory() ||
1347 CI->onlyReadsMemory(OpNo: CI->getArgOperandNo(U: &U)))
1348 return false;
1349 return true;
1350 }
1351
    return isa<StoreInst>(Inst) ||
           // It may take too much time to track all the uses.
           // Be conservative in case the use may escape.
           isa<GetElementPtrInst>(Inst) ||
           // Before opaque pointers were enabled there would always be a
           // bitcast of the promise alloca; now that opaque pointers are
           // enabled by default, this should be fine to keep.
           isa<BitCastInst>(Inst);
1361 });
1362 if (HasAccessingPromiseBeforeCB) {
1363 Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
1364 auto *G = createGEPToFramePointer(FrameData, Builder, Shape, Orig: PA);
1365 auto *Value = Builder.CreateLoad(Ty: PA->getAllocatedType(), Ptr: PA);
1366 Builder.CreateStore(Val: Value, Ptr: G);
1367 }
1368 }
1369}
1370
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
1373static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1374 BasicBlock *InsertedBB,
1375 BasicBlock *PredBB,
1376 PHINode *UntilPHI = nullptr) {
1377 auto *PN = cast<PHINode>(Val: &SuccBB->front());
1378 do {
1379 int Index = PN->getBasicBlockIndex(BB: InsertedBB);
1380 Value *V = PN->getIncomingValue(i: Index);
1381 PHINode *InputV = PHINode::Create(
1382 Ty: V->getType(), NumReservedValues: 1, NameStr: V->getName() + Twine(".") + SuccBB->getName());
1383 InputV->insertBefore(InsertPos: InsertedBB->begin());
1384 InputV->addIncoming(V, BB: PredBB);
1385 PN->setIncomingValue(i: Index, V: InputV);
1386 PN = dyn_cast<PHINode>(Val: PN->getNextNode());
1387 } while (PN != UntilPHI);
1388}
1389
1390// Rewrites the PHI Nodes in a cleanuppad.
1391static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1392 CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
1397 //
1398 // cleanuppad:
1399 // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1400 // %3 = cleanuppad within none []
1401 //
1402 // It will create:
1403 //
1404 // cleanuppad.corodispatch
1405 // %2 = phi i8[0, %catchswitch], [1, %catch.1]
1406 // %3 = cleanuppad within none []
  //     switch i8 %2, label %unreachable
1408 // [i8 0, label %cleanuppad.from.catchswitch
1409 // i8 1, label %cleanuppad.from.catch.1]
1410 // cleanuppad.from.catchswitch:
1411 // %4 = phi i32 [%0, %catchswitch]
1412 // br %label cleanuppad
1413 // cleanuppad.from.catch.1:
1414 // %6 = phi i32 [%1, %catch.1]
1415 // br label %cleanuppad
1416 // cleanuppad:
1417 // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1418 // [%6, %cleanuppad.from.catch.1]
1419
1420 // Unreachable BB, in case switching on an invalid value in the dispatcher.
1421 auto *UnreachBB = BasicBlock::Create(
1422 Context&: CleanupPadBB->getContext(), Name: "unreachable", Parent: CleanupPadBB->getParent());
1423 IRBuilder<> Builder(UnreachBB);
1424 Builder.CreateUnreachable();
1425
1426 // Create a new cleanuppad which will be the dispatcher.
1427 auto *NewCleanupPadBB =
1428 BasicBlock::Create(Context&: CleanupPadBB->getContext(),
1429 Name: CleanupPadBB->getName() + Twine(".corodispatch"),
1430 Parent: CleanupPadBB->getParent(), InsertBefore: CleanupPadBB);
1431 Builder.SetInsertPoint(NewCleanupPadBB);
1432 auto *SwitchType = Builder.getInt8Ty();
1433 auto *SetDispatchValuePN =
1434 Builder.CreatePHI(Ty: SwitchType, NumReservedValues: pred_size(BB: CleanupPadBB));
1435 CleanupPad->removeFromParent();
1436 CleanupPad->insertAfter(InsertPos: SetDispatchValuePN->getIterator());
1437 auto *SwitchOnDispatch = Builder.CreateSwitch(V: SetDispatchValuePN, Dest: UnreachBB,
1438 NumCases: pred_size(BB: CleanupPadBB));
1439
1440 int SwitchIndex = 0;
1441 SmallVector<BasicBlock *, 8> Preds(predecessors(BB: CleanupPadBB));
1442 for (BasicBlock *Pred : Preds) {
1443 // Create a new cleanuppad and move the PHI values to there.
1444 auto *CaseBB = BasicBlock::Create(Context&: CleanupPadBB->getContext(),
1445 Name: CleanupPadBB->getName() +
1446 Twine(".from.") + Pred->getName(),
1447 Parent: CleanupPadBB->getParent(), InsertBefore: CleanupPadBB);
1448 updatePhiNodes(DestBB: CleanupPadBB, OldPred: Pred, NewPred: CaseBB);
1449 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1450 Pred->getName());
1451 Builder.SetInsertPoint(CaseBB);
1452 Builder.CreateBr(Dest: CleanupPadBB);
1453 movePHIValuesToInsertedBlock(SuccBB: CleanupPadBB, InsertedBB: CaseBB, PredBB: NewCleanupPadBB);
1454
1455 // Update this Pred to the new unwind point.
1456 setUnwindEdgeTo(TI: Pred->getTerminator(), Succ: NewCleanupPadBB);
1457
1458 // Setup the switch in the dispatcher.
1459 auto *SwitchConstant = ConstantInt::get(Ty: SwitchType, V: SwitchIndex);
1460 SetDispatchValuePN->addIncoming(V: SwitchConstant, BB: Pred);
1461 SwitchOnDispatch->addCase(OnVal: SwitchConstant, Dest: CaseBB);
1462 SwitchIndex++;
1463 }
1464}
1465
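// Replace every use of a leading single-incoming-value PHI with its incoming
// value. For example (illustrative), in a block with a single predecessor:
//
//   bb:
//     %v = phi i32 [ %x, %pred ]
//
// all uses of %v are rewritten to use %x directly, so later spilling logic
// does not need to reason about such trivial PHIs.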
1466static void cleanupSinglePredPHIs(Function &F) {
1467 SmallVector<PHINode *, 32> Worklist;
1468 for (auto &BB : F) {
1469 for (auto &Phi : BB.phis()) {
1470 if (Phi.getNumIncomingValues() == 1) {
1471 Worklist.push_back(Elt: &Phi);
1472 } else
1473 break;
1474 }
1475 }
1476 while (!Worklist.empty()) {
1477 auto *Phi = Worklist.pop_back_val();
1478 auto *OriginalValue = Phi->getIncomingValue(i: 0);
1479 Phi->replaceAllUsesWith(V: OriginalValue);
1480 }
1481}
1482
1483static void rewritePHIs(BasicBlock &BB) {
1484 // For every incoming edge we will create a block holding all
1485 // incoming values in single-value PHI nodes.
1486 //
1487 // loop:
1488 // %n.val = phi i32[%n, %entry], [%inc, %loop]
1489 //
1490 // It will create:
1491 //
1492 // loop.from.entry:
1493 // %n.loop.pre = phi i32 [%n, %entry]
1494 // br label %loop
1495 // loop.from.loop:
1496 // %inc.loop.pre = phi i32 [%inc, %loop]
1497 // br label %loop
1498 //
1499 // After this rewrite, further analysis will ignore any phi nodes with more
1500 // than one incoming edge.
1501
1502 // TODO: Simplify PHINodes in the basic block to remove duplicate
1503 // predecessors.
1504
1505 // Special case for CleanupPad: all EH blocks must have the same unwind edge
1506 // so we need to create an additional "dispatcher" block.
1507 if (!BB.empty()) {
1508 if (auto *CleanupPad =
1509 dyn_cast_or_null<CleanupPadInst>(Val: BB.getFirstNonPHIIt())) {
1510 SmallVector<BasicBlock *, 8> Preds(predecessors(BB: &BB));
1511 for (BasicBlock *Pred : Preds) {
1512 if (CatchSwitchInst *CS =
1513 dyn_cast<CatchSwitchInst>(Val: Pred->getTerminator())) {
1514 // CleanupPad with a CatchSwitch predecessor: therefore this is an
1515 // unwind destination that needs to be handled specially.
1516 assert(CS->getUnwindDest() == &BB);
1517 (void)CS;
1518 rewritePHIsForCleanupPad(CleanupPadBB: &BB, CleanupPad);
1519 return;
1520 }
1521 }
1522 }
1523 }
1524
1525 LandingPadInst *LandingPad = nullptr;
1526 PHINode *ReplPHI = nullptr;
1527 if (!BB.empty()) {
1528 if ((LandingPad =
1529 dyn_cast_or_null<LandingPadInst>(Val: BB.getFirstNonPHIIt()))) {
1530 // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1531 // We replace the original landing pad with a PHINode that will collect the
1532 // results from all of them.
1533 ReplPHI = PHINode::Create(Ty: LandingPad->getType(), NumReservedValues: 1, NameStr: "");
1534 ReplPHI->insertBefore(InsertPos: LandingPad->getIterator());
1535 ReplPHI->takeName(V: LandingPad);
1536 LandingPad->replaceAllUsesWith(V: ReplPHI);
1537 // We will erase the original landing pad at the end of this function after
1538 // ehAwareSplitEdge cloned it in the transition blocks.
1539 }
1540 }
1541
1542 SmallVector<BasicBlock *, 8> Preds(predecessors(BB: &BB));
1543 for (BasicBlock *Pred : Preds) {
1544 auto *IncomingBB = ehAwareSplitEdge(BB: Pred, Succ: &BB, OriginalPad: LandingPad, LandingPadReplacement: ReplPHI);
1545 IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1546
1547 // Stop the moving of values at ReplPHI, as this is either null or the PHI
1548 // that replaced the landing pad.
1549 movePHIValuesToInsertedBlock(SuccBB: &BB, InsertedBB: IncomingBB, PredBB: Pred, UntilPHI: ReplPHI);
1550 }
1551
1552 if (LandingPad) {
1553 // The calls to ehAwareSplitEdge above cloned the original landing pad;
1554 // it is no longer needed.
1555 LandingPad->eraseFromParent();
1556 }
1557}
1558
1559static void rewritePHIs(Function &F) {
1560 SmallVector<BasicBlock *, 8> WorkList;
1561
1562 for (BasicBlock &BB : F)
1563 if (auto *PN = dyn_cast<PHINode>(Val: &BB.front()))
1564 if (PN->getNumIncomingValues() > 1)
1565 WorkList.push_back(Elt: &BB);
1566
1567 for (BasicBlock *BB : WorkList)
1568 rewritePHIs(BB&: *BB);
1569}
1570
1571// Splits the block at a particular instruction unless it is the first
1572// instruction in the block with a single predecessor.
1573static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
1574 auto *BB = I->getParent();
1575 if (&BB->front() == I) {
1576 if (BB->getSinglePredecessor()) {
1577 BB->setName(Name);
1578 return BB;
1579 }
1580 }
1581 return BB->splitBasicBlock(I, BBName: Name);
1582}
1583
1584// Split above and below a particular instruction so that it
1585 // ends up alone in its own block.
1586static void splitAround(Instruction *I, const Twine &Name) {
1587 splitBlockIfNotFirst(I, Name);
1588 splitBlockIfNotFirst(I: I->getNextNode(), Name: "After" + Name);
1589}
1590
1591/// After we split the coroutine, will the given basic block be along
1592/// an obvious exit path for the resumption function?
1593static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
1594 unsigned depth = 3) {
1595 // If we've bottomed out our depth count, stop searching and assume
1596 // that the path might loop back.
1597 if (depth == 0) return false;
1598
1599 // If this is a suspend block, we're about to exit the resumption function.
1600 if (coro::isSuspendBlock(BB))
1601 return true;
1602
1603 // Recurse into the successors.
1604 for (auto *Succ : successors(BB)) {
1605 if (!willLeaveFunctionImmediatelyAfter(BB: Succ, depth: depth - 1))
1606 return false;
1607 }
1608
1609 // If none of the successors leads back in a loop, we're on an exit/abort.
1610 return true;
1611}
1612
1613static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
1614 // Look for a free that is not obviously followed by either a suspend
1615 // or a termination, i.e. by something that will leave the resumption
1616 // function's frame anyway.
1617 for (auto *U : AI->users()) {
1618 auto FI = dyn_cast<CoroAllocaFreeInst>(Val: U);
1619 if (!FI) continue;
1620
1621 if (!willLeaveFunctionImmediatelyAfter(BB: FI->getParent()))
1622 return true;
1623 }
1624
1625 // If we never found one, we don't need a stack save.
1626 return false;
1627}
1628
1629/// Turn each of the given local allocas into a normal (dynamic) alloca
1630/// instruction.
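/// A rough sketch of the rewrite (operand values are illustrative):
///
///   %t = call token @llvm.coro.alloca.alloc(i64 %size, i32 8)
///   %p = call ptr @llvm.coro.alloca.get(token %t)
///   ...
///   call void @llvm.coro.alloca.free(token %t)
///
/// becomes
///
///   %save = call ptr @llvm.stacksave()       ; only when a save is needed
///   %p = alloca i8, i64 %size, align 8
///   ...
///   call void @llvm.stackrestore(ptr %save)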
1631static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
1632 SmallVectorImpl<Instruction*> &DeadInsts) {
1633 for (auto *AI : LocalAllocas) {
1634 IRBuilder<> Builder(AI);
1635
1636 // Save the stack depth. Try to avoid doing this if the stackrestore
1637 // is going to immediately precede a return or a suspend anyway.
1638 Value *StackSave = nullptr;
1639 if (localAllocaNeedsStackSave(AI))
1640 StackSave = Builder.CreateStackSave();
1641
1642 // Allocate memory.
1643 auto Alloca = Builder.CreateAlloca(Ty: Builder.getInt8Ty(), ArraySize: AI->getSize());
1644 Alloca->setAlignment(AI->getAlignment());
1645
1646 for (auto *U : AI->users()) {
1647 // Replace gets with the allocation.
1648 if (isa<CoroAllocaGetInst>(Val: U)) {
1649 U->replaceAllUsesWith(V: Alloca);
1650
1651 // Replace frees with stackrestores. This is safe because
1652 // alloca.alloc is required to obey a stack discipline, although we
1653 // don't enforce that structurally.
1654 } else {
1655 auto FI = cast<CoroAllocaFreeInst>(Val: U);
1656 if (StackSave) {
1657 Builder.SetInsertPoint(FI);
1658 Builder.CreateStackRestore(Ptr: StackSave);
1659 }
1660 }
1661 DeadInsts.push_back(Elt: cast<Instruction>(Val: U));
1662 }
1663
1664 DeadInsts.push_back(Elt: AI);
1665 }
1666}
1667
1668/// Get the current swifterror value.
1669static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
1670 coro::Shape &Shape) {
1671 // Make a fake function pointer as a sort of intrinsic.
1672 auto FnTy = FunctionType::get(Result: ValueTy, Params: {}, isVarArg: false);
1673 auto Fn = ConstantPointerNull::get(T: Builder.getPtrTy());
1674
1675 auto Call = Builder.CreateCall(FTy: FnTy, Callee: Fn, Args: {});
1676 Shape.SwiftErrorOps.push_back(Elt: Call);
1677
1678 return Call;
1679}
1680
1681/// Set the given value as the current swifterror value.
1682///
1683/// Returns a slot that can be used as a swifterror slot.
1684static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
1685 coro::Shape &Shape) {
1686 // Make a fake function pointer as a sort of intrinsic.
1687 auto FnTy = FunctionType::get(Result: Builder.getPtrTy(),
1688 Params: {V->getType()}, isVarArg: false);
1689 auto Fn = ConstantPointerNull::get(T: Builder.getPtrTy());
1690
1691 auto Call = Builder.CreateCall(FTy: FnTy, Callee: Fn, Args: { V });
1692 Shape.SwiftErrorOps.push_back(Elt: Call);
1693
1694 return Call;
1695}
1696
1697/// Set the swifterror value from the given alloca before a call,
1698 /// then put it back in the alloca afterwards.
1699///
1700/// Returns an address that will stand in for the swifterror slot
1701/// until splitting.
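/// Conceptually (simplified; the set/get helpers below emit placeholder calls
/// through null function pointers that are recognized and lowered during
/// splitting):
///
///   %v = load ptr, ptr %alloca
///   %slot = "set swifterror"(%v)
///   call ...                       ; %slot passed as the swifterror argument
///   %v2 = "get swifterror"()
///   store ptr %v2, ptr %alloca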
1702static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
1703 AllocaInst *Alloca,
1704 coro::Shape &Shape) {
1705 auto ValueTy = Alloca->getAllocatedType();
1706 IRBuilder<> Builder(Call);
1707
1708 // Load the current value from the alloca and set it as the
1709 // swifterror value.
1710 auto ValueBeforeCall = Builder.CreateLoad(Ty: ValueTy, Ptr: Alloca);
1711 auto Addr = emitSetSwiftErrorValue(Builder, V: ValueBeforeCall, Shape);
1712
1713 // Move to after the call. Since swifterror only has a guaranteed
1714 // value on normal exits, we can ignore implicit and explicit unwind
1715 // edges.
1716 if (isa<CallInst>(Val: Call)) {
1717 Builder.SetInsertPoint(Call->getNextNode());
1718 } else {
1719 auto Invoke = cast<InvokeInst>(Val: Call);
1720 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
1721 }
1722
1723 // Get the current swifterror value and store it to the alloca.
1724 auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
1725 Builder.CreateStore(Val: ValueAfterCall, Ptr: Alloca);
1726
1727 return Addr;
1728}
1729
1730/// Eliminate a formerly-swifterror alloca by inserting the get/set
1731/// intrinsics and attempting to MemToReg the alloca away.
1732static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
1733 coro::Shape &Shape) {
1734 for (Use &Use : llvm::make_early_inc_range(Range: Alloca->uses())) {
1735 // swifterror values can only be used in very specific ways.
1736 // We take advantage of that here.
1737 auto User = Use.getUser();
1738 if (isa<LoadInst>(Val: User) || isa<StoreInst>(Val: User))
1739 continue;
1740
1741 assert(isa<CallInst>(User) || isa<InvokeInst>(User));
1742 auto Call = cast<Instruction>(Val: User);
1743
1744 auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
1745
1746 // Use the returned slot address as the call argument.
1747 Use.set(Addr);
1748 }
1749
1750 // All the uses should be loads and stores now.
1751 assert(isAllocaPromotable(Alloca));
1752}
1753
1754/// "Eliminate" a swifterror argument by reducing it to the alloca case
1755/// and then loading and storing in the prologue and epilog.
1756///
1757/// The argument keeps the swifterror flag.
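/// In rough pseudo-IR (illustrative names), for a swifterror argument %err
/// this creates in the entry block:
///
///   %err.alloca = alloca ptr
///   store ptr null, ptr %err.alloca     ; swifterror is always null on entry
///
/// All uses of %err are redirected to %err.alloca, each suspend gets a
/// set/get of the value placed around it, and each coro.end restores the
/// value from the alloca.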
1758static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
1759 coro::Shape &Shape,
1760 SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
1761 IRBuilder<> Builder(&F.getEntryBlock(),
1762 F.getEntryBlock().getFirstNonPHIOrDbg());
1763
1764 auto ArgTy = cast<PointerType>(Val: Arg.getType());
1765 auto ValueTy = PointerType::getUnqual(C&: F.getContext());
1766
1767 // Reduce to the alloca case:
1768
1769 // Create an alloca and replace all uses of the arg with it.
1770 auto Alloca = Builder.CreateAlloca(Ty: ValueTy, AddrSpace: ArgTy->getAddressSpace());
1771 Arg.replaceAllUsesWith(V: Alloca);
1772
1773 // Set an initial value in the alloca. swifterror is always null on entry.
1774 auto InitialValue = Constant::getNullValue(Ty: ValueTy);
1775 Builder.CreateStore(Val: InitialValue, Ptr: Alloca);
1776
1777 // Find all the suspends in the function and save and restore around them.
1778 for (auto *Suspend : Shape.CoroSuspends) {
1779 (void) emitSetAndGetSwiftErrorValueAround(Call: Suspend, Alloca, Shape);
1780 }
1781
1782 // Find all the coro.ends in the function and restore the error value.
1783 for (auto *End : Shape.CoroEnds) {
1784 Builder.SetInsertPoint(End);
1785 auto FinalValue = Builder.CreateLoad(Ty: ValueTy, Ptr: Alloca);
1786 (void) emitSetSwiftErrorValue(Builder, V: FinalValue, Shape);
1787 }
1788
1789 // Now we can use the alloca logic.
1790 AllocasToPromote.push_back(Elt: Alloca);
1791 eliminateSwiftErrorAlloca(F, Alloca, Shape);
1792}
1793
1794/// Eliminate all problematic uses of swifterror arguments and allocas
1795/// from the function. We'll fix them up later when splitting the function.
1796static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
1797 SmallVector<AllocaInst*, 4> AllocasToPromote;
1798
1799 // Look for a swifterror argument.
1800 for (auto &Arg : F.args()) {
1801 if (!Arg.hasSwiftErrorAttr()) continue;
1802
1803 eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
1804 break;
1805 }
1806
1807 // Look for swifterror allocas.
1808 for (auto &Inst : F.getEntryBlock()) {
1809 auto Alloca = dyn_cast<AllocaInst>(Val: &Inst);
1810 if (!Alloca || !Alloca->isSwiftError()) continue;
1811
1812 // Clear the swifterror flag.
1813 Alloca->setSwiftError(false);
1814
1815 AllocasToPromote.push_back(Elt: Alloca);
1816 eliminateSwiftErrorAlloca(F, Alloca, Shape);
1817 }
1818
1819 // If we have any allocas to promote, compute a dominator tree and
1820 // promote them en masse.
1821 if (!AllocasToPromote.empty()) {
1822 DominatorTree DT(F);
1823 PromoteMemToReg(Allocas: AllocasToPromote, DT);
1824 }
1825}
1826
1827 /// For each alloca whose uses are all dominated by a single candidate block
1828 /// (the entry block or the block right after a suspend) and do not cross a
1829 /// suspend point, sink its lifetime.start marker into that block. This
1830 /// shrinks the variable's lifetime and hence the data we put on the frame.
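/// For example (illustrative):
///
///   entry:
///     %x = alloca i32
///     call void @llvm.lifetime.start.p0(i64 4, ptr %x)   ; sunk from here...
///     ...
///   await.ready:                                 ; block after a suspend
///     ; ...to here, because every use of %x is dominated by this block
///     store i32 0, ptr %x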
1831static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
1832 SuspendCrossingInfo &Checker,
1833 const DominatorTree &DT) {
1834 if (F.hasOptNone())
1835 return;
1836
1837 // Collect all possible basic blocks which may dominate all uses of allocas.
1838 SmallPtrSet<BasicBlock *, 4> DomSet;
1839 DomSet.insert(Ptr: &F.getEntryBlock());
1840 for (auto *CSI : Shape.CoroSuspends) {
1841 BasicBlock *SuspendBlock = CSI->getParent();
1842 assert(coro::isSuspendBlock(SuspendBlock) &&
1843 SuspendBlock->getSingleSuccessor() &&
1844 "should have split coro.suspend into its own block");
1845 DomSet.insert(Ptr: SuspendBlock->getSingleSuccessor());
1846 }
1847
1848 for (Instruction &I : instructions(F)) {
1849 AllocaInst* AI = dyn_cast<AllocaInst>(Val: &I);
1850 if (!AI)
1851 continue;
1852
1853 for (BasicBlock *DomBB : DomSet) {
1854 bool Valid = true;
1855 SmallVector<Instruction *, 1> Lifetimes;
1856
1857 auto isLifetimeStart = [](Instruction* I) {
1858 if (auto* II = dyn_cast<IntrinsicInst>(Val: I))
1859 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1860 return false;
1861 };
1862
1863 auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
1864 if (isLifetimeStart(U)) {
1865 Lifetimes.push_back(Elt: U);
1866 return true;
1867 }
1868 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
1869 return false;
1870 if (isLifetimeStart(U->user_back())) {
1871 Lifetimes.push_back(Elt: U->user_back());
1872 return true;
1873 }
1874 return false;
1875 };
1876
1877 for (User *U : AI->users()) {
1878 Instruction *UI = cast<Instruction>(Val: U);
1879 // For all users except lifetime.start markers (and the GEPs or
1880 // bitcasts feeding them), check that they are dominated by the
1881 // candidate block and not separated from it by a suspend point;
1882 // otherwise this block is not a valid place to sink the marker.
1883 if (!DT.dominates(A: DomBB, B: UI->getParent()) ||
1884 Checker.isDefinitionAcrossSuspend(DefBB: DomBB, U: UI)) {
1885 // Skip lifetime.start, GEP and bitcast used by lifetime.start
1886 // markers.
1887 if (collectLifetimeStart(UI, AI))
1888 continue;
1889 Valid = false;
1890 break;
1891 }
1892 }
1893 // Sink the lifetime.start marker into the dominating candidate block
1894 // when all other uses of the alloca are confined to its region.
1895 if (Valid && Lifetimes.size() != 0) {
1896 auto *NewLifetime = Lifetimes[0]->clone();
1897 NewLifetime->replaceUsesOfWith(From: NewLifetime->getOperand(i: 0), To: AI);
1898 NewLifetime->insertBefore(InsertPos: DomBB->getTerminator()->getIterator());
1899
1900 // The original lifetime.start markers outside that block are no longer necessary.
1901 for (Instruction *S : Lifetimes)
1902 S->eraseFromParent();
1903
1904 break;
1905 }
1906 }
1907 }
1908}
1909
1910static std::optional<std::pair<Value &, DIExpression &>>
1911salvageDebugInfoImpl(SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
1912 bool UseEntryValue, Function *F, Value *Storage,
1913 DIExpression *Expr, bool SkipOutermostLoad) {
1914 IRBuilder<> Builder(F->getContext());
1915 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
1916 while (isa<IntrinsicInst>(Val: InsertPt))
1917 ++InsertPt;
1918 Builder.SetInsertPoint(TheBB: &F->getEntryBlock(), IP: InsertPt);
1919
1920 while (auto *Inst = dyn_cast_or_null<Instruction>(Val: Storage)) {
1921 if (auto *LdInst = dyn_cast<LoadInst>(Val: Inst)) {
1922 Storage = LdInst->getPointerOperand();
1923 // FIXME: This is a heuristic that works around the fact that
1924 // LLVM IR debug intrinsics cannot yet distinguish between
1925 // memory and value locations: Because a dbg.declare(alloca) is
1926 // implicitly a memory location no DW_OP_deref operation for the
1927 // last direct load from an alloca is necessary. This condition
1928 // effectively drops the *last* DW_OP_deref in the expression.
1929 if (!SkipOutermostLoad)
1930 Expr = DIExpression::prepend(Expr, Flags: DIExpression::DerefBefore);
1931 } else if (auto *StInst = dyn_cast<StoreInst>(Val: Inst)) {
1932 Storage = StInst->getValueOperand();
1933 } else {
1934 SmallVector<uint64_t, 16> Ops;
1935 SmallVector<Value *, 0> AdditionalValues;
1936 Value *Op = llvm::salvageDebugInfoImpl(
1937 I&: *Inst, CurrentLocOps: Expr ? Expr->getNumLocationOperands() : 0, Ops,
1938 AdditionalValues);
1939 if (!Op || !AdditionalValues.empty()) {
1940 // If salvaging failed or salvaging produced more than one location
1941 // operand, give up.
1942 break;
1943 }
1944 Storage = Op;
1945 Expr = DIExpression::appendOpsToArg(Expr, Ops, ArgNo: 0, /*StackValue*/ false);
1946 }
1947 SkipOutermostLoad = false;
1948 }
1949 if (!Storage)
1950 return std::nullopt;
1951
1952 auto *StorageAsArg = dyn_cast<Argument>(Val: Storage);
1953
1954 const bool IsSingleLocationExpression = Expr->isSingleLocationExpression();
1955 // Use an EntryValue when requested (UseEntryValue) for swift async Arguments.
1956 // Entry values in variadic expressions are not supported.
1957 const bool WillUseEntryValue =
1958 UseEntryValue && StorageAsArg &&
1959 StorageAsArg->hasAttribute(Kind: Attribute::SwiftAsync) &&
1960 !Expr->isEntryValue() && IsSingleLocationExpression;
1961
1962 if (WillUseEntryValue)
1963 Expr = DIExpression::prepend(Expr, Flags: DIExpression::EntryValue);
1964
1965 // If the coroutine frame is an Argument, store it in an alloca to improve
1966 // its availability (e.g. registers may be clobbered).
1967 // Avoid this if the value is guaranteed to be available through other means
1968 // (e.g. swift ABI guarantees).
1969 // Avoid this if multiple location expressions are involved, as LLVM does not
1970 // know how to prepend a deref in this scenario.
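  // For instance (illustrative), if the frame pointer arrives in an argument
  // %frame, we emit in the entry block:
  //
  //   %frame.debug = alloca ptr
  //   store ptr %frame, ptr %frame.debug
  //
  // and the variable's location becomes %frame.debug, with a DW_OP_deref
  // prepended to its expression (see below).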
1971 if (StorageAsArg && !WillUseEntryValue && IsSingleLocationExpression) {
1972 auto &Cached = ArgToAllocaMap[StorageAsArg];
1973 if (!Cached) {
1974 Cached = Builder.CreateAlloca(Ty: Storage->getType(), AddrSpace: 0, ArraySize: nullptr,
1975 Name: Storage->getName() + ".debug");
1976 Builder.CreateStore(Val: Storage, Ptr: Cached);
1977 }
1978 Storage = Cached;
1979 // FIXME: LLVM lacks nuanced semantics to differentiate between
1980 // memory and direct locations at the IR level. The backend will
1981 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
1982 // location. Thus, if there are deref and offset operations in the
1983 // expression, we need to add a DW_OP_deref at the *start* of the
1984 // expression to first load the contents of the alloca before
1985 // adjusting it with the expression.
1986 Expr = DIExpression::prepend(Expr, Flags: DIExpression::DerefBefore);
1987 }
1988
1989 Expr = Expr->foldConstantMath();
1990 return {{*Storage, *Expr}};
1991}
1992
1993void coro::salvageDebugInfo(
1994 SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
1995 DbgVariableRecord &DVR, bool UseEntryValue) {
1996
1997 Function *F = DVR.getFunction();
1998 // Follow the pointer arithmetic all the way to the incoming
1999 // function argument and convert into a DIExpression.
2000 bool SkipOutermostLoad = DVR.isDbgDeclare() || DVR.isDbgDeclareValue();
2001 Value *OriginalStorage = DVR.getVariableLocationOp(OpIdx: 0);
2002
2003 auto SalvagedInfo =
2004 ::salvageDebugInfoImpl(ArgToAllocaMap, UseEntryValue, F, Storage: OriginalStorage,
2005 Expr: DVR.getExpression(), SkipOutermostLoad);
2006 if (!SalvagedInfo)
2007 return;
2008
2009 Value *Storage = &SalvagedInfo->first;
2010 DIExpression *Expr = &SalvagedInfo->second;
2011
2012 DVR.replaceVariableLocationOp(OldValue: OriginalStorage, NewValue: Storage);
2013 DVR.setExpression(Expr);
2014 // We only hoist dbg.declare and dbg.declare_value today; it does not make
2015 // sense to hoist dbg.value because it lacks the function-wide guarantees
2016 // that dbg.declare provides.
2017 if (DVR.getType() == DbgVariableRecord::LocationType::Declare ||
2018 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue) {
2019 std::optional<BasicBlock::iterator> InsertPt;
2020 if (auto *I = dyn_cast<Instruction>(Val: Storage)) {
2021 InsertPt = I->getInsertionPointAfterDef();
2022 // Update DILocation only if variable was not inlined.
2023 DebugLoc ILoc = I->getDebugLoc();
2024 DebugLoc DVRLoc = DVR.getDebugLoc();
2025 if (ILoc && DVRLoc &&
2026 DVRLoc->getScope()->getSubprogram() ==
2027 ILoc->getScope()->getSubprogram())
2028 DVR.setDebugLoc(ILoc);
2029 } else if (isa<Argument>(Val: Storage))
2030 InsertPt = F->getEntryBlock().begin();
2031 if (InsertPt) {
2032 DVR.removeFromParent();
2033 // If a dbg.declare_value is being reinserted, convert it to a dbg.declare
2034 // (or to a dbg.value when its location is not a pointer) so that subsequent
2035 // passes don't have to deal with a dbg.declare_value.
2036 if (DVR.getType() == DbgVariableRecord::LocationType::DeclareValue) {
2037 auto *MD = DVR.getRawLocation();
2038 if (auto *VAM = dyn_cast<ValueAsMetadata>(Val: MD)) {
2039 Type *Ty = VAM->getValue()->getType();
2040 if (Ty->isPointerTy())
2041 DVR.Type = DbgVariableRecord::LocationType::Declare;
2042 else
2043 DVR.Type = DbgVariableRecord::LocationType::Value;
2044 }
2045 }
2046 (*InsertPt)->getParent()->insertDbgRecordBefore(DR: &DVR, Here: *InsertPt);
2047 }
2048 }
2049}
2050
2051void coro::normalizeCoroutine(Function &F, coro::Shape &Shape,
2052 TargetTransformInfo &TTI) {
2053 // Don't eliminate swifterror in async functions that won't be split.
2054 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2055 eliminateSwiftError(F, Shape);
2056
2057 if (Shape.ABI == coro::ABI::Switch &&
2058 Shape.SwitchLowering.PromiseAlloca) {
2059 Shape.getSwitchCoroId()->clearPromise();
2060 }
2061
2062 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2063 // intrinsics are in their own blocks to simplify the logic of building up
2064 // SuspendCrossing data.
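// For example (illustrative), after this splitting each suspend point sits in
// its own block:
//
//   CoroSave:
//     %save = call token @llvm.coro.save(ptr %hdl)
//     br label %CoroSuspend
//   CoroSuspend:
//     %s = call i8 @llvm.coro.suspend(token %save, i1 false)
//
// so suspend points coincide with block boundaries for the analysis.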
2065 for (auto *CSI : Shape.CoroSuspends) {
2066 if (auto *Save = CSI->getCoroSave())
2067 splitAround(I: Save, Name: "CoroSave");
2068 splitAround(I: CSI, Name: "CoroSuspend");
2069 }
2070
2071 // Put CoroEnds into their own blocks.
2072 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2073 splitAround(I: CE, Name: "CoroEnd");
2074
2075 // Emit the musttail call function in a new block before the CoroEnd.
2076 // We do this here so that the right suspend-crossing info is computed
2077 // for the operands of the musttail call. (Operands of the coro.end
2078 // instruction itself would be ignored.)
2079 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(Val: CE)) {
2080 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2081 if (!MustTailCallFn)
2082 continue;
2083 IRBuilder<> Builder(AsyncEnd);
2084 SmallVector<Value *, 8> Args(AsyncEnd->args());
2085 auto Arguments = ArrayRef<Value *>(Args).drop_front(N: 3);
2086 auto *Call = coro::createMustTailCall(
2087 Loc: AsyncEnd->getDebugLoc(), MustTailCallFn, TTI, Arguments, Builder);
2088 splitAround(I: Call, Name: "MustTailCall.Before.CoroEnd");
2089 }
2090 }
2091
2092 // Later code makes structural assumptions about single-predecessor PHIs,
2093 // e.g. that they are not live across a suspend point.
2094 cleanupSinglePredPHIs(F);
2095
2096 // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
2097 // never have its definition separated from the PHI by the suspend point.
2098 rewritePHIs(F);
2099}
2100
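/// Build the coroutine frame: rematerialize cheap instructions instead of
/// spilling them, collect the values and allocas that live across suspend
/// points, lay out the frame type, and rewrite their uses to go through the
/// frame.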
2101void coro::BaseABI::buildCoroutineFrame(bool OptimizeFrame) {
2102 SuspendCrossingInfo Checker(F, Shape.CoroSuspends, Shape.CoroEnds);
2103 doRematerializations(F, Checker, IsMaterializable);
2104
2105 const DominatorTree DT(F);
2106 if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2107 Shape.ABI != coro::ABI::RetconOnce)
2108 sinkLifetimeStartMarkers(F, Shape, Checker, DT);
2109
2110 // All values (that are not allocas) that need to be spilled to the frame.
2111 coro::SpillInfo Spills;
2112 // All values defined as allocas that need to live in the frame.
2113 SmallVector<coro::AllocaInfo, 8> Allocas;
2114
2115 // Collect the spills for arguments and other not-materializable values.
2116 coro::collectSpillsFromArgs(Spills, F, Checker);
2117 SmallVector<Instruction *, 4> DeadInstructions;
2118 SmallVector<CoroAllocaAllocInst *, 4> LocalAllocas;
2119 coro::collectSpillsAndAllocasFromInsts(Spills, Allocas, DeadInstructions,
2120 LocalAllocas, F, Checker, DT, Shape);
2121 coro::collectSpillsFromDbgInfo(Spills, F, Checker);
2122
2123 LLVM_DEBUG(dumpAllocas(Allocas));
2124 LLVM_DEBUG(dumpSpills("Spills", Spills));
2125
2126 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2127 Shape.ABI == coro::ABI::Async)
2128 sinkSpillUsesAfterCoroBegin(DT, CoroBegin: Shape.CoroBegin, Spills, Allocas);
2129
2130 // Build frame
2131 FrameDataInfo FrameData(Spills, Allocas);
2132 Shape.FrameTy = buildFrameType(F, Shape, FrameData, OptimizeFrame);
2133 Shape.FramePtr = Shape.CoroBegin;
2134 // For now, this works for C++ programs only.
2135 buildFrameDebugInfo(F, Shape, FrameData);
2136 // Insert spills and reloads
2137 insertSpills(FrameData, Shape);
2138 lowerLocalAllocas(LocalAllocas, DeadInsts&: DeadInstructions);
2139
2140 for (auto *I : DeadInstructions)
2141 I->eraseFromParent();
2142}
2143