1//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular value,
// its definition precedes and its uses follow a suspend point. Such a value is
// referred to as a suspend crossing value.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEPs + loads from the coroutine frame. At the point of the definition we
// spill the value into the coroutine frame.
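//
// For example (simplified), given:
//
//   %val = call i32 @compute()
//   %s = call i8 @llvm.coro.suspend(token none, i1 false)
//   ...
//   call void @use(i32 %val)        ; runs after the coroutine is resumed
//
// %val is a suspend crossing value: the coroutine may return to its caller at
// the suspend point, so %val has to live in the coroutine frame in order to
// survive until the resumption.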
16//===----------------------------------------------------------------------===//
17
18#include "CoroInternal.h"
19#include "llvm/ADT/ScopeExit.h"
20#include "llvm/ADT/SmallString.h"
21#include "llvm/Analysis/StackLifetime.h"
22#include "llvm/IR/DIBuilder.h"
23#include "llvm/IR/DebugInfo.h"
24#include "llvm/IR/Dominators.h"
25#include "llvm/IR/IRBuilder.h"
26#include "llvm/IR/InstIterator.h"
27#include "llvm/IR/IntrinsicInst.h"
28#include "llvm/IR/Module.h"
29#include "llvm/Support/Compiler.h"
30#include "llvm/Support/Debug.h"
31#include "llvm/Support/OptimizedStructLayout.h"
32#include "llvm/Transforms/Coroutines/ABI.h"
33#include "llvm/Transforms/Coroutines/CoroInstr.h"
34#include "llvm/Transforms/Coroutines/MaterializationUtils.h"
35#include "llvm/Transforms/Coroutines/SpillUtils.h"
36#include "llvm/Transforms/Coroutines/SuspendCrossingInfo.h"
37#include "llvm/Transforms/Utils/BasicBlockUtils.h"
38#include "llvm/Transforms/Utils/Local.h"
39#include "llvm/Transforms/Utils/PromoteMemToReg.h"
40#include <algorithm>
41#include <optional>
42
43using namespace llvm;
44
45#define DEBUG_TYPE "coro-frame"
46
47namespace {
48class FrameTypeBuilder;
49// Mapping from the to-be-spilled value to all the users that need reload.
50struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
53 coro::SpillInfo &Spills;
54 // Allocas contains all values defined as allocas that need to live in the
55 // frame.
56 SmallVectorImpl<coro::AllocaInfo> &Allocas;
57
58 FrameDataInfo(coro::SpillInfo &Spills,
59 SmallVectorImpl<coro::AllocaInfo> &Allocas)
60 : Spills(Spills), Allocas(Allocas) {}
61
62 SmallVector<Value *, 8> getAllDefs() const {
63 SmallVector<Value *, 8> Defs;
64 for (const auto &P : Spills)
65 Defs.push_back(Elt: P.first);
66 for (const auto &A : Allocas)
67 Defs.push_back(Elt: A.Alloca);
68 return Defs;
69 }
70
71 uint32_t getFieldIndex(Value *V) const {
72 auto Itr = FieldIndexMap.find(Val: V);
73 assert(Itr != FieldIndexMap.end() &&
74 "Value does not have a frame field index");
75 return Itr->second;
76 }
77
78 void setFieldIndex(Value *V, uint32_t Index) {
79 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
80 "Cannot set the index for the same field twice.");
81 FieldIndexMap[V] = Index;
82 }
83
84 Align getAlign(Value *V) const {
85 auto Iter = FieldAlignMap.find(Val: V);
86 assert(Iter != FieldAlignMap.end());
87 return Iter->second;
88 }
89
90 void setAlign(Value *V, Align AL) {
91 assert(FieldAlignMap.count(V) == 0);
92 FieldAlignMap.insert(KV: {V, AL});
93 }
94
95 uint64_t getDynamicAlign(Value *V) const {
96 auto Iter = FieldDynamicAlignMap.find(Val: V);
97 assert(Iter != FieldDynamicAlignMap.end());
98 return Iter->second;
99 }
100
101 void setDynamicAlign(Value *V, uint64_t Align) {
102 assert(FieldDynamicAlignMap.count(V) == 0);
103 FieldDynamicAlignMap.insert(KV: {V, Align});
104 }
105
106 uint64_t getOffset(Value *V) const {
107 auto Iter = FieldOffsetMap.find(Val: V);
108 assert(Iter != FieldOffsetMap.end());
109 return Iter->second;
110 }
111
112 void setOffset(Value *V, uint64_t Offset) {
113 assert(FieldOffsetMap.count(V) == 0);
114 FieldOffsetMap.insert(KV: {V, Offset});
115 }
116
117 // Remap the index of every field in the frame, using the final layout index.
118 void updateLayoutIndex(FrameTypeBuilder &B);
119
120private:
121 // LayoutIndexUpdateStarted is used to avoid updating the index of any field
122 // twice by mistake.
123 bool LayoutIndexUpdateStarted = false;
124 // Map from values to their slot indexes on the frame. They will be first set
125 // with their original insertion field index. After the frame is built, their
126 // indexes will be updated into the final layout index.
127 DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. Set after the frame is
  // built.
  DenseMap<Value *, Align> FieldAlignMap;
  // Map from values to the alignment they must be dynamically realigned to on
  // the frame, or 0 if no dynamic realignment is needed. Set after the frame
  // is built.
  DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
  // Map from values to their offset on the frame. Set after the frame is
  // built.
  DenseMap<Value *, uint64_t> FieldOffsetMap;
135};
136} // namespace
137
138#ifndef NDEBUG
139static void dumpSpills(StringRef Title, const coro::SpillInfo &Spills) {
140 dbgs() << "------------- " << Title << " --------------\n";
141 for (const auto &E : Spills) {
142 E.first->dump();
143 dbgs() << " user: ";
144 for (auto *I : E.second)
145 I->dump();
146 }
147}
148
149static void dumpAllocas(const SmallVectorImpl<coro::AllocaInfo> &Allocas) {
150 dbgs() << "------------- Allocas --------------\n";
151 for (const auto &A : Allocas) {
152 A.Alloca->dump();
153 }
154}
155#endif
156
157namespace {
158using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the allocated type, we will need to
// insert padding.
163class FrameTypeBuilder {
164private:
165 struct Field {
166 uint64_t Size;
167 uint64_t Offset;
168 Type *Ty;
169 FieldIDType LayoutFieldIndex;
170 Align Alignment;
171 Align TyAlignment;
172 uint64_t DynamicAlignBuffer;
173 };
174
175 const DataLayout &DL;
176 LLVMContext &Context;
177 uint64_t StructSize = 0;
178 Align StructAlign;
179 bool IsFinished = false;
180
181 std::optional<Align> MaxFrameAlignment;
182
183 SmallVector<Field, 8> Fields;
184 DenseMap<Value*, unsigned> FieldIndexByKey;
185
186public:
187 FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
188 std::optional<Align> MaxFrameAlignment)
189 : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
190
191 /// Add a field to this structure for the storage of an `alloca`
192 /// instruction.
193 [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
194 bool IsHeader = false) {
195 Type *Ty = AI->getAllocatedType();
196
197 // Make an array type if this is a static array allocation.
198 if (AI->isArrayAllocation()) {
199 if (auto *CI = dyn_cast<ConstantInt>(Val: AI->getArraySize()))
200 Ty = ArrayType::get(ElementType: Ty, NumElements: CI->getValue().getZExtValue());
201 else
202 report_fatal_error(reason: "Coroutines cannot handle non static allocas yet");
203 }
204
205 return addField(Ty, MaybeFieldAlignment: AI->getAlign(), IsHeader);
206 }
207
  /// We want to put the allocas whose lifetime ranges do not overlap into one
  /// slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///       if (cond) {
  ///         big_structure a;
  ///         process(a);
  ///         co_await something();
  ///       } else {
  ///         big_structure b;
  ///         process2(b);
  ///         co_await something();
  ///       }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapping sets, so that the allocas in
  /// the same set can be placed into the same slot in the coroutine frame. It
  /// then adds a field for each set, using the largest alloca's type as the
  /// field type.
  ///
  /// Side effects: because we sort the allocas, the order of the allocas in
  /// the frame may differ from their order in the source code.
235 void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
236 coro::Shape &Shape, bool OptimizeFrame);
237
238 /// Add a field to this structure.
239 [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
240 bool IsHeader = false,
241 bool IsSpillOfValue = false) {
242 assert(!IsFinished && "adding fields to a finished builder");
243 assert(Ty && "must provide a type for a field");
244
245 // The field size is always the alloc size of the type.
246 uint64_t FieldSize = DL.getTypeAllocSize(Ty);
247
    // For an alloca with size=0, we don't need to add a field; it can just
    // point to any index in the frame. Use index 0.
250 if (FieldSize == 0) {
251 return 0;
252 }
253
254 // The field alignment might not be the type alignment, but we need
255 // to remember the type alignment anyway to build the type.
256 // If we are spilling values we don't need to worry about ABI alignment
257 // concerns.
258 Align ABIAlign = DL.getABITypeAlign(Ty);
259 Align TyAlignment = ABIAlign;
260 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
261 TyAlignment = *MaxFrameAlignment;
262 Align FieldAlignment = MaybeFieldAlignment.value_or(u&: TyAlignment);
263
    // The field alignment could be bigger than the max frame alignment; in
    // that case we request additional storage to be able to dynamically align
    // the pointer.
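    //
    // For example (illustrative numbers): with a max frame alignment of 16 and
    // a field that requires 32-byte alignment, we reserve
    // offsetToAlignment(16, 32) == 16 extra bytes, clamp the field alignment
    // to 16, and realign the field's address within the frame at runtime (see
    // GetFramePointer in insertSpills).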
267 uint64_t DynamicAlignBuffer = 0;
268 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
269 DynamicAlignBuffer =
270 offsetToAlignment(Value: MaxFrameAlignment->value(), Alignment: FieldAlignment);
271 FieldAlignment = *MaxFrameAlignment;
272 FieldSize = FieldSize + DynamicAlignBuffer;
273 }
274
275 // Lay out header fields immediately.
276 uint64_t Offset;
277 if (IsHeader) {
278 Offset = alignTo(Size: StructSize, A: FieldAlignment);
279 StructSize = Offset + FieldSize;
280
281 // Everything else has a flexible offset.
282 } else {
283 Offset = OptimizedStructLayoutField::FlexibleOffset;
284 }
285
    Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
                      DynamicAlignBuffer});
288 return Fields.size() - 1;
289 }
290
291 /// Finish the layout and create the struct type with the given name.
292 StructType *finish(StringRef Name);
293
294 uint64_t getStructSize() const {
295 assert(IsFinished && "not yet finished!");
296 return StructSize;
297 }
298
299 Align getStructAlign() const {
300 assert(IsFinished && "not yet finished!");
301 return StructAlign;
302 }
303
304 FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
305 assert(IsFinished && "not yet finished!");
306 return Fields[Id].LayoutFieldIndex;
307 }
308
309 Field getLayoutField(FieldIDType Id) const {
310 assert(IsFinished && "not yet finished!");
311 return Fields[Id];
312 }
313};
314} // namespace
315
316void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
317 auto Updater = [&](Value *I) {
318 auto Field = B.getLayoutField(Id: getFieldIndex(V: I));
319 setFieldIndex(V: I, Index: Field.LayoutFieldIndex);
320 setAlign(V: I, AL: Field.Alignment);
321 uint64_t dynamicAlign =
322 Field.DynamicAlignBuffer
323 ? Field.DynamicAlignBuffer + Field.Alignment.value()
324 : 0;
325 setDynamicAlign(V: I, Align: dynamicAlign);
326 setOffset(V: I, Offset: Field.Offset);
327 };
328 LayoutIndexUpdateStarted = true;
329 for (auto &S : Spills)
330 Updater(S.first);
331 for (const auto &A : Allocas)
332 Updater(A.Alloca);
333 LayoutIndexUpdateStarted = false;
334}
335
336void FrameTypeBuilder::addFieldForAllocas(const Function &F,
337 FrameDataInfo &FrameData,
338 coro::Shape &Shape,
339 bool OptimizeFrame) {
340 using AllocaSetType = SmallVector<AllocaInst *, 4>;
341 SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
342
  // We need to add fields for the allocas at the end of this function.
344 auto AddFieldForAllocasAtExit = make_scope_exit(F: [&]() {
345 for (auto AllocaList : NonOverlapedAllocas) {
346 auto *LargestAI = *AllocaList.begin();
347 FieldIDType Id = addFieldForAlloca(AI: LargestAI);
348 for (auto *Alloca : AllocaList)
349 FrameData.setFieldIndex(V: Alloca, Index: Id);
350 }
351 });
352
353 if (!OptimizeFrame) {
354 for (const auto &A : FrameData.Allocas) {
355 AllocaInst *Alloca = A.Alloca;
356 NonOverlapedAllocas.emplace_back(Args: AllocaSetType(1, Alloca));
357 }
358 return;
359 }
360
  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap in the
  // blocks that contain coro.end and in their successor blocks.
  // So we choose to skip those blocks when we calculate the live range
  // for each alloca. This should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it merely prevents the affected allocas from sharing a
  // slot.
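  //
  // For example, a suspend point typically looks like
  //
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //   switch i8 %s, label %coro.ret [i8 0, label %resume
  //                                  i8 1, label %cleanup]
  //
  // and we temporarily retarget the default destination (%coro.ret, which
  // leads to coro.end) to the resume successor, so that liveness is not
  // propagated through the coro.end block where all live ranges would
  // otherwise overlap. The original default destinations are restored below.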
373 DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
374 for (auto *CoroSuspendInst : Shape.CoroSuspends) {
375 for (auto *U : CoroSuspendInst->users()) {
376 if (auto *ConstSWI = dyn_cast<SwitchInst>(Val: U)) {
377 auto *SWI = const_cast<SwitchInst *>(ConstSWI);
378 DefaultSuspendDest[SWI] = SWI->getDefaultDest();
379 SWI->setDefaultDest(SWI->getSuccessor(idx: 1));
380 }
381 }
382 }
383
384 auto ExtractAllocas = [&]() {
385 AllocaSetType Allocas;
386 Allocas.reserve(N: FrameData.Allocas.size());
387 for (const auto &A : FrameData.Allocas)
388 Allocas.push_back(Elt: A.Alloca);
389 return Allocas;
390 };
391 StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
392 StackLifetime::LivenessType::May);
393 StackLifetimeAnalyzer.run();
394 auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
395 return StackLifetimeAnalyzer.getLiveRange(AI: AI1).overlaps(
396 Other: StackLifetimeAnalyzer.getLiveRange(AI: AI2));
397 };
398 auto GetAllocaSize = [&](const coro::AllocaInfo &A) {
399 std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
400 assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
401 assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
402 return RetSize->getFixedValue();
403 };
  // Put larger allocas in the front, so that they have higher priority to
  // merge, which can potentially save more space. This also keeps each
  // AllocaSet ordered, so we can easily get the largest alloca in an
  // AllocaSet.
408 sort(C&: FrameData.Allocas, Comp: [&](const auto &Iter1, const auto &Iter2) {
409 return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
410 });
411 for (const auto &A : FrameData.Allocas) {
412 AllocaInst *Alloca = A.Alloca;
413 bool Merged = false;
    // Check whether the Alloca interferes with any existing
    // NonOverlappedAllocaSet. If it does not, insert the alloca into that
    // NonOverlappedAllocaSet.
417 for (auto &AllocaSet : NonOverlapedAllocas) {
418 assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
419 bool NoInterference = none_of(Range&: AllocaSet, P: [&](auto Iter) {
420 return DoAllocasInterfere(Alloca, Iter);
421 });
      // If the alignment of A is a multiple of the alignment of B, an address
      // that is aligned for A also satisfies the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies to handle the
      // alignment information during the merging process, but they seem hard
      // to implement and would benefit little.
428 bool Alignable = [&]() -> bool {
429 auto *LargestAlloca = *AllocaSet.begin();
430 return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
431 0;
432 }();
433 bool CouldMerge = NoInterference && Alignable;
434 if (!CouldMerge)
435 continue;
436 AllocaSet.push_back(Elt: Alloca);
437 Merged = true;
438 break;
439 }
440 if (!Merged) {
441 NonOverlapedAllocas.emplace_back(Args: AllocaSetType(1, Alloca));
442 }
443 }
  // Restore the default target destination that was recorded for each switch
  // instruction.
446 for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
447 SwitchInst *SWI = SwitchAndDefaultDest.first;
448 BasicBlock *DestBB = SwitchAndDefaultDest.second;
449 SWI->setDefaultDest(DestBB);
450 }
  // This debug output tells us which allocas were merged into one slot.
452 LLVM_DEBUG(for (auto &AllocaSet
453 : NonOverlapedAllocas) {
454 if (AllocaSet.size() > 1) {
455 dbgs() << "In Function:" << F.getName() << "\n";
456 dbgs() << "Find Union Set "
457 << "\n";
458 dbgs() << "\tAllocas are \n";
459 for (auto Alloca : AllocaSet)
460 dbgs() << "\t\t" << *Alloca << "\n";
461 }
462 });
463}
464
465StructType *FrameTypeBuilder::finish(StringRef Name) {
466 assert(!IsFinished && "already finished!");
467
468 // Prepare the optimal-layout field array.
469 // The Id in the layout field is a pointer to our Field for it.
470 SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
471 LayoutFields.reserve(N: Fields.size());
472 for (auto &Field : Fields) {
473 LayoutFields.emplace_back(Args: &Field, Args&: Field.Size, Args&: Field.Alignment,
474 Args&: Field.Offset);
475 }
476
477 // Perform layout.
478 auto SizeAndAlign = performOptimizedStructLayout(Fields: LayoutFields);
479 StructSize = SizeAndAlign.first;
480 StructAlign = SizeAndAlign.second;
481
482 auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
483 return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
484 };
485
486 // We need to produce a packed struct type if there's a field whose
487 // assigned offset isn't a multiple of its natural type alignment.
488 bool Packed = [&] {
489 for (auto &LayoutField : LayoutFields) {
490 auto &F = getField(LayoutField);
491 if (!isAligned(Lhs: F.TyAlignment, SizeInBytes: LayoutField.Offset))
492 return true;
493 }
494 return false;
495 }();
496
497 // Build the struct body.
498 SmallVector<Type*, 16> FieldTypes;
499 FieldTypes.reserve(N: LayoutFields.size() * 3 / 2);
500 uint64_t LastOffset = 0;
501 for (auto &LayoutField : LayoutFields) {
502 auto &F = getField(LayoutField);
503
504 auto Offset = LayoutField.Offset;
505
506 // Add a padding field if there's a padding gap and we're either
507 // building a packed struct or the padding gap is more than we'd
508 // get from aligning to the field type's natural alignment.
509 assert(Offset >= LastOffset);
510 if (Offset != LastOffset) {
511 if (Packed || alignTo(Size: LastOffset, A: F.TyAlignment) != Offset)
512 FieldTypes.push_back(Elt: ArrayType::get(ElementType: Type::getInt8Ty(C&: Context),
513 NumElements: Offset - LastOffset));
514 }
515
516 F.Offset = Offset;
517 F.LayoutFieldIndex = FieldTypes.size();
518
519 FieldTypes.push_back(Elt: F.Ty);
520 if (F.DynamicAlignBuffer) {
521 FieldTypes.push_back(
522 Elt: ArrayType::get(ElementType: Type::getInt8Ty(C&: Context), NumElements: F.DynamicAlignBuffer));
523 }
524 LastOffset = Offset + F.Size;
525 }
526
527 StructType *Ty = StructType::create(Context, Elements: FieldTypes, Name, isPacked: Packed);
528
529#ifndef NDEBUG
530 // Check that the IR layout matches the offsets we expect.
531 auto Layout = DL.getStructLayout(Ty);
532 for (auto &F : Fields) {
533 assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
534 assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
535 }
536#endif
537
538 IsFinished = true;
539
540 return Ty;
541}
542
543static void cacheDIVar(FrameDataInfo &FrameData,
544 DenseMap<Value *, DILocalVariable *> &DIVarCache) {
545 for (auto *V : FrameData.getAllDefs()) {
546 if (DIVarCache.contains(Val: V))
547 continue;
548
549 auto CacheIt = [&DIVarCache, V](const auto &Container) {
550 auto *I = llvm::find_if(Container, [](auto *DDI) {
551 return DDI->getExpression()->getNumElements() == 0;
552 });
553 if (I != Container.end())
554 DIVarCache.insert({V, (*I)->getVariable()});
555 };
556 CacheIt(findDbgDeclares(V));
557 CacheIt(findDVRDeclares(V));
558 }
559}
560
/// Create a name for Type. It uses MDString to store the newly created string
/// in order to avoid a memory leak.
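///
/// For example, an i32 yields "__int_32", a pointer type yields "PointerType",
/// and a struct named "struct.Foo" yields "struct_Foo" ('.' and ':' are
/// replaced with '_').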
563static StringRef solveTypeName(Type *Ty) {
564 if (Ty->isIntegerTy()) {
    // The longest common name may be '__int_128', which has 9 characters.
566 SmallString<16> Buffer;
567 raw_svector_ostream OS(Buffer);
568 OS << "__int_" << cast<IntegerType>(Val: Ty)->getBitWidth();
569 auto *MDName = MDString::get(Context&: Ty->getContext(), Str: OS.str());
570 return MDName->getString();
571 }
572
573 if (Ty->isFloatingPointTy()) {
574 if (Ty->isFloatTy())
575 return "__float_";
576 if (Ty->isDoubleTy())
577 return "__double_";
578 return "__floating_type_";
579 }
580
581 if (Ty->isPointerTy())
582 return "PointerType";
583
584 if (Ty->isStructTy()) {
585 if (!cast<StructType>(Val: Ty)->hasName())
586 return "__LiteralStructType_";
587
588 auto Name = Ty->getStructName();
589
590 SmallString<16> Buffer(Name);
591 for (auto &Iter : Buffer)
592 if (Iter == '.' || Iter == ':')
593 Iter = '_';
594 auto *MDName = MDString::get(Context&: Ty->getContext(), Str: Buffer.str());
595 return MDName->getString();
596 }
597
598 return "UnknownType";
599}
600
601static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
602 const DataLayout &Layout, DIScope *Scope,
603 unsigned LineNum,
604 DenseMap<Type *, DIType *> &DITypeCache) {
605 if (DIType *DT = DITypeCache.lookup(Val: Ty))
606 return DT;
607
608 StringRef Name = solveTypeName(Ty);
609
610 DIType *RetType = nullptr;
611
612 if (Ty->isIntegerTy()) {
613 auto BitWidth = cast<IntegerType>(Val: Ty)->getBitWidth();
614 RetType = Builder.createBasicType(Name, SizeInBits: BitWidth, Encoding: dwarf::DW_ATE_signed,
615 Flags: llvm::DINode::FlagArtificial);
616 } else if (Ty->isFloatingPointTy()) {
617 RetType = Builder.createBasicType(Name, SizeInBits: Layout.getTypeSizeInBits(Ty),
618 Encoding: dwarf::DW_ATE_float,
619 Flags: llvm::DINode::FlagArtificial);
620 } else if (Ty->isPointerTy()) {
    // Construct a PointerType that points to null (i.e. void *) instead of
    // exploring the pointee type, to avoid an infinite recursion problem. For
    // example, we would be in trouble if we traversed recursively:
624 //
625 // struct Node {
626 // Node* ptr;
627 // };
628 RetType =
629 Builder.createPointerType(PointeeTy: nullptr, SizeInBits: Layout.getTypeSizeInBits(Ty),
630 AlignInBits: Layout.getABITypeAlign(Ty).value() * CHAR_BIT,
631 /*DWARFAddressSpace=*/std::nullopt, Name);
632 } else if (Ty->isStructTy()) {
633 auto *DIStruct = Builder.createStructType(
634 Scope, Name, File: Scope->getFile(), LineNumber: LineNum, SizeInBits: Layout.getTypeSizeInBits(Ty),
635 AlignInBits: Layout.getPrefTypeAlign(Ty).value() * CHAR_BIT,
636 Flags: llvm::DINode::FlagArtificial, DerivedFrom: nullptr, Elements: llvm::DINodeArray());
637
638 auto *StructTy = cast<StructType>(Val: Ty);
639 SmallVector<Metadata *, 16> Elements;
640 for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
641 DIType *DITy = solveDIType(Builder, Ty: StructTy->getElementType(N: I), Layout,
642 Scope, LineNum, DITypeCache);
643 assert(DITy);
644 Elements.push_back(Elt: Builder.createMemberType(
645 Scope, Name: DITy->getName(), File: Scope->getFile(), LineNo: LineNum,
646 SizeInBits: DITy->getSizeInBits(), AlignInBits: DITy->getAlignInBits(),
647 OffsetInBits: Layout.getStructLayout(Ty: StructTy)->getElementOffsetInBits(Idx: I),
648 Flags: llvm::DINode::FlagArtificial, Ty: DITy));
649 }
650
651 Builder.replaceArrays(T&: DIStruct, Elements: Builder.getOrCreateArray(Elements));
652
653 RetType = DIStruct;
654 } else {
655 LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
656 TypeSize Size = Layout.getTypeSizeInBits(Ty);
657 auto *CharSizeType = Builder.createBasicType(
658 Name, SizeInBits: 8, Encoding: dwarf::DW_ATE_unsigned_char, Flags: llvm::DINode::FlagArtificial);
659
660 if (Size <= 8)
661 RetType = CharSizeType;
662 else {
663 if (Size % 8 != 0)
664 Size = TypeSize::getFixed(ExactSize: Size + 8 - (Size % 8));
665
666 RetType = Builder.createArrayType(
667 Size, AlignInBits: Layout.getPrefTypeAlign(Ty).value(), Ty: CharSizeType,
668 Subscripts: Builder.getOrCreateArray(Elements: Builder.getOrCreateSubrange(Lo: 0, Count: Size / 8)));
669 }
670 }
671
672 DITypeCache.insert(KV: {Ty, RetType});
673 return RetType;
674}
675
/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine frame
/// is constructed in the following way:
/// 1. For every value in the frame, we search its dbg.declare uses to find the
///    corresponding debug variable for the value. If we can find the debug
///    variable, we get full and accurate debug information.
/// 2. If we can't get debug information in step 1, we fall back to building a
///    DIType from the IR type alone. We do this in solveDIType. We only handle
///    integer, floating-point, pointer, and struct types for now.
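///
/// For a coroutine `foo`, the result is (roughly) an artificial struct type
/// named "foo.coro_frame_ty" whose members include __resume_fn, __destroy_fn
/// and __coro_index plus one member per frame field, together with an
/// artificial local variable "__coro_frame" of that type declared at the frame
/// pointer.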
688static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
689 FrameDataInfo &FrameData) {
690 DISubprogram *DIS = F.getSubprogram();
691 // If there is no DISubprogram for F, it implies the function is compiled
692 // without debug info. So we also don't generate debug info for the frame.
693 if (!DIS || !DIS->getUnit() ||
694 !dwarf::isCPlusPlus(
695 S: (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()) ||
696 DIS->getUnit()->getEmissionKind() != DICompileUnit::DebugEmissionKind::FullDebug)
697 return;
698
699 assert(Shape.ABI == coro::ABI::Switch &&
         "We can only build debug information for C++ coroutines now.\n");
701
702 DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
703
704 assert(Shape.getPromiseAlloca() &&
705 "Coroutine with switch ABI should own Promise alloca");
706
707 DIFile *DFile = DIS->getFile();
708 unsigned LineNum = DIS->getLine();
709
710 DICompositeType *FrameDITy = DBuilder.createStructType(
711 Scope: DIS->getUnit(), Name: Twine(F.getName() + ".coro_frame_ty").str(),
712 File: DFile, LineNumber: LineNum, SizeInBits: Shape.FrameSize * 8,
713 AlignInBits: Shape.FrameAlign.value() * 8, Flags: llvm::DINode::FlagArtificial, DerivedFrom: nullptr,
714 Elements: llvm::DINodeArray());
715 StructType *FrameTy = Shape.FrameTy;
716 SmallVector<Metadata *, 16> Elements;
717 DataLayout Layout = F.getDataLayout();
718
719 DenseMap<Value *, DILocalVariable *> DIVarCache;
720 cacheDIVar(FrameData, DIVarCache);
721
722 unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
723 unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
724 unsigned IndexIndex = Shape.SwitchLowering.IndexField;
725
726 DenseMap<unsigned, StringRef> NameCache;
727 NameCache.insert(KV: {ResumeIndex, "__resume_fn"});
728 NameCache.insert(KV: {DestroyIndex, "__destroy_fn"});
729 NameCache.insert(KV: {IndexIndex, "__coro_index"});
730
731 Type *ResumeFnTy = FrameTy->getElementType(N: ResumeIndex),
732 *DestroyFnTy = FrameTy->getElementType(N: DestroyIndex),
733 *IndexTy = FrameTy->getElementType(N: IndexIndex);
734
735 DenseMap<unsigned, DIType *> TyCache;
736 TyCache.insert(
737 KV: {ResumeIndex, DBuilder.createPointerType(
738 PointeeTy: nullptr, SizeInBits: Layout.getTypeSizeInBits(Ty: ResumeFnTy))});
739 TyCache.insert(
740 KV: {DestroyIndex, DBuilder.createPointerType(
741 PointeeTy: nullptr, SizeInBits: Layout.getTypeSizeInBits(Ty: DestroyFnTy))});
742
743 /// FIXME: If we fill the field `SizeInBits` with the actual size of
744 /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
745 TyCache.insert(KV: {IndexIndex, DBuilder.createBasicType(
746 Name: "__coro_index",
747 SizeInBits: (Layout.getTypeSizeInBits(Ty: IndexTy) < 8)
748 ? 8
749 : Layout.getTypeSizeInBits(Ty: IndexTy),
750 Encoding: dwarf::DW_ATE_unsigned_char)});
751
752 for (auto *V : FrameData.getAllDefs()) {
753 auto It = DIVarCache.find(Val: V);
754 if (It == DIVarCache.end())
755 continue;
756
757 auto Index = FrameData.getFieldIndex(V);
758
759 NameCache.insert(KV: {Index, It->second->getName()});
760 TyCache.insert(KV: {Index, It->second->getType()});
761 }
762
  // Cache from field index to (align, offset) pair.
764 DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
765 // The Align and Offset of Resume function and Destroy function are fixed.
766 OffsetCache.insert(KV: {ResumeIndex, {8, 0}});
767 OffsetCache.insert(KV: {DestroyIndex, {8, 8}});
768 OffsetCache.insert(
769 KV: {IndexIndex,
770 {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});
771
772 for (auto *V : FrameData.getAllDefs()) {
773 auto Index = FrameData.getFieldIndex(V);
774
775 OffsetCache.insert(
776 KV: {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
777 }
778
779 DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names. For example, there may
  // be many i32 and i64 types in one coroutine, and we use names like i32_0
  // and i32_1 to keep them distinct, since it makes no sense for the names of
  // the fields to conflict with each other.
784 unsigned UnknownTypeNum = 0;
785 for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
786 auto OCIt = OffsetCache.find(Val: Index);
787 if (OCIt == OffsetCache.end())
788 continue;
789
790 std::string Name;
791 uint64_t SizeInBits;
792 uint32_t AlignInBits;
793 uint64_t OffsetInBits;
794 DIType *DITy = nullptr;
795
796 Type *Ty = FrameTy->getElementType(N: Index);
797 assert(Ty->isSized() && "We can't handle type which is not sized.\n");
798 SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedValue();
799 AlignInBits = OCIt->second.first * 8;
800 OffsetInBits = OCIt->second.second * 8;
801
802 if (auto It = NameCache.find(Val: Index); It != NameCache.end()) {
803 Name = It->second.str();
804 DITy = TyCache[Index];
805 } else {
806 DITy = solveDIType(Builder&: DBuilder, Ty, Layout, Scope: FrameDITy, LineNum, DITypeCache);
807 assert(DITy && "SolveDIType shouldn't return nullptr.\n");
808 Name = DITy->getName().str();
809 Name += "_" + std::to_string(val: UnknownTypeNum);
810 UnknownTypeNum++;
811 }
812
813 Elements.push_back(Elt: DBuilder.createMemberType(
814 Scope: FrameDITy, Name, File: DFile, LineNo: LineNum, SizeInBits, AlignInBits, OffsetInBits,
815 Flags: llvm::DINode::FlagArtificial, Ty: DITy));
816 }
817
818 DBuilder.replaceArrays(T&: FrameDITy, Elements: DBuilder.getOrCreateArray(Elements));
819
820 auto *FrameDIVar =
821 DBuilder.createAutoVariable(Scope: DIS, Name: "__coro_frame", File: DFile, LineNo: LineNum,
822 Ty: FrameDITy, AlwaysPreserve: true, Flags: DINode::FlagArtificial);
823
  // The subprogram has a RetainedNodes field which records the debug
  // variables it contains. So we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than the more precise
  // `__coro_frame` has been optimized out.
831 auto RetainedNodes = DIS->getRetainedNodes();
832 SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
833 RetainedNodes.end());
834 RetainedNodesVec.push_back(Elt: FrameDIVar);
835 DIS->replaceOperandWith(I: 7, New: (MDTuple::get(Context&: F.getContext(), MDs: RetainedNodesVec)));
836
837 // Construct the location for the frame debug variable. The column number
838 // is fake but it should be fine.
839 DILocation *DILoc =
840 DILocation::get(Context&: DIS->getContext(), Line: LineNum, /*Column=*/1, Scope: DIS);
841 assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));
842
843 DbgVariableRecord *NewDVR =
844 new DbgVariableRecord(ValueAsMetadata::get(V: Shape.FramePtr), FrameDIVar,
845 DBuilder.createExpression(), DILoc,
846 DbgVariableRecord::LocationType::Declare);
847 BasicBlock::iterator It = Shape.getInsertPtAfterFramePtr();
848 It->getParent()->insertDbgRecordBefore(DR: NewDVR, Here: It);
849}
850
851// Build a struct that will keep state for an active coroutine.
852// struct f.frame {
853// ResumeFnTy ResumeFnAddr;
854// ResumeFnTy DestroyFnAddr;
855// ... promise (if present) ...
856// int ResumeIndex;
857// ... spills ...
858// };
859static StructType *buildFrameType(Function &F, coro::Shape &Shape,
860 FrameDataInfo &FrameData,
861 bool OptimizeFrame) {
862 LLVMContext &C = F.getContext();
863 const DataLayout &DL = F.getDataLayout();
864
865 // We will use this value to cap the alignment of spilled values.
866 std::optional<Align> MaxFrameAlignment;
867 if (Shape.ABI == coro::ABI::Async)
868 MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
869 FrameTypeBuilder B(C, DL, MaxFrameAlignment);
870
871 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
872 std::optional<FieldIDType> SwitchIndexFieldId;
873
874 if (Shape.ABI == coro::ABI::Switch) {
875 auto *FnPtrTy = PointerType::getUnqual(C);
876
877 // Add header fields for the resume and destroy functions.
878 // We can rely on these being perfectly packed.
879 (void)B.addField(Ty: FnPtrTy, MaybeFieldAlignment: std::nullopt, /*header*/ IsHeader: true);
880 (void)B.addField(Ty: FnPtrTy, MaybeFieldAlignment: std::nullopt, /*header*/ IsHeader: true);
881
882 // PromiseAlloca field needs to be explicitly added here because it's
883 // a header field with a fixed offset based on its alignment. Hence it
884 // needs special handling and cannot be added to FrameData.Allocas.
885 if (PromiseAlloca)
886 FrameData.setFieldIndex(
887 V: PromiseAlloca, Index: B.addFieldForAlloca(AI: PromiseAlloca, /*header*/ IsHeader: true));
888
889 // Add a field to store the suspend index. This doesn't need to
890 // be in the header.
891 unsigned IndexBits = std::max(a: 1U, b: Log2_64_Ceil(Value: Shape.CoroSuspends.size()));
892 Type *IndexType = Type::getIntNTy(C, N: IndexBits);
893
894 SwitchIndexFieldId = B.addField(Ty: IndexType, MaybeFieldAlignment: std::nullopt);
895 } else {
896 assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
897 }
898
  // Because multiple allocas may share the same field slot,
  // we add fields for the allocas here.
901 B.addFieldForAllocas(F, FrameData, Shape, OptimizeFrame);
902 // Add PromiseAlloca to Allocas list so that
903 // 1. updateLayoutIndex could update its index after
904 // `performOptimizedStructLayout`
905 // 2. it is processed in insertSpills.
906 if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
909 FrameData.Allocas.emplace_back(
910 Args&: PromiseAlloca, Args: DenseMap<Instruction *, std::optional<APInt>>{}, Args: false);
911 // Create an entry for every spilled value.
912 for (auto &S : FrameData.Spills) {
913 Type *FieldType = S.first->getType();
914 // For byval arguments, we need to store the pointed value in the frame,
915 // instead of the pointer itself.
916 if (const Argument *A = dyn_cast<Argument>(Val: S.first))
917 if (A->hasByValAttr())
918 FieldType = A->getParamByValType();
919 FieldIDType Id = B.addField(Ty: FieldType, MaybeFieldAlignment: std::nullopt, IsHeader: false /*header*/,
920 IsSpillOfValue: true /*IsSpillOfValue*/);
921 FrameData.setFieldIndex(V: S.first, Index: Id);
922 }
923
924 StructType *FrameTy = [&] {
925 SmallString<32> Name(F.getName());
926 Name.append(RHS: ".Frame");
927 return B.finish(Name);
928 }();
929
930 FrameData.updateLayoutIndex(B);
931 Shape.FrameAlign = B.getStructAlign();
932 Shape.FrameSize = B.getStructSize();
933
934 switch (Shape.ABI) {
935 case coro::ABI::Switch: {
936 // In the switch ABI, remember the switch-index field.
937 auto IndexField = B.getLayoutField(Id: *SwitchIndexFieldId);
938 Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
939 Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
940 Shape.SwitchLowering.IndexOffset = IndexField.Offset;
941
942 // Also round the frame size up to a multiple of its alignment, as is
943 // generally expected in C/C++.
944 Shape.FrameSize = alignTo(Size: Shape.FrameSize, A: Shape.FrameAlign);
945 break;
946 }
947
948 // In the retcon ABI, remember whether the frame is inline in the storage.
949 case coro::ABI::Retcon:
950 case coro::ABI::RetconOnce: {
951 auto Id = Shape.getRetconCoroId();
952 Shape.RetconLowering.IsFrameInlineInStorage
953 = (B.getStructSize() <= Id->getStorageSize() &&
954 B.getStructAlign() <= Id->getStorageAlignment());
955 break;
956 }
957 case coro::ABI::Async: {
958 Shape.AsyncLowering.FrameOffset =
959 alignTo(Size: Shape.AsyncLowering.ContextHeaderSize, A: Shape.FrameAlign);
960 // Also make the final context size a multiple of the context alignment to
961 // make allocation easier for allocators.
962 Shape.AsyncLowering.ContextSize =
963 alignTo(Size: Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
964 A: Shape.AsyncLowering.getContextAlignment());
965 if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
966 report_fatal_error(
        "The alignment requirement of frame variables cannot be higher than "
968 "the alignment of the async function context");
969 }
970 break;
971 }
972 }
973
974 return FrameTy;
975}
976
977// Replace all alloca and SSA values that are accessed across suspend points
978// with GetElementPointer from coroutine frame + loads and stores. Create an
979// AllocaSpillBB that will become the new entry block for the resume parts of
980// the coroutine:
981//
982// %hdl = coro.begin(...)
983// whatever
984//
985// becomes:
986//
987// %hdl = coro.begin(...)
988// br label %AllocaSpillBB
989//
990// AllocaSpillBB:
991// ; geps corresponding to allocas that were moved to coroutine frame
992// br label PostSpill
993//
994// PostSpill:
995// whatever
996//
997//
998static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
999 LLVMContext &C = Shape.CoroBegin->getContext();
1000 Function *F = Shape.CoroBegin->getFunction();
1001 IRBuilder<> Builder(C);
1002 StructType *FrameTy = Shape.FrameTy;
1003 Value *FramePtr = Shape.FramePtr;
1004 DominatorTree DT(*F);
1005 SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
1006
1007 // Create a GEP with the given index into the coroutine frame for the original
1008 // value Orig. Appends an extra 0 index for array-allocas, preserving the
1009 // original type.
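  //
  // For example (illustrative), for `%a = alloca i32, i32 4` the frame field
  // has type [4 x i32] and we emit
  //   getelementptr inbounds %f.Frame, ptr %FramePtr, i32 0, i32 Index, i32 0
  // so that the result points at the first i32, just like the original alloca.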
1010 auto GetFramePointer = [&](Value *Orig) -> Value * {
1011 FieldIDType Index = FrameData.getFieldIndex(V: Orig);
1012 SmallVector<Value *, 3> Indices = {
1013 ConstantInt::get(Ty: Type::getInt32Ty(C), V: 0),
1014 ConstantInt::get(Ty: Type::getInt32Ty(C), V: Index),
1015 };
1016
1017 if (auto *AI = dyn_cast<AllocaInst>(Val: Orig)) {
1018 if (auto *CI = dyn_cast<ConstantInt>(Val: AI->getArraySize())) {
1019 auto Count = CI->getValue().getZExtValue();
1020 if (Count > 1) {
1021 Indices.push_back(Elt: ConstantInt::get(Ty: Type::getInt32Ty(C), V: 0));
1022 }
1023 } else {
1024 report_fatal_error(reason: "Coroutines cannot handle non static allocas yet");
1025 }
1026 }
1027
1028 auto GEP = cast<GetElementPtrInst>(
1029 Val: Builder.CreateInBoundsGEP(Ty: FrameTy, Ptr: FramePtr, IdxList: Indices));
1030 if (auto *AI = dyn_cast<AllocaInst>(Val: Orig)) {
1031 if (FrameData.getDynamicAlign(V: Orig) != 0) {
1032 assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
1033 auto *M = AI->getModule();
1034 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
1035 auto *PtrValue = Builder.CreatePtrToInt(V: GEP, DestTy: IntPtrTy);
1036 auto *AlignMask =
1037 ConstantInt::get(Ty: IntPtrTy, V: AI->getAlign().value() - 1);
1038 PtrValue = Builder.CreateAdd(LHS: PtrValue, RHS: AlignMask);
1039 PtrValue = Builder.CreateAnd(LHS: PtrValue, RHS: Builder.CreateNot(V: AlignMask));
1040 return Builder.CreateIntToPtr(V: PtrValue, DestTy: AI->getType());
1041 }
    // If the type of the GEP is not equal to the type of the AllocaInst, it
    // implies that the AllocaInst may be reusing the frame slot of another
    // AllocaInst. So we cast the GEP to the type of the AllocaInst here to
    // reuse the frame storage.
    //
    // Note: if we change the strategy for dealing with alignment, we need to
    // refine this casting.
1049 if (GEP->getType() != Orig->getType())
1050 return Builder.CreateAddrSpaceCast(V: GEP, DestTy: Orig->getType(),
1051 Name: Orig->getName() + Twine(".cast"));
1052 }
1053 return GEP;
1054 };
1055
1056 for (auto const &E : FrameData.Spills) {
1057 Value *Def = E.first;
1058 auto SpillAlignment = Align(FrameData.getAlign(V: Def));
1059 // Create a store instruction storing the value into the
1060 // coroutine frame.
1061 BasicBlock::iterator InsertPt = coro::getSpillInsertionPt(Shape, Def, DT);
1062
1063 Type *ByValTy = nullptr;
1064 if (auto *Arg = dyn_cast<Argument>(Val: Def)) {
1065 // If we're spilling an Argument, make sure we clear 'captures'
1066 // from the coroutine function.
1067 Arg->getParent()->removeParamAttr(ArgNo: Arg->getArgNo(), Kind: Attribute::Captures);
1068
1069 if (Arg->hasByValAttr())
1070 ByValTy = Arg->getParamByValType();
1071 }
1072
1073 auto Index = FrameData.getFieldIndex(V: Def);
1074 Builder.SetInsertPoint(TheBB: InsertPt->getParent(), IP: InsertPt);
1075 auto *G = Builder.CreateConstInBoundsGEP2_32(
1076 Ty: FrameTy, Ptr: FramePtr, Idx0: 0, Idx1: Index, Name: Def->getName() + Twine(".spill.addr"));
1077 if (ByValTy) {
1078 // For byval arguments, we need to store the pointed value in the frame,
1079 // instead of the pointer itself.
1080 auto *Value = Builder.CreateLoad(Ty: ByValTy, Ptr: Def);
1081 Builder.CreateAlignedStore(Val: Value, Ptr: G, Align: SpillAlignment);
1082 } else {
1083 Builder.CreateAlignedStore(Val: Def, Ptr: G, Align: SpillAlignment);
1084 }
1085
1086 BasicBlock *CurrentBlock = nullptr;
1087 Value *CurrentReload = nullptr;
1088 for (auto *U : E.second) {
1089 // If we have not seen the use block, create a load instruction to reload
1090 // the spilled value from the coroutine frame. Populates the Value pointer
1091 // reference provided with the frame GEP.
1092 if (CurrentBlock != U->getParent()) {
1093 CurrentBlock = U->getParent();
1094 Builder.SetInsertPoint(TheBB: CurrentBlock,
1095 IP: CurrentBlock->getFirstInsertionPt());
1096
1097 auto *GEP = GetFramePointer(E.first);
1098 GEP->setName(E.first->getName() + Twine(".reload.addr"));
1099 if (ByValTy)
1100 CurrentReload = GEP;
1101 else
1102 CurrentReload = Builder.CreateAlignedLoad(
1103 Ty: FrameTy->getElementType(N: FrameData.getFieldIndex(V: E.first)), Ptr: GEP,
1104 Align: SpillAlignment, Name: E.first->getName() + Twine(".reload"));
1105
1106 TinyPtrVector<DbgDeclareInst *> DIs = findDbgDeclares(V: Def);
1107 TinyPtrVector<DbgVariableRecord *> DVRs = findDVRDeclares(V: Def);
        // Try our best to find a dbg.declare. If the spill is a temp, there
        // may not be a direct dbg.declare. Walk up the load chain to find one
        // from an alias.
1111 if (F->getSubprogram()) {
1112 auto *CurDef = Def;
1113 while (DIs.empty() && DVRs.empty() && isa<LoadInst>(Val: CurDef)) {
1114 auto *LdInst = cast<LoadInst>(Val: CurDef);
1115 // Only consider ptr to ptr same type load.
1116 if (LdInst->getPointerOperandType() != LdInst->getType())
1117 break;
1118 CurDef = LdInst->getPointerOperand();
1119 if (!isa<AllocaInst, LoadInst>(Val: CurDef))
1120 break;
1121 DIs = findDbgDeclares(V: CurDef);
1122 DVRs = findDVRDeclares(V: CurDef);
1123 }
1124 }
1125
1126 auto SalvageOne = [&](auto *DDI) {
1127 // This dbg.declare is preserved for all coro-split function
1128 // fragments. It will be unreachable in the main function, and
1129 // processed by coro::salvageDebugInfo() by the Cloner.
1130 DbgVariableRecord *NewDVR = new DbgVariableRecord(
1131 ValueAsMetadata::get(V: CurrentReload), DDI->getVariable(),
1132 DDI->getExpression(), DDI->getDebugLoc(),
1133 DbgVariableRecord::LocationType::Declare);
1134 Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
1135 DR: NewDVR, Here: Builder.GetInsertPoint());
1136 // This dbg.declare is for the main function entry point. It
1137 // will be deleted in all coro-split functions.
1138 coro::salvageDebugInfo(ArgToAllocaMap, *DDI, false /*UseEntryValue*/);
1139 };
1140 for_each(Range&: DIs, F: SalvageOne);
1141 for_each(Range&: DVRs, F: SalvageOne);
1142 }
1143
1144 // If we have a single edge PHINode, remove it and replace it with a
1145 // reload from the coroutine frame. (We already took care of multi edge
1146 // PHINodes by normalizing them in the rewritePHIs function).
1147 if (auto *PN = dyn_cast<PHINode>(Val: U)) {
1148 assert(PN->getNumIncomingValues() == 1 &&
1149 "unexpected number of incoming "
1150 "values in the PHINode");
1151 PN->replaceAllUsesWith(V: CurrentReload);
1152 PN->eraseFromParent();
1153 continue;
1154 }
1155
1156 // Replace all uses of CurrentValue in the current instruction with
1157 // reload.
1158 U->replaceUsesOfWith(From: Def, To: CurrentReload);
1159 // Instructions are added to Def's user list if the attached
1160 // debug records use Def. Update those now.
1161 for (DbgVariableRecord &DVR : filterDbgVars(R: U->getDbgRecordRange()))
1162 DVR.replaceVariableLocationOp(OldValue: Def, NewValue: CurrentReload, AllowEmpty: true);
1163 }
1164 }
1165
1166 BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();
1167
1168 auto SpillBlock = FramePtrBB->splitBasicBlock(
1169 I: Shape.getInsertPtAfterFramePtr(), BBName: "AllocaSpillBB");
1170 SpillBlock->splitBasicBlock(I: &SpillBlock->front(), BBName: "PostSpill");
1171 Shape.AllocaSpillBlock = SpillBlock;
1172
1173 // retcon and retcon.once lowering assumes all uses have been sunk.
1174 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1175 Shape.ABI == coro::ABI::Async) {
1176 // If we found any allocas, replace all of their remaining uses with Geps.
1177 Builder.SetInsertPoint(TheBB: SpillBlock, IP: SpillBlock->begin());
1178 for (const auto &P : FrameData.Allocas) {
1179 AllocaInst *Alloca = P.Alloca;
1180 auto *G = GetFramePointer(Alloca);
1181
1182 // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1183 // here, as we are changing location of the instruction.
1184 G->takeName(V: Alloca);
1185 Alloca->replaceAllUsesWith(V: G);
1186 Alloca->eraseFromParent();
1187 }
1188 return;
1189 }
1190
  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To retain debuggability, we replace the uses of allocas in
  // dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the allocas with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
1196 Builder.SetInsertPoint(TheBB: Shape.AllocaSpillBlock,
1197 IP: Shape.AllocaSpillBlock->begin());
1198 SmallVector<Instruction *, 4> UsersToUpdate;
1199 for (const auto &A : FrameData.Allocas) {
1200 AllocaInst *Alloca = A.Alloca;
1201 UsersToUpdate.clear();
1202 for (User *U : make_early_inc_range(Range: Alloca->users())) {
1203 auto *I = cast<Instruction>(Val: U);
      // It is meaningless to retain lifetime intrinsics that refer to members
      // of the coroutine frame, and such meaningless lifetime intrinsics may
      // block further optimizations.
1207 if (I->isLifetimeStartOrEnd())
1208 I->eraseFromParent();
1209 else if (DT.dominates(Def: Shape.CoroBegin, User: I))
1210 UsersToUpdate.push_back(Elt: I);
1211 }
1212
1213 if (UsersToUpdate.empty())
1214 continue;
1215 auto *G = GetFramePointer(Alloca);
1216 G->setName(Alloca->getName() + Twine(".reload.addr"));
1217
1218 SmallVector<DbgVariableIntrinsic *, 4> DIs;
1219 SmallVector<DbgVariableRecord *> DbgVariableRecords;
1220 findDbgUsers(DbgInsts&: DIs, V: Alloca, DbgVariableRecords: &DbgVariableRecords);
1221 for (auto *DVI : DIs)
1222 DVI->replaceUsesOfWith(From: Alloca, To: G);
1223 for (auto *DVR : DbgVariableRecords)
1224 DVR->replaceVariableLocationOp(OldValue: Alloca, NewValue: G);
1225
1226 for (Instruction *I : UsersToUpdate)
1227 I->replaceUsesOfWith(From: Alloca, To: G);
1228 }
1229 Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
1230 for (const auto &A : FrameData.Allocas) {
1231 AllocaInst *Alloca = A.Alloca;
1232 if (A.MayWriteBeforeCoroBegin) {
1233 // isEscaped really means potentially modified before CoroBegin.
1234 if (Alloca->isArrayAllocation())
1235 report_fatal_error(
1236 reason: "Coroutines cannot handle copying of array allocas yet");
1237
1238 auto *G = GetFramePointer(Alloca);
1239 auto *Value = Builder.CreateLoad(Ty: Alloca->getAllocatedType(), Ptr: Alloca);
1240 Builder.CreateStore(Val: Value, Ptr: G);
1241 }
1242 // For each alias to Alloca created before CoroBegin but used after
1243 // CoroBegin, we recreate them after CoroBegin by applying the offset
1244 // to the pointer in the frame.
1245 for (const auto &Alias : A.Aliases) {
1246 auto *FramePtr = GetFramePointer(Alloca);
1247 auto &Value = *Alias.second;
1248 auto ITy = IntegerType::get(C, NumBits: Value.getBitWidth());
1249 auto *AliasPtr =
1250 Builder.CreatePtrAdd(Ptr: FramePtr, Offset: ConstantInt::get(Ty: ITy, V: Value));
1251 Alias.first->replaceUsesWithIf(
1252 New: AliasPtr, ShouldReplace: [&](Use &U) { return DT.dominates(Def: Shape.CoroBegin, U); });
1253 }
1254 }
1255
1256 // PromiseAlloca is not collected in FrameData.Allocas. So we don't handle
1257 // the case that the PromiseAlloca may have writes before CoroBegin in the
1258 // above codes. And it may be problematic in edge cases. See
1259 // https://github.com/llvm/llvm-project/issues/57861 for an example.
1260 if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca) {
1261 AllocaInst *PA = Shape.SwitchLowering.PromiseAlloca;
    // Check whether there is any memory access to the promise alloca before
    // CoroBegin.
1263 bool HasAccessingPromiseBeforeCB = llvm::any_of(Range: PA->uses(), P: [&](Use &U) {
1264 auto *Inst = dyn_cast<Instruction>(Val: U.getUser());
1265 if (!Inst || DT.dominates(Def: Shape.CoroBegin, User: Inst))
1266 return false;
1267
1268 if (auto *CI = dyn_cast<CallInst>(Val: Inst)) {
1269 // It is fine if the call wouldn't write to the Promise.
1270 // This is possible for @llvm.coro.id intrinsics, which
1271 // would take the promise as the second argument as a
1272 // marker.
1273 if (CI->onlyReadsMemory() ||
1274 CI->onlyReadsMemory(OpNo: CI->getArgOperandNo(U: &U)))
1275 return false;
1276 return true;
1277 }
1278
1279 return isa<StoreInst>(Val: Inst) ||
1280 // It may take too much time to track the uses.
1281 // Be conservative about the case the use may escape.
1282 isa<GetElementPtrInst>(Val: Inst) ||
           // Before opaque pointers were enabled, there would always be a
           // bitcast of the promise alloca. Now that opaque pointers are
           // enabled by default, this should be fine.
1287 isa<BitCastInst>(Val: Inst);
1288 });
1289 if (HasAccessingPromiseBeforeCB) {
1290 Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
1291 auto *G = GetFramePointer(PA);
1292 auto *Value = Builder.CreateLoad(Ty: PA->getAllocatedType(), Ptr: PA);
1293 Builder.CreateStore(Val: Value, Ptr: G);
1294 }
1295 }
1296}
1297
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
1300static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1301 BasicBlock *InsertedBB,
1302 BasicBlock *PredBB,
1303 PHINode *UntilPHI = nullptr) {
1304 auto *PN = cast<PHINode>(Val: &SuccBB->front());
1305 do {
1306 int Index = PN->getBasicBlockIndex(BB: InsertedBB);
1307 Value *V = PN->getIncomingValue(i: Index);
1308 PHINode *InputV = PHINode::Create(
1309 Ty: V->getType(), NumReservedValues: 1, NameStr: V->getName() + Twine(".") + SuccBB->getName());
1310 InputV->insertBefore(InsertPos: InsertedBB->begin());
1311 InputV->addIncoming(V, BB: PredBB);
1312 PN->setIncomingValue(i: Index, V: InputV);
1313 PN = dyn_cast<PHINode>(Val: PN->getNextNode());
1314 } while (PN != UntilPHI);
1315}
1316
1317// Rewrites the PHI Nodes in a cleanuppad.
1318static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1319 CleanupPadInst *CleanupPad) {
1320 // For every incoming edge to a CleanupPad we will create a new block holding
1321 // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
1323 // must be the same).
1324 //
1325 // cleanuppad:
1326 // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1327 // %3 = cleanuppad within none []
1328 //
1329 // It will create:
1330 //
1331 // cleanuppad.corodispatch
1332 // %2 = phi i8[0, %catchswitch], [1, %catch.1]
1333 // %3 = cleanuppad within none []
1334 // switch i8 % 2, label %unreachable
1335 // [i8 0, label %cleanuppad.from.catchswitch
1336 // i8 1, label %cleanuppad.from.catch.1]
1337 // cleanuppad.from.catchswitch:
1338 // %4 = phi i32 [%0, %catchswitch]
1339 // br %label cleanuppad
1340 // cleanuppad.from.catch.1:
1341 // %6 = phi i32 [%1, %catch.1]
1342 // br %label cleanuppad
1343 // cleanuppad:
1344 // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1345 // [%6, %cleanuppad.from.catch.1]
1346
1347 // Unreachable BB, in case switching on an invalid value in the dispatcher.
1348 auto *UnreachBB = BasicBlock::Create(
1349 Context&: CleanupPadBB->getContext(), Name: "unreachable", Parent: CleanupPadBB->getParent());
1350 IRBuilder<> Builder(UnreachBB);
1351 Builder.CreateUnreachable();
1352
1353 // Create a new cleanuppad which will be the dispatcher.
1354 auto *NewCleanupPadBB =
1355 BasicBlock::Create(Context&: CleanupPadBB->getContext(),
1356 Name: CleanupPadBB->getName() + Twine(".corodispatch"),
1357 Parent: CleanupPadBB->getParent(), InsertBefore: CleanupPadBB);
1358 Builder.SetInsertPoint(NewCleanupPadBB);
1359 auto *SwitchType = Builder.getInt8Ty();
1360 auto *SetDispatchValuePN =
1361 Builder.CreatePHI(Ty: SwitchType, NumReservedValues: pred_size(BB: CleanupPadBB));
1362 CleanupPad->removeFromParent();
1363 CleanupPad->insertAfter(InsertPos: SetDispatchValuePN->getIterator());
1364 auto *SwitchOnDispatch = Builder.CreateSwitch(V: SetDispatchValuePN, Dest: UnreachBB,
1365 NumCases: pred_size(BB: CleanupPadBB));
1366
1367 int SwitchIndex = 0;
1368 SmallVector<BasicBlock *, 8> Preds(predecessors(BB: CleanupPadBB));
1369 for (BasicBlock *Pred : Preds) {
1370 // Create a new cleanuppad and move the PHI values to there.
1371 auto *CaseBB = BasicBlock::Create(Context&: CleanupPadBB->getContext(),
1372 Name: CleanupPadBB->getName() +
1373 Twine(".from.") + Pred->getName(),
1374 Parent: CleanupPadBB->getParent(), InsertBefore: CleanupPadBB);
1375 updatePhiNodes(DestBB: CleanupPadBB, OldPred: Pred, NewPred: CaseBB);
1376 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1377 Pred->getName());
1378 Builder.SetInsertPoint(CaseBB);
1379 Builder.CreateBr(Dest: CleanupPadBB);
1380 movePHIValuesToInsertedBlock(SuccBB: CleanupPadBB, InsertedBB: CaseBB, PredBB: NewCleanupPadBB);
1381
1382 // Update this Pred to the new unwind point.
1383 setUnwindEdgeTo(TI: Pred->getTerminator(), Succ: NewCleanupPadBB);
1384
1385 // Setup the switch in the dispatcher.
1386 auto *SwitchConstant = ConstantInt::get(Ty: SwitchType, V: SwitchIndex);
1387 SetDispatchValuePN->addIncoming(V: SwitchConstant, BB: Pred);
1388 SwitchOnDispatch->addCase(OnVal: SwitchConstant, Dest: CaseBB);
1389 SwitchIndex++;
1390 }
1391}
1392
1393static void cleanupSinglePredPHIs(Function &F) {
1394 SmallVector<PHINode *, 32> Worklist;
1395 for (auto &BB : F) {
1396 for (auto &Phi : BB.phis()) {
1397 if (Phi.getNumIncomingValues() == 1) {
1398 Worklist.push_back(Elt: &Phi);
1399 } else
1400 break;
1401 }
1402 }
1403 while (!Worklist.empty()) {
1404 auto *Phi = Worklist.pop_back_val();
1405 auto *OriginalValue = Phi->getIncomingValue(i: 0);
1406 Phi->replaceAllUsesWith(V: OriginalValue);
1407 }
1408}
1409
1410static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
1413 //
1414 // loop:
1415 // %n.val = phi i32[%n, %entry], [%inc, %loop]
1416 //
1417 // It will create:
1418 //
1419 // loop.from.entry:
1420 // %n.loop.pre = phi i32 [%n, %entry]
1421 // br %label loop
1422 // loop.from.loop:
1423 // %inc.loop.pre = phi i32 [%inc, %loop]
1424 // br %label loop
1425 //
1426 // After this rewrite, further analysis will ignore any phi nodes with more
1427 // than one incoming edge.
1428
1429 // TODO: Simplify PHINodes in the basic block to remove duplicate
1430 // predecessors.
1431
1432 // Special case for CleanupPad: all EH blocks must have the same unwind edge
1433 // so we need to create an additional "dispatcher" block.
1434 if (!BB.empty()) {
1435 if (auto *CleanupPad =
1436 dyn_cast_or_null<CleanupPadInst>(Val: BB.getFirstNonPHIIt())) {
1437 SmallVector<BasicBlock *, 8> Preds(predecessors(BB: &BB));
1438 for (BasicBlock *Pred : Preds) {
1439 if (CatchSwitchInst *CS =
1440 dyn_cast<CatchSwitchInst>(Val: Pred->getTerminator())) {
          // CleanupPad with a CatchSwitch predecessor: therefore this is an
          // unwind destination that needs to be handled specially.
1443 assert(CS->getUnwindDest() == &BB);
1444 (void)CS;
1445 rewritePHIsForCleanupPad(CleanupPadBB: &BB, CleanupPad);
1446 return;
1447 }
1448 }
1449 }
1450 }
1451
1452 LandingPadInst *LandingPad = nullptr;
1453 PHINode *ReplPHI = nullptr;
1454 if (!BB.empty()) {
1455 if ((LandingPad =
1456 dyn_cast_or_null<LandingPadInst>(Val: BB.getFirstNonPHIIt()))) {
1457 // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1458 // We replace the original landing pad with a PHINode that will collect the
1459 // results from all of them.
1460 ReplPHI = PHINode::Create(Ty: LandingPad->getType(), NumReservedValues: 1, NameStr: "");
1461 ReplPHI->insertBefore(InsertPos: LandingPad->getIterator());
1462 ReplPHI->takeName(V: LandingPad);
1463 LandingPad->replaceAllUsesWith(V: ReplPHI);
1464 // We will erase the original landing pad at the end of this function after
1465 // ehAwareSplitEdge cloned it in the transition blocks.
1466 }
1467 }
1468
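  // Split every incoming edge; each predecessor gets a dedicated block into
  // which the PHI values for that edge are moved.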
1469 SmallVector<BasicBlock *, 8> Preds(predecessors(BB: &BB));
1470 for (BasicBlock *Pred : Preds) {
1471 auto *IncomingBB = ehAwareSplitEdge(BB: Pred, Succ: &BB, OriginalPad: LandingPad, LandingPadReplacement: ReplPHI);
1472 IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1473
1474 // Stop the moving of values at ReplPHI, as this is either null or the PHI
1475 // that replaced the landing pad.
1476 movePHIValuesToInsertedBlock(SuccBB: &BB, InsertedBB: IncomingBB, PredBB: Pred, UntilPHI: ReplPHI);
1477 }
1478
1479 if (LandingPad) {
    // The calls to ehAwareSplitEdge above cloned the original landing pad;
    // it is no longer needed.
1482 LandingPad->eraseFromParent();
1483 }
1484}
1485
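// Rewrite the PHIs of every block in F that starts with a PHI node having
// more than one incoming value.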
1486static void rewritePHIs(Function &F) {
1487 SmallVector<BasicBlock *, 8> WorkList;
1488
1489 for (BasicBlock &BB : F)
1490 if (auto *PN = dyn_cast<PHINode>(Val: &BB.front()))
1491 if (PN->getNumIncomingValues() > 1)
1492 WorkList.push_back(Elt: &BB);
1493
1494 for (BasicBlock *BB : WorkList)
1495 rewritePHIs(BB&: *BB);
1496}
1497
// Splits the block at a particular instruction, unless that instruction is
// already the first instruction of a block with a single predecessor (in
// which case the existing block is renamed and reused).
1500static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
1501 auto *BB = I->getParent();
1502 if (&BB->front() == I) {
1503 if (BB->getSinglePredecessor()) {
1504 BB->setName(Name);
1505 return BB;
1506 }
1507 }
1508 return BB->splitBasicBlock(I, BBName: Name);
1509}
1510
// Split above and below a particular instruction so that it ends up
// alone in its own block.
1513static void splitAround(Instruction *I, const Twine &Name) {
1514 splitBlockIfNotFirst(I, Name);
1515 splitBlockIfNotFirst(I: I->getNextNode(), Name: "After" + Name);
1516}
1517
1518/// After we split the coroutine, will the given basic block be along
1519/// an obvious exit path for the resumption function?
1520static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
1521 unsigned depth = 3) {
1522 // If we've bottomed out our depth count, stop searching and assume
1523 // that the path might loop back.
1524 if (depth == 0) return false;
1525
1526 // If this is a suspend block, we're about to exit the resumption function.
1527 if (coro::isSuspendBlock(BB))
1528 return true;
1529
1530 // Recurse into the successors.
1531 for (auto *Succ : successors(BB)) {
1532 if (!willLeaveFunctionImmediatelyAfter(BB: Succ, depth: depth - 1))
1533 return false;
1534 }
1535
1536 // If none of the successors leads back in a loop, we're on an exit/abort.
1537 return true;
1538}
1539
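/// Check whether a coro.alloca.alloc needs a stacksave/stackrestore pair,
/// i.e. whether any of its coro.alloca.free users is not obviously followed
/// by an exit from the resumption function.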
1540static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't obviously followed by either a suspend or
  // a termination, i.e. something that will leave the coro resumption
  // frame.
1544 for (auto *U : AI->users()) {
1545 auto FI = dyn_cast<CoroAllocaFreeInst>(Val: U);
1546 if (!FI) continue;
1547
1548 if (!willLeaveFunctionImmediatelyAfter(BB: FI->getParent()))
1549 return true;
1550 }
1551
1552 // If we never found one, we don't need a stack save.
1553 return false;
1554}
1555
1556/// Turn each of the given local allocas into a normal (dynamic) alloca
1557/// instruction.
1558static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
1559 SmallVectorImpl<Instruction*> &DeadInsts) {
1560 for (auto *AI : LocalAllocas) {
1561 IRBuilder<> Builder(AI);
1562
1563 // Save the stack depth. Try to avoid doing this if the stackrestore
1564 // is going to immediately precede a return or something.
1565 Value *StackSave = nullptr;
1566 if (localAllocaNeedsStackSave(AI))
1567 StackSave = Builder.CreateStackSave();
1568
1569 // Allocate memory.
1570 auto Alloca = Builder.CreateAlloca(Ty: Builder.getInt8Ty(), ArraySize: AI->getSize());
1571 Alloca->setAlignment(AI->getAlignment());
1572
1573 for (auto *U : AI->users()) {
1574 // Replace gets with the allocation.
1575 if (isa<CoroAllocaGetInst>(Val: U)) {
1576 U->replaceAllUsesWith(V: Alloca);
1577
1578 // Replace frees with stackrestores. This is safe because
1579 // alloca.alloc is required to obey a stack discipline, although we
1580 // don't enforce that structurally.
1581 } else {
1582 auto FI = cast<CoroAllocaFreeInst>(Val: U);
1583 if (StackSave) {
1584 Builder.SetInsertPoint(FI);
1585 Builder.CreateStackRestore(Ptr: StackSave);
1586 }
1587 }
1588 DeadInsts.push_back(Elt: cast<Instruction>(Val: U));
1589 }
1590
1591 DeadInsts.push_back(Elt: AI);
1592 }
1593}
1594
1595/// Get the current swifterror value.
1596static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
1597 coro::Shape &Shape) {
1598 // Make a fake function pointer as a sort of intrinsic.
1599 auto FnTy = FunctionType::get(Result: ValueTy, Params: {}, isVarArg: false);
1600 auto Fn = ConstantPointerNull::get(T: Builder.getPtrTy());
1601
1602 auto Call = Builder.CreateCall(FTy: FnTy, Callee: Fn, Args: {});
1603 Shape.SwiftErrorOps.push_back(Elt: Call);
1604
1605 return Call;
1606}
1607
1608/// Set the given value as the current swifterror value.
1609///
1610/// Returns a slot that can be used as a swifterror slot.
1611static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
1612 coro::Shape &Shape) {
1613 // Make a fake function pointer as a sort of intrinsic.
1614 auto FnTy = FunctionType::get(Result: Builder.getPtrTy(),
1615 Params: {V->getType()}, isVarArg: false);
1616 auto Fn = ConstantPointerNull::get(T: Builder.getPtrTy());
1617
1618 auto Call = Builder.CreateCall(FTy: FnTy, Callee: Fn, Args: { V });
1619 Shape.SwiftErrorOps.push_back(Elt: Call);
1620
1621 return Call;
1622}
1623
/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
1626///
1627/// Returns an address that will stand in for the swifterror slot
1628/// until splitting.
1629static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
1630 AllocaInst *Alloca,
1631 coro::Shape &Shape) {
1632 auto ValueTy = Alloca->getAllocatedType();
1633 IRBuilder<> Builder(Call);
1634
1635 // Load the current value from the alloca and set it as the
1636 // swifterror value.
1637 auto ValueBeforeCall = Builder.CreateLoad(Ty: ValueTy, Ptr: Alloca);
1638 auto Addr = emitSetSwiftErrorValue(Builder, V: ValueBeforeCall, Shape);
1639
1640 // Move to after the call. Since swifterror only has a guaranteed
1641 // value on normal exits, we can ignore implicit and explicit unwind
1642 // edges.
1643 if (isa<CallInst>(Val: Call)) {
1644 Builder.SetInsertPoint(Call->getNextNode());
1645 } else {
1646 auto Invoke = cast<InvokeInst>(Val: Call);
1647 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
1648 }
1649
1650 // Get the current swifterror value and store it to the alloca.
1651 auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
1652 Builder.CreateStore(Val: ValueAfterCall, Ptr: Alloca);
1653
1654 return Addr;
1655}
1656
1657/// Eliminate a formerly-swifterror alloca by inserting the get/set
1658/// intrinsics and attempting to MemToReg the alloca away.
1659static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
1660 coro::Shape &Shape) {
1661 for (Use &Use : llvm::make_early_inc_range(Range: Alloca->uses())) {
1662 // swifterror values can only be used in very specific ways.
1663 // We take advantage of that here.
1664 auto User = Use.getUser();
1665 if (isa<LoadInst>(Val: User) || isa<StoreInst>(Val: User))
1666 continue;
1667
1668 assert(isa<CallInst>(User) || isa<InvokeInst>(User));
1669 auto Call = cast<Instruction>(Val: User);
1670
1671 auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
1672
1673 // Use the returned slot address as the call argument.
1674 Use.set(Addr);
1675 }
1676
1677 // All the uses should be loads and stores now.
1678 assert(isAllocaPromotable(Alloca));
1679}
1680
/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
1683///
1684/// The argument keeps the swifterror flag.
1685static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
1686 coro::Shape &Shape,
1687 SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
1688 IRBuilder<> Builder(&F.getEntryBlock(),
1689 F.getEntryBlock().getFirstNonPHIOrDbg());
1690
1691 auto ArgTy = cast<PointerType>(Val: Arg.getType());
1692 auto ValueTy = PointerType::getUnqual(C&: F.getContext());
1693
1694 // Reduce to the alloca case:
1695
1696 // Create an alloca and replace all uses of the arg with it.
1697 auto Alloca = Builder.CreateAlloca(Ty: ValueTy, AddrSpace: ArgTy->getAddressSpace());
1698 Arg.replaceAllUsesWith(V: Alloca);
1699
1700 // Set an initial value in the alloca. swifterror is always null on entry.
1701 auto InitialValue = Constant::getNullValue(Ty: ValueTy);
1702 Builder.CreateStore(Val: InitialValue, Ptr: Alloca);
1703
1704 // Find all the suspends in the function and save and restore around them.
1705 for (auto *Suspend : Shape.CoroSuspends) {
1706 (void) emitSetAndGetSwiftErrorValueAround(Call: Suspend, Alloca, Shape);
1707 }
1708
1709 // Find all the coro.ends in the function and restore the error value.
1710 for (auto *End : Shape.CoroEnds) {
1711 Builder.SetInsertPoint(End);
1712 auto FinalValue = Builder.CreateLoad(Ty: ValueTy, Ptr: Alloca);
1713 (void) emitSetSwiftErrorValue(Builder, V: FinalValue, Shape);
1714 }
1715
1716 // Now we can use the alloca logic.
1717 AllocasToPromote.push_back(Elt: Alloca);
1718 eliminateSwiftErrorAlloca(F, Alloca, Shape);
1719}
1720
1721/// Eliminate all problematic uses of swifterror arguments and allocas
1722/// from the function. We'll fix them up later when splitting the function.
1723static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
1724 SmallVector<AllocaInst*, 4> AllocasToPromote;
1725
1726 // Look for a swifterror argument.
1727 for (auto &Arg : F.args()) {
1728 if (!Arg.hasSwiftErrorAttr()) continue;
1729
1730 eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
1731 break;
1732 }
1733
1734 // Look for swifterror allocas.
1735 for (auto &Inst : F.getEntryBlock()) {
1736 auto Alloca = dyn_cast<AllocaInst>(Val: &Inst);
1737 if (!Alloca || !Alloca->isSwiftError()) continue;
1738
1739 // Clear the swifterror flag.
1740 Alloca->setSwiftError(false);
1741
1742 AllocasToPromote.push_back(Elt: Alloca);
1743 eliminateSwiftErrorAlloca(F, Alloca, Shape);
1744 }
1745
1746 // If we have any allocas to promote, compute a dominator tree and
1747 // promote them en masse.
1748 if (!AllocasToPromote.empty()) {
1749 DominatorTree DT(F);
1750 PromoteMemToReg(Allocas: AllocasToPromote, DT);
1751 }
1752}
1753
/// For each local variable whose uses are all confined to one region between
/// suspend points, sink its lifetime.start marker into that region (to the
/// entry block or to the block immediately following the suspend). Doing so
/// minimizes the lifetime of each variable, hence minimizing the amount of
/// data we end up putting on the frame.
1758static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
1759 SuspendCrossingInfo &Checker,
1760 const DominatorTree &DT) {
1761 if (F.hasOptNone())
1762 return;
1763
  // Collect the candidate basic blocks that may dominate all uses of an
  // alloca: the entry block and the block following each suspend.
1765 SmallPtrSet<BasicBlock *, 4> DomSet;
1766 DomSet.insert(Ptr: &F.getEntryBlock());
1767 for (auto *CSI : Shape.CoroSuspends) {
1768 BasicBlock *SuspendBlock = CSI->getParent();
1769 assert(coro::isSuspendBlock(SuspendBlock) &&
1770 SuspendBlock->getSingleSuccessor() &&
1771 "should have split coro.suspend into its own block");
1772 DomSet.insert(Ptr: SuspendBlock->getSingleSuccessor());
1773 }
1774
1775 for (Instruction &I : instructions(F)) {
1776 AllocaInst* AI = dyn_cast<AllocaInst>(Val: &I);
1777 if (!AI)
1778 continue;
1779
1780 for (BasicBlock *DomBB : DomSet) {
1781 bool Valid = true;
1782 SmallVector<Instruction *, 1> Lifetimes;
1783
1784 auto isLifetimeStart = [](Instruction* I) {
1785 if (auto* II = dyn_cast<IntrinsicInst>(Val: I))
1786 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1787 return false;
1788 };
1789
1790 auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
1791 if (isLifetimeStart(U)) {
1792 Lifetimes.push_back(Elt: U);
1793 return true;
1794 }
1795 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
1796 return false;
1797 if (isLifetimeStart(U->user_back())) {
1798 Lifetimes.push_back(Elt: U->user_back());
1799 return true;
1800 }
1801 return false;
1802 };
1803
1804 for (User *U : AI->users()) {
1805 Instruction *UI = cast<Instruction>(Val: U);
        // For all users other than lifetime.start markers: if every user is
        // dominated by one of the candidate blocks and its use does not
        // cross a suspend point, then there is no need to spill the
        // instruction.
1810 if (!DT.dominates(A: DomBB, B: UI->getParent()) ||
1811 Checker.isDefinitionAcrossSuspend(DefBB: DomBB, U: UI)) {
1812 // Skip lifetime.start, GEP and bitcast used by lifetime.start
1813 // markers.
1814 if (collectLifetimeStart(UI, AI))
1815 continue;
1816 Valid = false;
1817 break;
1818 }
1819 }
      // Sink the lifetime.start markers to the dominating block when they
      // are the only uses outside the region.
1822 if (Valid && Lifetimes.size() != 0) {
1823 auto *NewLifetime = Lifetimes[0]->clone();
1824 NewLifetime->replaceUsesOfWith(From: NewLifetime->getOperand(i: 1), To: AI);
1825 NewLifetime->insertBefore(InsertPos: DomBB->getTerminator()->getIterator());
1826
        // The lifetime.start markers outside the region are no longer
        // necessary.
1828 for (Instruction *S : Lifetimes)
1829 S->eraseFromParent();
1830
1831 break;
1832 }
1833 }
1834 }
1835}
1836
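/// Chase the debug variable's storage backwards through loads, stores and
/// other salvageable instructions, rewriting the DIExpression along the way,
/// until a base storage (typically the coroutine frame argument or an
/// alloca) is reached. Returns the rewritten storage/expression pair, or
/// std::nullopt if the location could not be salvaged.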
1837static std::optional<std::pair<Value &, DIExpression &>>
1838salvageDebugInfoImpl(SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
1839 bool UseEntryValue, Function *F, Value *Storage,
1840 DIExpression *Expr, bool SkipOutermostLoad) {
1841 IRBuilder<> Builder(F->getContext());
1842 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
1843 while (isa<IntrinsicInst>(Val: InsertPt))
1844 ++InsertPt;
1845 Builder.SetInsertPoint(TheBB: &F->getEntryBlock(), IP: InsertPt);
1846
1847 while (auto *Inst = dyn_cast_or_null<Instruction>(Val: Storage)) {
1848 if (auto *LdInst = dyn_cast<LoadInst>(Val: Inst)) {
1849 Storage = LdInst->getPointerOperand();
1850 // FIXME: This is a heuristic that works around the fact that
1851 // LLVM IR debug intrinsics cannot yet distinguish between
1852 // memory and value locations: Because a dbg.declare(alloca) is
1853 // implicitly a memory location no DW_OP_deref operation for the
1854 // last direct load from an alloca is necessary. This condition
1855 // effectively drops the *last* DW_OP_deref in the expression.
1856 if (!SkipOutermostLoad)
1857 Expr = DIExpression::prepend(Expr, Flags: DIExpression::DerefBefore);
1858 } else if (auto *StInst = dyn_cast<StoreInst>(Val: Inst)) {
1859 Storage = StInst->getValueOperand();
1860 } else {
1861 SmallVector<uint64_t, 16> Ops;
1862 SmallVector<Value *, 0> AdditionalValues;
1863 Value *Op = llvm::salvageDebugInfoImpl(
1864 I&: *Inst, CurrentLocOps: Expr ? Expr->getNumLocationOperands() : 0, Ops,
1865 AdditionalValues);
1866 if (!Op || !AdditionalValues.empty()) {
1867 // If salvaging failed or salvaging produced more than one location
1868 // operand, give up.
1869 break;
1870 }
1871 Storage = Op;
1872 Expr = DIExpression::appendOpsToArg(Expr, Ops, ArgNo: 0, /*StackValue*/ false);
1873 }
1874 SkipOutermostLoad = false;
1875 }
1876 if (!Storage)
1877 return std::nullopt;
1878
1879 auto *StorageAsArg = dyn_cast<Argument>(Val: Storage);
1880 const bool IsSwiftAsyncArg =
1881 StorageAsArg && StorageAsArg->hasAttribute(Kind: Attribute::SwiftAsync);
1882
1883 // Swift async arguments are described by an entry value of the ABI-defined
1884 // register containing the coroutine context.
1885 // Entry values in variadic expressions are not supported.
1886 if (IsSwiftAsyncArg && UseEntryValue && !Expr->isEntryValue() &&
1887 Expr->isSingleLocationExpression())
1888 Expr = DIExpression::prepend(Expr, Flags: DIExpression::EntryValue);
1889
1890 // If the coroutine frame is an Argument, store it in an alloca to improve
1891 // its availability (e.g. registers may be clobbered).
1892 // Avoid this if the value is guaranteed to be available through other means
1893 // (e.g. swift ABI guarantees).
1894 if (StorageAsArg && !IsSwiftAsyncArg) {
1895 auto &Cached = ArgToAllocaMap[StorageAsArg];
1896 if (!Cached) {
1897 Cached = Builder.CreateAlloca(Ty: Storage->getType(), AddrSpace: 0, ArraySize: nullptr,
1898 Name: Storage->getName() + ".debug");
1899 Builder.CreateStore(Val: Storage, Ptr: Cached);
1900 }
1901 Storage = Cached;
1902 // FIXME: LLVM lacks nuanced semantics to differentiate between
1903 // memory and direct locations at the IR level. The backend will
1904 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
1905 // location. Thus, if there are deref and offset operations in the
1906 // expression, we need to add a DW_OP_deref at the *start* of the
1907 // expression to first load the contents of the alloca before
1908 // adjusting it with the expression.
1909 Expr = DIExpression::prepend(Expr, Flags: DIExpression::DerefBefore);
1910 }
1911
1912 Expr = Expr->foldConstantMath();
1913 return {{*Storage, *Expr}};
1914}
1915
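/// Salvage the location described by \p DVI: rewrite its location operand
/// and expression via salvageDebugInfoImpl and, for dbg.declare, hoist the
/// intrinsic next to the definition of its new storage.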
1916void coro::salvageDebugInfo(
1917 SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
1918 DbgVariableIntrinsic &DVI, bool UseEntryValue) {
1919
1920 Function *F = DVI.getFunction();
1921 // Follow the pointer arithmetic all the way to the incoming
1922 // function argument and convert into a DIExpression.
1923 bool SkipOutermostLoad = !isa<DbgValueInst>(Val: DVI);
1924 Value *OriginalStorage = DVI.getVariableLocationOp(OpIdx: 0);
1925
1926 auto SalvagedInfo =
1927 ::salvageDebugInfoImpl(ArgToAllocaMap, UseEntryValue, F, Storage: OriginalStorage,
1928 Expr: DVI.getExpression(), SkipOutermostLoad);
1929 if (!SalvagedInfo)
1930 return;
1931
1932 Value *Storage = &SalvagedInfo->first;
1933 DIExpression *Expr = &SalvagedInfo->second;
1934
1935 DVI.replaceVariableLocationOp(OldValue: OriginalStorage, NewValue: Storage);
1936 DVI.setExpression(Expr);
  // We only hoist dbg.declare today, since it does not make sense to hoist
  // dbg.value: dbg.value does not have the same function-wide guarantees
  // that dbg.declare does.
1940 if (isa<DbgDeclareInst>(Val: DVI)) {
1941 std::optional<BasicBlock::iterator> InsertPt;
1942 if (auto *I = dyn_cast<Instruction>(Val: Storage)) {
1943 InsertPt = I->getInsertionPointAfterDef();
1944 // Update DILocation only if variable was not inlined.
1945 DebugLoc ILoc = I->getDebugLoc();
1946 DebugLoc DVILoc = DVI.getDebugLoc();
1947 if (ILoc && DVILoc &&
1948 DVILoc->getScope()->getSubprogram() ==
1949 ILoc->getScope()->getSubprogram())
1950 DVI.setDebugLoc(I->getDebugLoc());
1951 } else if (isa<Argument>(Val: Storage))
1952 InsertPt = F->getEntryBlock().begin();
1953 if (InsertPt)
1954 DVI.moveBefore(BB&: *(*InsertPt)->getParent(), I: *InsertPt);
1955 }
1956}
1957
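/// Equivalent to the overload above, but operating on DbgVariableRecords,
/// the non-instruction debug-info representation.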
1958void coro::salvageDebugInfo(
1959 SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
1960 DbgVariableRecord &DVR, bool UseEntryValue) {
1961
1962 Function *F = DVR.getFunction();
1963 // Follow the pointer arithmetic all the way to the incoming
1964 // function argument and convert into a DIExpression.
1965 bool SkipOutermostLoad = DVR.isDbgDeclare();
1966 Value *OriginalStorage = DVR.getVariableLocationOp(OpIdx: 0);
1967
1968 auto SalvagedInfo =
1969 ::salvageDebugInfoImpl(ArgToAllocaMap, UseEntryValue, F, Storage: OriginalStorage,
1970 Expr: DVR.getExpression(), SkipOutermostLoad);
1971 if (!SalvagedInfo)
1972 return;
1973
1974 Value *Storage = &SalvagedInfo->first;
1975 DIExpression *Expr = &SalvagedInfo->second;
1976
1977 DVR.replaceVariableLocationOp(OldValue: OriginalStorage, NewValue: Storage);
1978 DVR.setExpression(Expr);
  // We only hoist dbg.declare today, since it does not make sense to hoist
  // dbg.value: dbg.value does not have the same function-wide guarantees
  // that dbg.declare does.
1982 if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
1983 std::optional<BasicBlock::iterator> InsertPt;
1984 if (auto *I = dyn_cast<Instruction>(Val: Storage)) {
1985 InsertPt = I->getInsertionPointAfterDef();
1986 // Update DILocation only if variable was not inlined.
1987 DebugLoc ILoc = I->getDebugLoc();
1988 DebugLoc DVRLoc = DVR.getDebugLoc();
1989 if (ILoc && DVRLoc &&
1990 DVRLoc->getScope()->getSubprogram() ==
1991 ILoc->getScope()->getSubprogram())
1992 DVR.setDebugLoc(ILoc);
1993 } else if (isa<Argument>(Val: Storage))
1994 InsertPt = F->getEntryBlock().begin();
1995 if (InsertPt) {
1996 DVR.removeFromParent();
1997 (*InsertPt)->getParent()->insertDbgRecordBefore(DR: &DVR, Here: *InsertPt);
1998 }
1999 }
2000}
2001
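// Prepare the coroutine for frame building: lower swifterror uses, split
// coro.save/coro.suspend/coro.end intrinsics into their own blocks, and
// rewrite PHIs so that no value feeding a PHI has its definition separated
// from the PHI by a suspend point.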
2002void coro::normalizeCoroutine(Function &F, coro::Shape &Shape,
2003 TargetTransformInfo &TTI) {
2004 // Don't eliminate swifterror in async functions that won't be split.
2005 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2006 eliminateSwiftError(F, Shape);
2007
2008 if (Shape.ABI == coro::ABI::Switch &&
2009 Shape.SwitchLowering.PromiseAlloca) {
2010 Shape.getSwitchCoroId()->clearPromise();
2011 }
2012
2013 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2014 // intrinsics are in their own blocks to simplify the logic of building up
2015 // SuspendCrossing data.
2016 for (auto *CSI : Shape.CoroSuspends) {
2017 if (auto *Save = CSI->getCoroSave())
2018 splitAround(I: Save, Name: "CoroSave");
2019 splitAround(I: CSI, Name: "CoroSuspend");
2020 }
2021
2022 // Put CoroEnds into their own blocks.
2023 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2024 splitAround(I: CE, Name: "CoroEnd");
2025
2026 // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed
    // for the values used by the musttail call. (Arguments passed directly
    // to the coro.end instruction would be ignored.)
2030 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(Val: CE)) {
2031 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2032 if (!MustTailCallFn)
2033 continue;
2034 IRBuilder<> Builder(AsyncEnd);
2035 SmallVector<Value *, 8> Args(AsyncEnd->args());
2036 auto Arguments = ArrayRef<Value *>(Args).drop_front(N: 3);
2037 auto *Call = coro::createMustTailCall(
2038 Loc: AsyncEnd->getDebugLoc(), MustTailCallFn, TTI, Arguments, Builder);
2039 splitAround(I: Call, Name: "MustTailCall.Before.CoroEnd");
2040 }
2041 }
2042
  // Later code makes structural assumptions about single-predecessor PHIs,
  // e.g. that they are not live across a suspend point.
2045 cleanupSinglePredPHIs(F);
2046
  // Transform multi-edge PHI nodes so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
2049 rewritePHIs(F);
2050}
2051
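// Build the coroutine frame: rematerialize materializable instructions
// instead of spilling them, collect the spills and allocas that must live
// in the frame, lay out the frame type, and rewrite their uses to go
// through the frame.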
2052void coro::BaseABI::buildCoroutineFrame(bool OptimizeFrame) {
2053 SuspendCrossingInfo Checker(F, Shape.CoroSuspends, Shape.CoroEnds);
2054 doRematerializations(F, Checker, IsMaterializable);
2055
2056 const DominatorTree DT(F);
2057 if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2058 Shape.ABI != coro::ABI::RetconOnce)
2059 sinkLifetimeStartMarkers(F, Shape, Checker, DT);
2060
  // All values (that are not allocas) that need to be spilled to the frame.
2062 coro::SpillInfo Spills;
2063 // All values defined as allocas that need to live in the frame.
2064 SmallVector<coro::AllocaInfo, 8> Allocas;
2065
2066 // Collect the spills for arguments and other not-materializable values.
2067 coro::collectSpillsFromArgs(Spills, F, Checker);
2068 SmallVector<Instruction *, 4> DeadInstructions;
2069 SmallVector<CoroAllocaAllocInst *, 4> LocalAllocas;
2070 coro::collectSpillsAndAllocasFromInsts(Spills, Allocas, DeadInstructions,
2071 LocalAllocas, F, Checker, DT, Shape);
2072 coro::collectSpillsFromDbgInfo(Spills, F, Checker);
2073
2074 LLVM_DEBUG(dumpAllocas(Allocas));
2075 LLVM_DEBUG(dumpSpills("Spills", Spills));
2076
2077 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2078 Shape.ABI == coro::ABI::Async)
2079 sinkSpillUsesAfterCoroBegin(DT, CoroBegin: Shape.CoroBegin, Spills, Allocas);
2080
2081 // Build frame
2082 FrameDataInfo FrameData(Spills, Allocas);
2083 Shape.FrameTy = buildFrameType(F, Shape, FrameData, OptimizeFrame);
2084 Shape.FramePtr = Shape.CoroBegin;
2085 // For now, this works for C++ programs only.
2086 buildFrameDebugInfo(F, Shape, FrameData);
2087 // Insert spills and reloads
2088 insertSpills(FrameData, Shape);
2089 lowerLocalAllocas(LocalAllocas, DeadInsts&: DeadInstructions);
2090
2091 for (auto *I : DeadInstructions)
2092 I->eraseFromParent();
2093}
2094