//===- SpillUtils.cpp - Utilities for checking for spills ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Coroutines/SpillUtils.h"
#include "CoroInternal.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace llvm::coro;

typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet;

static bool isNonSpilledIntrinsic(Instruction &I) {
  // Structural coroutine intrinsics that should not be spilled into the
  // coroutine frame.
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I);
}

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set. If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (coro::isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto *Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
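///
/// For illustration (a hand-written sketch, not verbatim LangRef text; the
/// coro.alloca intrinsic signatures are abbreviated), an allocation of this
/// shape is "local" because its free is reached before any suspend:
///
///   %t = call token @llvm.coro.alloca.alloc(i64 8, i32 8)
///   %p = call ptr @llvm.coro.alloca.get(token %t)
///   ...                                   ; no suspend reachable from here
///   call void @llvm.coro.alloca.free(token %t)
///   %s = call i8 @llvm.coro.suspend(token none, i1 false)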
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't pass them up.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto *User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}

/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
static Instruction *
lowerNonLocalAlloca(CoroAllocaAllocInst *AI, const Shape &Shape,
                    SmallVectorImpl<Instruction *> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}
// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI in a block.
//
// Split the catchswitch away into a separate block and insert in its place:
//
//    cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as the insert point for the spill.
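//
// For illustration (a hand-written sketch, not verbatim compiler output),
// a block such as
//
//   bb:
//     %p = phi i32 ...
//     %cs = catchswitch within none [label %handler] unwind to caller
//
// becomes
//
//   bb:
//     %p = phi i32 ...
//     %pad = cleanuppad within none []
//     cleanupret from %pad unwind label %bb.split   ; spills inserted here
//   bb.split:
//     %cs = catchswitch within none [label %handler] unwind to caller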
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
// 1. Should this alloca be allocated on the frame instead?
// 2. Could the content of the alloca be modified prior to CoroBegin, which
//    would require copying the data from the alloca to the frame after
//    CoroBegin?
// 3. Are there any aliases created for this alloca prior to CoroBegin, but
//    used after CoroBegin? In that case, we will need to recreate the alias
//    after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
// A. List of all BasicBlocks that use this alloca or any of the aliases of
//    the alloca. In the end, we check whether any pair of these basic blocks
//    crosses a suspension point. If so, this alloca must be put on the frame.
// B. Whether the alloca or any alias of the alloca escapes at some point,
//    either because the address is stored somewhere, or because the address
//    is used in a function call that might capture. If it ever escapes, this
//    alloca must conservatively be put on the frame.
//
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether this instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. std::optional is used to
// be able to represent the case when the offset is unknown (e.g. when you have
// a PHINode that takes in different offset values). We cannot handle unknown
// offsets and will assert. This is the remaining open issue; an ideal
// solution would likely require a significant redesign.
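//
// As a hedged sketch of question 3: the GEP below is an alias created before
// CoroBegin but used after it, so it is recorded (here with offset 4) and
// must later be rebuilt from the frame copy of %a:
//
//   %a   = alloca [2 x i32]
//   %gep = getelementptr inbounds [2 x i32], ptr %a, i64 0, i64 1
//   %hdl = call ptr @llvm.coro.begin(token %id, ptr null)
//   ...
//   store i32 0, ptr %gep   ; use of the pre-CoroBegin alias after CoroBegin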

namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const coro::Shape &CoroShape,
                   const SuspendCrossingInfo &Checker,
                   bool ShouldUseLifetimeStartInfo)
      : PtrUseVisitor(DL), DT(DT), CoroShape(CoroShape), Checker(Checker),
        ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {
    for (AnyCoroSuspendInst *SuspendInst : CoroShape.CoroSuspends)
      CoroSuspendBBs.insert(SuspendInst->getParent());
  }

  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it
    // would be written into before CoroBegin as well.
    if (PI.isEscaped() &&
        !DT.dominates(CoroShape.CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer-based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitInsertElementInst(InsertElementInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }
  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we have to assume the alloca is written to.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //   %ptr = alloca ..
    //   %addr = alloca ..
    //   store %ptr, %addr
    //   %x = load %addr
    //   ..
    // If %addr is only used by loading from it, we can simply treat %x as
    // another alias of %ptr, and not consider %ptr to have escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory location, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (isa<LifetimeIntrinsic>(U))
            continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitIntrinsicInst(IntrinsicInst &II) {
    switch (II.getIntrinsicID()) {
    default:
      return Base::visitIntrinsicInst(II);
    case Intrinsic::lifetime_start:
      LifetimeStarts.insert(&II);
      LifetimeStartBBs.push_back(II.getParent());
      break;
    case Intrinsic::lifetime_end:
      LifetimeEndBBs.insert(II.getParent());
      break;
    }
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return *ShouldLiveOnFrame;
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, std::optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const coro::Shape &CoroShape;
  const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst that are created before CoroBegin
  // and used after CoroBegin. Each entry contains the instruction and the
  // offset into the original alloca. They need to be recreated after
  // CoroBegin based off the frame.
  DenseMap<Instruction *, std::optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<Instruction *, 4> Users{};
  SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
  SmallVector<BasicBlock *> LifetimeStartBBs{};
  SmallPtrSet<BasicBlock *, 2> LifetimeEndBBs{};
  SmallPtrSet<const BasicBlock *, 2> CoroSuspendBBs{};
  bool MayWriteBeforeCoroBegin{false};
  bool ShouldUseLifetimeStartInfo{true};

  mutable std::optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. For every lifetime.start intrinsic and every basic block
    // that uses the pointer, we check whether the pair crosses a suspension
    // point. The uses cover both direct uses and indirect uses.
    if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
      // If there is no explicit lifetime.end, then assume the address can
      // cross suspension points.
      if (LifetimeEndBBs.empty())
        return true;

      // If there is a path from a lifetime.start to a suspend without a
      // corresponding lifetime.end, then the alloca's lifetime persists
      // beyond that suspension point and the alloca must go on the frame.
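      //
      // Sketch (pseudo-IR; lifetime markers abbreviated):
      //
      //   entry:
      //     lifetime.start(%a)
      //     br i1 %c, label %suspend.bb, label %done.bb
      //   suspend.bb:                 ; reachable without a lifetime.end
      //     %s = call i8 @llvm.coro.suspend(...)
      //   done.bb:
      //     lifetime.end(%a)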
      llvm::SmallVector<BasicBlock *> Worklist(LifetimeStartBBs);
      if (isManyPotentiallyReachableFromMany(Worklist, CoroSuspendBBs,
                                             &LifetimeEndBBs, &DT))
        return true;

      // Addresses are guaranteed to be identical after every lifetime.start,
      // so we cannot use the local stack if the address escaped and there is
      // a suspend point between lifetime markers. This should also cover the
      // case of a single lifetime.start intrinsic in a loop with a suspend
      // point.
      if (PI.isEscaped()) {
        for (auto *A : LifetimeStarts) {
          for (auto *B : LifetimeStarts) {
            if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),
                                                          B->getParent()))
              return true;
          }
        }
      }
      return false;
    }
    // FIXME: Ideally the isEscaped check should come at the beginning.
    // However there are a few loose ends that need to be fixed first before
    // we can do that. We need to make sure we are not over-conservative, so
    // that the data accessed in-between await_suspend and symmetric transfer
    // is always put on the stack, and also that data accessed after coro.end
    // is always put on the stack (especially the return object). To fix that,
    // we need to:
    // 1) Potentially treat sret as nocapture in calls
    // 2) Specially handle the return object and put it on the stack
    // 3) Utilize the lifetime.end intrinsic
    if (PI.isEscaped())
      return true;

    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(CoroShape.CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(CoroShape.CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after it.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(CoroShape.CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto [Itr, Inserted] = AliasOffsetMap.try_emplace(&I, Offset);
      if (!Inserted && Itr->second && *Itr->second != Offset) {
        // If we have seen two different possible values for this alias, we
        // set it to empty.
        Itr->second.reset();
      }
    }
  }
};
} // namespace

static void collectFrameAlloca(AllocaInst *AI, const coro::Shape &Shape,
                               const SuspendCrossingInfo &Checker,
                               SmallVectorImpl<AllocaInfo> &Allocas,
                               const DominatorTree &DT) {
  if (Shape.CoroSuspends.empty())
    return;

  // The PromiseAlloca will be specially handled since it needs to be in a
  // fixed position in the frame.
  if (AI == Shape.SwitchLowering.PromiseAlloca)
    return;

  // The __coro_gro alloca should outlive the promise, so make sure we
  // keep it outside the frame.
  if (AI->hasMetadata(LLVMContext::MD_coro_outside_frame))
    return;

  // The code that uses the lifetime.start intrinsic does not work for
  // functions with loops that have no exit. Disable it on ABIs we know to
  // generate such code.
  bool ShouldUseLifetimeStartInfo =
      (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
       Shape.ABI != coro::ABI::RetconOnce);
  AllocaUseVisitor Visitor{AI->getDataLayout(), DT, Shape, Checker,
                           ShouldUseLifetimeStartInfo};
  Visitor.visitPtr(*AI);
  if (!Visitor.getShouldLiveOnFrame())
    return;
  Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                       Visitor.getMayWriteBeforeCoroBegin());
}

void coro::collectSpillsFromArgs(SpillInfo &Spills, Function &F,
                                 const SuspendCrossingInfo &Checker) {
  // Collect the spills for arguments and other not-materializable values.
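  //
  // E.g. (sketch): an argument %n referenced on the far side of a suspend
  // must be spilled, since the resume function cannot see the original
  // argument:
  //
  //   %s = call i8 @llvm.coro.suspend(token none, i1 false)
  //   ...
  //   %v = add i32 %n, 1   ; %n is live across the suspend -> spill it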
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        Spills[&A].push_back(cast<Instruction>(U));
}

void coro::collectSpillsAndAllocasFromInsts(
    SpillInfo &Spills, SmallVector<AllocaInfo, 8> &Allocas,
    SmallVector<Instruction *, 4> &DeadInstructions,
    SmallVector<CoroAllocaAllocInst *, 4> &LocalAllocas, Function &F,
    const SuspendCrossingInfo &Checker, const DominatorTree &DT,
    const coro::Shape &Shape) {

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the coroutine frame.
    if (isNonSpilledIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value. The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process this as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (auto *AI = dyn_cast<AllocaInst>(&I)) {
      collectFrameAlloca(AI, Shape, Checker, Allocas, DT);
      continue;
    }

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        Spills[&I].push_back(cast<Instruction>(U));
      }
  }
}

void coro::collectSpillsFromDbgInfo(SpillInfo &Spills, Function &F,
                                    const SuspendCrossingInfo &Checker) {
  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only salvage debug records whose values are already
  // in the frame. Debug records for allocas are handled specially.
  for (auto &Iter : Spills) {
    auto *V = Iter.first;
    SmallVector<DbgVariableRecord *, 16> DVRs;
    findDbgValues(V, DVRs);
    // Add the instructions which carry debug info that is in the frame.
    for (DbgVariableRecord *DVR : DVRs)
      if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
        Spills[V].push_back(DVR->Marker->MarkedInstr);
  }
}

/// Async and Retcon{Once} conventions assume that all spill uses can be sunk
/// after the coro.begin intrinsic.
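///
/// For illustration (a hedged sketch), a user of a spilled value emitted
/// before coro.begin, e.g.
///
///   %g = getelementptr inbounds i32, ptr %alloca, i64 1
///   %hdl = call ptr @llvm.coro.begin(token %id, ptr null)
///
/// is moved to just after coro.begin, so that all spill uses can later be
/// rewritten against the frame:
///
///   %hdl = call ptr @llvm.coro.begin(token %id, ptr null)
///   %g = getelementptr inbounds i32, ptr %alloca, i64 1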
void coro::sinkSpillUsesAfterCoroBegin(
    const DominatorTree &Dom, CoroBeginInst *CoroBegin, coro::SpillInfo &Spills,
    SmallVectorImpl<coro::AllocaInfo> &Allocas) {
  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  auto collectUsers = [&](Value *Def) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  };
  for (auto &I : Spills)
    collectUsers(I.first);
  for (auto &I : Allocas)
    collectUsers(I.Alloca);

  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.pop_back_val();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B it should precede (<) B.
    return Dom.dominates(A, B);
  });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt->getIterator());
}

BasicBlock::iterator coro::getSpillInsertionPt(const coro::Shape &Shape,
                                               Value *Def,
                                               const DominatorTree &DT) {
  BasicBlock::iterator InsertPt;
  if (auto *Arg = dyn_cast<Argument>(Def)) {
    // For arguments, we will place the store instruction right after
    // the coroutine frame pointer instruction, i.e. coro.begin.
    InsertPt = Shape.getInsertPtAfterFramePtr();

    // If we're spilling an Argument, make sure we clear 'captures'
    // from the coroutine function.
    Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::Captures);
  } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
    // Don't spill immediately after a suspend; splitting assumes
    // that the suspend will be followed by a branch.
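    //
    // Sketch: for
    //
    //   susp:
    //     %s = call i8 @llvm.coro.suspend(token none, i1 false)
    //     br label %resume
    //
    // the spill is placed at the first non-PHI position of %resume.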
    InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
  } else {
    auto *I = cast<Instruction>(Def);
    if (!DT.dominates(Shape.CoroBegin, I)) {
      // If it is not dominated by CoroBegin, then the spill should be
      // inserted immediately after the coroutine frame pointer is computed.
      InsertPt = Shape.getInsertPtAfterFramePtr();
    } else if (auto *II = dyn_cast<InvokeInst>(I)) {
      // If we are spilling the result of an invoke instruction, split
      // the normal edge and insert the spill in the new block.
      auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
      InsertPt = NewBB->getTerminator()->getIterator();
    } else if (isa<PHINode>(I)) {
      // Skip PHI nodes and EH pad instructions.
      BasicBlock *DefBlock = I->getParent();
      if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
        InsertPt = splitBeforeCatchSwitch(CSI)->getIterator();
      else
        InsertPt = DefBlock->getFirstInsertionPt();
    } else {
      assert(!I->isTerminator() && "unexpected terminator");
      // For all other values, the spill is placed immediately after
      // the definition.
      InsertPt = I->getNextNode()->getIterator();
    }
  }

  return InsertPt;
}