//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//
// TODO: pack values tightly using liveness info.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between blocks and their indices.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V.begin(), V.end());
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = std::lower_bound(V.begin(), V.end(), BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace
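
// Usage sketch (illustrative only): the mapping sorts blocks by address, so
// blockToIndex is a binary search and indexToBlock is a plain vector lookup.
//
//   BlockToIndexMapping Mapping(F);
//   size_t Idx = Mapping.blockToIndex(&F.getEntryBlock());
//   assert(Mapping.indexToBlock(Idx) == &F.getEntryBlock());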

// The SuspendCrossingInfo maintains data that allows us to answer the question
// whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes:  a bit vector which contains a set of indices of blocks that can
//              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
//
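// As an illustrative example (block names hypothetical), for the simple CFG
//
//   entry -> susp -> use      (where 'susp' contains a suspend point)
//
// the analysis converges with use.Consumes = {entry, susp, use} and
// use.Kills = {entry, susp}, so a value defined in 'entry' and used in 'use'
// crosses the suspend point and must be spilled to the coroutine frame.
//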
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    assert(Block[UseIndex].Consumes[DefIndex] && "use must consume def");
    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();
    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    return isDefinitionAcrossSuspend(I.getParent(), U);
  }
};
} // end anonymous namespace
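
// Usage sketch (illustrative; this mirrors how buildCoroutineFrame below
// queries the analysis):
//
//   SuspendCrossingInfo Checker(F, Shape);
//   for (User *U : I.users())
//     if (Checker.isDefinitionAcrossSuspend(I, U))
//       ; // the value of I must live in the coroutine frame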

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends as
  // the code beyond coro.end is reachable during initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    markSuspendBlock(CSI->getCoroSave());
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // whether anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks it
          // consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during initial invocation
          // of the coroutine while all the data are still available on the
          // stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that it is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

// We build up the list of spills for every case where a use is separated
// from the definition by a suspend point.

namespace {
class Spill {
  Value *Def = nullptr;
  Instruction *User = nullptr;
  unsigned FieldNo = 0;

public:
  Spill(Value *Def, llvm::User *U) : Def(Def), User(cast<Instruction>(U)) {}

  Value *def() const { return Def; }
  Instruction *user() const { return User; }
  BasicBlock *userBlock() const { return User->getParent(); }

  // Note that the field index is stored in the first Spill entry for a
  // particular definition. Subsequent mentions of a definition do not have
  // FieldNo assigned. This works out fine as the users of Spills capture the
  // info about the definition the first time they encounter it. Consider
  // refactoring SpillInfo into two arrays to normalize the spill
  // representation.
  unsigned fieldIndex() const {
    assert(FieldNo && "Accessing unassigned field");
    return FieldNo;
  }
  void setFieldIndex(unsigned FieldNumber) {
    assert(!FieldNo && "Reassigning field number");
    FieldNo = FieldNumber;
  }
};
} // namespace

// Note that there may be more than one record with the same value of Def in
// the SpillInfo vector.
using SpillInfo = SmallVector<Spill, 8>;

#ifndef NDEBUG
static void dump(StringRef Title, SpillInfo const &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  Value *CurrentValue = nullptr;
  for (auto const &E : Spills) {
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentValue->dump();
    }
    dbgs() << "   user: ";
    E.user()->dump();
  }
}
#endif

namespace {
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame. If the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
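//
// For example (an illustrative sketch): if the frame built so far holds a
// single i8 (StructSize == 1) and the next field comes from an alloca of
// type i64 with an explicit 'align 16', the natural alignment of i64 is 8
// while the forced alignment is 16, so getPaddingType below would return
// [15 x i8] to pad the frame from offset 1 up to offset 16.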
struct PaddingCalculator {
  const DataLayout &DL;
  LLVMContext &Context;
  unsigned StructSize = 0;

  PaddingCalculator(LLVMContext &Context, DataLayout const &DL)
      : DL(DL), Context(Context) {}

  // Replicate the logic from IR/DataLayout.cpp to match field offset
  // computation for LLVM structs.
  void addType(Type *Ty) {
    unsigned TyAlign = DL.getABITypeAlignment(Ty);
    if ((StructSize & (TyAlign - 1)) != 0)
      StructSize = alignTo(StructSize, TyAlign);

    StructSize += DL.getTypeAllocSize(Ty); // Consume space for this data item.
  }

  void addTypes(SmallVectorImpl<Type *> const &Types) {
    for (auto *Ty : Types)
      addType(Ty);
  }

  unsigned computePadding(Type *Ty, unsigned ForcedAlignment) {
    unsigned TyAlign = DL.getABITypeAlignment(Ty);
    auto Natural = alignTo(StructSize, TyAlign);
    auto Forced = alignTo(StructSize, ForcedAlignment);

    // Return how many bytes of padding we need to insert.
    if (Natural != Forced)
      return std::max(Natural, Forced) - StructSize;

    // Rely on natural alignment.
    return 0;
  }

  // If padding is required, return the padding field type to insert.
  ArrayType *getPaddingType(Type *Ty, unsigned ForcedAlignment) {
    if (auto Padding = computePadding(Ty, ForcedAlignment))
      return ArrayType::get(Type::getInt8Ty(Context), Padding);

    return nullptr;
  }
};
} // namespace

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
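//
// As a concrete (hypothetical) instance: a coroutine @f with two suspend
// points, no promise, and a single spilled i32 would get a frame roughly
// like
//
//   %f.Frame = type { void (%f.Frame*)*, void (%f.Frame*)*, i1, i1, i32 }
//
// where the first i1 stands in for the absent promise and the second i1 is
// the suspend index (two suspend points need only one bit).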
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  SpillInfo &Spills) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PaddingCalculator Padder(C, DL);
  SmallString<32> Name(F.getName());
  Name.append(".Frame");
  StructType *FrameTy = StructType::create(C, Name);
  auto *FramePtrTy = FrameTy->getPointerTo();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                 /*IsVarArgs=*/false);
  auto *FnPtrTy = FnTy->getPointerTo();

  // Figure out how wide the integer type storing the suspend index should be.
  unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
  Type *PromiseType = Shape.PromiseAlloca
                          ? Shape.PromiseAlloca->getType()->getElementType()
                          : Type::getInt1Ty(C);
  SmallVector<Type *, 8> Types{FnPtrTy, FnPtrTy, PromiseType,
                               Type::getIntNTy(C, IndexBits)};
  Value *CurrentDef = nullptr;

  Padder.addTypes(Types);

  // Create an entry for every spilled value.
  for (auto &S : Spills) {
    if (CurrentDef == S.def())
      continue;

    CurrentDef = S.def();
    // PromiseAlloca was already added to the Types array earlier.
    if (CurrentDef == Shape.PromiseAlloca)
      continue;

    Type *Ty = nullptr;
    if (auto *AI = dyn_cast<AllocaInst>(CurrentDef)) {
      Ty = AI->getAllocatedType();
      if (unsigned AllocaAlignment = AI->getAlignment()) {
        // If alignment is specified in the alloca, see if we need to insert
        // extra padding.
        if (auto PaddingTy = Padder.getPaddingType(Ty, AllocaAlignment)) {
          Types.push_back(PaddingTy);
          Padder.addType(PaddingTy);
        }
      }
    } else {
      Ty = CurrentDef->getType();
    }
    S.setFieldIndex(Types.size());
    Types.push_back(Ty);
    Padder.addType(Ty);
  }
  FrameTy->setBody(Types);

  return FrameTy;
}

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI instruction in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//   cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPtr from the coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//    %hdl = coro.begin(...)
//    whatever
//
// becomes:
//
//    %hdl = coro.begin(...)
//    %FramePtr = bitcast i8* %hdl to %f.frame*
//    br label %AllocaSpillBB
//
//  AllocaSpillBB:
//    ; geps corresponding to allocas that were moved to the coroutine frame
//    br label %PostSpill
//
//  PostSpill:
//    whatever
//
//
static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  PointerType *FramePtrTy = Shape.FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
  Type *FrameTy = FramePtrTy->getElementType();

  Value *CurrentValue = nullptr;
  BasicBlock *CurrentBlock = nullptr;
  Value *CurrentReload = nullptr;
  unsigned Index = 0; // Proper field number will be read from field definition.

  // We need to keep track of any allocas that need "spilling", since they
  // will live in the coroutine frame now: all accesses to them need to be
  // changed, not just the accesses across suspend points. We remember the
  // allocas and their indices to be handled once we have processed all the
  // spills.
  SmallVector<std::pair<AllocaInst *, unsigned>, 4> Allocas;
  // The promise alloca (if present) has a fixed field number
  // (Shape::PromiseField).
  if (Shape.PromiseAlloca)
    Allocas.emplace_back(Shape.PromiseAlloca, coro::Shape::PromiseField);

  // Create a load instruction to reload the spilled value from the coroutine
  // frame.
  auto CreateReload = [&](Instruction *InsertBefore) {
    assert(Index && "accessing unassigned field number");
    Builder.SetInsertPoint(InsertBefore);
    auto *G = Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, Index,
                                                 CurrentValue->getName() +
                                                     Twine(".reload.addr"));
    return isa<AllocaInst>(CurrentValue)
               ? G
               : Builder.CreateLoad(G,
                                    CurrentValue->getName() + Twine(".reload"));
  };

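  // For a value %x spilled into, say, frame field 4, the emitted code looks
  // roughly like this (an illustrative sketch; the actual field number comes
  // from buildFrameType):
  //
  //   %x.spill.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
  //                                          i32 0, i32 4
  //   store i32 %x, i32* %x.spill.addr
  //
  // and in every block that uses %x after a suspend:
  //
  //   %x.reload.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
  //                                           i32 0, i32 4
  //   %x.reload = load i32, i32* %x.reload.addr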
  for (auto const &E : Spills) {
    // If we have not seen the value, generate a spill.
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentBlock = nullptr;
      CurrentReload = nullptr;

      Index = E.fieldIndex();

      if (auto *AI = dyn_cast<AllocaInst>(CurrentValue)) {
        // A spilled AllocaInst will be replaced with a GEP from the coroutine
        // frame; there is no spill required.
        Allocas.emplace_back(AI, Index);
        if (!AI->isStaticAlloca())
          report_fatal_error("Coroutines cannot handle non static allocas yet");
      } else {
        // Otherwise, create a store instruction storing the value into the
        // coroutine frame.

        Instruction *InsertPt = nullptr;
        if (isa<Argument>(CurrentValue)) {
          // For arguments, we will place the store instruction right after
          // the coroutine frame pointer instruction, i.e. the bitcast of
          // coro.begin from i8* to %f.frame*.
          InsertPt = FramePtr->getNextNode();
        } else if (auto *II = dyn_cast<InvokeInst>(CurrentValue)) {
          // If we are spilling the result of the invoke instruction, split the
          // normal edge and insert the spill in the new block.
          auto NewBB = SplitEdge(II->getParent(), II->getNormalDest());
          InsertPt = NewBB->getTerminator();
        } else if (isa<PHINode>(CurrentValue)) {
          // Skip past the PHINodes and EH pad instructions.
          BasicBlock *DefBlock = cast<Instruction>(E.def())->getParent();
          if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
            InsertPt = splitBeforeCatchSwitch(CSI);
          else
            InsertPt = &*DefBlock->getFirstInsertionPt();
        } else {
          // For all other values, the spill is placed immediately after
          // the definition.
          assert(!isa<TerminatorInst>(E.def()) && "unexpected terminator");
          InsertPt = cast<Instruction>(E.def())->getNextNode();
        }

        Builder.SetInsertPoint(InsertPt);
        auto *G = Builder.CreateConstInBoundsGEP2_32(
            FrameTy, FramePtr, 0, Index,
            CurrentValue->getName() + Twine(".spill.addr"));
        Builder.CreateStore(CurrentValue, G);
      }
    }

    // If we have not seen the use block, generate a reload in it.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentReload = CreateReload(&*CurrentBlock->getFirstInsertionPt());
    }

    // If we have a single edge PHINode, remove it and replace it with a reload
    // from the coroutine frame. (We already took care of multi edge PHINodes
    // by rewriting them in the rewritePHIs function).
    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentReload);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentValue in the current instruction with reload.
    E.user()->replaceUsesOfWith(CurrentValue, CurrentReload);
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();
  Shape.AllocaSpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  Shape.AllocaSpillBlock->splitBasicBlock(&Shape.AllocaSpillBlock->front(),
                                          "PostSpill");

  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  // If we found any allocas, replace all of their remaining uses with GEPs.
  for (auto &P : Allocas) {
    auto *G =
        Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, P.second);
    // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
    // here, as we are changing the location of the instruction.
    G->takeName(P.first);
    P.first->replaceAllUsesWith(G);
    P.first->eraseFromParent();
  }
  return FramePtr;
}

// Sets the unwind edge of an instruction to a particular successor.
static void setUnwindEdgeTo(TerminatorInst *TI, BasicBlock *Succ) {
  if (auto *II = dyn_cast<InvokeInst>(TI))
    II->setUnwindDest(Succ);
  else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
    CS->setUnwindDest(Succ);
  else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
    CR->setUnwindDest(Succ);
  else
    llvm_unreachable("unexpected terminator instruction");
}

// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
// block.
static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
                           BasicBlock *NewPred,
                           PHINode *LandingPadReplacement) {
  unsigned BBIdx = 0;
  for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // We manually update the LandingPadReplacement PHINode and it is the last
    // PHI Node. So, if we find it, we are done.
    if (LandingPadReplacement == PN)
      break;

    // Reuse the previous value of BBIdx if it lines up.  In cases where we
    // have multiple phi nodes with *lots* of predecessors, this is a speed
    // win because we don't have to scan the PHI looking for TIBB.  This
    // happens because the BB list of PHI nodes are usually in the same
    // order.
    if (PN->getIncomingBlock(BBIdx) != OldPred)
      BBIdx = PN->getBasicBlockIndex(OldPred);

    assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
    PN->setIncomingBlock(BBIdx, NewPred);
  }
}

// Uses SplitEdge unless the successor block is an EHPad, in which case do EH
// specific handling.
static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
                                    LandingPadInst *OriginalPad,
                                    PHINode *LandingPadReplacement) {
  auto *PadInst = Succ->getFirstNonPHI();
  if (!LandingPadReplacement && !PadInst->isEHPad())
    return SplitEdge(BB, Succ);

  auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
  setUnwindEdgeTo(BB->getTerminator(), NewBB);
  updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);

  if (LandingPadReplacement) {
    auto *NewLP = OriginalPad->clone();
    auto *Terminator = BranchInst::Create(Succ, NewBB);
    NewLP->insertBefore(Terminator);
    LandingPadReplacement->addIncoming(NewLP, NewBB);
    return NewBB;
  }
  Value *ParentPad = nullptr;
  if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
    ParentPad = FuncletPad->getParentPad();
  else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
    ParentPad = CatchSwitch->getParentPad();
  else
    llvm_unreachable("handling for other EHPads not implemented yet");

  auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
  CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
  return NewBB;
}
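
// For the funclet-style EH case above, the inserted block ends up looking
// roughly like this (an illustrative sketch; names are hypothetical):
//
//   newbb:
//     %pad = cleanuppad within %parentpad []
//     cleanupret from %pad unwind label %succ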

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all incoming
  // values in single-entry PHI nodes. For example, given:
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function after
    // ehAwareSplitEdge cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
    auto *PN = cast<PHINode>(&BB.front());
    do {
      int Index = PN->getBasicBlockIndex(IncomingBB);
      Value *V = PN->getIncomingValue(Index);
      PHINode *InputV = PHINode::Create(
          V->getType(), 1, V->getName() + Twine(".") + BB.getName(),
          &IncomingBB->front());
      InputV->addIncoming(V, Pred);
      PN->setIncomingValue(Index, InputV);
      PN = dyn_cast<PHINode>(PN->getNextNode());
    } while (PN != ReplPHI); // ReplPHI is either null or the PHI that replaced
                             // the landing pad.
  }

  if (LandingPad) {
    // Calls to the ehAwareSplitEdge function cloned the original landing pad.
    // We no longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume as opposed to
// spilling the result into the coroutine frame.
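//
// For example (an illustrative sketch): given
//
//   %sum = add i32 %a, %b      ; defined before a suspend point
//   ...                        ; suspend
//   call void @use(i32 %sum)   ; used after the suspend point
//
// it can be cheaper to clone the add into the block that uses %sum (so only
// %a and %b, if anything, occupy frame slots) than to reserve a frame slot
// for %sum itself. rewriteMaterializableInstructions below performs exactly
// this kind of cloning.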
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              SpillInfo const &Spills) {
  BasicBlock *CurrentBlock = nullptr;
  Instruction *CurrentMaterialization = nullptr;
  Instruction *CurrentDef = nullptr;

  for (auto const &E : Spills) {
    // If it is a new definition, update the CurrentXXX variables.
    if (CurrentDef != E.def()) {
      CurrentDef = cast<Instruction>(E.def());
      CurrentBlock = nullptr;
      CurrentMaterialization = nullptr;
    }

    // If we have not seen this block, materialize the value.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentMaterialization = cast<Instruction>(CurrentDef)->clone();
      CurrentMaterialization->setName(CurrentDef->getName());
      CurrentMaterialization->insertBefore(
          &*CurrentBlock->getFirstInsertionPt());
    }

    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentMaterialization);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentDef in the current instruction with the
    // CurrentMaterialization for the block.
    E.user()->replaceUsesOfWith(CurrentDef, CurrentMaterialization);
  }
}

// Move early uses of spilled variables after CoroBegin.
// For example, if a parameter had its address taken, we may end up with code
// like:
//        define @f(i32 %n) {
//          %n.addr = alloca i32
//          store %n, %n.addr
//          ...
//          call @coro.begin
//
// and we need to move the store after coro.begin.
static void moveSpillUsesAfterCoroBegin(Function &F, SpillInfo const &Spills,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree DT(F);
  SmallVector<Instruction *, 8> NeedsMoving;

  Value *CurrentValue = nullptr;

  for (auto const &E : Spills) {
    if (CurrentValue == E.def())
      continue;

    CurrentValue = E.def();

    for (User *U : CurrentValue->users()) {
      Instruction *I = cast<Instruction>(U);
      if (!DT.dominates(CoroBegin, I)) {
        LLVM_DEBUG(dbgs() << "will move: " << *I << "\n");

        // TODO: Make this more robust. Currently, if we run into a situation
        // where a simple instruction move won't work, we panic and
        // report_fatal_error.
        for (User *UI : I->users()) {
          if (!DT.dominates(CoroBegin, cast<Instruction>(UI)))
            report_fatal_error("cannot move instruction since its users are not"
                               " dominated by CoroBegin");
        }

        NeedsMoving.push_back(I);
      }
    }
  }

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *I : NeedsMoving)
    I->moveBefore(InsertPt);
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it ends up alone in
// its own block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Lower coro.dbg.declare to coro.dbg.value, since we are going to rewrite
  // access to local variables.
  LowerDbgDeclare(F);

  Shape.PromiseAlloca = Shape.CoroBegin->getId()->getPromise();
  if (Shape.PromiseAlloca) {
    Shape.CoroBegin->getId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    splitAround(CSI->getCoroSave(), "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (CoroEndInst *CE : Shape.CoroEnds)
    splitAround(CE, "CoroEnd");

  // Transform multi-edge PHI nodes so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  SpillInfo Spills;

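  // Note: we repeat the materialization rewrite a few times because cloning a
  // materializable instruction into its use blocks can itself create new
  // cross-suspend uses of that instruction's operands, which may in turn be
  // materializable. The bound of four rounds appears to be a heuristic cutoff
  // rather than a convergence guarantee.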
  for (int Repeat = 0; Repeat < 4; ++Repeat) {
    // See if there are materializable instructions across suspend points.
    for (Instruction &I : instructions(F))
      if (materializable(I))
        for (User *U : I.users())
          if (Checker.isDefinitionAcrossSuspend(I, U))
            Spills.emplace_back(&I, U);

    if (Spills.empty())
      break;

    // Rewrite materializable instructions to be materialized at the use point.
    LLVM_DEBUG(dump("Materializations", Spills));
    rewriteMaterializableInstructions(Builder, Spills);
    Spills.clear();
  }

  // Collect the spills for arguments and other non-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        Spills.emplace_back(&A, U);

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;
    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
    if (Shape.PromiseAlloca == &I)
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        Spills.emplace_back(&I, U);
      }
  }
  LLVM_DEBUG(dump("Spills", Spills));
  moveSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, Spills);
  Shape.FramePtr = insertSpills(Spills, Shape);
}
    944