      1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file implements the auto-upgrade helper functions
     11 //
     12 //===----------------------------------------------------------------------===//
     13 
     14 #include "llvm/AutoUpgrade.h"
     15 #include "llvm/Constants.h"
     16 #include "llvm/Function.h"
     17 #include "llvm/Instruction.h"
     18 #include "llvm/LLVMContext.h"
     19 #include "llvm/Module.h"
     20 #include "llvm/IntrinsicInst.h"
     21 #include "llvm/ADT/DenseMap.h"
     22 #include "llvm/ADT/SmallPtrSet.h"
     23 #include "llvm/ADT/SmallVector.h"
     24 #include "llvm/Support/CallSite.h"
     25 #include "llvm/Support/CFG.h"
     26 #include "llvm/Support/ErrorHandling.h"
     27 #include "llvm/Support/IRBuilder.h"
     28 #include <cstring>
     29 using namespace llvm;
     30 
     31 
     32 static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
     33   assert(F && "Illegal to upgrade a non-existent Function.");
     34 
     35   // Quickly eliminate it if it's not a candidate.
     36   StringRef Name = F->getName();
     37   if (Name.size() <= 8 || !Name.startswith("llvm."))
     38     return false;
     39   Name = Name.substr(5); // Strip off "llvm."
     40 
     41   FunctionType *FTy = F->getFunctionType();
     42   Module *M = F->getParent();
     43 
     44   switch (Name[0]) {
     45   default: break;
     46   case 'a':
     47     if (Name.startswith("atomic.cmp.swap") ||
     48         Name.startswith("atomic.swap") ||
     49         Name.startswith("atomic.load.add") ||
     50         Name.startswith("atomic.load.sub") ||
     51         Name.startswith("atomic.load.and") ||
     52         Name.startswith("atomic.load.nand") ||
     53         Name.startswith("atomic.load.or") ||
     54         Name.startswith("atomic.load.xor") ||
     55         Name.startswith("atomic.load.max") ||
     56         Name.startswith("atomic.load.min") ||
     57         Name.startswith("atomic.load.umax") ||
     58         Name.startswith("atomic.load.umin"))
     59       return true;
            break;
     60   case 'i':
     61     //  This upgrades the old llvm.init.trampoline to the new
     62     //  llvm.init.trampoline and llvm.adjust.trampoline pair.
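            //  For reference, the old form was declared roughly as
            //    declare i8* @llvm.init.trampoline(i8*, i8*, i8*)
            //  whereas the new llvm.init.trampoline returns void and the trampoline
            //  pointer is obtained from llvm.adjust.trampoline instead.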
     63     if (Name == "init.trampoline") {
     64       // The new llvm.init.trampoline returns nothing.
     65       if (FTy->getReturnType()->isVoidTy())
     66         break;
     67 
     68       assert(FTy->getNumParams() == 3 && "old init.trampoline takes 3 args!");
     69 
     70       // Change the name of the old intrinsic so that we can play with its type.
     71       std::string NameTmp = F->getName();
     72       F->setName("");
     73       NewFn = cast<Function>(M->getOrInsertFunction(
     74                                NameTmp,
     75                                Type::getVoidTy(M->getContext()),
     76                                FTy->getParamType(0), FTy->getParamType(1),
     77                                FTy->getParamType(2), (Type *)0));
     78       return true;
     79     }
            break;
     80   case 'm':
     81     if (Name == "memory.barrier")
     82       return true;
            break;
     83   case 'p':
     84     //  This upgrades the llvm.prefetch intrinsic to accept one more parameter,
     85     //  an instruction / data cache identifier. The old version implicitly
     86     //  applied only to the data cache.
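            //  For illustration only (pointer name and hint values made up), the
            //  upgrade (completed in UpgradeIntrinsicCall below) turns a call such as
            //    call void @llvm.prefetch(i8* %p, i32 0, i32 3)
            //  into
            //    call void @llvm.prefetch(i8* %p, i32 0, i32 3, i32 1)
            //  where the trailing i32 1 selects the data cache.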
     87     if (Name == "prefetch") {
     88       // Don't do anything if it has the correct number of arguments already
     89       if (FTy->getNumParams() == 4)
     90         break;
     91 
     92       assert(FTy->getNumParams() == 3 && "old prefetch takes 3 args!");
     93       //  We first need to change the name of the old (bad) intrinsic because
     94       //  its type is incorrect, but we cannot overload that name. We clear its
     95       //  name here, which lets us construct a correctly named and typed
     96       //  function below.
     97       std::string NameTmp = F->getName();
     98       F->setName("");
     99       NewFn = cast<Function>(M->getOrInsertFunction(NameTmp,
    100                                                     FTy->getReturnType(),
    101                                                     FTy->getParamType(0),
    102                                                     FTy->getParamType(1),
    103                                                     FTy->getParamType(2),
    104                                                     FTy->getParamType(2),
    105                                                     (Type*)0));
    106       return true;
    107     }
    108 
    109     break;
    110   case 'x': {
    111     const char *NewFnName = NULL;
    112     // This fixes the poorly named crc32 intrinsics.
    113     if (Name == "x86.sse42.crc32.8")
    114       NewFnName = "llvm.x86.sse42.crc32.32.8";
    115     else if (Name == "x86.sse42.crc32.16")
    116       NewFnName = "llvm.x86.sse42.crc32.32.16";
    117     else if (Name == "x86.sse42.crc32.32")
    118       NewFnName = "llvm.x86.sse42.crc32.32.32";
    119     else if (Name == "x86.sse42.crc64.8")
    120       NewFnName = "llvm.x86.sse42.crc32.64.8";
    121     else if (Name == "x86.sse42.crc64.64")
    122       NewFnName = "llvm.x86.sse42.crc32.64.64";
    123 
    124     if (NewFnName) {
    125       F->setName(NewFnName);
    126       NewFn = F;
    127       return true;
    128     }
    129 
    130     // Calls to these intrinsics are transformed into unaligned loads.
    131     if (Name == "x86.sse.loadu.ps" || Name == "x86.sse2.loadu.dq" ||
    132         Name == "x86.sse2.loadu.pd")
    133       return true;
    134 
    135     // Calls to these intrinsics are transformed into nontemporal stores.
    136     if (Name == "x86.sse.movnt.ps"  || Name == "x86.sse2.movnt.dq" ||
    137         Name == "x86.sse2.movnt.pd" || Name == "x86.sse2.movnt.i")
    138       return true;
    139 
    140     break;
    141   }
    142   }
    143 
    144   //  This may not belong here. This function is effectively being overloaded
    145   //  to both detect an intrinsic which needs upgrading, and to provide the
    146   //  upgraded form of the intrinsic. We should perhaps have two separate
    147   //  functions for this.
    148   return false;
    149 }
    150 
    151 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
    152   NewFn = 0;
    153   bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
    154 
    155   // Upgrade intrinsic attributes.  This does not change the function.
    156   if (NewFn)
    157     F = NewFn;
    158   if (unsigned id = F->getIntrinsicID())
    159     F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id));
    160   return Upgraded;
    161 }
    162 
    163 bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
    164   // Nothing to do yet.
    165   return false;
    166 }
    167 
    168 // UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call to
    169 // the upgraded intrinsic. All argument and return casting must be provided in
    170 // order to integrate seamlessly with the existing context.
    171 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
    172   Function *F = CI->getCalledFunction();
    173   LLVMContext &C = CI->getContext();
    174   ImmutableCallSite CS(CI);
    175 
    176   assert(F && "CallInst has no function associated with it.");
    177 
    178   if (!NewFn) {
    179     if (F->getName() == "llvm.x86.sse.loadu.ps" ||
    180         F->getName() == "llvm.x86.sse2.loadu.dq" ||
    181         F->getName() == "llvm.x86.sse2.loadu.pd") {
    182       // Convert to a native, unaligned load.
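              // For illustration only (operand names made up, and the pointer type is
              // assumed to be i8*), a call such as
              //   %v = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %p)
              // becomes an ordinary load with alignment 1:
              //   %cast = bitcast i8* %p to i128*
              //   %wide = load i128* %cast, align 1
              //   %v = bitcast i128 %wide to <4 x float>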
    183       Type *VecTy = CI->getType();
    184       Type *IntTy = IntegerType::get(C, 128);
    185       IRBuilder<> Builder(C);
    186       Builder.SetInsertPoint(CI->getParent(), CI);
    187 
    188       Value *BC = Builder.CreateBitCast(CI->getArgOperand(0),
    189                                         PointerType::getUnqual(IntTy),
    190                                         "cast");
    191       LoadInst *LI = Builder.CreateLoad(BC, CI->getName());
    192       LI->setAlignment(1);      // Unaligned load.
    193       BC = Builder.CreateBitCast(LI, VecTy, "new.cast");
    194 
    195       // Fix up all the uses with our new load.
    196       if (!CI->use_empty())
    197         CI->replaceAllUsesWith(BC);
    198 
    199       // Remove intrinsic.
    200       CI->eraseFromParent();
    201     } else if (F->getName() == "llvm.x86.sse.movnt.ps" ||
    202                F->getName() == "llvm.x86.sse2.movnt.dq" ||
    203                F->getName() == "llvm.x86.sse2.movnt.pd" ||
    204                F->getName() == "llvm.x86.sse2.movnt.i") {
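              // For illustration only (operand names made up, and the pointer type is
              // assumed to be i8*), a call such as
              //   call void @llvm.x86.sse.movnt.ps(i8* %p, <4 x float> %v)
              // becomes an ordinary store tagged as nontemporal:
              //   %cast = bitcast i8* %p to <4 x float>*
              //   store <4 x float> %v, <4 x float>* %cast, align 16, !nontemporal !0
              // where !0 is the single-element metadata node (i32 1) built below.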
    205       IRBuilder<> Builder(C);
    206       Builder.SetInsertPoint(CI->getParent(), CI);
    207 
    208       Module *M = F->getParent();
    209       SmallVector<Value *, 1> Elts;
    210       Elts.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
    211       MDNode *Node = MDNode::get(C, Elts);
    212 
    213       Value *Arg0 = CI->getArgOperand(0);
    214       Value *Arg1 = CI->getArgOperand(1);
    215 
    216       // Convert the type of the pointer to a pointer to the stored type.
    217       Value *BC = Builder.CreateBitCast(Arg0,
    218                                         PointerType::getUnqual(Arg1->getType()),
    219                                         "cast");
    220       StoreInst *SI = Builder.CreateStore(Arg1, BC);
    221       SI->setMetadata(M->getMDKindID("nontemporal"), Node);
    222       SI->setAlignment(16);
    223 
    224       // Remove intrinsic.
    225       CI->eraseFromParent();
    226     } else if (F->getName().startswith("llvm.atomic.cmp.swap")) {
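              // For illustration only (types and value names made up), a call such as
              //   %old = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %p, i32 %cmp, i32 %new)
              // becomes a cmpxchg instruction with monotonic ordering:
              //   %old = cmpxchg i32* %p, i32 %cmp, i32 %new monotonic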
    227       IRBuilder<> Builder(C);
    228       Builder.SetInsertPoint(CI->getParent(), CI);
    229       Value *Val = Builder.CreateAtomicCmpXchg(CI->getArgOperand(0),
    230                                                CI->getArgOperand(1),
    231                                                CI->getArgOperand(2),
    232                                                Monotonic);
    233 
    234       // Replace intrinsic.
    235       Val->takeName(CI);
    236       if (!CI->use_empty())
    237         CI->replaceAllUsesWith(Val);
    238       CI->eraseFromParent();
    239     } else if (F->getName().startswith("llvm.atomic")) {
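              // For illustration only (types and value names made up), a call such as
              //   %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 %v)
              // becomes the corresponding atomicrmw instruction, again with monotonic
              // ordering:
              //   %old = atomicrmw add i32* %p, i32 %v monotonic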
    240       IRBuilder<> Builder(C);
    241       Builder.SetInsertPoint(CI->getParent(), CI);
    242 
    243       AtomicRMWInst::BinOp Op;
    244       if (F->getName().startswith("llvm.atomic.swap"))
    245         Op = AtomicRMWInst::Xchg;
    246       else if (F->getName().startswith("llvm.atomic.load.add"))
    247         Op = AtomicRMWInst::Add;
    248       else if (F->getName().startswith("llvm.atomic.load.sub"))
    249         Op = AtomicRMWInst::Sub;
    250       else if (F->getName().startswith("llvm.atomic.load.and"))
    251         Op = AtomicRMWInst::And;
    252       else if (F->getName().startswith("llvm.atomic.load.nand"))
    253         Op = AtomicRMWInst::Nand;
    254       else if (F->getName().startswith("llvm.atomic.load.or"))
    255         Op = AtomicRMWInst::Or;
    256       else if (F->getName().startswith("llvm.atomic.load.xor"))
    257         Op = AtomicRMWInst::Xor;
    258       else if (F->getName().startswith("llvm.atomic.load.max"))
    259         Op = AtomicRMWInst::Max;
    260       else if (F->getName().startswith("llvm.atomic.load.min"))
    261         Op = AtomicRMWInst::Min;
    262       else if (F->getName().startswith("llvm.atomic.load.umax"))
    263         Op = AtomicRMWInst::UMax;
    264       else if (F->getName().startswith("llvm.atomic.load.umin"))
    265         Op = AtomicRMWInst::UMin;
    266       else
    267         llvm_unreachable("Unknown atomic");
    268 
    269       Value *Val = Builder.CreateAtomicRMW(Op, CI->getArgOperand(0),
    270                                            CI->getArgOperand(1),
    271                                            Monotonic);
    272 
    273       // Replace intrinsic.
    274       Val->takeName(CI);
    275       if (!CI->use_empty())
    276         CI->replaceAllUsesWith(Val);
    277       CI->eraseFromParent();
    278     } else if (F->getName() == "llvm.memory.barrier") {
    279       IRBuilder<> Builder(C);
    280       Builder.SetInsertPoint(CI->getParent(), CI);
    281 
    282       // Note that this conversion ignores the "device" bit; it was not really
    283       // well-defined, and got abused because nobody paid enough attention to
    284       // get it right. In practice, this probably doesn't matter; application
    285       // code generally doesn't need anything stronger than
    286       // SequentiallyConsistent (and realistically, SequentiallyConsistent
    287       // is lowered to a strong enough barrier for almost anything).
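              // For illustration only (flag values made up; the last flag is the old
              // "device" bit), a typical full barrier such as
              //   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
              // is upgraded to
              //   fence seq_cst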
    288 
    289       if (cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue())
    290         Builder.CreateFence(SequentiallyConsistent);
    291       else if (!cast<ConstantInt>(CI->getArgOperand(0))->getZExtValue())
    292         Builder.CreateFence(Release);
    293       else if (!cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue())
    294         Builder.CreateFence(Acquire);
    295       else
    296         Builder.CreateFence(AcquireRelease);
    297 
    298       // Remove intrinsic.
    299       CI->eraseFromParent();
    300     } else {
    301       llvm_unreachable("Unknown function for CallInst upgrade.");
    302     }
    303     return;
    304   }
    305 
    306   switch (NewFn->getIntrinsicID()) {
    307   case Intrinsic::prefetch: {
    308     IRBuilder<> Builder(C);
    309     Builder.SetInsertPoint(CI->getParent(), CI);
    310     llvm::Type *I32Ty = llvm::Type::getInt32Ty(CI->getContext());
    311 
    312     // Add the extra "data cache" argument
    313     Value *Operands[4] = { CI->getArgOperand(0), CI->getArgOperand(1),
    314                            CI->getArgOperand(2),
    315                            llvm::ConstantInt::get(I32Ty, 1) };
    316     CallInst *NewCI = CallInst::Create(NewFn, Operands,
    317                                        CI->getName(), CI);
    318     NewCI->setTailCall(CI->isTailCall());
    319     NewCI->setCallingConv(CI->getCallingConv());
    320     //  Handle any uses of the old CallInst.
    321     if (!CI->use_empty())
    322       //  Replace all uses of the old call with the new call, which has the
    323       //  correct type.
    324       CI->replaceAllUsesWith(NewCI);
    325 
    326     //  Clean up the old call now that it has been completely upgraded.
    327     CI->eraseFromParent();
    328     break;
    329   }
    330   case Intrinsic::init_trampoline: {
    331 
    332     //  Transform
    333     //    %tramp = call i8* llvm.init.trampoline (i8* x, i8* y, i8* z)
    334     //  to
    335     //    call void llvm.init.trampoline (i8* %x, i8* %y, i8* %z)
    336     //    %tramp = call i8* llvm.adjust.trampoline (i8* %x)
    337 
    338     Function *AdjustTrampolineFn =
    339       cast<Function>(Intrinsic::getDeclaration(F->getParent(),
    340                                                Intrinsic::adjust_trampoline));
    341 
    342     IRBuilder<> Builder(C);
    343     Builder.SetInsertPoint(CI);
    344 
    345     Builder.CreateCall3(NewFn, CI->getArgOperand(0), CI->getArgOperand(1),
    346                         CI->getArgOperand(2));
    347 
    348     CallInst *AdjustCall = Builder.CreateCall(AdjustTrampolineFn,
    349                                               CI->getArgOperand(0),
    350                                               CI->getName());
    351     if (!CI->use_empty())
    352       CI->replaceAllUsesWith(AdjustCall);
    353     CI->eraseFromParent();
    354     break;
    355   }
    356   }
    357 }
    358 
    359 // This tests each Function to determine if it needs upgrading. When we find
    360 // one we are interested in, we then upgrade all calls to reflect the new
    361 // function.
    362 void llvm::UpgradeCallsToIntrinsic(Function* F) {
    363   assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
    364 
    365   // Upgrade the function and check whether it is a totally new function.
    366   Function *NewFn;
    367   if (UpgradeIntrinsicFunction(F, NewFn)) {
    368     if (NewFn != F) {
    369       // Replace all uses of the old function with the new one if necessary.
    370       for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
    371            UI != UE; ) {
    372         if (CallInst *CI = dyn_cast<CallInst>(*UI++))
    373           UpgradeIntrinsicCall(CI, NewFn);
    374       }
    375       // Remove old function, no longer used, from the module.
    376       F->eraseFromParent();
    377     }
    378   }
    379 }
    380 
    381 /// This function strips all debug info intrinsics, except for llvm.dbg.declare.
    382 /// If an llvm.dbg.declare intrinsic is invalid, then this function strips all
    383 /// of the llvm.dbg.declare uses as well.
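        /// Here "invalid" means that either of the declare call's first two operands
        /// is not an MDNode.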
    384 void llvm::CheckDebugInfoIntrinsics(Module *M) {
    385   if (Function *FuncStart = M->getFunction("llvm.dbg.func.start")) {
    386     while (!FuncStart->use_empty())
    387       cast<CallInst>(FuncStart->use_back())->eraseFromParent();
    388     FuncStart->eraseFromParent();
    389   }
    390 
    391   if (Function *StopPoint = M->getFunction("llvm.dbg.stoppoint")) {
    392     while (!StopPoint->use_empty())
    393       cast<CallInst>(StopPoint->use_back())->eraseFromParent();
    394     StopPoint->eraseFromParent();
    395   }
    396 
    397   if (Function *RegionStart = M->getFunction("llvm.dbg.region.start")) {
    398     while (!RegionStart->use_empty())
    399       cast<CallInst>(RegionStart->use_back())->eraseFromParent();
    400     RegionStart->eraseFromParent();
    401   }
    402 
    403   if (Function *RegionEnd = M->getFunction("llvm.dbg.region.end")) {
    404     while (!RegionEnd->use_empty())
    405       cast<CallInst>(RegionEnd->use_back())->eraseFromParent();
    406     RegionEnd->eraseFromParent();
    407   }
    408 
    409   if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
    410     if (!Declare->use_empty()) {
    411       DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
    412       if (!isa<MDNode>(DDI->getArgOperand(0)) ||
    413           !isa<MDNode>(DDI->getArgOperand(1))) {
    414         while (!Declare->use_empty()) {
    415           CallInst *CI = cast<CallInst>(Declare->use_back());
    416           CI->eraseFromParent();
    417         }
    418         Declare->eraseFromParent();
    419       }
    420     }
    421   }
    422 }
    423 
    424 /// FindExnAndSelIntrinsics - Find the eh_exception and eh_selector intrinsic
    425 /// calls reachable from the unwind basic block.
    426 static void FindExnAndSelIntrinsics(BasicBlock *BB, CallInst *&Exn,
    427                                     CallInst *&Sel,
    428                                     SmallPtrSet<BasicBlock*, 8> &Visited) {
    429   if (!Visited.insert(BB)) return;
    430 
    431   for (BasicBlock::iterator
    432          I = BB->begin(), E = BB->end(); I != E; ++I) {
    433     if (CallInst *CI = dyn_cast<CallInst>(I)) {
    434       switch (CI->getCalledFunction()->getIntrinsicID()) {
    435       default: break;
    436       case Intrinsic::eh_exception:
    437         assert(!Exn && "Found more than one eh.exception call!");
    438         Exn = CI;
    439         break;
    440       case Intrinsic::eh_selector:
    441         assert(!Sel && "Found more than one eh.selector call!");
    442         Sel = CI;
    443         break;
    444       }
    445 
    446       if (Exn && Sel) return;
    447     }
    448   }
    449 
    450   if (Exn && Sel) return;
    451 
    452   for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
    453     FindExnAndSelIntrinsics(*I, Exn, Sel, Visited);
    454     if (Exn && Sel) return;
    455   }
    456 }
    457 
    458 /// TransferClausesToLandingPadInst - Transfer the exception handling clauses
    459 /// from the eh_selector call to the new landingpad instruction.
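        /// In the old scheme the selector call's operands are the exception value,
        /// the personality function, and then the clause list: a ConstantInt L
        /// introduces a filter whose L-1 type infos follow it (L == 0 marks a
        /// cleanup), any other value is a catch type info, and the special global
        /// "llvm.eh.catch.all.value" stands for a catch-all clause. The loops below
        /// decode that layout.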
    460 static void TransferClausesToLandingPadInst(LandingPadInst *LPI,
    461                                             CallInst *EHSel) {
    462   LLVMContext &Context = LPI->getContext();
    463   unsigned N = EHSel->getNumArgOperands();
    464 
    465   for (unsigned i = N - 1; i > 1; --i) {
    466     if (const ConstantInt *CI = dyn_cast<ConstantInt>(EHSel->getArgOperand(i))){
    467       unsigned FilterLength = CI->getZExtValue();
    468       unsigned FirstCatch = i + FilterLength + !FilterLength;
    469       assert(FirstCatch <= N && "Invalid filter length");
    470 
    471       if (FirstCatch < N)
    472         for (unsigned j = FirstCatch; j < N; ++j) {
    473           Value *Val = EHSel->getArgOperand(j);
    474           if (!Val->hasName() || Val->getName() != "llvm.eh.catch.all.value") {
    475             LPI->addClause(EHSel->getArgOperand(j));
    476           } else {
    477             GlobalVariable *GV = cast<GlobalVariable>(Val);
    478             LPI->addClause(GV->getInitializer());
    479           }
    480         }
    481 
    482       if (!FilterLength) {
    483         // Cleanup.
    484         LPI->setCleanup(true);
    485       } else {
    486         // Filter.
    487         SmallVector<Constant *, 4> TyInfo;
    488         TyInfo.reserve(FilterLength - 1);
    489         for (unsigned j = i + 1; j < FirstCatch; ++j)
    490           TyInfo.push_back(cast<Constant>(EHSel->getArgOperand(j)));
    491         ArrayType *AType =
    492           ArrayType::get(!TyInfo.empty() ? TyInfo[0]->getType() :
    493                          PointerType::getUnqual(Type::getInt8Ty(Context)),
    494                          TyInfo.size());
    495         LPI->addClause(ConstantArray::get(AType, TyInfo));
    496       }
    497 
    498       N = i;
    499     }
    500   }
    501 
    502   if (N > 2)
    503     for (unsigned j = 2; j < N; ++j) {
    504       Value *Val = EHSel->getArgOperand(j);
    505       if (!Val->hasName() || Val->getName() != "llvm.eh.catch.all.value") {
    506         LPI->addClause(EHSel->getArgOperand(j));
    507       } else {
    508         GlobalVariable *GV = cast<GlobalVariable>(Val);
    509         LPI->addClause(GV->getInitializer());
    510       }
    511     }
    512 }
    513 
    514 /// This function upgrades the old pre-3.0 exception handling system to the new
    515 /// one. N.B. This will be removed in 3.1.
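        /// Roughly (value names illustrative), an old-style unwind destination
        ///   lpad:
        ///     %exn = call i8* @llvm.eh.exception()
        ///     %sel = call i32 @llvm.eh.selector(i8* %exn, i8* %personality, ...)
        /// is rewritten to use a landingpad instruction
        ///   lpad:
        ///     %lp = landingpad { i8*, i32 } personality i8* %personality ...
        ///     %exn.new = extractvalue { i8*, i32 } %lp, 0
        ///     %sel.new = extractvalue { i8*, i32 } %lp, 1
        /// with both values routed through stack slots so that the old eh.exception
        /// and eh.selector calls can simply be replaced by loads.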
    516 void llvm::UpgradeExceptionHandling(Module *M) {
    517   Function *EHException = M->getFunction("llvm.eh.exception");
    518   Function *EHSelector = M->getFunction("llvm.eh.selector");
    519   if (!EHException || !EHSelector)
    520     return;
    521 
    522   LLVMContext &Context = M->getContext();
    523   Type *ExnTy = PointerType::getUnqual(Type::getInt8Ty(Context));
    524   Type *SelTy = Type::getInt32Ty(Context);
    525   Type *LPadSlotTy = StructType::get(ExnTy, SelTy, NULL);
    526 
    527   // This map links the invoke instruction with the eh.exception and eh.selector
    528   // calls associated with it.
    529   DenseMap<InvokeInst*, std::pair<Value*, Value*> > InvokeToIntrinsicsMap;
    530   for (Module::iterator
    531          I = M->begin(), E = M->end(); I != E; ++I) {
    532     Function &F = *I;
    533 
    534     for (Function::iterator
    535            II = F.begin(), IE = F.end(); II != IE; ++II) {
    536       BasicBlock *BB = &*II;
    537       InvokeInst *Inst = dyn_cast<InvokeInst>(BB->getTerminator());
    538       if (!Inst) continue;
    539       BasicBlock *UnwindDest = Inst->getUnwindDest();
    540       if (UnwindDest->isLandingPad()) continue; // Already converted.
    541 
    542       SmallPtrSet<BasicBlock*, 8> Visited;
    543       CallInst *Exn = 0;
    544       CallInst *Sel = 0;
    545       FindExnAndSelIntrinsics(UnwindDest, Exn, Sel, Visited);
    546       assert(Exn && Sel && "Cannot find eh.exception and eh.selector calls!");
    547       InvokeToIntrinsicsMap[Inst] = std::make_pair(Exn, Sel);
    548     }
    549   }
    550 
    551   // This map stores the slots where the exception object and selector value are
    552   // stored within a function.
    553   DenseMap<Function*, std::pair<Value*, Value*> > FnToLPadSlotMap;
    554   SmallPtrSet<Instruction*, 32> DeadInsts;
    555   for (DenseMap<InvokeInst*, std::pair<Value*, Value*> >::iterator
    556          I = InvokeToIntrinsicsMap.begin(), E = InvokeToIntrinsicsMap.end();
    557        I != E; ++I) {
    558     InvokeInst *Invoke = I->first;
    559     BasicBlock *UnwindDest = Invoke->getUnwindDest();
    560     Function *F = UnwindDest->getParent();
    561     std::pair<Value*, Value*> EHIntrinsics = I->second;
    562     CallInst *Exn = cast<CallInst>(EHIntrinsics.first);
    563     CallInst *Sel = cast<CallInst>(EHIntrinsics.second);
    564 
    565     // Store the exception object and selector value in the entry block.
    566     Value *ExnSlot = 0;
    567     Value *SelSlot = 0;
    568     if (!FnToLPadSlotMap[F].first) {
    569       BasicBlock *Entry = &F->front();
    570       ExnSlot = new AllocaInst(ExnTy, "exn", Entry->getTerminator());
    571       SelSlot = new AllocaInst(SelTy, "sel", Entry->getTerminator());
    572       FnToLPadSlotMap[F] = std::make_pair(ExnSlot, SelSlot);
    573     } else {
    574       ExnSlot = FnToLPadSlotMap[F].first;
    575       SelSlot = FnToLPadSlotMap[F].second;
    576     }
    577 
    578     if (!UnwindDest->getSinglePredecessor()) {
    579       // The unwind destination doesn't have a single predecessor. Create an
    580       // unwind destination which has only one predecessor.
    581       BasicBlock *NewBB = BasicBlock::Create(Context, "new.lpad",
    582                                              UnwindDest->getParent());
    583       BranchInst::Create(UnwindDest, NewBB);
    584       Invoke->setUnwindDest(NewBB);
    585 
    586       // Fix up any PHIs in the original unwind destination block.
    587       for (BasicBlock::iterator
    588              II = UnwindDest->begin(); isa<PHINode>(II); ++II) {
    589         PHINode *PN = cast<PHINode>(II);
    590         int Idx = PN->getBasicBlockIndex(Invoke->getParent());
    591         if (Idx == -1) continue;
    592         PN->setIncomingBlock(Idx, NewBB);
    593       }
    594 
    595       UnwindDest = NewBB;
    596     }
    597 
    598     IRBuilder<> Builder(Context);
    599     Builder.SetInsertPoint(UnwindDest, UnwindDest->getFirstInsertionPt());
    600 
    601     Value *PersFn = Sel->getArgOperand(1);
    602     LandingPadInst *LPI = Builder.CreateLandingPad(LPadSlotTy, PersFn, 0);
    603     Value *LPExn = Builder.CreateExtractValue(LPI, 0);
    604     Value *LPSel = Builder.CreateExtractValue(LPI, 1);
    605     Builder.CreateStore(LPExn, ExnSlot);
    606     Builder.CreateStore(LPSel, SelSlot);
    607 
    608     TransferClausesToLandingPadInst(LPI, Sel);
    609 
    610     DeadInsts.insert(Exn);
    611     DeadInsts.insert(Sel);
    612   }
    613 
    614   // Replace the old intrinsic calls with the values from the landingpad
    615   // instruction(s). These values were stored in allocas for us to use here.
    616   for (DenseMap<InvokeInst*, std::pair<Value*, Value*> >::iterator
    617          I = InvokeToIntrinsicsMap.begin(), E = InvokeToIntrinsicsMap.end();
    618        I != E; ++I) {
    619     std::pair<Value*, Value*> EHIntrinsics = I->second;
    620     CallInst *Exn = cast<CallInst>(EHIntrinsics.first);
    621     CallInst *Sel = cast<CallInst>(EHIntrinsics.second);
    622     BasicBlock *Parent = Exn->getParent();
    623 
    624     std::pair<Value*,Value*> ExnSelSlots = FnToLPadSlotMap[Parent->getParent()];
    625 
    626     IRBuilder<> Builder(Context);
    627     Builder.SetInsertPoint(Parent, Exn);
    628     LoadInst *LPExn = Builder.CreateLoad(ExnSelSlots.first, "exn.load");
    629     LoadInst *LPSel = Builder.CreateLoad(ExnSelSlots.second, "sel.load");
    630 
    631     Exn->replaceAllUsesWith(LPExn);
    632     Sel->replaceAllUsesWith(LPSel);
    633   }
    634 
    635   // Remove the dead instructions.
    636   for (SmallPtrSet<Instruction*, 32>::iterator
    637          I = DeadInsts.begin(), E = DeadInsts.end(); I != E; ++I) {
    638     Instruction *Inst = *I;
    639     Inst->eraseFromParent();
    640   }
    641 
    642   // Replace calls to "llvm.eh.resume" with the 'resume' instruction. Load the
    643   // exception and selector values from the stored place.
    644   Function *EHResume = M->getFunction("llvm.eh.resume");
    645   if (!EHResume) return;
    646 
    647   while (!EHResume->use_empty()) {
    648     CallInst *Resume = cast<CallInst>(EHResume->use_back());
    649     BasicBlock *BB = Resume->getParent();
    650 
    651     IRBuilder<> Builder(Context);
    652     Builder.SetInsertPoint(BB, Resume);
    653 
    654     Value *LPadVal =
    655       Builder.CreateInsertValue(UndefValue::get(LPadSlotTy),
    656                                 Resume->getArgOperand(0), 0, "lpad.val");
    657     LPadVal = Builder.CreateInsertValue(LPadVal, Resume->getArgOperand(1),
    658                                         1, "lpad.val");
    659     Builder.CreateResume(LPadVal);
    660 
    661     // Remove all instructions after the 'resume'.
    662     BasicBlock::iterator I = Resume;
    663     while (I != BB->end()) {
    664       Instruction *Inst = &*I++;
    665       Inst->eraseFromParent();
    666     }
    667   }
    668 }
    669