//===- LowerInvoke.cpp - Eliminate Invoke & Unwind instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation is designed for use by code generators which do not yet
// support stack unwinding.  This pass supports two models of exception
// handling lowering, the 'cheap' support and the 'expensive' support.
//
// 'Cheap' exception handling support gives the program the ability to execute
// any program which does not "throw an exception", by turning 'invoke'
// instructions into calls and by turning 'unwind' instructions into calls to
// abort().  If the program does dynamically use the unwind instruction, the
// program will print a message then abort.
//
// 'Expensive' exception handling support gives the full exception handling
// support to the program at the cost of making the 'invoke' instruction
// really expensive.  It basically inserts setjmp/longjmp calls to emulate the
// exception handling as necessary.
//
// Because the 'expensive' support slows down programs a lot, and EH is only
// used for a subset of the programs, it must be specifically enabled by an
// option.
//
// Note that after this pass runs the CFG is not entirely accurate (exceptional
// control flow edges are not correct anymore) so only very simple things
// should be done after the lowerinvoke pass has run (like generation of native
// code).  This should not be used as a general purpose "my LLVM-to-LLVM pass
// doesn't support the invoke instruction yet" lowering pass.
//
//===----------------------------------------------------------------------===//
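
// For illustration only (schematic IR; names are just for exposition): under
// the 'cheap' model an invoke such as
//
//     %r = invoke i32 @callee(i32 %a)
//              to label %normal unwind label %lpad
//
// is rewritten into an ordinary call followed by an unconditional branch,
//
//     %r = call i32 @callee(i32 %a)
//     br label %normal
//
// and %lpad simply loses the invoking block as a predecessor (its phi entries
// for that block are removed).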

#define DEBUG_TYPE "lowerinvoke"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <csetjmp>
#include <set>
using namespace llvm;

STATISTIC(NumInvokes, "Number of invokes replaced");
STATISTIC(NumSpilled, "Number of registers live across unwind edges");

static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
 cl::desc("Make the -lowerinvoke pass insert expensive, but correct, EH code"));

namespace {
  class LowerInvoke : public FunctionPass {
    // Used for both models.
    Constant *AbortFn;

    // Used for expensive EH support.
    StructType *JBLinkTy;
    GlobalVariable *JBListHead;
    Constant *SetJmpFn, *LongJmpFn, *StackSaveFn, *StackRestoreFn;
    bool useExpensiveEHSupport;

    // We peek in TLI to grab the target's jmp_buf size and alignment.
    const TargetLowering *TLI;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit LowerInvoke(const TargetLowering *tli = NULL,
                         bool useExpensiveEHSupport = ExpensiveEHSupport)
      : FunctionPass(ID), useExpensiveEHSupport(useExpensiveEHSupport),
        TLI(tli) {
      initializeLowerInvokePass(*PassRegistry::getPassRegistry());
    }
    bool doInitialization(Module &M);
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // This is a cluster of orthogonal Transforms
      AU.addPreserved("mem2reg");
      AU.addPreservedID(LowerSwitchID);
    }

  private:
    bool insertCheapEHSupport(Function &F);
    void splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes);
    void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                AllocaInst *InvokeNum, AllocaInst *StackPtr,
                                SwitchInst *CatchSwitch);
    bool insertExpensiveEHSupport(Function &F);
  };
}

char LowerInvoke::ID = 0;
INITIALIZE_PASS(LowerInvoke, "lowerinvoke",
                "Lower invoke and unwind, for unwindless code generators",
                false, false)

char &llvm::LowerInvokePassID = LowerInvoke::ID;

// Public Interface To the LowerInvoke pass.
FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI) {
  return new LowerInvoke(TLI, ExpensiveEHSupport);
}
FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
                                          bool useExpensiveEHSupport) {
  return new LowerInvoke(TLI, useExpensiveEHSupport);
}

// doInitialization - Make sure that there is a prototype for abort in the
// current module.
bool LowerInvoke::doInitialization(Module &M) {
  Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
  if (useExpensiveEHSupport) {
    // Insert a type for the linked list of jump buffers.
    unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
    JBSize = JBSize ? JBSize : 200;
    Type *JmpBufTy = ArrayType::get(VoidPtrTy, JBSize);

    JBLinkTy = StructType::create(M.getContext(), "llvm.sjljeh.jmpbufty");
    Type *Elts[] = { JmpBufTy, PointerType::getUnqual(JBLinkTy) };
    JBLinkTy->setBody(Elts);

    Type *PtrJBList = PointerType::getUnqual(JBLinkTy);

    // Now that we've done that, insert the jmpbuf list head global, unless it
    // already exists.
    if (!(JBListHead = M.getGlobalVariable("llvm.sjljeh.jblist", PtrJBList))) {
      JBListHead = new GlobalVariable(M, PtrJBList, false,
                                      GlobalValue::LinkOnceAnyLinkage,
                                      Constant::getNullValue(PtrJBList),
                                      "llvm.sjljeh.jblist");
    }

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
    !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

    SetJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::setjmp);

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
   // let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

    LongJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::longjmp);
    StackSaveFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
    StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
  }

  // We need the 'abort' function for both models.
  AbortFn = M.getOrInsertFunction("abort", Type::getVoidTy(M.getContext()),
                                  (Type *)0);
  return true;
}
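
// For the expensive model, doInitialization leaves behind (schematically, in
// IR; the 200-pointer jmp_buf is only the fallback size used when no target
// lowering info is available):
//
//     %llvm.sjljeh.jmpbufty = type { [200 x i8*], %llvm.sjljeh.jmpbufty* }
//     @llvm.sjljeh.jblist = linkonce global %llvm.sjljeh.jmpbufty* null
//
// Every function that contains invokes pushes a node onto this list on entry
// and pops it again on every exit; unwinding longjmps through whatever buffer
// is at the head of the list.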

bool LowerInvoke::insertCheapEHSupport(Function &F) {
  bool Changed = false;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
      // Insert a normal call instruction...
      CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                           CallArgs, "", II);
      NewCall->takeName(II);
      NewCall->setCallingConv(II->getCallingConv());
      NewCall->setAttributes(II->getAttributes());
      NewCall->setDebugLoc(II->getDebugLoc());
      II->replaceAllUsesWith(NewCall);

      // Insert an unconditional branch to the normal destination.
      BranchInst::Create(II->getNormalDest(), II);

      // Remove any PHI node entries from the exception destination.
      II->getUnwindDest()->removePredecessor(BB);

      // Remove the invoke instruction now.
      BB->getInstList().erase(II);

      ++NumInvokes; Changed = true;
    }
  return Changed;
}

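// Roughly, for exposition (the IR below is schematic, not emitted verbatim),
// rewriteExpensiveInvoke turns each
//
//     invoke void @callee() to label %normal unwind label %lpad
//
// into
//
//     store volatile i32 <invoke number>, i32* %invokenum
//     %ssret = call i8* @llvm.stacksave()
//     store volatile i8* %ssret, i8** %stackptr
//     call void @callee()
//     br label %normal
//
// with a store of zero to %invokenum at the top of %normal, a stack restore at
// the top of %lpad, and a new case in the catch block's switch sending
// <invoke number> to %lpad.
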
/// rewriteExpensiveInvoke - Insert code and hack the function to replace the
/// specified invoke instruction with a call.
void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                         AllocaInst *InvokeNum,
                                         AllocaInst *StackPtr,
                                         SwitchInst *CatchSwitch) {
  ConstantInt *InvokeNoC = ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                            InvokeNo);

  // If the unwind edge has phi nodes, split the edge.
  if (isa<PHINode>(II->getUnwindDest()->begin())) {
    SplitCriticalEdge(II, 1, this);

    // If there are any phi nodes left, they must have a single predecessor.
    while (PHINode *PN = dyn_cast<PHINode>(II->getUnwindDest()->begin())) {
      PN->replaceAllUsesWith(PN->getIncomingValue(0));
      PN->eraseFromParent();
    }
  }

  // Insert a store of the invoke num before the invoke and store zero into the
  // location afterward.
  new StoreInst(InvokeNoC, InvokeNum, true, II);  // volatile

  // Insert a store of the stack ptr before the invoke, so we can restore it
  // later in the exception case.
  CallInst *StackSaveRet = CallInst::Create(StackSaveFn, "ssret", II);
  new StoreInst(StackSaveRet, StackPtr, true, II); // volatile

  BasicBlock::iterator NI = II->getNormalDest()->getFirstInsertionPt();
  // nonvolatile.
  new StoreInst(Constant::getNullValue(Type::getInt32Ty(II->getContext())),
                InvokeNum, false, NI);

  Instruction *StackPtrLoad =
    new LoadInst(StackPtr, "stackptr.restore", true,
                 II->getUnwindDest()->getFirstInsertionPt());
  CallInst::Create(StackRestoreFn, StackPtrLoad, "")->insertAfter(StackPtrLoad);

  // Add a switch case to our unwind block.
  CatchSwitch->addCase(InvokeNoC, II->getUnwindDest());

  // Insert a normal call instruction.
  SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                       CallArgs, "", II);
  NewCall->takeName(II);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  II->replaceAllUsesWith(NewCall);

  // Replace the invoke with an uncond branch.
  BranchInst::Create(II->getNormalDest(), NewCall->getParent());
  II->eraseFromParent();
}

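// When splitLiveRangesLiveAcrossInvokes (below) finds a value that is live
// across an unwind edge, it demotes it to a stack slot with DemoteRegToStack:
// an alloca is created in the entry block, the value is stored to it right
// after its definition, and its uses read it back through (volatile) loads.
// Schematically, with hypothetical names for exposition:
//
//     %v = <def>
//     store <ty> %v, <ty>* %v.slot
//     ...
//     %v.reload = load volatile <ty>* %v.slot
//     use(%v.reload)
//
// so nothing needs to survive the setjmp/longjmp edge in a virtual register.
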
/// MarkBlocksLiveIn - Insert BB and all of its predecessors into LiveBBs until
/// we reach blocks we've already seen.
static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
  if (!LiveBBs.insert(BB).second) return; // already been here.

  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    MarkBlocksLiveIn(*PI, LiveBBs);
}

// First thing we need to do is scan the whole function for values that are
// live across unwind edges.  Each value that is live across an unwind edge
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge.  This process also splits all critical edges
// coming out of invokes.
void LowerInvoke::
splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);
    SplitCriticalEdge(II, 1, this);
    assert(!isa<PHINode>(II->getNormalDest()) &&
           !isa<PHINode>(II->getUnwindDest()) &&
           "critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
         isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently.  We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI = ExtractValueInst::Create(AI, 0, "", AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the AllocaInst.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType()
      // so src and destination types are identical.  BitCast is the only
      // possibility.
      CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);
      // Set the operand of the cast instruction back to the AllocaInst.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion.  However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Scan all of the uses and see if the live range is live across an
      // unwind edge.  If we find a use live across an invoke edge, create an
      // alloca and spill the value.
      std::set<InvokeInst*> InvokesWithStoreInserted;

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}

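// The overall shape insertExpensiveEHSupport gives a function containing
// invokes is roughly (block names as created below; the IR is schematic):
//
//   entry:         push a new jmp_buf node onto @llvm.sjljeh.jblist, then
//                    %sjret = call i32 @llvm.setjmp(i8* %buf)
//                    br i1 (%sjret == 0), label %setjmp.cont, label %setjmp.catch
//   setjmp.cont:   the original code, with every invoke rewritten to a call
//   setjmp.catch:  load volatile %invokenum and switch on it, one case per
//                  invoke, defaulting to %unwindbb
//   dounwind:      pop the list and check whether a caller's buffer exists
//   unwind:        longjmp to the caller's jmp_buf
//   unwinderror:   no enclosing setjmp anywhere up the stack: call abort()
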
bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
  SmallVector<ReturnInst*,16> Returns;
  SmallVector<InvokeInst*,16> Invokes;
  UnreachableInst *UnreachablePlaceholder = 0;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      // Remember all return instructions in case we insert an invoke into this
      // function.
      Returns.push_back(RI);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Invokes.push_back(II);
    }

  if (Invokes.empty()) return false;

  NumInvokes += Invokes.size();

  // TODO: This is not an optimal way to do this.  In particular, this always
  // inserts setjmp calls into the entries of functions with invoke
  // instructions even though there are possibly paths through the function
  // that do not execute any invokes.  In particular, for functions with early
  // exits, e.g. the 'addMove' method in hexxagon, it would be nice to not have
  // to do the setjmp stuff on the early exit path.  This requires a bit of
  // dataflow, but would not be too hard to do.

  // If we have an invoke instruction, insert a setjmp that dominates all
  // invokes.  After the setjmp, use a cond branch that goes to the original
  // code path on zero, and to a designated 'catch' block on nonzero.
  Value *OldJmpBufPtr = 0;
  if (!Invokes.empty()) {
    // First thing we need to do is scan the whole function for values that are
    // live across unwind edges.  Each value that is live across an unwind edge
    // we spill into a stack location, guaranteeing that there is nothing live
    // across the unwind edge.  This process also splits all critical edges
    // coming out of invokes.
    splitLiveRangesLiveAcrossInvokes(Invokes);

    BasicBlock *EntryBB = F.begin();

    // Create an alloca for the incoming jump buffer ptr and the new jump
    // buffer that needs to be restored on all exits from the function.  This
    // is an alloca because the value needs to be live across invokes.
    unsigned Align = TLI ? TLI->getJumpBufAlignment() : 0;
    AllocaInst *JmpBuf =
      new AllocaInst(JBLinkTy, 0, Align,
                     "jblink", F.begin()->begin());

    Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                     ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
    OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx, "OldBuf",
                                             EntryBB->getTerminator());

    // Copy the JBListHead to the alloca.
    Value *OldBuf = new LoadInst(JBListHead, "oldjmpbufptr", true,
                                 EntryBB->getTerminator());
    new StoreInst(OldBuf, OldJmpBufPtr, true, EntryBB->getTerminator());

    // Add the new jumpbuf to the list.
    new StoreInst(JmpBuf, JBListHead, true, EntryBB->getTerminator());

    // Create the catch block.  The catch block is basically a big switch
    // statement that goes to all of the invoke catch blocks.
    BasicBlock *CatchBB =
      BasicBlock::Create(F.getContext(), "setjmp.catch", &F);

    // Create an alloca which keeps track of the stack pointer before every
    // invoke, this allows us to properly restore the stack pointer after
    // long jumping.
    AllocaInst *StackPtr = new AllocaInst(Type::getInt8PtrTy(F.getContext()), 0,
                                          "stackptr", EntryBB->begin());

    // Create an alloca which keeps track of which invoke is currently
    // executing.  For normal calls it contains zero.
    AllocaInst *InvokeNum = new AllocaInst(Type::getInt32Ty(F.getContext()), 0,
                                           "invokenum", EntryBB->begin());
    new StoreInst(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
                  InvokeNum, true, EntryBB->getTerminator());

    // Insert a load in the Catch block, and a switch on its value.  By
    // default, we go to a block that just does an unwind (which is the correct
    // action for a standard call).  We insert an unreachable instruction here
    // and modify the block to jump to the correct unwinding pad later.
    BasicBlock *UnwindBB = BasicBlock::Create(F.getContext(), "unwindbb", &F);
    UnreachablePlaceholder = new UnreachableInst(F.getContext(), UnwindBB);

    Value *CatchLoad = new LoadInst(InvokeNum, "invoke.num", true, CatchBB);
    SwitchInst *CatchSwitch =
      SwitchInst::Create(CatchLoad, UnwindBB, Invokes.size(), CatchBB);

    // Now that things are set up, insert the setjmp call itself.

    // Split the entry block to insert the conditional branch for the setjmp.
    BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
                                                     "setjmp.cont");

    Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 0);
    Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx, "TheJmpBuf",
                                                 EntryBB->getTerminator());
    JmpBufPtr = new BitCastInst(JmpBufPtr,
                                Type::getInt8PtrTy(F.getContext()),
                                "tmp", EntryBB->getTerminator());
    Value *SJRet = CallInst::Create(SetJmpFn, JmpBufPtr, "sjret",
                                    EntryBB->getTerminator());

    // Compare the return value to zero.
    Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
                                   ICmpInst::ICMP_EQ, SJRet,
                                   Constant::getNullValue(SJRet->getType()),
                                   "notunwind");
    // Nuke the uncond branch.
    EntryBB->getTerminator()->eraseFromParent();

    // Put in a new condbranch in its place.
    BranchInst::Create(ContBlock, CatchBB, IsNormal, EntryBB);

    // At this point, we are all set up, rewrite each invoke instruction.
    for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
      rewriteExpensiveInvoke(Invokes[i], i+1, InvokeNum, StackPtr, CatchSwitch);
  }

  // We know that there is at least one unwind.

  // Create three new blocks, the block to load the jmpbuf ptr and compare
  // against null, the block to do the longjmp, and the error block for if it
  // is null.  Add them at the end of the function because they are not hot.
  BasicBlock *UnwindHandler = BasicBlock::Create(F.getContext(),
                                                 "dounwind", &F);
  BasicBlock *UnwindBlock = BasicBlock::Create(F.getContext(), "unwind", &F);
  BasicBlock *TermBlock = BasicBlock::Create(F.getContext(), "unwinderror", &F);

  // If this function contains an invoke, restore the old jumpbuf ptr.
  Value *BufPtr;
  if (OldJmpBufPtr) {
    // Before the return, insert a copy from the saved value to the new value.
    BufPtr = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", UnwindHandler);
    new StoreInst(BufPtr, JBListHead, UnwindHandler);
  } else {
    BufPtr = new LoadInst(JBListHead, "ehlist", UnwindHandler);
  }

  // Load the JBList, if it's null, then there was no catch!
  Value *NotNull = new ICmpInst(*UnwindHandler, ICmpInst::ICMP_NE, BufPtr,
                                Constant::getNullValue(BufPtr->getType()),
                                "notnull");
  BranchInst::Create(UnwindBlock, TermBlock, NotNull, UnwindHandler);

  // Create the block to do the longjmp.
  // Get a pointer to the jmpbuf and longjmp.
  Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                   ConstantInt::get(Type::getInt32Ty(F.getContext()), 0) };
  Idx[0] = GetElementPtrInst::Create(BufPtr, Idx, "JmpBuf", UnwindBlock);
  Idx[0] = new BitCastInst(Idx[0],
                           Type::getInt8PtrTy(F.getContext()),
                           "tmp", UnwindBlock);
  Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 1);
  CallInst::Create(LongJmpFn, Idx, "", UnwindBlock);
  new UnreachableInst(F.getContext(), UnwindBlock);

  // Set up the term block ("throw without a catch").
  new UnreachableInst(F.getContext(), TermBlock);

  // Insert a call to abort().
  CallInst::Create(AbortFn, "",
                   TermBlock->getTerminator())->setTailCall();

  // Replace the inserted unreachable with a branch to the unwind handler.
  if (UnreachablePlaceholder) {
    BranchInst::Create(UnwindHandler, UnreachablePlaceholder);
    UnreachablePlaceholder->eraseFromParent();
  }

  // Finally, for any returns from this function, if this function contains an
  // invoke, restore the old jmpbuf pointer to its input value.
  if (OldJmpBufPtr) {
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *R = Returns[i];

      // Before the return, insert a copy from the saved value to the new
      // value.
      Value *OldBuf = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", true, R);
      new StoreInst(OldBuf, JBListHead, true, R);
    }
  }

  return true;
}

bool LowerInvoke::runOnFunction(Function &F) {
  if (useExpensiveEHSupport)
    return insertExpensiveEHSupport(F);
  else
    return insertCheapEHSupport(F);
}