//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope.  This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCleanup.h"

using namespace clang;
using namespace CodeGen;

bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  if (rv.isScalar())
    return DominatingLLVMValue::needsSaving(rv.getScalarVal());
  if (rv.isAggregate())
    return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
  return true;
}

DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr, ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
      llvm::StructType::get(V.first->getType(), V.second->getType(),
                            (void*) 0);
    llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    CGF.StoreComplexToAddr(V, addr, /*volatile*/ false);
    return saved_type(addr, ComplexAddress);
  }

  assert(rv.isAggregate());
  llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V))
    return saved_type(V, AggregateLiteral);

  llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, addr);
  return saved_type(addr, AggregateAddress);
}

/// Given a saved r-value produced by SaveRValue, perform the code
/// necessary to restore it to usability at the current insertion
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
  switch (K) {
  case ScalarLiteral:
    return RValue::get(Value);
  case ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(Value));
  case AggregateLiteral:
    return RValue::getAggregate(Value);
  case AggregateAddress:
    return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
  case ComplexAddress:
    return RValue::getComplex(CGF.LoadComplexFromAddr(Value, false));
  }

  llvm_unreachable("bad saved r-value kind");
}

/// Push an entry of the given size onto this protected-scope stack.
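/// Entries live at the top of a single heap buffer: StartOfData grows
/// downward toward StartOfBuffer as scopes are pushed, and the buffer is
/// reallocated at double the capacity (copying the live data to the end
/// of the new buffer) whenever a push would not fit.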
char *EHScopeStack::allocate(size_t Size) {
  if (!StartOfBuffer) {
    unsigned Capacity = 1024;
    while (Capacity < Size) Capacity *= 2;
    StartOfBuffer = new char[Capacity];
    StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
  } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
    unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
    unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);

    unsigned NewCapacity = CurrentCapacity;
    do {
      NewCapacity *= 2;
    } while (NewCapacity < UsedCapacity + Size);

    char *NewStartOfBuffer = new char[NewCapacity];
    char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
    char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
    memcpy(NewStartOfData, StartOfData, UsedCapacity);
    delete [] StartOfBuffer;
    StartOfBuffer = NewStartOfBuffer;
    EndOfBuffer = NewEndOfBuffer;
    StartOfData = NewStartOfData;
  }

  assert(StartOfBuffer + Size <= StartOfData);
  StartOfData -= Size;
  return StartOfData;
}

EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
         si != se; ) {
    EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive()) return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}

EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const {
  for (stable_iterator si = getInnermostEHScope(), se = stable_end();
         si != se; ) {
    // Skip over inactive cleanups.
    EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si));
    if (cleanup && !cleanup->isActive()) {
      si = cleanup->getEnclosingEHScope();
      continue;
    }

    // All other scopes are always active.
    return si;
  }

  return stable_end();
}


void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
  assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
  char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
  bool IsNormalCleanup = Kind & NormalCleanup;
  bool IsEHCleanup = Kind & EHCleanup;
  bool IsActive = !(Kind & InactiveCleanup);
  EHCleanupScope *Scope =
    new (Buffer) EHCleanupScope(IsNormalCleanup,
                                IsEHCleanup,
                                IsActive,
                                Size,
                                BranchFixups.size(),
                                InnermostNormalCleanup,
                                InnermostEHScope);
  if (IsNormalCleanup)
    InnermostNormalCleanup = stable_begin();
  if (IsEHCleanup)
    InnermostEHScope = stable_begin();

  return Scope->getCleanupBuffer();
}

void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
  InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
  InnermostEHScope = Cleanup.getEnclosingEHScope();
  StartOfData += Cleanup.getAllocatedSize();

  // Destroy the cleanup.
  Cleanup.~EHCleanupScope();

  // Check whether we can shrink the branch-fixups stack.
  if (!BranchFixups.empty()) {
    // If we no longer have any normal cleanups, all the fixups are
    // complete.
    if (!hasNormalCleanups())
      BranchFixups.clear();

    // Otherwise we can still trim out unnecessary nulls.
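    // ("Null" fixups are those whose destination has already been
    // resolved and cleared; see ResolveBranchFixups below.)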
    else
      popNullFixups();
  }
}

EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
  assert(getInnermostEHScope() == stable_end());
  char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
  EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
  InnermostEHScope = stable_begin();
  return filter;
}

void EHScopeStack::popFilter() {
  assert(!empty() && "popping exception stack when empty");

  EHFilterScope &filter = cast<EHFilterScope>(*begin());
  StartOfData += EHFilterScope::getSizeForNumFilters(filter.getNumFilters());

  InnermostEHScope = filter.getEnclosingEHScope();
}

EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
  EHCatchScope *scope =
    new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
  InnermostEHScope = stable_begin();
  return scope;
}

void EHScopeStack::pushTerminate() {
  char *Buffer = allocate(EHTerminateScope::getSize());
  new (Buffer) EHTerminateScope(InnermostEHScope);
  InnermostEHScope = stable_begin();
}

/// Remove any 'null' fixups on the stack.  However, we can't pop more
/// fixups than the fixup depth on the innermost normal cleanup, or
/// else fixups that we try to add to that cleanup will end up in the
/// wrong place.  We *could* try to shrink fixup depths, but that's
/// actually a lot of work for little benefit.
void EHScopeStack::popNullFixups() {
  // We expect this to only be called when there's still an innermost
  // normal cleanup; otherwise there really shouldn't be any fixups.
  assert(hasNormalCleanups());

  EHScopeStack::iterator it = find(InnermostNormalCleanup);
  unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
  assert(BranchFixups.size() >= MinSize && "fixup stack out of order");

  while (BranchFixups.size() > MinSize &&
         BranchFixups.back().Destination == 0)
    BranchFixups.pop_back();
}

void CodeGenFunction::initFullExprCleanup() {
  // Create a variable to decide whether the cleanup needs to be run.
  llvm::AllocaInst *active
    = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");

  // Initialize it to false at a site that's guaranteed to be run
  // before each evaluation.
  setBeforeOutermostConditional(Builder.getFalse(), active);

  // Initialize it to true at the current location.
  Builder.CreateStore(Builder.getTrue(), active);

  // Set that as the active flag in the cleanup.
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?");
  cleanup.setActiveFlag(active);

  if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
  if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}

void EHScopeStack::Cleanup::anchor() {}

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set.
    BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
    if (Fixup.Destination == 0) continue;

    // If there isn't an OptimisticBranchBlock, then InitialBranch is
    // still pointing directly to its destination; forward it to the
    // appropriate cleanup entry.  This is required in the specific
    // case of
    //   { std::string s; goto lbl; }
    //   lbl:
    // i.e. where there's an unresolved fixup inside a single cleanup
    // entry which we're currently popping.
    if (Fixup.OptimisticBranchBlock == 0) {
      new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
                          CGF.getNormalCleanupDestSlot(),
                          Fixup.InitialBranch);
      Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
    }

    // Don't add this case to the switch statement twice.
    if (!CasesAdded.insert(Fixup.Destination)) continue;

    Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  CGF.EHStack.clearFixups();
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::TerminatorInst *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    llvm::LoadInst *Load =
      new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = 0;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, LatestBranch is
    // already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB))
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

/// Pops cleanup blocks until the given savepoint is reached.
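/// Note that this can emit code: each normal cleanup popped along the
/// way has its body emitted (if still needed), with fallthrough threaded
/// from one cleanup into the next.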
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
      Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }
}

static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
///
/// Returns the new block, whatever it is.
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Replace all uses of the entry with the predecessor, in case there
  // are phis in the cleanup.
  Entry->replaceAllUsesWith(Pred);

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        EHScopeStack::Cleanup::Flags flags,
                        llvm::Value *ActiveFlag) {
  // EH cleanups always occur within a terminate scope.
  if (flags.isForEHCleanup()) CGF.EHStack.pushTerminate();

  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = 0;
  if (ActiveFlag) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive
      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, flags);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag)
    CGF.EmitBlock(ContBB);

  // Leave the terminate scope.
  if (flags.isForEHCleanup()) CGF.EHStack.popTerminate();
}

static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
                                          llvm::BasicBlock *From,
                                          llvm::BasicBlock *To) {
  // Exit is the exit block of a cleanup, so it always terminates in
  // an unconditional branch or a switch.
  llvm::TerminatorInst *Term = Exit->getTerminator();

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
    Br->setSuccessor(0, To);
  } else {
    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
      if (Switch->getSuccessor(I) == From)
        Switch->setSuccessor(I, To);
  }
}

/// We don't need a normal entry block for the given cleanup.
/// Optimistic fixup branches can cause these blocks to come into
/// existence anyway;  if so, destroy it.
///
/// The validity of this transformation is very much specific to the
/// exact ways in which we form branches to cleanup entries.
static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
                                         EHCleanupScope &scope) {
  llvm::BasicBlock *entry = scope.getNormalBlock();
  if (!entry) return;

  // Replace all the uses with unreachable.
  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
  for (llvm::BasicBlock::use_iterator
         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
    llvm::Use &use = i.getUse();
    ++i;

    use.set(unreachableBB);

    // The only uses should be fixup switches.
    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
    if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
      // Replace the switch with a branch.
      llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);

      // The switch operand is a load from the cleanup-dest alloca.
      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());

      // Destroy the switch.
      si->eraseFromParent();

      // Destroy the load.
      assert(condition->getOperand(0) == CGF.NormalCleanupDest);
      assert(condition->use_empty());
      condition->eraseFromParent();
    }
  }

  assert(entry->use_empty());
  delete entry;
}

/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  llvm::Value *NormalActiveFlag =
    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
  llvm::Value *EHActiveFlag =
    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
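  // (Equivalently, only if something inside the scope made an EH branch
  // to this cleanup; the assert below checks that equivalence.)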
  llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
  assert(Scope.hasEHBranches() == (EHEntry != 0));
  bool RequiresEHCleanup = (EHEntry != 0);
  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != 0 && IsActive);

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope.  The
  // rest of IR gen doesn't need to worry about this; it only happens
  // during the execution of PopCleanupBlocks().
  bool HasPrebranchedFallthrough =
    (FallthroughSource && FallthroughSource->getTerminator());

  // If this is a normal cleanup, then having a prebranched
  // fallthrough implies that the fallthrough source unconditionally
  // jumps here.
  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
         (Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we have a prebranched fallthrough into an inactive normal
  // cleanup, rewrite it so that it leads to the appropriate place.
  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
    llvm::BasicBlock *prebranchDest;

    // If the prebranch is semantically branching through the next
    // cleanup, just forward it to the next block, leaving the
    // insertion point in the prebranched block.
    if (FallthroughIsBranchThrough) {
      EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
      prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));

    // Otherwise, we need to make a new block.  If the normal cleanup
    // isn't being used at all, we could actually reuse the normal
    // entry block, but this is simpler, and it avoids conflicts with
    // dead optimistic fixup branches.
    } else {
      prebranchDest = createBasicBlock("forwarded-prebranch");
      EmitBlock(prebranchDest);
    }

    llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
    assert(normalEntry && !normalEntry->use_empty());

    ForwardPrebranchedFallthrough(FallthroughSource,
                                  normalEntry, prebranchDest);
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out.  Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
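  // The copy is necessary because popping the scope below releases its
  // storage on the EHScopeStack, while the Cleanup object must survive
  // long enough to have its code emitted.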
  SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::Cleanup *Fn =
    reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (Scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (Scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  if (!RequiresNormalCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      destroyOptimisticNormalEntry(*this, Scope);
      EHStack.popCleanup();

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I.  Set up the fallthrough edge in.

      CGBuilderTy::InsertPoint savedInactiveFallthroughIP;

      // If there's a fallthrough, we need to store the cleanup
      // destination index.  For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        savedInactiveFallthroughIP = Builder.saveAndClearIP();
      }

      // II.  Emit the entry block.  This implicitly branches to it if
      // we have fallthrough.  All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // III.  Figure out where we're going and build the cleanup
      // epilogue.

      bool HasEnclosingCleanups =
        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = 0;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = 0;
      SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // TODO: clean up the possibly dead stores to the cleanup dest slot.
        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        llvm::LoadInst *Load =
          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
        llvm::SwitchInst *Switch =
          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV.  Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

      // Append the prepared cleanup prologue from above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        NormalExit->getInstList().push_back(InstsToAppend[I]);

      // Optimistically hope that any fixups will continue falling through.
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
                              getNormalCleanupDestSlot(),
                              Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }

      // V.  Set up the fallthrough edge out.

      // Case 1: a fallthrough source exists but doesn't branch to the
      // cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        // Prebranched fallthrough was forwarded earlier.
        // Non-prebranched fallthrough doesn't need to be forwarded.
        // Either way, all we need to do is restore the IP we cleared before.
        assert(!IsActive);
        Builder.restoreIP(savedInactiveFallthroughIP);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI.  Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
        SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
             I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);

    cleanupFlags.setIsForEHCleanup();
    EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);

    Builder.CreateBr(getEHDispatchBlock(EHParent));

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}

/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
/// specified destination obviously has no cleanups to run.  'false' is always
/// a conservatively correct answer for this method.
bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
    EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
    return true;

  // Otherwise, we might need some cleanups.
  return false;
}


/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope.  The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator
    TopCleanup = EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = 0;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it.  If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}

static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
                                  EHScopeStack::stable_iterator C) {
  // If we needed a normal block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostNormalCleanup();
       I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getNormalBlock()) return true;
    I = S.getEnclosingNormalCleanup();
  }

  return false;
}

static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
                              EHScopeStack::stable_iterator cleanup) {
  // If we needed an EH block for any reason, that counts.
  if (EHStack.find(cleanup)->hasEHBranches())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         i = EHStack.getInnermostEHScope(); i != cleanup; ) {
    assert(cleanup.strictlyEncloses(i));

    EHScope &scope = *EHStack.find(i);
    if (scope.hasEHBranches())
      return true;

    i = scope.getEnclosingEHScope();
  }

  return false;
}

enum ForActivation_t {
  ForActivation,
  ForDeactivation
};

/// The given cleanup block is changing activation state.  Configure a
/// cleanup variable if necessary.
///
/// It would be good if we had some way of determining if there were
/// extra uses *after* the change-over point.
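///
/// Illustratively, when a flag is needed it becomes an i1 alloca
/// ("cleanup.isactive") that is stored at two points:
///   store i1 <prior state>, i1* %cleanup.isactive  ; at the dominating IP
///   store i1 <new state>, i1* %cleanup.isactive    ; at the current point
/// and EmitCleanup tests it before running the cleanup's body.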
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
                                        EHScopeStack::stable_iterator C,
                                        ForActivation_t kind,
                                        llvm::Instruction *dominatingIP) {
  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));

  // We always need the flag if we're activating the cleanup in a
  // conditional context, because we have to assume that the current
  // location doesn't necessarily dominate the cleanup's code.
  bool isActivatedInConditional =
    (kind == ForActivation && CGF.isInConditionalBranch());

  bool needFlag = false;

  // Calculate whether the cleanup was used:

  //  - as a normal cleanup
  if (Scope.isNormalCleanup() &&
      (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInNormalCleanup();
    needFlag = true;
  }

  //  - as an EH cleanup
  if (Scope.isEHCleanup() &&
      (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInEHCleanup();
    needFlag = true;
  }

  // If it hasn't yet been used as either, we're done.
  if (!needFlag) return;

  llvm::AllocaInst *var = Scope.getActiveFlag();
  if (!var) {
    var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
    Scope.setActiveFlag(var);

    assert(dominatingIP && "no existing variable and no dominating IP!");

    // Initialize to true or false depending on whether it was
    // active up to this point.
    llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);

    // If we're in a conditional block, ignore the dominating IP and
    // use the outermost conditional branch.
    if (CGF.isInConditionalBranch()) {
      CGF.setBeforeOutermostConditional(value, var);
    } else {
      new llvm::StoreInst(value, var, dominatingIP);
    }
  }

  CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}

/// Activate a cleanup that was created in an inactivated state.
void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
                                           llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);

  Scope.setActive(true);
}

/// Deactivate a cleanup that was created in an active state.
void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
                                             llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(Scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it.
  if (C == EHStack.stable_begin()) {
    // If it's a normal cleanup, we need to pretend that the
    // fallthrough is unreachable.
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
    PopCleanupBlock();
    Builder.restoreIP(SavedIP);
    return;
  }

  // Otherwise, follow the general case.
  SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);

  Scope.setActive(false);
}

llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest)
    NormalCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return NormalCleanupDest;
}

/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
                                       QualType TempType,
                                       llvm::Value *Ptr) {
  pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
              /*useEHCleanup*/ true);
}