//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope. This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CodeGenFunction.h"

using namespace clang;
using namespace CodeGen;

bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  if (rv.isScalar())
    return DominatingLLVMValue::needsSaving(rv.getScalarVal());
  if (rv.isAggregate())
    return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
  return true;
}

DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr, ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
      llvm::StructType::get(V.first->getType(), V.second->getType(),
                            (void*) nullptr);
    llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    CGF.Builder.CreateStore(V.first,
                            CGF.Builder.CreateStructGEP(ComplexTy, addr, 0));
    CGF.Builder.CreateStore(V.second,
                            CGF.Builder.CreateStructGEP(ComplexTy, addr, 1));
    return saved_type(addr, ComplexAddress);
  }

  assert(rv.isAggregate());
  llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V))
    return saved_type(V, AggregateLiteral);

  llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, addr);
  return saved_type(addr, AggregateAddress);
}

/// Given a saved r-value produced by saved_type::save, emit the code
/// necessary to restore it to usability at the current insertion
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
  switch (K) {
  case ScalarLiteral:
    return RValue::get(Value);
  case ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(Value));
  case AggregateLiteral:
    return RValue::getAggregate(Value);
  case AggregateAddress:
    return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
  case ComplexAddress: {
    llvm::Value *real =
      CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 0));
    llvm::Value *imag =
      CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 1));
    return RValue::getComplex(real, imag);
  }
  }

  llvm_unreachable("bad saved r-value kind");
}

/// Push an entry of the given size onto this protected-scope stack.
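/// Entries live at the top of a buffer that grows downward: the bytes in
/// [StartOfData, EndOfBuffer) hold the pushed scopes, innermost first, and
/// a push simply moves StartOfData toward StartOfBuffer, doubling the
/// buffer when the free space in [StartOfBuffer, StartOfData) runs out.
/// Illustrative layout:
///
///   StartOfBuffer        StartOfData                       EndOfBuffer
///   |---- free space ----|-- innermost scope --|...|-- outermost scope --|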
char *EHScopeStack::allocate(size_t Size) {
  if (!StartOfBuffer) {
    unsigned Capacity = 1024;
    while (Capacity < Size) Capacity *= 2;
    StartOfBuffer = new char[Capacity];
    StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
  } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
    unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
    unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);

    unsigned NewCapacity = CurrentCapacity;
    do {
      NewCapacity *= 2;
    } while (NewCapacity < UsedCapacity + Size);

    char *NewStartOfBuffer = new char[NewCapacity];
    char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
    char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
    memcpy(NewStartOfData, StartOfData, UsedCapacity);
    delete [] StartOfBuffer;
    StartOfBuffer = NewStartOfBuffer;
    EndOfBuffer = NewEndOfBuffer;
    StartOfData = NewStartOfData;
  }

  assert(StartOfBuffer + Size <= StartOfData);
  StartOfData -= Size;
  return StartOfData;
}

EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
         si != se; ) {
    EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive()) return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}

EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const {
  for (stable_iterator si = getInnermostEHScope(), se = stable_end();
         si != se; ) {
    // Skip over inactive cleanups.
    EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si));
    if (cleanup && !cleanup->isActive()) {
      si = cleanup->getEnclosingEHScope();
      continue;
    }

    // All other scopes are always active.
    return si;
  }

  return stable_end();
}


void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
  assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
  char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
  bool IsNormalCleanup = Kind & NormalCleanup;
  bool IsEHCleanup = Kind & EHCleanup;
  bool IsActive = !(Kind & InactiveCleanup);
  EHCleanupScope *Scope =
    new (Buffer) EHCleanupScope(IsNormalCleanup,
                                IsEHCleanup,
                                IsActive,
                                Size,
                                BranchFixups.size(),
                                InnermostNormalCleanup,
                                InnermostEHScope);
  if (IsNormalCleanup)
    InnermostNormalCleanup = stable_begin();
  if (IsEHCleanup)
    InnermostEHScope = stable_begin();

  return Scope->getCleanupBuffer();
}

void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when not empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
  InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
  InnermostEHScope = Cleanup.getEnclosingEHScope();
  StartOfData += Cleanup.getAllocatedSize();

  // Destroy the cleanup.
  Cleanup.Destroy();

  // Check whether we can shrink the branch-fixups stack.
  if (!BranchFixups.empty()) {
    // If we no longer have any normal cleanups, all the fixups are
    // complete.
    if (!hasNormalCleanups())
      BranchFixups.clear();

    // Otherwise we can still trim out unnecessary nulls.
    else
      popNullFixups();
  }
}

EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
  assert(getInnermostEHScope() == stable_end());
  char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
  EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
  InnermostEHScope = stable_begin();
  return filter;
}

void EHScopeStack::popFilter() {
  assert(!empty() && "popping exception stack when not empty");

  EHFilterScope &filter = cast<EHFilterScope>(*begin());
  StartOfData += EHFilterScope::getSizeForNumFilters(filter.getNumFilters());

  InnermostEHScope = filter.getEnclosingEHScope();
}

EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
  EHCatchScope *scope =
    new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
  InnermostEHScope = stable_begin();
  return scope;
}

void EHScopeStack::pushTerminate() {
  char *Buffer = allocate(EHTerminateScope::getSize());
  new (Buffer) EHTerminateScope(InnermostEHScope);
  InnermostEHScope = stable_begin();
}

/// Remove any 'null' fixups on the stack. However, we can't pop more
/// fixups than the fixup depth on the innermost normal cleanup, or
/// else fixups that we try to add to that cleanup will end up in the
/// wrong place. We *could* try to shrink fixup depths, but that's
/// actually a lot of work for little benefit.
void EHScopeStack::popNullFixups() {
  // We expect this to only be called when there's still an innermost
  // normal cleanup; otherwise there really shouldn't be any fixups.
  assert(hasNormalCleanups());

  EHScopeStack::iterator it = find(InnermostNormalCleanup);
  unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
  assert(BranchFixups.size() >= MinSize && "fixup stack out of order");

  while (BranchFixups.size() > MinSize &&
         BranchFixups.back().Destination == nullptr)
    BranchFixups.pop_back();
}

void CodeGenFunction::initFullExprCleanup() {
  // Create a variable to decide whether the cleanup needs to be run.
  llvm::AllocaInst *active
    = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");

  // Initialize it to false at a site that's guaranteed to be run
  // before each evaluation.
  setBeforeOutermostConditional(Builder.getFalse(), active);

  // Initialize it to true at the current location.
  Builder.CreateStore(Builder.getTrue(), active);

  // Set that as the active flag in the cleanup.
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  assert(!cleanup.getActiveFlag() && "cleanup already has active flag?");
  cleanup.setActiveFlag(active);

  if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
  if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}

void EHScopeStack::Cleanup::anchor() {}

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set.
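    // (ResolveBranchFixups clears a fixup's destination once that fixup
    // has been resolved, so a null destination means there is nothing
    // left to do for it.)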
    BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
    if (Fixup.Destination == nullptr) continue;

    // If there isn't an OptimisticBranchBlock, then InitialBranch is
    // still pointing directly to its destination; forward it to the
    // appropriate cleanup entry. This is required in the specific
    // case of
    //   { std::string s; goto lbl; }
    //   lbl:
    // i.e. where there's an unresolved fixup inside a single cleanup
    // entry which we're currently popping.
    if (Fixup.OptimisticBranchBlock == nullptr) {
      new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
                          CGF.getNormalCleanupDestSlot(),
                          Fixup.InitialBranch);
      Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
    }

    // Don't add this case to the switch statement twice.
    if (!CasesAdded.insert(Fixup.Destination).second)
      continue;

    Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  CGF.EHStack.clearFixups();
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::TerminatorInst *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    llvm::LoadInst *Load =
      new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = nullptr;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, InitialBranch is
    // already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB).second)
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

/// Pops cleanup blocks until the given savepoint is reached.
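/// For example (an illustrative sketch of the usual pattern; the RAII
/// wrapper CodeGenFunction::RunCleanupsScope does exactly this):
///
///   EHScopeStack::stable_iterator Depth = EHStack.stable_begin();
///   // ... push cleanups, emit the scope's body ...
///   PopCleanupBlocks(Depth); // pops everything pushed since Depth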
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
      Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }
}

/// Pops cleanup blocks until the given savepoint is reached, then adds the
/// cleanups from the given savepoint in the lifetime-extended cleanups stack.
void
CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
                                  size_t OldLifetimeExtendedSize) {
  PopCleanupBlocks(Old);

  // Move our deferred cleanups onto the EH stack.
  for (size_t I = OldLifetimeExtendedSize,
              E = LifetimeExtendedCleanupStack.size(); I != E; /**/) {
    // Alignment should be guaranteed by the vptrs in the individual cleanups.
    assert((I % llvm::alignOf<LifetimeExtendedCleanupHeader>() == 0) &&
           "misaligned cleanup stack entry");

    LifetimeExtendedCleanupHeader &Header =
        reinterpret_cast<LifetimeExtendedCleanupHeader&>(
            LifetimeExtendedCleanupStack[I]);
    I += sizeof(Header);

    EHStack.pushCopyOfCleanup(Header.getKind(),
                              &LifetimeExtendedCleanupStack[I],
                              Header.getSize());
    I += Header.getSize();
  }
  LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize);
}

static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

/// Attempts to reduce a cleanup's entry block to a fallthrough. This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
///
/// Returns the new block, whatever it is.
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Replace all uses of the entry with the predecessor, in case there
  // are phis in the cleanup.
  Entry->replaceAllUsesWith(Pred);

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        EHScopeStack::Cleanup::Flags flags,
                        llvm::Value *ActiveFlag) {
  // Itanium EH cleanups occur within a terminate scope. Microsoft SEH doesn't
  // have this behavior, and the Microsoft C++ runtime will call terminate for
  // us if the cleanup throws.
  bool PushedTerminate = false;
  if (flags.isForEHCleanup() && !CGF.getTarget().getCXXABI().isMicrosoft()) {
    CGF.EHStack.pushTerminate();
    PushedTerminate = true;
  }

  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = nullptr;
  if (ActiveFlag) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive
      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, flags);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag)
    CGF.EmitBlock(ContBB);

  // Leave the terminate scope.
  if (PushedTerminate)
    CGF.EHStack.popTerminate();
}

static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
                                          llvm::BasicBlock *From,
                                          llvm::BasicBlock *To) {
  // Exit is the exit block of a cleanup, so it always terminates in
  // an unconditional branch or a switch.
  llvm::TerminatorInst *Term = Exit->getTerminator();

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
    Br->setSuccessor(0, To);
  } else {
    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
      if (Switch->getSuccessor(I) == From)
        Switch->setSuccessor(I, To);
  }
}

/// We don't need a normal entry block for the given cleanup.
/// Optimistic fixup branches can cause such a block to come into
/// existence anyway; if so, destroy it.
///
/// The validity of this transformation is very much specific to the
/// exact ways in which we form branches to cleanup entries.
static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
                                         EHCleanupScope &scope) {
  llvm::BasicBlock *entry = scope.getNormalBlock();
  if (!entry) return;

  // Replace all the uses with unreachable.
  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
  for (llvm::BasicBlock::use_iterator
         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
    llvm::Use &use = *i;
    ++i;

    use.set(unreachableBB);

    // The only uses should be fixup switches.
    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
    if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
      // Replace the switch with a branch.
      llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);

      // The switch operand is a load from the cleanup-dest alloca.
      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());

      // Destroy the switch.
      si->eraseFromParent();

      // Destroy the load.
      assert(condition->getOperand(0) == CGF.NormalCleanupDest);
      assert(condition->use_empty());
      condition->eraseFromParent();
    }
  }

  assert(entry->use_empty());
  delete entry;
}

/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  llvm::Value *NormalActiveFlag =
    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : nullptr;
  llvm::Value *EHActiveFlag =
    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : nullptr;

  // Check whether we need an EH cleanup. This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
  assert(Scope.hasEHBranches() == (EHEntry != nullptr));
  bool RequiresEHCleanup = (EHEntry != nullptr);
  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != nullptr && IsActive);

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope. The
  // rest of IR gen doesn't need to worry about this; it only happens
  // during the execution of PopCleanupBlocks().
  bool HasPrebranchedFallthrough =
    (FallthroughSource && FallthroughSource->getTerminator());

  // If this is a normal cleanup, then having a prebranched
  // fallthrough implies that the fallthrough source unconditionally
  // jumps here.
  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
         (Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we have a prebranched fallthrough into an inactive normal
  // cleanup, rewrite it so that it leads to the appropriate place.
  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
    llvm::BasicBlock *prebranchDest;

    // If the prebranch is semantically branching through the next
    // cleanup, just forward it to the next block, leaving the
    // insertion point in the prebranched block.
    if (FallthroughIsBranchThrough) {
      EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
      prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));

    // Otherwise, we need to make a new block. If the normal cleanup
    // isn't being used at all, we could actually reuse the normal
    // entry block, but this is simpler, and it avoids conflicts with
    // dead optimistic fixup branches.
    } else {
      prebranchDest = createBasicBlock("forwarded-prebranch");
      EmitBlock(prebranchDest);
    }

    llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
    assert(normalEntry && !normalEntry->use_empty());

    ForwardPrebranchedFallthrough(FallthroughSource,
                                  normalEntry, prebranchDest);
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out. Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::Cleanup *Fn =
    reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (Scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (Scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  if (!RequiresNormalCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      destroyOptimisticNormalEntry(*this, Scope);
      EHStack.popCleanup();

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I. Set up the fallthrough edge in.

      CGBuilderTy::InsertPoint savedInactiveFallthroughIP;

      // If there's a fallthrough, we need to store the cleanup
      // destination index. For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        savedInactiveFallthroughIP = Builder.saveAndClearIP();
      }

      // II. Emit the entry block. This implicitly branches to it if
      // we have fallthrough. All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // III. Figure out where we're going and build the cleanup
      // epilogue.
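      // There are three ways out: a single branch-after that can be
      // routed directly, a switch over the cleanup destination slot when
      // several destinations are live, or an unconditional branch-through
      // to the enclosing cleanup's entry block.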

      bool HasEnclosingCleanups =
        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = nullptr;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = nullptr;
      SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // TODO: clean up the possibly dead stores to the cleanup dest slot.
        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        llvm::LoadInst *Load =
          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
        llvm::SwitchInst *Switch =
          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV. Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

      // Append the cleanup epilogue prepared above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        NormalExit->getInstList().push_back(InstsToAppend[I]);

      // Optimistically hope that any fixups will continue falling through.
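      // That is, point each pending fixup at NormalExit; if a fixup later
      // resolves to some other destination, ResolveBranchFixups will turn
      // NormalExit's terminator into a cleanup switch.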
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
                              getNormalCleanupDestSlot(),
                              Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }

      // V. Set up the fallthrough edge out.

      // Case 1: a fallthrough source exists but doesn't branch to the
      // cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        // Prebranched fallthrough was forwarded earlier.
        // Non-prebranched fallthrough doesn't need to be forwarded.
        // Either way, all we need to do is restore the IP we cleared before.
        assert(!IsActive);
        Builder.restoreIP(savedInactiveFallthroughIP);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI. Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
        SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
             I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);

    // We only actually emit the cleanup code if the cleanup is either
    // active or was used before it was deactivated.
    if (EHActiveFlag || IsActive) {
      cleanupFlags.setIsForEHCleanup();
      EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
    }

    Builder.CreateBr(getEHDispatchBlock(EHParent));

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}

/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
/// specified destination obviously has no cleanups to run. 'false' is always
/// a conservatively correct answer for this method.
bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
           && "stale jump destination");

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
    EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
    return true;

  // Otherwise, we might need some cleanups.
  return false;
}


/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope. The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
           && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator
    TopCleanup = EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = nullptr;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it. If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}

static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
                                  EHScopeStack::stable_iterator C) {
  // If we needed a normal block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
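  // (If a cleanup nested inside this one acquired a normal entry block,
  // branches leaving that nested cleanup may thread through this one as
  // well, so it conservatively counts as a use.)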
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostNormalCleanup();
         I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getNormalBlock()) return true;
    I = S.getEnclosingNormalCleanup();
  }

  return false;
}

static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
                              EHScopeStack::stable_iterator cleanup) {
  // If we needed an EH block for any reason, that counts.
  if (EHStack.find(cleanup)->hasEHBranches())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         i = EHStack.getInnermostEHScope(); i != cleanup; ) {
    assert(cleanup.strictlyEncloses(i));

    EHScope &scope = *EHStack.find(i);
    if (scope.hasEHBranches())
      return true;

    i = scope.getEnclosingEHScope();
  }

  return false;
}

enum ForActivation_t {
  ForActivation,
  ForDeactivation
};

/// The given cleanup block is changing activation state. Configure a
/// cleanup variable if necessary.
///
/// It would be good if we had some way of determining if there were
/// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
                                        EHScopeStack::stable_iterator C,
                                        ForActivation_t kind,
                                        llvm::Instruction *dominatingIP) {
  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));

  // We always need the flag if we're activating the cleanup in a
  // conditional context, because we have to assume that the current
  // location doesn't necessarily dominate the cleanup's code.
  bool isActivatedInConditional =
    (kind == ForActivation && CGF.isInConditionalBranch());

  bool needFlag = false;

  // Calculate whether the cleanup was used:

  //  - as a normal cleanup
  if (Scope.isNormalCleanup() &&
      (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInNormalCleanup();
    needFlag = true;
  }

  //  - as an EH cleanup
  if (Scope.isEHCleanup() &&
      (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInEHCleanup();
    needFlag = true;
  }

  // If it hasn't yet been used as either, we're done.
  if (!needFlag) return;

  llvm::AllocaInst *var = Scope.getActiveFlag();
  if (!var) {
    var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
    Scope.setActiveFlag(var);

    assert(dominatingIP && "no existing variable and no dominating IP!");

    // Initialize to true or false depending on whether it was
    // active up to this point.
    llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);

    // If we're in a conditional block, ignore the dominating IP and
    // use the outermost conditional branch.
    if (CGF.isInConditionalBranch()) {
      CGF.setBeforeOutermostConditional(value, var);
    } else {
      new llvm::StoreInst(value, var, dominatingIP);
    }
  }

  CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}

/// Activate a cleanup that was created in an inactivated state.
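///
/// For example (an illustrative sketch; MyCleanup stands in for any
/// EHScopeStack::Cleanup subclass, and DominatingIP for an instruction
/// known to dominate all paths that could reach the cleanup):
///
///   EHStack.pushCleanup<MyCleanup>(InactiveNormalCleanup, ...);
///   EHScopeStack::stable_iterator C = EHStack.stable_begin();
///   // ... emit code that may or may not make the cleanup relevant ...
///   ActivateCleanupBlock(C, DominatingIP);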
void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
                                           llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);

  Scope.setActive(true);
}

/// Deactivate a cleanup that was created in an active state.
void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
                                             llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(Scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it.
  if (C == EHStack.stable_begin()) {
    // If it's a normal cleanup, we need to pretend that the
    // fallthrough is unreachable.
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
    PopCleanupBlock();
    Builder.restoreIP(SavedIP);
    return;
  }

  // Otherwise, follow the general case.
  SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);

  Scope.setActive(false);
}

llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest)
    NormalCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return NormalCleanupDest;
}

/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
                                       QualType TempType,
                                       llvm::Value *Ptr) {
  pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
              /*useEHCleanup*/ true);
}