//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getLocStart();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
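
    // Illustrative note (not from the original source): ordinary expression
    // statements such as a bare call "f();" or an assignment "x = y;" land in
    // this case and are emitted via EmitIgnoredExpr below.
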
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:     EmitIfStmt(cast<IfStmt>(*S));       break;
  case Stmt::WhileStmtClass:  EmitWhileStmt(cast<WhileStmt>(*S)); break;
  case Stmt::DoStmtClass:     EmitDoStmt(cast<DoStmt>(*S));       break;
  case Stmt::ForStmtClass:    EmitForStmt(cast<ForStmt>(*S));     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));     break;
  case Stmt::CoroutineBodyStmtClass:
  case Stmt::CoreturnStmtClass:
    CGM.ErrorUnsupported(S, "coroutine");
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default: return false;
  case Stmt::NullStmtClass: break;
  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
  case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
  case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
  case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
  case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
  case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
  case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
  }

  return true;
}
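
// Illustrative example (not from the original source): the GNU
// statement-expression extension
//   int x = ({ int y = f(); y + 1; });
// relies on GetLast below; the value of the last sub-statement ("y + 1")
// becomes the value of the whole expression.
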
/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  for (CompoundStmt::const_body_iterator I = S.body_begin(),
       E = S.body_end()-GetLast; I != E; ++I)
    EmitStmt(*I);

  Address RetAlloca = Address::invalid();
  if (GetLast) {
    // We have to special case labels here.  They are statements, but when put
    // at the end of a statement expression, they yield the value of their
    // subexpression.  Handle this by walking through all labels we encounter,
    // emitting them before we evaluate the subexpr.
    const Stmt *LastStmt = S.body_back();
    while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
      EmitLabel(LS->getDecl());
      LastStmt = LS->getSubStmt();
    }

    EnsureInsertPoint();

    QualType ExprTy = cast<Expr>(LastStmt)->getType();
    if (hasAggregateEvaluationKind(ExprTy)) {
      EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
    } else {
      // We can't return an RValue here because there might be cleanups at
      // the end of the StmtExpr.  Because of that, we have to emit the result
      // here into a temporary alloca.
      RetAlloca = CreateMemTemp(ExprTy);
      EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
                       /*IsInit*/false);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}
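
// Illustrative example (not from the original source): nested control flow
// inside a loop body often leaves behind a block whose only instruction is an
// unconditional "br label %while.cond"; the helper above redirects its uses
// to the successor and erases the empty block.
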
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());
  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}
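
// Illustrative example (a rough sketch, not from the original source): in
//   { X x; l: f(); }
// the label 'l' is first given the depth of x's cleanup scope; when this
// lexical scope pops while 'l' may still be a goto target, rescopeLabels
// above rewrites its depth to the enclosing cleanup scope.
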
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  const Stmt *SubStmt = S.getSubStmt();
  switch (SubStmt->getStmtClass()) {
  case Stmt::DoStmtClass:
    EmitDoStmt(cast<DoStmt>(*SubStmt), S.getAttrs());
    break;
  case Stmt::ForStmtClass:
    EmitForStmt(cast<ForStmt>(*SubStmt), S.getAttrs());
    break;
  case Stmt::WhileStmtClass:
    EmitWhileStmt(cast<WhileStmt>(*SubStmt), S.getAttrs());
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*SubStmt), S.getAttrs());
    break;
  default:
    EmitStmt(SubStmt);
  }
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());
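
  // Illustrative example (not from the original source): for
  // "if (0) { ... } else { ... }" only the else arm is emitted (provided the
  // skipped arm contains no labels), so no conditional branch appears in the
  // IR at all.
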
  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just
  // emit the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
                       getProfileCount(S.getThen()));

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  {
    auto CurBlock = Builder.GetInsertBlock();
    EmitBranch(ContBlock);
    // Eliminate any empty blocks that may have been created by nested
    // control flow statements in the 'then' clause.
    if (CurBlock)
      SimplifyForwardingBlocks(CurBlock);
  }

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit a line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit a line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      auto CurBlock = Builder.GetInsertBlock();
      EmitBranch(ContBlock);
      // Eliminate any empty blocks that may have been created by nested
      // control flow statements emitted in the 'else' clause.
      if (CurBlock)
        SimplifyForwardingBlocks(CurBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs,
                 Builder.getCurrentDebugLocation());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;
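
  // Illustrative example (not from the original source): for
  // "while (1) { ... }" BoolCondVal folds to the constant i1 true, so the
  // conditional branch below is skipped and the header simply falls through
  // into the body.
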
  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
                 Builder.getCurrentDebugLocation());

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the 'do' condition block.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;
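
  // Illustrative example (not from the original source): for
  // "do { ... } while (0)" BoolCondVal folds to false, so no backedge is
  // emitted and the do.cond block is collapsed into do.end by
  // SimplifyForwardingBlocks at the end of this function.
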
  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  llvm::DebugLoc DL = Builder.getCurrentDebugLocation();

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs, DL);

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitAutoVarDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(
        BoolCondVal, ForBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
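
// Illustrative note (not from the original source): a range-based for such as
// "for (auto x : v) ..." arrives below already desugared; the range, __begin
// and __end declarations, a condition comparing __begin != __end, the
// loop-variable binding, and an increment of __begin are emitted as separate
// pieces.
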
void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  llvm::DebugLoc DL = Builder.getCurrentDebugLocation();

  // Evaluate the first pieces before the loop.
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs, DL);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  Builder.CreateCondBr(
      BoolCondVal, ForBody, ExitBlock,
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.  In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    EmitAggregateCopy(ReturnValue, RV.getAggregateAddress(), Ty);
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}
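
// Illustrative example (not from the original source): in
//   T f() { T local; ...; return local; }
// 'local' is an NRVO candidate; with ElideConstructors enabled the return
// emits no copy, since 'local' was constructed directly in the return slot.
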
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Treat block literals in a return expression as if they appeared
  // in their own scope.  This permits a small, easily-implemented
  // exception to our over-conservative rules about not jumping to
  // statements following block literals with non-trivial cleanups.
  RunCleanupsScope cleanupScope(*this);
  if (const ExprWithCleanups *cleanups =
        dyn_cast_or_null<ExprWithCleanups>(RV)) {
    enterFullExpression(cleanups);
    RV = cleanups->getSubExpr();
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  if (getLangOpts().ElideConstructors &&
      S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized).
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue,
                                            Qualifiers(),
                                            AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If the case statement range is not too big, add
/// multiple cases to the switch instruction, one for each value within
/// the range.  If the range is too big, emit an "if" condition check instead.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case.  We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If the range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved.  E.g., a weight of 5 over three
    // cases will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      LHS++;
    }
    return;
  }

  // The range is too big.  Emit an "if" condition check into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block).  The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);
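
  // Illustrative note (not from the original source): the check below uses
  // the classic unsigned-wrap trick; "LHS <= V && V <= RHS" is computed as
  // "(V - LHS) <=u (RHS - LHS)", one subtraction plus one unsigned compare.
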
  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  }
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided.  This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement.  For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S);
    return;
  }

  llvm::ConstantInt *CaseVal =
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive.  It also causes
  // deep recursion which can run into stack depth limitations.  Handle
  // sequential non-range case statements specially.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
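
  // Illustrative example (not from the original source): for
  //   case 1: case 2: case 3: return f();
  // the loop below adds the values 1, 2 and 3 to the switch, all pointing at
  // one shared block, instead of recursing once per case label.
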
  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, &S);
    }

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit.  Basically,
/// on a switch (5) we want to find these statements:
///   case 5:
///     printf(...);    <--
///     ++i;            <--
///     break;
///
/// and add them to the ResultStmts vector.  If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels.  If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result
CollectStatementsForCase(const Stmt *S, const SwitchCase *Case,
                         bool &FoundCase,
                         SmallVectorImpl<const Stmt *> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for,
  // then we're in business.  Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;
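
  // Illustrative note (not from the original source): for a body like
  // "{ case 5: a(); b(); break; }" the compound handling below first skips
  // statements while searching for case 5, then collects a() and b() until
  // the break yields CSFC_Success.
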
  // If this is a switch statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    if (Case) {
      // Keep track of whether we see a skipped declaration.  The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= isa<DeclStmt>(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means either 1) the statement doesn't have
          // the case and is skippable, or 2) it does contain the case value
          // and also contains the break to exit the switch.  In the latter
          // case, we just verify the rest of the statements are elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements.  Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    for (; I != E; ++I) {
      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmt, keep adding them afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion.  We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    return Case ? CSFC_Success : CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc.  If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement.  Everything is cool with
  // that so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great.  Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}
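
// Illustrative example (not from the original source): for
//   switch (5) { case 5: printf("x"); ++i; break; default: g(); }
// the helpers above and below reduce emission to just the printf and the
// increment; the default arm is dead and never lowered.
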
/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant.  See the comment above CollectStatementsForCase
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                       SmallVectorImpl<const Stmt *> &ResultStmts,
                                       ASTContext &C,
                                       const SwitchCase *&ResultCase) {
  // First step, find the switch case that is being branched to.  We can do
  // this efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = nullptr;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or case.  Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (!Case) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc.  If it is safe, return successfully with an empty ResultStmts list.
    if (!DefaultCase)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it.  This can fail for a variety of reasons.  Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  // switch (4) {
  //   while (1) {
  //     case 4: ...
  bool FoundCase = false;
  ResultCase = Case;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}

void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  // Handle nested switch statements.
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore only
  // emit the live case statement (if any) of the switch.
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt*, 4> CaseStmts;
    const SwitchCase *Case = nullptr;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext(), Case)) {
      if (Case)
        incrementProfileCounter(Case);
      RunCleanupsScope ExecutedScope(*this);

      if (S.getInit())
        EmitStmt(S.getInit());

      // Emit the condition variable if needed inside the entire cleanup scope
      // used by this special case for constant folded switches.
      if (S.getConditionVariable())
        EmitAutoVarDecl(*S.getConditionVariable());

      // At this point, we are no longer "within" a switch instance, so
      // we can temporarily enforce this to ensure that any embedded case
      // statements are not emitted.
      SwitchInsn = nullptr;

      // Okay, we can dead code eliminate everything except this case.  Emit
      // the specified series of statements and we're good.
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
        EmitStmt(CaseStmts[i]);
      incrementProfileCounter(&S);

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly.
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }

  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());
  llvm::Value *CondV = EmitScalarExpr(S.getCond());

  // Create basic block to hold stuff that comes after switch
  // statement.  We also need to create a default block now so that
  // explicit case range tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
  if (PGO.haveRegionCounts()) {
    // Walk the SwitchCase list to find how many there are.
    uint64_t DefaultCount = 0;
    unsigned NumCases = 0;
    for (const SwitchCase *Case = S.getSwitchCaseList();
         Case;
         Case = Case->getNextSwitchCase()) {
      if (isa<DefaultStmt>(Case))
        DefaultCount = getProfileCount(Case);
      NumCases += 1;
    }
    SwitchWeights = new SmallVector<uint64_t, 16>();
    SwitchWeights->reserve(NumCases);
    // The default needs to be first.  We store the edge count, so we already
    // know the right weight.
    SwitchWeights->push_back(DefaultCount);
  }
  CaseRangeBlock = DefaultBlock;

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to NextBlock.  If BreakContinueStack is
  // non-empty then reuse last ContinueBlock.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();
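
  // Illustrative note (not from the original source): each large case range
  // emitted above pushed a new sw.caserange block onto the front of
  // CaseRangeBlock; redirecting the default below sends unmatched values
  // through those range tests before they reach the real default.
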
1575   SwitchInsn->setDefaultDest(CaseRangeBlock);
1576
1577   // If a default was never emitted:
1578   if (!DefaultBlock->getParent()) {
1579     // If we have cleanups, emit the default block so that there's a
1580     // place to jump through the cleanups from.
1581     if (ConditionScope.requiresCleanups()) {
1582       EmitBlock(DefaultBlock);
1583
1584     // Otherwise, just forward the default block to the switch end.
1585     } else {
1586       DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1587       delete DefaultBlock;
1588     }
1589   }
1590
1591   ConditionScope.ForceCleanup();
1592
1593   // Emit continuation.
1594   EmitBlock(SwitchExit.getBlock(), true);
1595   incrementProfileCounter(&S);
1596
1597   // If the switch has a condition wrapped by __builtin_unpredictable,
1598   // create metadata that specifies that the switch is unpredictable.
1599   // Don't bother if not optimizing because that metadata would not be used.
1600   auto *Call = dyn_cast<CallExpr>(S.getCond());
1601   if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1602     auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1603     if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1604       llvm::MDBuilder MDHelper(getLLVMContext());
1605       SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1606                               MDHelper.createUnpredictable());
1607     }
1608   }
1609
1610   if (SwitchWeights) {
1611     assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1612            "switch weights do not match switch cases");
1613     // If there's only one jump destination, there's no sense weighting it.
1614     if (SwitchWeights->size() > 1)
1615       SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1616                               createProfileWeights(*SwitchWeights));
1617     delete SwitchWeights;
1618   }
1619   SwitchInsn = SavedSwitchInsn;
1620   SwitchWeights = SavedSwitchWeights;
1621   CaseRangeBlock = SavedCRBlock;
1622 }
1623
1624 static std::string
1625 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1626                    SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
1627   std::string Result;
1628
1629   while (*Constraint) {
1630     switch (*Constraint) {
1631     default:
1632       Result += Target.convertConstraint(Constraint);
1633       break;
1634     // Ignore these.
1635     case '*':
1636     case '?':
1637     case '!':
1638     case '=': // Will see this and the following in multi-alternative constraints.
1639     case '+':
1640       break;
1641     case '#': // Ignore the rest of the constraint alternative.
1642       while (Constraint[1] && Constraint[1] != ',')
1643         Constraint++;
1644       break;
1645     case '&':
1646     case '%':
1647       Result += *Constraint;
1648       while (Constraint[1] && Constraint[1] == *Constraint)
1649         Constraint++;
1650       break;
1651     case ',':
1652       Result += "|";
1653       break;
1654     case 'g':
1655       Result += "imr";
1656       break;
1657     case '[': {
1658       assert(OutCons &&
1659              "Must pass output names to constraints with a symbolic name");
1660       unsigned Index;
1661       bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
1662       assert(result && "Could not resolve symbolic name"); (void)result;
1663       Result += llvm::utostr(Index);
1664       break;
1665     }
1666     }
1667
1668     Constraint++;
1669   }
1670
1671   return Result;
1672 }
1673
1674 /// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
1675 /// as using a particular register, add that register as a constraint that
1676 /// will be used in this asm stmt.
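/// For example (an illustrative sketch, assuming an x86 target):
///
///   register int Val asm("eax");
///   asm("..." : "=r"(Val));
///
/// turns the simplified "r" constraint into "{eax}" so that the backend pins
/// the operand to that register.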
1677 static std::string 1678 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, 1679 const TargetInfo &Target, CodeGenModule &CGM, 1680 const AsmStmt &Stmt, const bool EarlyClobber) { 1681 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr); 1682 if (!AsmDeclRef) 1683 return Constraint; 1684 const ValueDecl &Value = *AsmDeclRef->getDecl(); 1685 const VarDecl *Variable = dyn_cast<VarDecl>(&Value); 1686 if (!Variable) 1687 return Constraint; 1688 if (Variable->getStorageClass() != SC_Register) 1689 return Constraint; 1690 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>(); 1691 if (!Attr) 1692 return Constraint; 1693 StringRef Register = Attr->getLabel(); 1694 assert(Target.isValidGCCRegisterName(Register)); 1695 // We're using validateOutputConstraint here because we only care if 1696 // this is a register constraint. 1697 TargetInfo::ConstraintInfo Info(Constraint, ""); 1698 if (Target.validateOutputConstraint(Info) && 1699 !Info.allowsRegister()) { 1700 CGM.ErrorUnsupported(&Stmt, "__asm__"); 1701 return Constraint; 1702 } 1703 // Canonicalize the register here before returning it. 1704 Register = Target.getNormalizedGCCRegisterName(Register); 1705 return (EarlyClobber ? "&{" : "{") + Register.str() + "}"; 1706 } 1707 1708 llvm::Value* 1709 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, 1710 LValue InputValue, QualType InputType, 1711 std::string &ConstraintStr, 1712 SourceLocation Loc) { 1713 llvm::Value *Arg; 1714 if (Info.allowsRegister() || !Info.allowsMemory()) { 1715 if (CodeGenFunction::hasScalarEvaluationKind(InputType)) { 1716 Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal(); 1717 } else { 1718 llvm::Type *Ty = ConvertType(InputType); 1719 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); 1720 if (Size <= 64 && llvm::isPowerOf2_64(Size)) { 1721 Ty = llvm::IntegerType::get(getLLVMContext(), Size); 1722 Ty = llvm::PointerType::getUnqual(Ty); 1723 1724 Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(), 1725 Ty)); 1726 } else { 1727 Arg = InputValue.getPointer(); 1728 ConstraintStr += '*'; 1729 } 1730 } 1731 } else { 1732 Arg = InputValue.getPointer(); 1733 ConstraintStr += '*'; 1734 } 1735 1736 return Arg; 1737 } 1738 1739 llvm::Value* CodeGenFunction::EmitAsmInput( 1740 const TargetInfo::ConstraintInfo &Info, 1741 const Expr *InputExpr, 1742 std::string &ConstraintStr) { 1743 // If this can't be a register or memory, i.e., has to be a constant 1744 // (immediate or symbolic), try to emit it as such. 1745 if (!Info.allowsRegister() && !Info.allowsMemory()) { 1746 llvm::APSInt Result; 1747 if (InputExpr->EvaluateAsInt(Result, getContext())) 1748 return llvm::ConstantInt::get(getLLVMContext(), Result); 1749 assert(!Info.requiresImmediateConstant() && 1750 "Required-immediate inlineasm arg isn't constant?"); 1751 } 1752 1753 if (Info.allowsRegister() || !Info.allowsMemory()) 1754 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType())) 1755 return EmitScalarExpr(InputExpr); 1756 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass) 1757 return EmitScalarExpr(InputExpr); 1758 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext()); 1759 LValue Dest = EmitLValue(InputExpr); 1760 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, 1761 InputExpr->getExprLoc()); 1762 } 1763 1764 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline 1765 /// asm call instruction. 
The !srcloc MDNode contains a list of constant
1766 /// integers which are the source locations of the start of each line in the
1767 /// asm.
1768 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
1769                                       CodeGenFunction &CGF) {
1770   SmallVector<llvm::Metadata *, 8> Locs;
1771   // Add the location of the first line to the MDNode.
1772   Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1773       CGF.Int32Ty, Str->getLocStart().getRawEncoding())));
1774   StringRef StrVal = Str->getString();
1775   if (!StrVal.empty()) {
1776     const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
1777     const LangOptions &LangOpts = CGF.CGM.getLangOpts();
1778     unsigned StartToken = 0;
1779     unsigned ByteOffset = 0;
1780
1781     // Add the location of the start of each subsequent line of the asm to the
1782     // MDNode.
1783     for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
1784       if (StrVal[i] != '\n') continue;
1785       SourceLocation LineLoc = Str->getLocationOfByte(
1786           i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
1787       Locs.push_back(llvm::ConstantAsMetadata::get(
1788           llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
1789     }
1790   }
1791
1792   return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
1793 }
1794
1795 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
1796   // Assemble the final asm string.
1797   std::string AsmString = S.generateAsmString(getContext());
1798
1799   // Get all the output and input constraints together.
1800   SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
1801   SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
1802
1803   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1804     StringRef Name;
1805     if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1806       Name = GAS->getOutputName(i);
1807     TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
1808     bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
1809     assert(IsValid && "Failed to parse output constraint");
1810     OutputConstraintInfos.push_back(Info);
1811   }
1812
1813   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1814     StringRef Name;
1815     if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1816       Name = GAS->getInputName(i);
1817     TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
1818     bool IsValid =
1819       getTarget().validateInputConstraint(OutputConstraintInfos, Info);
1820     assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
1821     InputConstraintInfos.push_back(Info);
1822   }
1823
1824   std::string Constraints;
1825
1826   std::vector<LValue> ResultRegDests;
1827   std::vector<QualType> ResultRegQualTys;
1828   std::vector<llvm::Type *> ResultRegTypes;
1829   std::vector<llvm::Type *> ResultTruncRegTypes;
1830   std::vector<llvm::Type *> ArgTypes;
1831   std::vector<llvm::Value*> Args;
1832
1833   // Keep track of inout constraints.
1834   std::string InOutConstraints;
1835   std::vector<llvm::Value*> InOutArgs;
1836   std::vector<llvm::Type*> InOutArgTypes;
1837
1838   // An inline asm can be marked readonly if it meets the following conditions:
1839   //  - it doesn't have any side effects
1840   //  - it doesn't clobber memory
1841   //  - it doesn't return a value by-reference
1842   // It can be marked readnone if it doesn't have any input memory constraints
1843   // in addition to meeting the conditions listed above.
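  // For example (an illustrative sketch): a pure register computation like
  //
  //   asm("add %2, %0" : "=r"(Res) : "0"(A), "r"(B));
  //
  // meets all of the above and could be marked readnone, while adding a
  // "memory" clobber or an "m"-constrained operand would prevent that.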
1844 bool ReadOnly = true, ReadNone = true; 1845 1846 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { 1847 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; 1848 1849 // Simplify the output constraint. 1850 std::string OutputConstraint(S.getOutputConstraint(i)); 1851 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, 1852 getTarget()); 1853 1854 const Expr *OutExpr = S.getOutputExpr(i); 1855 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext()); 1856 1857 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr, 1858 getTarget(), CGM, S, 1859 Info.earlyClobber()); 1860 1861 LValue Dest = EmitLValue(OutExpr); 1862 if (!Constraints.empty()) 1863 Constraints += ','; 1864 1865 // If this is a register output, then make the inline asm return it 1866 // by-value. If this is a memory result, return the value by-reference. 1867 if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) { 1868 Constraints += "=" + OutputConstraint; 1869 ResultRegQualTys.push_back(OutExpr->getType()); 1870 ResultRegDests.push_back(Dest); 1871 ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType())); 1872 ResultTruncRegTypes.push_back(ResultRegTypes.back()); 1873 1874 // If this output is tied to an input, and if the input is larger, then 1875 // we need to set the actual result type of the inline asm node to be the 1876 // same as the input type. 1877 if (Info.hasMatchingInput()) { 1878 unsigned InputNo; 1879 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) { 1880 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo]; 1881 if (Input.hasTiedOperand() && Input.getTiedOperand() == i) 1882 break; 1883 } 1884 assert(InputNo != S.getNumInputs() && "Didn't find matching input!"); 1885 1886 QualType InputTy = S.getInputExpr(InputNo)->getType(); 1887 QualType OutputType = OutExpr->getType(); 1888 1889 uint64_t InputSize = getContext().getTypeSize(InputTy); 1890 if (getContext().getTypeSize(OutputType) < InputSize) { 1891 // Form the asm to return the value as a larger integer or fp type. 1892 ResultRegTypes.back() = ConvertType(InputTy); 1893 } 1894 } 1895 if (llvm::Type* AdjTy = 1896 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, 1897 ResultRegTypes.back())) 1898 ResultRegTypes.back() = AdjTy; 1899 else { 1900 CGM.getDiags().Report(S.getAsmLoc(), 1901 diag::err_asm_invalid_type_in_input) 1902 << OutExpr->getType() << OutputConstraint; 1903 } 1904 } else { 1905 ArgTypes.push_back(Dest.getAddress().getType()); 1906 Args.push_back(Dest.getPointer()); 1907 Constraints += "=*"; 1908 Constraints += OutputConstraint; 1909 ReadOnly = ReadNone = false; 1910 } 1911 1912 if (Info.isReadWrite()) { 1913 InOutConstraints += ','; 1914 1915 const Expr *InputExpr = S.getOutputExpr(i); 1916 llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(), 1917 InOutConstraints, 1918 InputExpr->getExprLoc()); 1919 1920 if (llvm::Type* AdjTy = 1921 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, 1922 Arg->getType())) 1923 Arg = Builder.CreateBitCast(Arg, AdjTy); 1924 1925 if (Info.allowsRegister()) 1926 InOutConstraints += llvm::utostr(i); 1927 else 1928 InOutConstraints += OutputConstraint; 1929 1930 InOutArgTypes.push_back(Arg->getType()); 1931 InOutArgs.push_back(Arg); 1932 } 1933 } 1934 1935 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX) 1936 // to the return value slot. Only do this when returning in registers. 
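  // For example (hypothetical): in
  //
  //   int f() { __asm mov eax, 42 }
  //
  // there is no explicit output operand, so the blob is assumed to leave its
  // result in EAX (EAX:EDX for wider results) and we store that into the
  // return value slot.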
1937 if (isa<MSAsmStmt>(&S)) { 1938 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); 1939 if (RetAI.isDirect() || RetAI.isExtend()) { 1940 // Make a fake lvalue for the return value slot. 1941 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy); 1942 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs( 1943 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes, 1944 ResultRegDests, AsmString, S.getNumOutputs()); 1945 SawAsmBlock = true; 1946 } 1947 } 1948 1949 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { 1950 const Expr *InputExpr = S.getInputExpr(i); 1951 1952 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; 1953 1954 if (Info.allowsMemory()) 1955 ReadNone = false; 1956 1957 if (!Constraints.empty()) 1958 Constraints += ','; 1959 1960 // Simplify the input constraint. 1961 std::string InputConstraint(S.getInputConstraint(i)); 1962 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(), 1963 &OutputConstraintInfos); 1964 1965 InputConstraint = AddVariableConstraints( 1966 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()), 1967 getTarget(), CGM, S, false /* No EarlyClobber */); 1968 1969 llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints); 1970 1971 // If this input argument is tied to a larger output result, extend the 1972 // input to be the same size as the output. The LLVM backend wants to see 1973 // the input and output of a matching constraint be the same size. Note 1974 // that GCC does not define what the top bits are here. We use zext because 1975 // that is usually cheaper, but LLVM IR should really get an anyext someday. 1976 if (Info.hasTiedOperand()) { 1977 unsigned Output = Info.getTiedOperand(); 1978 QualType OutputType = S.getOutputExpr(Output)->getType(); 1979 QualType InputTy = InputExpr->getType(); 1980 1981 if (getContext().getTypeSize(OutputType) > 1982 getContext().getTypeSize(InputTy)) { 1983 // Use ptrtoint as appropriate so that we can do our extension. 1984 if (isa<llvm::PointerType>(Arg->getType())) 1985 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy); 1986 llvm::Type *OutputTy = ConvertType(OutputType); 1987 if (isa<llvm::IntegerType>(OutputTy)) 1988 Arg = Builder.CreateZExt(Arg, OutputTy); 1989 else if (isa<llvm::PointerType>(OutputTy)) 1990 Arg = Builder.CreateZExt(Arg, IntPtrTy); 1991 else { 1992 assert(OutputTy->isFloatingPointTy() && "Unexpected output type"); 1993 Arg = Builder.CreateFPExt(Arg, OutputTy); 1994 } 1995 } 1996 } 1997 if (llvm::Type* AdjTy = 1998 getTargetHooks().adjustInlineAsmType(*this, InputConstraint, 1999 Arg->getType())) 2000 Arg = Builder.CreateBitCast(Arg, AdjTy); 2001 else 2002 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input) 2003 << InputExpr->getType() << InputConstraint; 2004 2005 ArgTypes.push_back(Arg->getType()); 2006 Args.push_back(Arg); 2007 Constraints += InputConstraint; 2008 } 2009 2010 // Append the "input" part of inout constraints last. 
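  // (Illustrative sketch: an inout operand such as "+r"(X) was split earlier
  //  into an "=r" output plus a tied input collected in InOutArgs and
  //  InOutConstraints, e.g. the constraint "0"; those tied inputs are what
  //  get appended here, after all ordinary inputs.)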
2011 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) { 2012 ArgTypes.push_back(InOutArgTypes[i]); 2013 Args.push_back(InOutArgs[i]); 2014 } 2015 Constraints += InOutConstraints; 2016 2017 // Clobbers 2018 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) { 2019 StringRef Clobber = S.getClobber(i); 2020 2021 if (Clobber == "memory") 2022 ReadOnly = ReadNone = false; 2023 else if (Clobber != "cc") 2024 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber); 2025 2026 if (!Constraints.empty()) 2027 Constraints += ','; 2028 2029 Constraints += "~{"; 2030 Constraints += Clobber; 2031 Constraints += '}'; 2032 } 2033 2034 // Add machine specific clobbers 2035 std::string MachineClobbers = getTarget().getClobbers(); 2036 if (!MachineClobbers.empty()) { 2037 if (!Constraints.empty()) 2038 Constraints += ','; 2039 Constraints += MachineClobbers; 2040 } 2041 2042 llvm::Type *ResultType; 2043 if (ResultRegTypes.empty()) 2044 ResultType = VoidTy; 2045 else if (ResultRegTypes.size() == 1) 2046 ResultType = ResultRegTypes[0]; 2047 else 2048 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes); 2049 2050 llvm::FunctionType *FTy = 2051 llvm::FunctionType::get(ResultType, ArgTypes, false); 2052 2053 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; 2054 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ? 2055 llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT; 2056 llvm::InlineAsm *IA = 2057 llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect, 2058 /* IsAlignStack */ false, AsmDialect); 2059 llvm::CallInst *Result = Builder.CreateCall(IA, Args); 2060 Result->addAttribute(llvm::AttributeSet::FunctionIndex, 2061 llvm::Attribute::NoUnwind); 2062 2063 if (isa<MSAsmStmt>(&S)) { 2064 // If the assembly contains any labels, mark the call noduplicate to prevent 2065 // defining the same ASM label twice (PR23715). This is pretty hacky, but it 2066 // works. 2067 if (AsmString.find("__MSASMLABEL_") != std::string::npos) 2068 Result->addAttribute(llvm::AttributeSet::FunctionIndex, 2069 llvm::Attribute::NoDuplicate); 2070 } 2071 2072 // Attach readnone and readonly attributes. 2073 if (!HasSideEffect) { 2074 if (ReadNone) 2075 Result->addAttribute(llvm::AttributeSet::FunctionIndex, 2076 llvm::Attribute::ReadNone); 2077 else if (ReadOnly) 2078 Result->addAttribute(llvm::AttributeSet::FunctionIndex, 2079 llvm::Attribute::ReadOnly); 2080 } 2081 2082 // Slap the source location of the inline asm into a !srcloc metadata on the 2083 // call. 2084 if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) { 2085 Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(), 2086 *this)); 2087 } else { 2088 // At least put the line number on MS inline asm blobs. 2089 auto Loc = llvm::ConstantInt::get(Int32Ty, S.getAsmLoc().getRawEncoding()); 2090 Result->setMetadata("srcloc", 2091 llvm::MDNode::get(getLLVMContext(), 2092 llvm::ConstantAsMetadata::get(Loc))); 2093 } 2094 2095 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { 2096 // Conservatively, mark all inline asm blocks in CUDA as convergent 2097 // (meaning, they may call an intrinsically convergent op, such as bar.sync, 2098 // and so can't have certain optimizations applied around them). 2099 Result->addAttribute(llvm::AttributeSet::FunctionIndex, 2100 llvm::Attribute::Convergent); 2101 } 2102 2103 // Extract all of the register value results from the asm. 
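  // With a single register output the call yields that value directly; with
  // several, the call returns an anonymous struct and each member is pulled
  // out with extractvalue. Illustrative IR shape (names invented):
  //
  //   %pair = call { i32, i32 } asm "...", "=r,=r,..."()
  //   %r0 = extractvalue { i32, i32 } %pair, 0
  //   %r1 = extractvalue { i32, i32 } %pair, 1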
2104   std::vector<llvm::Value*> RegResults;
2105   if (ResultRegTypes.size() == 1) {
2106     RegResults.push_back(Result);
2107   } else {
2108     for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2109       llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
2110       RegResults.push_back(Tmp);
2111     }
2112   }
2113
2114   assert(RegResults.size() == ResultRegTypes.size());
2115   assert(RegResults.size() == ResultTruncRegTypes.size());
2116   assert(RegResults.size() == ResultRegDests.size());
2117   for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2118     llvm::Value *Tmp = RegResults[i];
2119
2120     // If the result type of the LLVM IR asm doesn't match the result type of
2121     // the expression, do the conversion.
2122     if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2123       llvm::Type *TruncTy = ResultTruncRegTypes[i];
2124
2125       // Truncate the integer result to the right size; note that TruncTy can
2126       // be a pointer.
2127       if (TruncTy->isFloatingPointTy())
2128         Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2129       else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2130         uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2131         Tmp = Builder.CreateTrunc(Tmp,
2132                  llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2133         Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2134       } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2135         uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2136         Tmp = Builder.CreatePtrToInt(Tmp,
2137                  llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2138         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2139       } else if (TruncTy->isIntegerTy()) {
2140         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2141       } else if (TruncTy->isVectorTy()) {
2142         Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2143       }
2144     }
2145
2146     EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
2147   }
2148 }
2149
2150 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2151   const RecordDecl *RD = S.getCapturedRecordDecl();
2152   QualType RecordTy = getContext().getRecordType(RD);
2153
2154   // Initialize the captured struct.
2155   LValue SlotLV =
2156     MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2157
2158   RecordDecl::field_iterator CurField = RD->field_begin();
2159   for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2160                                                  E = S.capture_init_end();
2161        I != E; ++I, ++CurField) {
2162     LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2163     if (CurField->hasCapturedVLAType()) {
2164       auto VAT = CurField->getCapturedVLAType();
2165       EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2166     } else {
2167       EmitInitializerForField(*CurField, LV, *I, None);
2168     }
2169   }
2170
2171   return SlotLV;
2172 }
2173
2174 /// Generate an outlined function for the body of a CapturedStmt, store any
2175 /// captured variables into the captured struct, and call the outlined function.
2176 llvm::Function *
2177 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2178   LValue CapStruct = InitCapturedStruct(S);
2179
2180   // Emit the CapturedDecl.
2181   CodeGenFunction CGF(CGM, true);
2182   CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2183   llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2184   delete CGF.CapturedStmtInfo;
2185
2186   // Emit call to the helper function.
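  // (Sketch of the resulting call shape, names illustrative only:
  //    call void @__captured_stmt(%struct.anon* %agg.captured)
  //  The helper receives a single pointer to the agg.captured struct built
  //  by InitCapturedStruct above.)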
2187   EmitCallOrInvoke(F, CapStruct.getPointer());
2188
2189   return F;
2190 }
2191
2192 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2193   LValue CapStruct = InitCapturedStruct(S);
2194   return CapStruct.getAddress();
2195 }
2196
2197 /// Creates the outlined function for a CapturedStmt.
2198 llvm::Function *
2199 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2200   assert(CapturedStmtInfo &&
2201          "CapturedStmtInfo should be set when generating the captured function");
2202   const CapturedDecl *CD = S.getCapturedDecl();
2203   const RecordDecl *RD = S.getCapturedRecordDecl();
2204   SourceLocation Loc = S.getLocStart();
2205   assert(CD->hasBody() && "missing CapturedDecl body");
2206
2207   // Build the argument list.
2208   ASTContext &Ctx = CGM.getContext();
2209   FunctionArgList Args;
2210   Args.append(CD->param_begin(), CD->param_end());
2211
2212   // Create the function declaration.
2213
2214   const CGFunctionInfo &FuncInfo =
2215     CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2216   llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2217
2218   llvm::Function *F =
2219     llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2220                            CapturedStmtInfo->getHelperName(), &CGM.getModule());
2221   CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2222   if (CD->isNothrow())
2223     F->addFnAttr(llvm::Attribute::NoUnwind);
2224
2225   // Generate the function.
2226   StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args,
2227                 CD->getLocation(),
2228                 CD->getBody()->getLocStart());
2229   // Set the context parameter in CapturedStmtInfo.
2230   Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2231   CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2232
2233   // Initialize variable-length arrays.
2234   LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2235                                            Ctx.getTagDeclType(RD));
2236   for (auto *FD : RD->fields()) {
2237     if (FD->hasCapturedVLAType()) {
2238       auto *ExprArg = EmitLoadOfLValue(EmitLValueForField(Base, FD),
2239                                        S.getLocStart()).getScalarVal();
2240       auto VAT = FD->getCapturedVLAType();
2241       VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2242     }
2243   }
2244
2245   // If 'this' is captured, load it into CXXThisValue.
2246   if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2247     FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2248     LValue ThisLValue = EmitLValueForField(Base, FD);
2249     CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2250   }
2251
2252   PGO.assignRegionCounters(GlobalDecl(CD), F);
2253   CapturedStmtInfo->EmitBody(*this, CD->getBody());
2254   FinishFunction(CD->getBodyRBrace());
2255
2256   return F;
2257 }
2258