      1 //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This contains code to emit Stmt nodes as LLVM code.
     11 //
     12 //===----------------------------------------------------------------------===//
     13 
     14 #include "CodeGenFunction.h"
     15 #include "CGDebugInfo.h"
     16 #include "CodeGenModule.h"
     17 #include "TargetInfo.h"
     18 #include "clang/AST/StmtVisitor.h"
     19 #include "clang/Basic/PrettyStackTrace.h"
     20 #include "clang/Basic/TargetInfo.h"
     21 #include "clang/Sema/LoopHint.h"
     22 #include "clang/Sema/SemaDiagnostic.h"
     23 #include "llvm/ADT/StringExtras.h"
     24 #include "llvm/IR/CallSite.h"
     25 #include "llvm/IR/DataLayout.h"
     26 #include "llvm/IR/InlineAsm.h"
     27 #include "llvm/IR/Intrinsics.h"
     28 using namespace clang;
     29 using namespace CodeGen;
     30 
     31 //===----------------------------------------------------------------------===//
     32 //                              Statement Emission
     33 //===----------------------------------------------------------------------===//
     34 
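         /// EmitStopPoint - If we are emitting debug info, emit a stop point for \p S
         /// so that debuggers can associate the generated code with the statement's
         /// source location.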
     35 void CodeGenFunction::EmitStopPoint(const Stmt *S) {
     36   if (CGDebugInfo *DI = getDebugInfo()) {
     37     SourceLocation Loc;
     38     Loc = S->getLocStart();
     39     DI->EmitLocation(Builder, Loc);
     40 
     41     LastStopPoint = Loc;
     42   }
     43 }
     44 
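         /// EmitStmt - Emit code for the given statement, dispatching to the
         /// specialized emitters below.  Simple statements are handled first, and
         /// statements in unreachable code are skipped unless they contain a label.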
     45 void CodeGenFunction::EmitStmt(const Stmt *S) {
     46   assert(S && "Null statement?");
     47   PGO.setCurrentStmt(S);
     48 
     49   // These statements have their own debug info handling.
     50   if (EmitSimpleStmt(S))
     51     return;
     52 
     53   // Check if we are generating unreachable code.
     54   if (!HaveInsertPoint()) {
     55     // If so, and the statement doesn't contain a label, then we do not need to
     56     // generate actual code. This is safe because (1) the current point is
     57     // unreachable, so we don't need to execute the code, and (2) we've already
     58     // handled the statements which update internal data structures (like the
     59     // local variable map) which could be used by subsequent statements.
     60     if (!ContainsLabel(S)) {
     61       // Verify that any decl statements were handled as simple, they may be in
      62       // Verify that any decl statements were handled as simple, since
      63       // they may be in scope of subsequent reachable statements.
     64       return;
     65     }
     66 
     67     // Otherwise, make a new block to hold the code.
     68     EnsureInsertPoint();
     69   }
     70 
     71   // Generate a stoppoint if we are emitting debug info.
     72   EmitStopPoint(S);
     73 
     74   switch (S->getStmtClass()) {
     75   case Stmt::NoStmtClass:
     76   case Stmt::CXXCatchStmtClass:
     77   case Stmt::SEHExceptStmtClass:
     78   case Stmt::SEHFinallyStmtClass:
     79   case Stmt::MSDependentExistsStmtClass:
     80     llvm_unreachable("invalid statement class to emit generically");
     81   case Stmt::NullStmtClass:
     82   case Stmt::CompoundStmtClass:
     83   case Stmt::DeclStmtClass:
     84   case Stmt::LabelStmtClass:
     85   case Stmt::AttributedStmtClass:
     86   case Stmt::GotoStmtClass:
     87   case Stmt::BreakStmtClass:
     88   case Stmt::ContinueStmtClass:
     89   case Stmt::DefaultStmtClass:
     90   case Stmt::CaseStmtClass:
     91     llvm_unreachable("should have emitted these statements as simple");
     92 
     93 #define STMT(Type, Base)
     94 #define ABSTRACT_STMT(Op)
     95 #define EXPR(Type, Base) \
     96   case Stmt::Type##Class:
     97 #include "clang/AST/StmtNodes.inc"
     98   {
     99     // Remember the block we came in on.
    100     llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    101     assert(incoming && "expression emission must have an insertion point");
    102 
    103     EmitIgnoredExpr(cast<Expr>(S));
    104 
    105     llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    106     assert(outgoing && "expression emission cleared block!");
    107 
    108     // The expression emitters assume (reasonably!) that the insertion
    109     // point is always set.  To maintain that, the call-emission code
    110     // for noreturn functions has to enter a new block with no
    111     // predecessors.  We want to kill that block and mark the current
    112     // insertion point unreachable in the common case of a call like
    113     // "exit();".  Since expression emission doesn't otherwise create
    114     // blocks with no predecessors, we can just test for that.
    115     // However, we must be careful not to do this to our incoming
    116     // block, because *statement* emission does sometimes create
    117     // reachable blocks which will have no predecessors until later in
    118     // the function.  This occurs with, e.g., labels that are not
    119     // reachable by fallthrough.
    120     if (incoming != outgoing && outgoing->use_empty()) {
    121       outgoing->eraseFromParent();
    122       Builder.ClearInsertionPoint();
    123     }
    124     break;
    125   }
    126 
    127   case Stmt::IndirectGotoStmtClass:
    128     EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
    129 
    130   case Stmt::IfStmtClass:       EmitIfStmt(cast<IfStmt>(*S));             break;
    131   case Stmt::WhileStmtClass:    EmitWhileStmt(cast<WhileStmt>(*S));       break;
    132   case Stmt::DoStmtClass:       EmitDoStmt(cast<DoStmt>(*S));             break;
    133   case Stmt::ForStmtClass:      EmitForStmt(cast<ForStmt>(*S));           break;
    134 
    135   case Stmt::ReturnStmtClass:   EmitReturnStmt(cast<ReturnStmt>(*S));     break;
    136 
    137   case Stmt::SwitchStmtClass:   EmitSwitchStmt(cast<SwitchStmt>(*S));     break;
    138   case Stmt::GCCAsmStmtClass:   // Intentional fall-through.
    139   case Stmt::MSAsmStmtClass:    EmitAsmStmt(cast<AsmStmt>(*S));           break;
    140   case Stmt::CapturedStmtClass: {
    141     const CapturedStmt *CS = cast<CapturedStmt>(S);
    142     EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    143     }
    144     break;
    145   case Stmt::ObjCAtTryStmtClass:
    146     EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    147     break;
    148   case Stmt::ObjCAtCatchStmtClass:
    149     llvm_unreachable(
    150                     "@catch statements should be handled by EmitObjCAtTryStmt");
    151   case Stmt::ObjCAtFinallyStmtClass:
    152     llvm_unreachable(
    153                   "@finally statements should be handled by EmitObjCAtTryStmt");
    154   case Stmt::ObjCAtThrowStmtClass:
    155     EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    156     break;
    157   case Stmt::ObjCAtSynchronizedStmtClass:
    158     EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    159     break;
    160   case Stmt::ObjCForCollectionStmtClass:
    161     EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    162     break;
    163   case Stmt::ObjCAutoreleasePoolStmtClass:
    164     EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    165     break;
    166 
    167   case Stmt::CXXTryStmtClass:
    168     EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    169     break;
    170   case Stmt::CXXForRangeStmtClass:
    171     EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
    172     break;
    173   case Stmt::SEHTryStmtClass:
    174     EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    175     break;
    176   case Stmt::SEHLeaveStmtClass:
    177     EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    178     break;
    179   case Stmt::OMPParallelDirectiveClass:
    180     EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    181     break;
    182   case Stmt::OMPSimdDirectiveClass:
    183     EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    184     break;
    185   case Stmt::OMPForDirectiveClass:
    186     EmitOMPForDirective(cast<OMPForDirective>(*S));
    187     break;
    188   case Stmt::OMPSectionsDirectiveClass:
    189     EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    190     break;
    191   case Stmt::OMPSectionDirectiveClass:
    192     EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    193     break;
    194   case Stmt::OMPSingleDirectiveClass:
    195     EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    196     break;
    197   case Stmt::OMPParallelForDirectiveClass:
    198     EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    199     break;
    200   case Stmt::OMPParallelSectionsDirectiveClass:
    201     EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    202     break;
    203   }
    204 }
    205 
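         /// EmitSimpleStmt - Try to emit \p S as a "simple" statement that handles its
         /// own debug info.  Returns true if the statement was handled here.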
    206 bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
    207   switch (S->getStmtClass()) {
    208   default: return false;
    209   case Stmt::NullStmtClass: break;
    210   case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
    211   case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
    212   case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
    213   case Stmt::AttributedStmtClass:
    214                             EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
    215   case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
    216   case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
    217   case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
    218   case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
    219   case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
    220   }
    221 
    222   return true;
    223 }
    224 
    225 /// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
    226 /// this captures the expression result of the last sub-statement and returns it
    227 /// (for use by the statement expression extension).
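         ///
         /// For example, in the GNU statement expression
         ///   int x = ({ f(); int y = g(); y + 1; });
         /// the value of the trailing expression 'y + 1' becomes the value of the
         /// whole construct.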
    228 llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
    229                                                AggValueSlot AggSlot) {
    230   PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
    231                              "LLVM IR generation of compound statement ('{}')");
    232 
    233   // Keep track of the current cleanup stack depth, including debug scopes.
    234   LexicalScope Scope(*this, S.getSourceRange());
    235 
    236   return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
    237 }
    238 
    239 llvm::Value*
    240 CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
    241                                               bool GetLast,
    242                                               AggValueSlot AggSlot) {
    243 
    244   for (CompoundStmt::const_body_iterator I = S.body_begin(),
    245        E = S.body_end()-GetLast; I != E; ++I)
    246     EmitStmt(*I);
    247 
    248   llvm::Value *RetAlloca = nullptr;
    249   if (GetLast) {
    250     // We have to special case labels here.  They are statements, but when put
    251     // at the end of a statement expression, they yield the value of their
    252     // subexpression.  Handle this by walking through all labels we encounter,
    253     // emitting them before we evaluate the subexpr.
    254     const Stmt *LastStmt = S.body_back();
    255     while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
    256       EmitLabel(LS->getDecl());
    257       LastStmt = LS->getSubStmt();
    258     }
    259 
    260     EnsureInsertPoint();
    261 
    262     QualType ExprTy = cast<Expr>(LastStmt)->getType();
    263     if (hasAggregateEvaluationKind(ExprTy)) {
    264       EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
    265     } else {
    266       // We can't return an RValue here because there might be cleanups at
    267       // the end of the StmtExpr.  Because of that, we have to emit the result
    268       // here into a temporary alloca.
    269       RetAlloca = CreateMemTemp(ExprTy);
    270       EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
    271                        /*IsInit*/false);
    272     }
    273 
    274   }
    275 
    276   return RetAlloca;
    277 }
    278 
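         /// SimplifyForwardingBlocks - If \p BB contains nothing but an unconditional
         /// branch and no cleanups are active, forward its uses to the branch target
         /// and delete the block.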
    279 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
    280   llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
    281 
     282   // If there is a cleanup stack, then it isn't worth trying to
    283   // simplify this block (we would need to remove it from the scope map
    284   // and cleanup entry).
    285   if (!EHStack.empty())
    286     return;
    287 
     288   // Can only simplify unconditional branches.
    289   if (!BI || !BI->isUnconditional())
    290     return;
    291 
    292   // Can only simplify empty blocks.
    293   if (BI != BB->begin())
    294     return;
    295 
    296   BB->replaceAllUsesWith(BI->getSuccessor(0));
    297   BI->eraseFromParent();
    298   BB->eraseFromParent();
    299 }
    300 
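         /// EmitBlock - Fall out of the current block into \p BB (if necessary),
         /// insert \p BB into the function, and make it the current insertion point.
         /// If \p IsFinished is true and the block has no uses, it is deleted instead.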
    301 void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
    302   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
    303 
    304   // Fall out of the current block (if necessary).
    305   EmitBranch(BB);
    306 
    307   if (IsFinished && BB->use_empty()) {
    308     delete BB;
    309     return;
    310   }
    311 
    312   // Place the block after the current block, if possible, or else at
    313   // the end of the function.
    314   if (CurBB && CurBB->getParent())
    315     CurFn->getBasicBlockList().insertAfter(CurBB, BB);
    316   else
    317     CurFn->getBasicBlockList().push_back(BB);
    318   Builder.SetInsertPoint(BB);
    319 }
    320 
    321 void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
    322   // Emit a branch from the current block to the target one if this
    323   // was a real block.  If this was just a fall-through block after a
    324   // terminator, don't emit it.
    325   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
    326 
    327   if (!CurBB || CurBB->getTerminator()) {
    328     // If there is no insert point or the previous block is already
    329     // terminated, don't touch it.
    330   } else {
    331     // Otherwise, create a fall-through branch.
    332     Builder.CreateBr(Target);
    333   }
    334 
    335   Builder.ClearInsertionPoint();
    336 }
    337 
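         /// EmitBlockAfterUses - Insert \p block into the function immediately after
         /// the block containing one of its users (or at the end of the function if it
         /// has no instruction users yet) and make it the current insertion point.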
    338 void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
    339   bool inserted = false;
    340   for (llvm::User *u : block->users()) {
    341     if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
    342       CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
    343       inserted = true;
    344       break;
    345     }
    346   }
    347 
    348   if (!inserted)
    349     CurFn->getBasicBlockList().push_back(block);
    350 
    351   Builder.SetInsertPoint(block);
    352 }
    353 
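         /// getJumpDestForLabel - Return the JumpDest for the given label, creating an
         /// uninserted block for it on first use.  The destination's cleanup scope is
         /// filled in later by EmitLabel.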
    354 CodeGenFunction::JumpDest
    355 CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
    356   JumpDest &Dest = LabelMap[D];
    357   if (Dest.isValid()) return Dest;
    358 
    359   // Create, but don't insert, the new block.
    360   Dest = JumpDest(createBasicBlock(D->getName()),
    361                   EHScopeStack::stable_iterator::invalid(),
    362                   NextCleanupDestIndex++);
    363   return Dest;
    364 }
    365 
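         /// EmitLabel - Emit the block for the given label declaration, giving it a
         /// cleanup scope depth and resolving any branch fixups (forward gotos) that
         /// target it.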
    366 void CodeGenFunction::EmitLabel(const LabelDecl *D) {
    367   // Add this label to the current lexical scope if we're within any
    368   // normal cleanups.  Jumps "in" to this label --- when permitted by
    369   // the language --- may need to be routed around such cleanups.
    370   if (EHStack.hasNormalCleanups() && CurLexicalScope)
    371     CurLexicalScope->addLabel(D);
    372 
    373   JumpDest &Dest = LabelMap[D];
    374 
    375   // If we didn't need a forward reference to this label, just go
    376   // ahead and create a destination at the current scope.
    377   if (!Dest.isValid()) {
    378     Dest = getJumpDestInCurrentScope(D->getName());
    379 
    380   // Otherwise, we need to give this label a target depth and remove
    381   // it from the branch-fixups list.
    382   } else {
    383     assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    384     Dest.setScopeDepth(EHStack.stable_begin());
    385     ResolveBranchFixups(Dest.getBlock());
    386   }
    387 
    388   RegionCounter Cnt = getPGORegionCounter(D->getStmt());
    389   EmitBlock(Dest.getBlock());
    390   Cnt.beginRegion(Builder);
    391 }
    392 
    393 /// Change the cleanup scope of the labels in this lexical scope to
    394 /// match the scope of the enclosing context.
    395 void CodeGenFunction::LexicalScope::rescopeLabels() {
    396   assert(!Labels.empty());
    397   EHScopeStack::stable_iterator innermostScope
    398     = CGF.EHStack.getInnermostNormalCleanup();
    399 
    400   // Change the scope depth of all the labels.
    401   for (SmallVectorImpl<const LabelDecl*>::const_iterator
    402          i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    403     assert(CGF.LabelMap.count(*i));
    404     JumpDest &dest = CGF.LabelMap.find(*i)->second;
    405     assert(dest.getScopeDepth().isValid());
    406     assert(innermostScope.encloses(dest.getScopeDepth()));
    407     dest.setScopeDepth(innermostScope);
    408   }
    409 
    410   // Reparent the labels if the new scope also has cleanups.
    411   if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    412     ParentScope->Labels.append(Labels.begin(), Labels.end());
    413   }
    414 }
    415 
    416 
    417 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
    418   EmitLabel(S.getDecl());
    419   EmitStmt(S.getSubStmt());
    420 }
    421 
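         /// EmitAttributedStmt - Emit an attributed statement.  Loop statements are
         /// passed their attributes so that loop hints can be attached to the emitted
         /// branches; all other statements are emitted normally.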
    422 void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
    423   const Stmt *SubStmt = S.getSubStmt();
    424   switch (SubStmt->getStmtClass()) {
    425   case Stmt::DoStmtClass:
    426     EmitDoStmt(cast<DoStmt>(*SubStmt), S.getAttrs());
    427     break;
    428   case Stmt::ForStmtClass:
    429     EmitForStmt(cast<ForStmt>(*SubStmt), S.getAttrs());
    430     break;
    431   case Stmt::WhileStmtClass:
    432     EmitWhileStmt(cast<WhileStmt>(*SubStmt), S.getAttrs());
    433     break;
    434   case Stmt::CXXForRangeStmtClass:
    435     EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*SubStmt), S.getAttrs());
    436     break;
    437   default:
    438     EmitStmt(SubStmt);
    439   }
    440 }
    441 
    442 void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
    443   // If this code is reachable then emit a stop point (if generating
    444   // debug info). We have to do this ourselves because we are on the
    445   // "simple" statement path.
    446   if (HaveInsertPoint())
    447     EmitStopPoint(&S);
    448 
    449   EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
    450 }
    451 
    452 
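         /// EmitIndirectGotoStmt - Emit a GNU computed goto.  If the target folds to a
         /// single known label, branch to it directly; otherwise add the computed
         /// address as an incoming value to the shared indirect-goto block's PHI.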
    453 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
    454   if (const LabelDecl *Target = S.getConstantTarget()) {
    455     EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    456     return;
    457   }
    458 
    459   // Ensure that we have an i8* for our PHI node.
    460   llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
    461                                          Int8PtrTy, "addr");
    462   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
    463 
    464   // Get the basic block for the indirect goto.
    465   llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
    466 
     467   // The first instruction in the block has to be the PHI for the switch dest;
     468   // add an entry for this branch.
    469   cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
    470 
    471   EmitBranch(IndGotoBB);
    472 }
    473 
    474 void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
    475   // C99 6.8.4.1: The first substatement is executed if the expression compares
    476   // unequal to 0.  The condition must be a scalar type.
    477   LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
    478   RegionCounter Cnt = getPGORegionCounter(&S);
    479 
    480   if (S.getConditionVariable())
    481     EmitAutoVarDecl(*S.getConditionVariable());
    482 
    483   // If the condition constant folds and can be elided, try to avoid emitting
    484   // the condition and the dead arm of the if/else.
    485   bool CondConstant;
    486   if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
    487     // Figure out which block (then or else) is executed.
    488     const Stmt *Executed = S.getThen();
    489     const Stmt *Skipped  = S.getElse();
    490     if (!CondConstant)  // Condition false?
    491       std::swap(Executed, Skipped);
    492 
    493     // If the skipped block has no labels in it, just emit the executed block.
    494     // This avoids emitting dead code and simplifies the CFG substantially.
    495     if (!ContainsLabel(Skipped)) {
    496       if (CondConstant)
    497         Cnt.beginRegion(Builder);
    498       if (Executed) {
    499         RunCleanupsScope ExecutedScope(*this);
    500         EmitStmt(Executed);
    501       }
    502       return;
    503     }
    504   }
    505 
    506   // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
    507   // the conditional branch.
    508   llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
    509   llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
    510   llvm::BasicBlock *ElseBlock = ContBlock;
    511   if (S.getElse())
    512     ElseBlock = createBasicBlock("if.else");
    513 
    514   EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Cnt.getCount());
    515 
    516   // Emit the 'then' code.
    517   EmitBlock(ThenBlock);
    518   Cnt.beginRegion(Builder);
    519   {
    520     RunCleanupsScope ThenScope(*this);
    521     EmitStmt(S.getThen());
    522   }
    523   EmitBranch(ContBlock);
    524 
    525   // Emit the 'else' code if present.
    526   if (const Stmt *Else = S.getElse()) {
     527     // There is no need to emit a line number for an unconditional branch.
    528     if (getDebugInfo())
    529       Builder.SetCurrentDebugLocation(llvm::DebugLoc());
    530     EmitBlock(ElseBlock);
    531     {
    532       RunCleanupsScope ElseScope(*this);
    533       EmitStmt(Else);
    534     }
     535     // There is no need to emit a line number for an unconditional branch.
    536     if (getDebugInfo())
    537       Builder.SetCurrentDebugLocation(llvm::DebugLoc());
    538     EmitBranch(ContBlock);
    539   }
    540 
    541   // Emit the continuation block for code after the if.
    542   EmitBlock(ContBlock, true);
    543 }
    544 
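         /// EmitCondBrHints - Lower loop hint attributes (e.g. #pragma clang loop and
         /// #pragma unroll) into "llvm.loop" metadata attached to the loop's
         /// conditional branch \p CondBr.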
    545 void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
    546                                       llvm::BranchInst *CondBr,
    547                                       const ArrayRef<const Attr *> &Attrs) {
    548   // Return if there are no hints.
    549   if (Attrs.empty())
    550     return;
    551 
    552   // Add vectorize and unroll hints to the metadata on the conditional branch.
    553   SmallVector<llvm::Value *, 2> Metadata(1);
    554   for (const auto *Attr : Attrs) {
    555     const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);
    556 
     557     // Skip non-loop-hint attributes.
    558     if (!LH)
    559       continue;
    560 
    561     LoopHintAttr::OptionType Option = LH->getOption();
    562     int ValueInt = LH->getValue();
    563 
    564     const char *MetadataName;
    565     switch (Option) {
    566     case LoopHintAttr::Vectorize:
    567     case LoopHintAttr::VectorizeWidth:
    568       MetadataName = "llvm.loop.vectorize.width";
    569       break;
    570     case LoopHintAttr::Interleave:
    571     case LoopHintAttr::InterleaveCount:
    572       MetadataName = "llvm.loop.vectorize.unroll";
    573       break;
    574     case LoopHintAttr::Unroll:
    575       MetadataName = "llvm.loop.unroll.enable";
    576       break;
    577     case LoopHintAttr::UnrollCount:
    578       MetadataName = "llvm.loop.unroll.count";
    579       break;
    580     }
    581 
    582     llvm::Value *Value;
    583     llvm::MDString *Name;
    584     switch (Option) {
    585     case LoopHintAttr::Vectorize:
    586     case LoopHintAttr::Interleave:
    587       if (ValueInt == 1) {
     588         // FIXME: In the future I will modify the behavior of the metadata
    589         // so we can enable/disable vectorization and interleaving separately.
    590         Name = llvm::MDString::get(Context, "llvm.loop.vectorize.enable");
    591         Value = Builder.getTrue();
    592         break;
    593       }
    594       // Vectorization/interleaving is disabled, set width/count to 1.
    595       ValueInt = 1;
    596       // Fallthrough.
    597     case LoopHintAttr::VectorizeWidth:
    598     case LoopHintAttr::InterleaveCount:
    599       Name = llvm::MDString::get(Context, MetadataName);
    600       Value = llvm::ConstantInt::get(Int32Ty, ValueInt);
    601       break;
    602     case LoopHintAttr::Unroll:
    603       Name = llvm::MDString::get(Context, MetadataName);
    604       Value = (ValueInt == 0) ? Builder.getFalse() : Builder.getTrue();
    605       break;
    606     case LoopHintAttr::UnrollCount:
    607       Name = llvm::MDString::get(Context, MetadataName);
    608       Value = llvm::ConstantInt::get(Int32Ty, ValueInt);
    609       break;
    610     }
    611 
    612     SmallVector<llvm::Value *, 2> OpValues;
    613     OpValues.push_back(Name);
    614     OpValues.push_back(Value);
    615 
    616     // Set or overwrite metadata indicated by Name.
    617     Metadata.push_back(llvm::MDNode::get(Context, OpValues));
    618   }
    619 
    620   if (!Metadata.empty()) {
    621     // Add llvm.loop MDNode to CondBr.
    622     llvm::MDNode *LoopID = llvm::MDNode::get(Context, Metadata);
    623     LoopID->replaceOperandWith(0, LoopID); // First op points to itself.
    624 
    625     CondBr->setMetadata("llvm.loop", LoopID);
    626   }
    627 }
    628 
    629 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
    630                                     const ArrayRef<const Attr *> &WhileAttrs) {
    631   RegionCounter Cnt = getPGORegionCounter(&S);
    632 
    633   // Emit the header for the loop, which will also become
    634   // the continue target.
    635   JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
    636   EmitBlock(LoopHeader.getBlock());
    637 
    638   LoopStack.push(LoopHeader.getBlock());
    639 
    640   // Create an exit block for when the condition fails, which will
    641   // also become the break target.
    642   JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
    643 
    644   // Store the blocks to use for break and continue.
    645   BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
    646 
    647   // C++ [stmt.while]p2:
    648   //   When the condition of a while statement is a declaration, the
    649   //   scope of the variable that is declared extends from its point
    650   //   of declaration (3.3.2) to the end of the while statement.
    651   //   [...]
    652   //   The object created in a condition is destroyed and created
    653   //   with each iteration of the loop.
    654   RunCleanupsScope ConditionScope(*this);
    655 
    656   if (S.getConditionVariable())
    657     EmitAutoVarDecl(*S.getConditionVariable());
    658 
    659   // Evaluate the conditional in the while header.  C99 6.8.5.1: The
    660   // evaluation of the controlling expression takes place before each
    661   // execution of the loop body.
    662   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    663 
     664   // while(1) is common; avoid extra exit blocks.  Be sure
    665   // to correctly handle break/continue though.
    666   bool EmitBoolCondBranch = true;
    667   if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    668     if (C->isOne())
    669       EmitBoolCondBranch = false;
    670 
    671   // As long as the condition is true, go to the loop body.
    672   llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
    673   if (EmitBoolCondBranch) {
    674     llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    675     if (ConditionScope.requiresCleanups())
    676       ExitBlock = createBasicBlock("while.exit");
    677     llvm::BranchInst *CondBr =
    678         Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock,
    679                              PGO.createLoopWeights(S.getCond(), Cnt));
    680 
    681     if (ExitBlock != LoopExit.getBlock()) {
    682       EmitBlock(ExitBlock);
    683       EmitBranchThroughCleanup(LoopExit);
    684     }
    685 
    686     // Attach metadata to loop body conditional branch.
    687     EmitCondBrHints(LoopBody->getContext(), CondBr, WhileAttrs);
    688   }
    689 
    690   // Emit the loop body.  We have to emit this in a cleanup scope
    691   // because it might be a singleton DeclStmt.
    692   {
    693     RunCleanupsScope BodyScope(*this);
    694     EmitBlock(LoopBody);
    695     Cnt.beginRegion(Builder);
    696     EmitStmt(S.getBody());
    697   }
    698 
    699   BreakContinueStack.pop_back();
    700 
    701   // Immediately force cleanup.
    702   ConditionScope.ForceCleanup();
    703 
    704   // Branch to the loop header again.
    705   EmitBranch(LoopHeader.getBlock());
    706 
    707   LoopStack.pop();
    708 
    709   // Emit the exit block.
    710   EmitBlock(LoopExit.getBlock(), true);
    711 
     712   // If we skipped emitting the boolean condition branch, the LoopHeader
     713   // block is typically just a forwarding branch; try to erase it.
    714   if (!EmitBoolCondBranch)
    715     SimplifyForwardingBlocks(LoopHeader.getBlock());
    716 }
    717 
    718 void CodeGenFunction::EmitDoStmt(const DoStmt &S,
    719                                  const ArrayRef<const Attr *> &DoAttrs) {
    720   JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
    721   JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
    722 
    723   RegionCounter Cnt = getPGORegionCounter(&S);
    724 
    725   // Store the blocks to use for break and continue.
    726   BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
    727 
    728   // Emit the body of the loop.
    729   llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
    730 
    731   LoopStack.push(LoopBody);
    732 
    733   EmitBlockWithFallThrough(LoopBody, Cnt);
    734   {
    735     RunCleanupsScope BodyScope(*this);
    736     EmitStmt(S.getBody());
    737   }
    738 
    739   EmitBlock(LoopCond.getBlock());
    740 
    741   // C99 6.8.5.2: "The evaluation of the controlling expression takes place
    742   // after each execution of the loop body."
    743 
     744   // Evaluate the conditional in the do..while condition block.
    745   // C99 6.8.5p2/p4: The first substatement is executed if the expression
    746   // compares unequal to 0.  The condition must be a scalar type.
    747   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    748 
    749   BreakContinueStack.pop_back();
    750 
     751   // "do {} while (0)" is common in macros; avoid extra blocks.  Be sure
    752   // to correctly handle break/continue though.
    753   bool EmitBoolCondBranch = true;
    754   if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    755     if (C->isZero())
    756       EmitBoolCondBranch = false;
    757 
    758   // As long as the condition is true, iterate the loop.
    759   if (EmitBoolCondBranch) {
    760     llvm::BranchInst *CondBr =
    761         Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock(),
    762                              PGO.createLoopWeights(S.getCond(), Cnt));
    763 
    764     // Attach metadata to loop body conditional branch.
    765     EmitCondBrHints(LoopBody->getContext(), CondBr, DoAttrs);
    766   }
    767 
    768   LoopStack.pop();
    769 
    770   // Emit the exit block.
    771   EmitBlock(LoopExit.getBlock());
    772 
     773   // If we skipped emitting the boolean condition branch, the DoCond block
     774   // is typically just a forwarding branch; try to erase it.
    775   if (!EmitBoolCondBranch)
    776     SimplifyForwardingBlocks(LoopCond.getBlock());
    777 }
    778 
    779 void CodeGenFunction::EmitForStmt(const ForStmt &S,
    780                                   const ArrayRef<const Attr *> &ForAttrs) {
    781   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
    782 
    783   RunCleanupsScope ForScope(*this);
    784 
    785   CGDebugInfo *DI = getDebugInfo();
    786   if (DI)
    787     DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
    788 
    789   // Evaluate the first part before the loop.
    790   if (S.getInit())
    791     EmitStmt(S.getInit());
    792 
    793   RegionCounter Cnt = getPGORegionCounter(&S);
    794 
    795   // Start the loop with a block that tests the condition.
    796   // If there's an increment, the continue scope will be overwritten
    797   // later.
    798   JumpDest Continue = getJumpDestInCurrentScope("for.cond");
    799   llvm::BasicBlock *CondBlock = Continue.getBlock();
    800   EmitBlock(CondBlock);
    801 
    802   LoopStack.push(CondBlock);
    803 
    804   // If the for loop doesn't have an increment we can just use the
    805   // condition as the continue block.  Otherwise we'll need to create
    806   // a block for it (in the current scope, i.e. in the scope of the
     807   // condition), and that will become our continue block.
    808   if (S.getInc())
    809     Continue = getJumpDestInCurrentScope("for.inc");
    810 
    811   // Store the blocks to use for break and continue.
    812   BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
    813 
    814   // Create a cleanup scope for the condition variable cleanups.
    815   RunCleanupsScope ConditionScope(*this);
    816 
    817   if (S.getCond()) {
    818     // If the for statement has a condition scope, emit the local variable
    819     // declaration.
    820     if (S.getConditionVariable()) {
    821       EmitAutoVarDecl(*S.getConditionVariable());
    822     }
    823 
    824     llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    825     // If there are any cleanups between here and the loop-exit scope,
    826     // create a block to stage a loop exit along.
    827     if (ForScope.requiresCleanups())
    828       ExitBlock = createBasicBlock("for.cond.cleanup");
    829 
    830     // As long as the condition is true, iterate the loop.
    831     llvm::BasicBlock *ForBody = createBasicBlock("for.body");
    832 
    833     // C99 6.8.5p2/p4: The first substatement is executed if the expression
    834     // compares unequal to 0.  The condition must be a scalar type.
    835     llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    836     llvm::BranchInst *CondBr =
    837         Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock,
    838                              PGO.createLoopWeights(S.getCond(), Cnt));
    839 
    840     // Attach metadata to loop body conditional branch.
    841     EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);
    842 
    843     if (ExitBlock != LoopExit.getBlock()) {
    844       EmitBlock(ExitBlock);
    845       EmitBranchThroughCleanup(LoopExit);
    846     }
    847 
    848     EmitBlock(ForBody);
    849   } else {
    850     // Treat it as a non-zero constant.  Don't even create a new block for the
    851     // body, just fall into it.
    852   }
    853   Cnt.beginRegion(Builder);
    854 
    855   {
    856     // Create a separate cleanup scope for the body, in case it is not
    857     // a compound statement.
    858     RunCleanupsScope BodyScope(*this);
    859     EmitStmt(S.getBody());
    860   }
    861 
    862   // If there is an increment, emit it next.
    863   if (S.getInc()) {
    864     EmitBlock(Continue.getBlock());
    865     EmitStmt(S.getInc());
    866   }
    867 
    868   BreakContinueStack.pop_back();
    869 
    870   ConditionScope.ForceCleanup();
    871   EmitBranch(CondBlock);
    872 
    873   ForScope.ForceCleanup();
    874 
    875   if (DI)
    876     DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
    877 
    878   LoopStack.pop();
    879 
    880   // Emit the fall-through block.
    881   EmitBlock(LoopExit.getBlock(), true);
    882 }
    883 
    884 void
    885 CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
    886                                      const ArrayRef<const Attr *> &ForAttrs) {
    887   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
    888 
    889   RunCleanupsScope ForScope(*this);
    890 
    891   CGDebugInfo *DI = getDebugInfo();
    892   if (DI)
    893     DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
    894 
    895   // Evaluate the first pieces before the loop.
    896   EmitStmt(S.getRangeStmt());
    897   EmitStmt(S.getBeginEndStmt());
    898 
    899   RegionCounter Cnt = getPGORegionCounter(&S);
    900 
    901   // Start the loop with a block that tests the condition.
    902   // If there's an increment, the continue scope will be overwritten
    903   // later.
    904   llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
    905   EmitBlock(CondBlock);
    906 
    907   LoopStack.push(CondBlock);
    908 
    909   // If there are any cleanups between here and the loop-exit scope,
    910   // create a block to stage a loop exit along.
    911   llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    912   if (ForScope.requiresCleanups())
    913     ExitBlock = createBasicBlock("for.cond.cleanup");
    914 
    915   // The loop body, consisting of the specified body and the loop variable.
    916   llvm::BasicBlock *ForBody = createBasicBlock("for.body");
    917 
    918   // The body is executed if the expression, contextually converted
    919   // to bool, is true.
    920   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    921   llvm::BranchInst *CondBr = Builder.CreateCondBr(
    922       BoolCondVal, ForBody, ExitBlock, PGO.createLoopWeights(S.getCond(), Cnt));
    923 
    924   // Attach metadata to loop body conditional branch.
    925   EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);
    926 
    927   if (ExitBlock != LoopExit.getBlock()) {
    928     EmitBlock(ExitBlock);
    929     EmitBranchThroughCleanup(LoopExit);
    930   }
    931 
    932   EmitBlock(ForBody);
    933   Cnt.beginRegion(Builder);
    934 
    935   // Create a block for the increment. In case of a 'continue', we jump there.
    936   JumpDest Continue = getJumpDestInCurrentScope("for.inc");
    937 
    938   // Store the blocks to use for break and continue.
    939   BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
    940 
    941   {
    942     // Create a separate cleanup scope for the loop variable and body.
    943     RunCleanupsScope BodyScope(*this);
    944     EmitStmt(S.getLoopVarStmt());
    945     EmitStmt(S.getBody());
    946   }
    947 
    948   // If there is an increment, emit it next.
    949   EmitBlock(Continue.getBlock());
    950   EmitStmt(S.getInc());
    951 
    952   BreakContinueStack.pop_back();
    953 
    954   EmitBranch(CondBlock);
    955 
    956   ForScope.ForceCleanup();
    957 
    958   if (DI)
    959     DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
    960 
    961   LoopStack.pop();
    962 
    963   // Emit the fall-through block.
    964   EmitBlock(LoopExit.getBlock(), true);
    965 }
    966 
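         /// EmitReturnOfRValue - Store the given RValue into the function's return
         /// slot and branch through any active cleanups to the return block.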
    967 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
    968   if (RV.isScalar()) {
    969     Builder.CreateStore(RV.getScalarVal(), ReturnValue);
    970   } else if (RV.isAggregate()) {
    971     EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
    972   } else {
    973     EmitStoreOfComplex(RV.getComplexVal(),
    974                        MakeNaturalAlignAddrLValue(ReturnValue, Ty),
    975                        /*init*/ true);
    976   }
    977   EmitBranchThroughCleanup(ReturnBlock);
    978 }
    979 
    980 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
    981 /// if the function returns void, or may be missing one if the function returns
    982 /// non-void.  Fun stuff :).
    983 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
     984   // Emit the result value, even if unused, to evaluate the side effects.
    985   const Expr *RV = S.getRetValue();
    986 
    987   // Treat block literals in a return expression as if they appeared
    988   // in their own scope.  This permits a small, easily-implemented
    989   // exception to our over-conservative rules about not jumping to
    990   // statements following block literals with non-trivial cleanups.
    991   RunCleanupsScope cleanupScope(*this);
    992   if (const ExprWithCleanups *cleanups =
    993         dyn_cast_or_null<ExprWithCleanups>(RV)) {
    994     enterFullExpression(cleanups);
    995     RV = cleanups->getSubExpr();
    996   }
    997 
    998   // FIXME: Clean this up by using an LValue for ReturnTemp,
    999   // EmitStoreThroughLValue, and EmitAnyExpr.
   1000   if (getLangOpts().ElideConstructors &&
   1001       S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
   1002     // Apply the named return value optimization for this return statement,
   1003     // which means doing nothing: the appropriate result has already been
   1004     // constructed into the NRVO variable.
   1005 
    1006     // If there is an NRVO flag for this variable, set it to 1 to indicate
   1007     // that the cleanup code should not destroy the variable.
   1008     if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
   1009       Builder.CreateStore(Builder.getTrue(), NRVOFlag);
   1010   } else if (!ReturnValue || (RV && RV->getType()->isVoidType())) {
   1011     // Make sure not to return anything, but evaluate the expression
   1012     // for side effects.
   1013     if (RV)
   1014       EmitAnyExpr(RV);
   1015   } else if (!RV) {
   1016     // Do nothing (return value is left uninitialized)
   1017   } else if (FnRetTy->isReferenceType()) {
   1018     // If this function returns a reference, take the address of the expression
   1019     // rather than the value.
   1020     RValue Result = EmitReferenceBindingToExpr(RV);
   1021     Builder.CreateStore(Result.getScalarVal(), ReturnValue);
   1022   } else {
   1023     switch (getEvaluationKind(RV->getType())) {
   1024     case TEK_Scalar:
   1025       Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
   1026       break;
   1027     case TEK_Complex:
   1028       EmitComplexExprIntoLValue(RV,
   1029                      MakeNaturalAlignAddrLValue(ReturnValue, RV->getType()),
   1030                                 /*isInit*/ true);
   1031       break;
   1032     case TEK_Aggregate: {
   1033       CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
   1034       EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment,
   1035                                             Qualifiers(),
   1036                                             AggValueSlot::IsDestructed,
   1037                                             AggValueSlot::DoesNotNeedGCBarriers,
   1038                                             AggValueSlot::IsNotAliased));
   1039       break;
   1040     }
   1041     }
   1042   }
   1043 
   1044   ++NumReturnExprs;
   1045   if (!RV || RV->isEvaluatable(getContext()))
   1046     ++NumSimpleReturnExprs;
   1047 
   1048   cleanupScope.ForceCleanup();
   1049   EmitBranchThroughCleanup(ReturnBlock);
   1050 }
   1051 
   1052 void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
   1053   // As long as debug info is modeled with instructions, we have to ensure we
    1054   // have a place to insert and write the stop point here.
   1055   if (HaveInsertPoint())
   1056     EmitStopPoint(&S);
   1057 
   1058   for (const auto *I : S.decls())
   1059     EmitDecl(*I);
   1060 }
   1061 
   1062 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
   1063   assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
   1064 
   1065   // If this code is reachable then emit a stop point (if generating
   1066   // debug info). We have to do this ourselves because we are on the
   1067   // "simple" statement path.
   1068   if (HaveInsertPoint())
   1069     EmitStopPoint(&S);
   1070 
   1071   EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
   1072 }
   1073 
   1074 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
   1075   assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
   1076 
   1077   // If this code is reachable then emit a stop point (if generating
   1078   // debug info). We have to do this ourselves because we are on the
   1079   // "simple" statement path.
   1080   if (HaveInsertPoint())
   1081     EmitStopPoint(&S);
   1082 
   1083   EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
   1084 }
   1085 
    1086 /// EmitCaseStmtRange - If the case statement's range is not too big, add one
    1087 /// switch case for each value within the range. If the range is too big,
    1088 /// emit an "if" condition check instead.
   1089 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
   1090   assert(S.getRHS() && "Expected RHS value in CaseStmt");
   1091 
   1092   llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
   1093   llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
   1094 
   1095   RegionCounter CaseCnt = getPGORegionCounter(&S);
   1096 
   1097   // Emit the code for this case. We do this first to make sure it is
   1098   // properly chained from our predecessor before generating the
   1099   // switch machinery to enter this block.
   1100   llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
   1101   EmitBlockWithFallThrough(CaseDest, CaseCnt);
   1102   EmitStmt(S.getSubStmt());
   1103 
   1104   // If range is empty, do nothing.
   1105   if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
   1106     return;
   1107 
   1108   llvm::APInt Range = RHS - LHS;
   1109   // FIXME: parameters such as this should not be hardcoded.
   1110   if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
   1111     // Range is small enough to add multiple switch instruction cases.
   1112     uint64_t Total = CaseCnt.getCount();
   1113     unsigned NCases = Range.getZExtValue() + 1;
   1114     // We only have one region counter for the entire set of cases here, so we
   1115     // need to divide the weights evenly between the generated cases, ensuring
   1116     // that the total weight is preserved. E.g., a weight of 5 over three cases
   1117     // will be distributed as weights of 2, 2, and 1.
   1118     uint64_t Weight = Total / NCases, Rem = Total % NCases;
   1119     for (unsigned I = 0; I != NCases; ++I) {
   1120       if (SwitchWeights)
   1121         SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
   1122       if (Rem)
   1123         Rem--;
   1124       SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
   1125       LHS++;
   1126     }
   1127     return;
   1128   }
   1129 
   1130   // The range is too big. Emit "if" condition into a new block,
   1131   // making sure to save and restore the current insertion point.
   1132   llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
   1133 
   1134   // Push this test onto the chain of range checks (which terminates
   1135   // in the default basic block). The switch's default will be changed
   1136   // to the top of this chain after switch emission is complete.
   1137   llvm::BasicBlock *FalseDest = CaseRangeBlock;
   1138   CaseRangeBlock = createBasicBlock("sw.caserange");
   1139 
   1140   CurFn->getBasicBlockList().push_back(CaseRangeBlock);
   1141   Builder.SetInsertPoint(CaseRangeBlock);
   1142 
   1143   // Emit range check.
   1144   llvm::Value *Diff =
   1145     Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
   1146   llvm::Value *Cond =
   1147     Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
   1148 
   1149   llvm::MDNode *Weights = nullptr;
   1150   if (SwitchWeights) {
   1151     uint64_t ThisCount = CaseCnt.getCount();
   1152     uint64_t DefaultCount = (*SwitchWeights)[0];
   1153     Weights = PGO.createBranchWeights(ThisCount, DefaultCount);
   1154 
   1155     // Since we're chaining the switch default through each large case range, we
    1156     // need to update the weight for the default, i.e., the first case, to include
   1157     // this case.
   1158     (*SwitchWeights)[0] += ThisCount;
   1159   }
   1160   Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
   1161 
   1162   // Restore the appropriate insertion point.
   1163   if (RestoreBB)
   1164     Builder.SetInsertPoint(RestoreBB);
   1165   else
   1166     Builder.ClearInsertionPoint();
   1167 }
   1168 
   1169 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
   1170   // If there is no enclosing switch instance that we're aware of, then this
   1171   // case statement and its block can be elided.  This situation only happens
   1172   // when we've constant-folded the switch, are emitting the constant case,
   1173   // and part of the constant case includes another case statement.  For
   1174   // instance: switch (4) { case 4: do { case 5: } while (1); }
   1175   if (!SwitchInsn) {
   1176     EmitStmt(S.getSubStmt());
   1177     return;
   1178   }
   1179 
   1180   // Handle case ranges.
   1181   if (S.getRHS()) {
   1182     EmitCaseStmtRange(S);
   1183     return;
   1184   }
   1185 
   1186   RegionCounter CaseCnt = getPGORegionCounter(&S);
   1187   llvm::ConstantInt *CaseVal =
   1188     Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
   1189 
   1190   // If the body of the case is just a 'break', try to not emit an empty block.
   1191   // If we're profiling or we're not optimizing, leave the block in for better
   1192   // debug and coverage analysis.
   1193   if (!CGM.getCodeGenOpts().ProfileInstrGenerate &&
   1194       CGM.getCodeGenOpts().OptimizationLevel > 0 &&
   1195       isa<BreakStmt>(S.getSubStmt())) {
   1196     JumpDest Block = BreakContinueStack.back().BreakBlock;
   1197 
   1198     // Only do this optimization if there are no cleanups that need emitting.
   1199     if (isObviouslyBranchWithoutCleanups(Block)) {
   1200       if (SwitchWeights)
   1201         SwitchWeights->push_back(CaseCnt.getCount());
   1202       SwitchInsn->addCase(CaseVal, Block.getBlock());
   1203 
   1204       // If there was a fallthrough into this case, make sure to redirect it to
   1205       // the end of the switch as well.
   1206       if (Builder.GetInsertBlock()) {
   1207         Builder.CreateBr(Block.getBlock());
   1208         Builder.ClearInsertionPoint();
   1209       }
   1210       return;
   1211     }
   1212   }
   1213 
   1214   llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
   1215   EmitBlockWithFallThrough(CaseDest, CaseCnt);
   1216   if (SwitchWeights)
   1217     SwitchWeights->push_back(CaseCnt.getCount());
   1218   SwitchInsn->addCase(CaseVal, CaseDest);
   1219 
   1220   // Recursively emitting the statement is acceptable, but is not wonderful for
   1221   // code where we have many case statements nested together, i.e.:
   1222   //  case 1:
   1223   //    case 2:
   1224   //      case 3: etc.
   1225   // Handling this recursively will create a new block for each case statement
   1226   // that falls through to the next case which is IR intensive.  It also causes
   1227   // deep recursion which can run into stack depth limitations.  Handle
   1228   // sequential non-range case statements specially.
   1229   const CaseStmt *CurCase = &S;
   1230   const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
   1231 
   1232   // Otherwise, iteratively add consecutive cases to this switch stmt.
   1233   while (NextCase && NextCase->getRHS() == nullptr) {
   1234     CurCase = NextCase;
   1235     llvm::ConstantInt *CaseVal =
   1236       Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
   1237 
   1238     CaseCnt = getPGORegionCounter(NextCase);
   1239     if (SwitchWeights)
   1240       SwitchWeights->push_back(CaseCnt.getCount());
   1241     if (CGM.getCodeGenOpts().ProfileInstrGenerate) {
   1242       CaseDest = createBasicBlock("sw.bb");
   1243       EmitBlockWithFallThrough(CaseDest, CaseCnt);
   1244     }
   1245 
   1246     SwitchInsn->addCase(CaseVal, CaseDest);
   1247     NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
   1248   }
   1249 
   1250   // Normal default recursion for non-cases.
   1251   EmitStmt(CurCase->getSubStmt());
   1252 }
   1253 
   1254 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
   1255   llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
   1256   assert(DefaultBlock->empty() &&
   1257          "EmitDefaultStmt: Default block already defined?");
   1258 
   1259   RegionCounter Cnt = getPGORegionCounter(&S);
   1260   EmitBlockWithFallThrough(DefaultBlock, Cnt);
   1261 
   1262   EmitStmt(S.getSubStmt());
   1263 }
   1264 
   1265 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
   1266 /// constant value that is being switched on, see if we can dead code eliminate
   1267 /// the body of the switch to a simple series of statements to emit.  Basically,
   1268 /// on a switch (5) we want to find these statements:
   1269 ///    case 5:
   1270 ///      printf(...);    <--
   1271 ///      ++i;            <--
   1272 ///      break;
   1273 ///
   1274 /// and add them to the ResultStmts vector.  If it is unsafe to do this
   1275 /// transformation (for example, one of the elided statements contains a label
   1276 /// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
   1277 /// should include statements after it (e.g. the printf() line is a substmt of
   1278 /// the case) then return CSFC_FallThrough.  If we handled it and found a break
   1279 /// statement, then return CSFC_Success.
   1280 ///
   1281 /// If Case is non-null, then we are looking for the specified case, checking
   1282 /// that nothing we jump over contains labels.  If Case is null, then we found
   1283 /// the case and are looking for the break.
   1284 ///
   1285 /// If the recursive walk actually finds our Case, then we set FoundCase to
   1286 /// true.
   1287 ///
   1288 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
   1289 static CSFC_Result CollectStatementsForCase(const Stmt *S,
   1290                                             const SwitchCase *Case,
   1291                                             bool &FoundCase,
   1292                               SmallVectorImpl<const Stmt*> &ResultStmts) {
   1293   // If this is a null statement, just succeed.
   1294   if (!S)
   1295     return Case ? CSFC_Success : CSFC_FallThrough;
   1296 
   1297   // If this is the switchcase (case 4: or default) that we're looking for, then
   1298   // we're in business.  Just add the substatement.
   1299   if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
   1300     if (S == Case) {
   1301       FoundCase = true;
   1302       return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
   1303                                       ResultStmts);
   1304     }
   1305 
   1306     // Otherwise, this is some other case or default statement, just ignore it.
   1307     return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
   1308                                     ResultStmts);
   1309   }
   1310 
   1311   // If we are in the live part of the code and we found our break statement,
   1312   // return a success!
   1313   if (!Case && isa<BreakStmt>(S))
   1314     return CSFC_Success;
   1315 
   1316   // If this is a switch statement, then it might contain the SwitchCase, the
   1317   // break, or neither.
   1318   if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
   1319     // Handle this as two cases: we might be looking for the SwitchCase (if so,
   1320     // the statements we skip over must be safe to elide) or we might already have it.
   1321     CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
   1322     if (Case) {
   1323       // Keep track of whether we see a skipped declaration.  The code could be
   1324       // using the declaration even if it is skipped, so we can't optimize out
   1325       // the decl if the kept statements might refer to it.
   1326       bool HadSkippedDecl = false;
   1327 
   1328       // If we're looking for the case, just see if we can skip each of the
   1329       // substatements.
   1330       for (; Case && I != E; ++I) {
   1331         HadSkippedDecl |= isa<DeclStmt>(*I);
   1332 
   1333         switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
   1334         case CSFC_Failure: return CSFC_Failure;
   1335         case CSFC_Success:
   1336           // A successful result means either 1) that the statement doesn't
   1337           // have the case and is skippable, or 2) that it contains the case
   1338           // value and also the break that exits the switch.  In the latter
   1339           // case, we just verify that the rest of the statements are elidable.
   1340           if (FoundCase) {
   1341             // If we found the case and skipped declarations, we can't do the
   1342             // optimization.
   1343             if (HadSkippedDecl)
   1344               return CSFC_Failure;
   1345 
   1346             for (++I; I != E; ++I)
   1347               if (CodeGenFunction::ContainsLabel(*I, true))
   1348                 return CSFC_Failure;
   1349             return CSFC_Success;
   1350           }
   1351           break;
   1352         case CSFC_FallThrough:
   1353           // If we have a fallthrough condition, then we must have found the
   1354           // case and started to include statements.  Consider the rest of the
   1355           // statements in the compound statement as candidates for inclusion.
   1356           assert(FoundCase && "Didn't find case but returned fallthrough?");
   1357           // We recursively found Case, so we're not looking for it anymore.
   1358           Case = nullptr;
   1359 
   1360           // If we found the case and skipped declarations, we can't do the
   1361           // optimization.
   1362           if (HadSkippedDecl)
   1363             return CSFC_Failure;
   1364           break;
   1365         }
   1366       }
   1367     }
   1368 
   1369     // If we have statements in our range, then we know that the statements are
   1370     // live and need to be added to the set of statements we're tracking.
   1371     for (; I != E; ++I) {
   1372       switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
   1373       case CSFC_Failure: return CSFC_Failure;
   1374       case CSFC_FallThrough:
   1375         // A fallthrough result means that the statement was simple and was
   1376         // added to ResultStmts; keep adding the statements that follow it.
   1377         break;
   1378       case CSFC_Success:
   1379         // A successful result means that we found the break statement and
   1380         // stopped statement inclusion.  We just ensure that any leftover stmts
   1381         // are skippable and return success ourselves.
   1382         for (++I; I != E; ++I)
   1383           if (CodeGenFunction::ContainsLabel(*I, true))
   1384             return CSFC_Failure;
   1385         return CSFC_Success;
   1386       }
   1387     }
   1388 
   1389     return Case ? CSFC_Success : CSFC_FallThrough;
   1390   }
   1391 
   1392   // Okay, this is some other statement that we don't handle explicitly, like a
   1393   // for statement or increment etc.  If we are skipping over this statement,
   1394   // just verify it doesn't have labels, which would make it invalid to elide.
   1395   if (Case) {
   1396     if (CodeGenFunction::ContainsLabel(S, true))
   1397       return CSFC_Failure;
   1398     return CSFC_Success;
   1399   }
   1400 
   1401   // Otherwise, we want to include this statement.  Everything is cool with that
   1402   // so long as it doesn't contain a break out of the switch we're in.
   1403   if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
   1404 
   1405   // Otherwise, everything is great.  Include the statement and tell the caller
   1406   // that we fall through and include the next statement as well.
   1407   ResultStmts.push_back(S);
   1408   return CSFC_FallThrough;
   1409 }
   1410 
   1411 /// FindCaseStatementsForValue - Find the case statement being jumped to and
   1412 /// then invoke CollectStatementsForCase to find the list of statements to emit
   1413 /// for a switch on constant.  See the comment above CollectStatementsForCase
   1414 /// for more details.
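        /// As a rough illustration (invented names): for
        ///   switch (2) { case 1: f(); break; case 2: g(); break; }
        /// with ConstantCondValue == 2, ResultCase ends up pointing at the 'case 2'
        /// statement and ResultStmts holds just the call to g().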
   1415 static bool FindCaseStatementsForValue(const SwitchStmt &S,
   1416                                        const llvm::APSInt &ConstantCondValue,
   1417                                 SmallVectorImpl<const Stmt*> &ResultStmts,
   1418                                        ASTContext &C,
   1419                                        const SwitchCase *&ResultCase) {
   1420   // First step, find the switch case that is being branched to.  We can do this
   1421   // efficiently by scanning the SwitchCase list.
   1422   const SwitchCase *Case = S.getSwitchCaseList();
   1423   const DefaultStmt *DefaultCase = nullptr;
   1424 
   1425   for (; Case; Case = Case->getNextSwitchCase()) {
   1426     // It's either a default or case.  Just remember the default statement in
   1427     // case we're not jumping to any numbered cases.
   1428     if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
   1429       DefaultCase = DS;
   1430       continue;
   1431     }
   1432 
   1433     // Check to see if this case is the one we're looking for.
   1434     const CaseStmt *CS = cast<CaseStmt>(Case);
   1435     // Don't handle case ranges yet.
   1436     if (CS->getRHS()) return false;
   1437 
   1438     // If we found our case, stop scanning; 'Case' now points at it.
   1439     if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
   1440       break;
   1441   }
   1442 
   1443   // If we didn't find a matching case, we use a default if it exists, or we
   1444   // elide the whole switch body!
   1445   if (!Case) {
   1446     // It is safe to elide the body of the switch if it doesn't contain labels
   1447     // etc.  If it is safe, return successfully with an empty ResultStmts list.
   1448     if (!DefaultCase)
   1449       return !CodeGenFunction::ContainsLabel(&S);
   1450     Case = DefaultCase;
   1451   }
   1452 
   1453   // Ok, we know which case is being jumped to; try to collect all the
   1454   // statements that follow it.  This can fail for a variety of reasons.  Also,
   1455   // check to see that the recursive walk actually found our case statement.
   1456   // Insane cases like this can fail to find it in the recursive walk since we
   1457   // don't handle every stmt kind:
   1458   // switch (4) {
   1459   //   while (1) {
   1460   //     case 4: ...
   1461   bool FoundCase = false;
   1462   ResultCase = Case;
   1463   return CollectStatementsForCase(S.getBody(), Case, FoundCase,
   1464                                   ResultStmts) != CSFC_Failure &&
   1465          FoundCase;
   1466 }
   1467 
   1468 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
   1469   // Handle nested switch statements.
   1470   llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
   1471   SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
   1472   llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
   1473 
   1474   // See if we can constant fold the condition of the switch and therefore only
   1475   // emit the live case statement (if any) of the switch.
   1476   llvm::APSInt ConstantCondValue;
   1477   if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
   1478     SmallVector<const Stmt*, 4> CaseStmts;
   1479     const SwitchCase *Case = nullptr;
   1480     if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
   1481                                    getContext(), Case)) {
   1482       if (Case) {
   1483         RegionCounter CaseCnt = getPGORegionCounter(Case);
   1484         CaseCnt.beginRegion(Builder);
   1485       }
   1486       RunCleanupsScope ExecutedScope(*this);
   1487 
   1488       // Emit the condition variable if needed inside the entire cleanup scope
   1489       // used by this special case for constant folded switches.
   1490       if (S.getConditionVariable())
   1491         EmitAutoVarDecl(*S.getConditionVariable());
   1492 
   1493       // At this point, we are no longer "within" a switch instance, so
   1494       // we can temporarily clear SwitchInsn to ensure that any embedded case
   1495       // statements are not emitted.
   1496       SwitchInsn = nullptr;
   1497 
   1498       // Okay, we can dead code eliminate everything except this case.  Emit the
   1499       // specified series of statements and we're good.
   1500       for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
   1501         EmitStmt(CaseStmts[i]);
   1502       RegionCounter ExitCnt = getPGORegionCounter(&S);
   1503       ExitCnt.beginRegion(Builder);
   1504 
   1505       // Now we want to restore the saved switch instance so that nested
   1506       // switches continue to function properly.
   1507       SwitchInsn = SavedSwitchInsn;
   1508 
   1509       return;
   1510     }
   1511   }
   1512 
   1513   JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
   1514 
   1515   RunCleanupsScope ConditionScope(*this);
   1516   if (S.getConditionVariable())
   1517     EmitAutoVarDecl(*S.getConditionVariable());
   1518   llvm::Value *CondV = EmitScalarExpr(S.getCond());
   1519 
   1520   // Create a basic block to hold the code that comes after the switch
   1521   // statement. We also need to create a default block now so that
   1522   // explicit case range tests have a place to jump to on
   1523   // failure.
   1524   llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
   1525   SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
   1526   if (PGO.haveRegionCounts()) {
   1527     // Walk the SwitchCase list to find how many there are.
   1528     uint64_t DefaultCount = 0;
   1529     unsigned NumCases = 0;
   1530     for (const SwitchCase *Case = S.getSwitchCaseList();
   1531          Case;
   1532          Case = Case->getNextSwitchCase()) {
   1533       if (isa<DefaultStmt>(Case))
   1534         DefaultCount = getPGORegionCounter(Case).getCount();
   1535       NumCases += 1;
   1536     }
   1537     SwitchWeights = new SmallVector<uint64_t, 16>();
   1538     SwitchWeights->reserve(NumCases);
   1539     // The default needs to be first. We store the edge count, so we already
   1540     // know the right weight.
   1541     SwitchWeights->push_back(DefaultCount);
   1542   }
   1543   CaseRangeBlock = DefaultBlock;
   1544 
   1545   // Clear the insertion point to indicate we are in unreachable code.
   1546   Builder.ClearInsertionPoint();
   1547 
   1548   // All break statements jump to SwitchExit. If BreakContinueStack is
   1549   // non-empty, reuse the enclosing loop's ContinueBlock.
   1550   JumpDest OuterContinue;
   1551   if (!BreakContinueStack.empty())
   1552     OuterContinue = BreakContinueStack.back().ContinueBlock;
   1553 
   1554   BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
   1555 
   1556   // Emit switch body.
   1557   EmitStmt(S.getBody());
   1558 
   1559   BreakContinueStack.pop_back();
   1560 
   1561   // Update the default block in case explicit case range tests have
   1562   // been chained on top.
   1563   SwitchInsn->setDefaultDest(CaseRangeBlock);
   1564 
   1565   // If a default was never emitted:
   1566   if (!DefaultBlock->getParent()) {
   1567     // If we have cleanups, emit the default block so that there's a
   1568     // place to jump through the cleanups from.
   1569     if (ConditionScope.requiresCleanups()) {
   1570       EmitBlock(DefaultBlock);
   1571 
   1572     // Otherwise, just forward the default block to the switch end.
   1573     } else {
   1574       DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
   1575       delete DefaultBlock;
   1576     }
   1577   }
   1578 
   1579   ConditionScope.ForceCleanup();
   1580 
   1581   // Emit continuation.
   1582   EmitBlock(SwitchExit.getBlock(), true);
   1583   RegionCounter ExitCnt = getPGORegionCounter(&S);
   1584   ExitCnt.beginRegion(Builder);
   1585 
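          // Descriptive note (added): SwitchWeights holds raw PGO edge counts, the
          // default edge first and then one entry per case in the order the cases
          // were added above; createBranchWeights turns them into the branch_weights
          // !prof metadata attached here.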
   1586   if (SwitchWeights) {
   1587     assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
   1588            "switch weights do not match switch cases");
   1589     // If there's only one jump destination, there's no sense weighting it.
   1590     if (SwitchWeights->size() > 1)
   1591       SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
   1592                               PGO.createBranchWeights(*SwitchWeights));
   1593     delete SwitchWeights;
   1594   }
   1595   SwitchInsn = SavedSwitchInsn;
   1596   SwitchWeights = SavedSwitchWeights;
   1597   CaseRangeBlock = SavedCRBlock;
   1598 }
   1599 
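        /// SimplifyConstraint - Rewrite a GCC inline-asm constraint string into the
        /// form LLVM expects (a descriptive comment added for clarity; see the cases
        /// below).  Illustrative examples: modifiers such as '=' and '+' are dropped,
        /// 'g' becomes "imr", alternatives separated by ',' are joined with '|', and
        /// a symbolic operand reference such as "[foo]" is replaced by its operand
        /// index.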
   1600 static std::string
   1601 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
   1602                  SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
   1603   std::string Result;
   1604 
   1605   while (*Constraint) {
   1606     switch (*Constraint) {
   1607     default:
   1608       Result += Target.convertConstraint(Constraint);
   1609       break;
   1610     // Ignore these
   1611     case '*':
   1612     case '?':
   1613     case '!':
   1614     case '=': // Will see this and the following in multi-alt constraints.
   1615     case '+':
   1616       break;
   1617     case '#': // Ignore the rest of the constraint alternative.
   1618       while (Constraint[1] && Constraint[1] != ',')
   1619         Constraint++;
   1620       break;
   1621     case ',':
   1622       Result += "|";
   1623       break;
   1624     case 'g':
   1625       Result += "imr";
   1626       break;
   1627     case '[': {
   1628       assert(OutCons &&
   1629              "Must pass output names to constraints with a symbolic name");
   1630       unsigned Index;
   1631       bool result = Target.resolveSymbolicName(Constraint,
   1632                                                &(*OutCons)[0],
   1633                                                OutCons->size(), Index);
   1634       assert(result && "Could not resolve symbolic name"); (void)result;
   1635       Result += llvm::utostr(Index);
   1636       break;
   1637     }
   1638     }
   1639 
   1640     Constraint++;
   1641   }
   1642 
   1643   return Result;
   1644 }
   1645 
   1646 /// AddVariableConstraints - Look at AsmExpr; if it is a variable declared
   1647 /// as using a particular register, add that register as a constraint to be
   1648 /// used in this asm stmt.
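        ///
        /// For example (an illustrative sketch with invented names):
        ///   register int *p asm("r0");
        ///   __asm__("..." : "=r"(p));
        /// the "r" output constraint for 'p' is replaced by the explicit register
        /// constraint "{r0}".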
   1649 static std::string
   1650 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
   1651                        const TargetInfo &Target, CodeGenModule &CGM,
   1652                        const AsmStmt &Stmt) {
   1653   const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
   1654   if (!AsmDeclRef)
   1655     return Constraint;
   1656   const ValueDecl &Value = *AsmDeclRef->getDecl();
   1657   const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
   1658   if (!Variable)
   1659     return Constraint;
   1660   if (Variable->getStorageClass() != SC_Register)
   1661     return Constraint;
   1662   AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
   1663   if (!Attr)
   1664     return Constraint;
   1665   StringRef Register = Attr->getLabel();
   1666   assert(Target.isValidGCCRegisterName(Register));
   1667   // We're using validateOutputConstraint here because we only care if
   1668   // this is a register constraint.
   1669   TargetInfo::ConstraintInfo Info(Constraint, "");
   1670   if (Target.validateOutputConstraint(Info) &&
   1671       !Info.allowsRegister()) {
   1672     CGM.ErrorUnsupported(&Stmt, "__asm__");
   1673     return Constraint;
   1674   }
   1675   // Canonicalize the register here before returning it.
   1676   Register = Target.getNormalizedGCCRegisterName(Register);
   1677   return "{" + Register.str() + "}";
   1678 }
   1679 
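        /// EmitAsmInputLValue - Turn an lvalue asm operand into the value that is
        /// actually passed to the inline asm (a descriptive comment added for
        /// clarity).  When the operand may live in a register, scalars are loaded
        /// directly and small power-of-two-sized aggregates are loaded through an
        /// integer of the same width; otherwise the address is passed and the
        /// constraint gets a '*' to mark it as indirect.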
   1680 llvm::Value*
   1681 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
   1682                                     LValue InputValue, QualType InputType,
   1683                                     std::string &ConstraintStr,
   1684                                     SourceLocation Loc) {
   1685   llvm::Value *Arg;
   1686   if (Info.allowsRegister() || !Info.allowsMemory()) {
   1687     if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
   1688       Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
   1689     } else {
   1690       llvm::Type *Ty = ConvertType(InputType);
   1691       uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
   1692       if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
   1693         Ty = llvm::IntegerType::get(getLLVMContext(), Size);
   1694         Ty = llvm::PointerType::getUnqual(Ty);
   1695 
   1696         Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
   1697                                                        Ty));
   1698       } else {
   1699         Arg = InputValue.getAddress();
   1700         ConstraintStr += '*';
   1701       }
   1702     }
   1703   } else {
   1704     Arg = InputValue.getAddress();
   1705     ConstraintStr += '*';
   1706   }
   1707 
   1708   return Arg;
   1709 }
   1710 
   1711 llvm::Value* CodeGenFunction::EmitAsmInput(
   1712                                          const TargetInfo::ConstraintInfo &Info,
   1713                                            const Expr *InputExpr,
   1714                                            std::string &ConstraintStr) {
   1715   if (Info.allowsRegister() || !Info.allowsMemory())
   1716     if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
   1717       return EmitScalarExpr(InputExpr);
   1718 
   1719   InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
   1720   LValue Dest = EmitLValue(InputExpr);
   1721   return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
   1722                             InputExpr->getExprLoc());
   1723 }
   1724 
   1725 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
   1726 /// asm call instruction.  The !srcloc MDNode contains a list of constant
   1727 /// integers which are the source locations of the start of each line in the
   1728 /// asm.
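        ///
        /// A rough sketch of what gets attached for a two-line asm string:
        ///   !srcloc !{i32 <raw loc of line 1>, i32 <raw loc of line 2>}
        /// where each operand is the raw encoding of the SourceLocation at which
        /// that line of the asm begins.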
   1729 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
   1730                                       CodeGenFunction &CGF) {
   1731   SmallVector<llvm::Value *, 8> Locs;
   1732   // Add the location of the first line to the MDNode.
   1733   Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
   1734                                         Str->getLocStart().getRawEncoding()));
   1735   StringRef StrVal = Str->getString();
   1736   if (!StrVal.empty()) {
   1737     const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
   1738     const LangOptions &LangOpts = CGF.CGM.getLangOpts();
   1739 
   1740     // Add the location of the start of each subsequent line of the asm to the
   1741     // MDNode.
   1742     for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
   1743       if (StrVal[i] != '\n') continue;
   1744       SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
   1745                                                       CGF.getTarget());
   1746       Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
   1747                                             LineLoc.getRawEncoding()));
   1748     }
   1749   }
   1750 
   1751   return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
   1752 }
   1753 
   1754 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
   1755   // Assemble the final asm string.
   1756   std::string AsmString = S.generateAsmString(getContext());
   1757 
   1758   // Get all the output and input constraints together.
   1759   SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
   1760   SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
   1761 
   1762   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
   1763     StringRef Name;
   1764     if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
   1765       Name = GAS->getOutputName(i);
   1766     TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
   1767     bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
   1768     assert(IsValid && "Failed to parse output constraint");
   1769     OutputConstraintInfos.push_back(Info);
   1770   }
   1771 
   1772   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
   1773     StringRef Name;
   1774     if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
   1775       Name = GAS->getInputName(i);
   1776     TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
   1777     bool IsValid =
   1778       getTarget().validateInputConstraint(OutputConstraintInfos.data(),
   1779                                           S.getNumOutputs(), Info);
   1780     assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
   1781     InputConstraintInfos.push_back(Info);
   1782   }
   1783 
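          // Descriptive note (added): Constraints accumulates the LLVM-level
          // constraint string.  For something like
          //   asm("..." : "=r"(x) : "r"(y) : "memory")
          // it ends up roughly as "=r,r,~{memory}" plus any target-specific
          // clobbers (an illustrative sketch; exact spellings vary by target).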
   1784   std::string Constraints;
   1785 
   1786   std::vector<LValue> ResultRegDests;
   1787   std::vector<QualType> ResultRegQualTys;
   1788   std::vector<llvm::Type *> ResultRegTypes;
   1789   std::vector<llvm::Type *> ResultTruncRegTypes;
   1790   std::vector<llvm::Type *> ArgTypes;
   1791   std::vector<llvm::Value*> Args;
   1792 
   1793   // Keep track of inout constraints.
   1794   std::string InOutConstraints;
   1795   std::vector<llvm::Value*> InOutArgs;
   1796   std::vector<llvm::Type*> InOutArgTypes;
   1797 
   1798   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
   1799     TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
   1800 
   1801     // Simplify the output constraint.
   1802     std::string OutputConstraint(S.getOutputConstraint(i));
   1803     OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
   1804                                           getTarget());
   1805 
   1806     const Expr *OutExpr = S.getOutputExpr(i);
   1807     OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
   1808 
   1809     OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
   1810                                               getTarget(), CGM, S);
   1811 
   1812     LValue Dest = EmitLValue(OutExpr);
   1813     if (!Constraints.empty())
   1814       Constraints += ',';
   1815 
   1816     // If this is a register output, then make the inline asm return it
   1817     // by-value.  If this is a memory result, return the value by-reference.
   1818     if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) {
   1819       Constraints += "=" + OutputConstraint;
   1820       ResultRegQualTys.push_back(OutExpr->getType());
   1821       ResultRegDests.push_back(Dest);
   1822       ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
   1823       ResultTruncRegTypes.push_back(ResultRegTypes.back());
   1824 
   1825       // If this output is tied to an input, and if the input is larger, then
   1826       // we need to set the actual result type of the inline asm node to be the
   1827       // same as the input type.
   1828       if (Info.hasMatchingInput()) {
   1829         unsigned InputNo;
   1830         for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
   1831           TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
   1832           if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
   1833             break;
   1834         }
   1835         assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
   1836 
   1837         QualType InputTy = S.getInputExpr(InputNo)->getType();
   1838         QualType OutputType = OutExpr->getType();
   1839 
   1840         uint64_t InputSize = getContext().getTypeSize(InputTy);
   1841         if (getContext().getTypeSize(OutputType) < InputSize) {
   1842           // Form the asm to return the value as a larger integer or fp type.
   1843           ResultRegTypes.back() = ConvertType(InputTy);
   1844         }
   1845       }
   1846       if (llvm::Type* AdjTy =
   1847             getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
   1848                                                  ResultRegTypes.back()))
   1849         ResultRegTypes.back() = AdjTy;
   1850       else {
   1851         CGM.getDiags().Report(S.getAsmLoc(),
   1852                               diag::err_asm_invalid_type_in_input)
   1853             << OutExpr->getType() << OutputConstraint;
   1854       }
   1855     } else {
   1856       ArgTypes.push_back(Dest.getAddress()->getType());
   1857       Args.push_back(Dest.getAddress());
   1858       Constraints += "=*";
   1859       Constraints += OutputConstraint;
   1860     }
   1861 
   1862     if (Info.isReadWrite()) {
   1863       InOutConstraints += ',';
   1864 
   1865       const Expr *InputExpr = S.getOutputExpr(i);
   1866       llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
   1867                                             InOutConstraints,
   1868                                             InputExpr->getExprLoc());
   1869 
   1870       if (llvm::Type* AdjTy =
   1871           getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
   1872                                                Arg->getType()))
   1873         Arg = Builder.CreateBitCast(Arg, AdjTy);
   1874 
   1875       if (Info.allowsRegister())
   1876         InOutConstraints += llvm::utostr(i);
   1877       else
   1878         InOutConstraints += OutputConstraint;
   1879 
   1880       InOutArgTypes.push_back(Arg->getType());
   1881       InOutArgs.push_back(Arg);
   1882     }
   1883   }
   1884 
   1885   unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
   1886 
   1887   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
   1888     const Expr *InputExpr = S.getInputExpr(i);
   1889 
   1890     TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
   1891 
   1892     if (!Constraints.empty())
   1893       Constraints += ',';
   1894 
   1895     // Simplify the input constraint.
   1896     std::string InputConstraint(S.getInputConstraint(i));
   1897     InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
   1898                                          &OutputConstraintInfos);
   1899 
   1900     InputConstraint =
   1901       AddVariableConstraints(InputConstraint,
   1902                             *InputExpr->IgnoreParenNoopCasts(getContext()),
   1903                             getTarget(), CGM, S);
   1904 
   1905     llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
   1906 
   1907     // If this input argument is tied to a larger output result, extend the
   1908     // input to be the same size as the output.  The LLVM backend wants the
   1909     // input and output of a matching constraint to be the same size.  Note
   1910     // that GCC does not define what the top bits are here.  We use zext because
   1911     // that is usually cheaper, but LLVM IR should really get an anyext someday.
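            // For instance (illustrative): with an i64 output tied to an i32 input
            // through a matching constraint, the input is zero-extended to i64 so
            // both sides of the tie have the same LLVM type.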
   1912     if (Info.hasTiedOperand()) {
   1913       unsigned Output = Info.getTiedOperand();
   1914       QualType OutputType = S.getOutputExpr(Output)->getType();
   1915       QualType InputTy = InputExpr->getType();
   1916 
   1917       if (getContext().getTypeSize(OutputType) >
   1918           getContext().getTypeSize(InputTy)) {
   1919         // Use ptrtoint as appropriate so that we can do our extension.
   1920         if (isa<llvm::PointerType>(Arg->getType()))
   1921           Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
   1922         llvm::Type *OutputTy = ConvertType(OutputType);
   1923         if (isa<llvm::IntegerType>(OutputTy))
   1924           Arg = Builder.CreateZExt(Arg, OutputTy);
   1925         else if (isa<llvm::PointerType>(OutputTy))
   1926           Arg = Builder.CreateZExt(Arg, IntPtrTy);
   1927         else {
   1928           assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
   1929           Arg = Builder.CreateFPExt(Arg, OutputTy);
   1930         }
   1931       }
   1932     }
   1933     if (llvm::Type* AdjTy =
   1934               getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
   1935                                                    Arg->getType()))
   1936       Arg = Builder.CreateBitCast(Arg, AdjTy);
   1937     else
   1938       CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
   1939           << InputExpr->getType() << InputConstraint;
   1940 
   1941     ArgTypes.push_back(Arg->getType());
   1942     Args.push_back(Arg);
   1943     Constraints += InputConstraint;
   1944   }
   1945 
   1946   // Append the "input" part of inout constraints last.
   1947   for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
   1948     ArgTypes.push_back(InOutArgTypes[i]);
   1949     Args.push_back(InOutArgs[i]);
   1950   }
   1951   Constraints += InOutConstraints;
   1952 
   1953   // Clobbers
   1954   for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
   1955     StringRef Clobber = S.getClobber(i);
   1956 
   1957     if (Clobber != "memory" && Clobber != "cc")
   1958       Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
   1959 
   1960     if (i != 0 || NumConstraints != 0)
   1961       Constraints += ',';
   1962 
   1963     Constraints += "~{";
   1964     Constraints += Clobber;
   1965     Constraints += '}';
   1966   }
   1967 
   1968   // Add machine-specific clobbers.
   1969   std::string MachineClobbers = getTarget().getClobbers();
   1970   if (!MachineClobbers.empty()) {
   1971     if (!Constraints.empty())
   1972       Constraints += ',';
   1973     Constraints += MachineClobbers;
   1974   }
   1975 
   1976   llvm::Type *ResultType;
   1977   if (ResultRegTypes.empty())
   1978     ResultType = VoidTy;
   1979   else if (ResultRegTypes.size() == 1)
   1980     ResultType = ResultRegTypes[0];
   1981   else
   1982     ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
   1983 
   1984   llvm::FunctionType *FTy =
   1985     llvm::FunctionType::get(ResultType, ArgTypes, false);
   1986 
   1987   bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
   1988   llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
   1989     llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
   1990   llvm::InlineAsm *IA =
   1991     llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
   1992                          /* IsAlignStack */ false, AsmDialect);
   1993   llvm::CallInst *Result = Builder.CreateCall(IA, Args);
   1994   Result->addAttribute(llvm::AttributeSet::FunctionIndex,
   1995                        llvm::Attribute::NoUnwind);
   1996 
   1997   // Slap the source location of the inline asm into a !srcloc metadata on the
   1998   // call.  FIXME: Handle metadata for MS-style inline asms.
   1999   if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
   2000     Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
   2001                                                    *this));
   2002 
   2003   // Extract all of the register value results from the asm.
   2004   std::vector<llvm::Value*> RegResults;
   2005   if (ResultRegTypes.size() == 1) {
   2006     RegResults.push_back(Result);
   2007   } else {
   2008     for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
   2009       llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
   2010       RegResults.push_back(Tmp);
   2011     }
   2012   }
   2013 
   2014   for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
   2015     llvm::Value *Tmp = RegResults[i];
   2016 
   2017     // If the result type of the LLVM IR asm doesn't match the result type of
   2018     // the expression, do the conversion.
   2019     if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
   2020       llvm::Type *TruncTy = ResultTruncRegTypes[i];
   2021 
   2022       // Truncate the integer result to the right size; note that TruncTy can
   2023       // be a pointer.
   2024       if (TruncTy->isFloatingPointTy())
   2025         Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
   2026       else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
   2027         uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
   2028         Tmp = Builder.CreateTrunc(Tmp,
   2029                    llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
   2030         Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
   2031       } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
   2032         uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
   2033         Tmp = Builder.CreatePtrToInt(Tmp,
   2034                    llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
   2035         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
   2036       } else if (TruncTy->isIntegerTy()) {
   2037         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
   2038       } else if (TruncTy->isVectorTy()) {
   2039         Tmp = Builder.CreateBitCast(Tmp, TruncTy);
   2040       }
   2041     }
   2042 
   2043     EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
   2044   }
   2045 }
   2046 
   2047 static LValue InitCapturedStruct(CodeGenFunction &CGF, const CapturedStmt &S) {
   2048   const RecordDecl *RD = S.getCapturedRecordDecl();
   2049   QualType RecordTy = CGF.getContext().getRecordType(RD);
   2050 
   2051   // Initialize the captured struct.
   2052   LValue SlotLV = CGF.MakeNaturalAlignAddrLValue(
   2053                     CGF.CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
   2054 
   2055   RecordDecl::field_iterator CurField = RD->field_begin();
   2056   for (CapturedStmt::capture_init_iterator I = S.capture_init_begin(),
   2057                                            E = S.capture_init_end();
   2058        I != E; ++I, ++CurField) {
   2059     LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
   2060     CGF.EmitInitializerForField(*CurField, LV, *I, ArrayRef<VarDecl *>());
   2061   }
   2062 
   2063   return SlotLV;
   2064 }
   2065 
   2066 static void InitVLACaptures(CodeGenFunction &CGF, const CapturedStmt &S) {
   2067   for (auto &C : S.captures()) {
   2068     if (C.capturesVariable()) {
   2069       QualType QTy;
   2070       auto VD = C.getCapturedVar();
   2071       if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
   2072         QTy = PVD->getOriginalType();
   2073       else
   2074         QTy = VD->getType();
   2075       if (QTy->isVariablyModifiedType()) {
   2076         CGF.EmitVariablyModifiedType(QTy);
   2077       }
   2078     }
   2079   }
   2080 }
   2081 
   2082 /// Generate an outlined function for the body of a CapturedStmt, store any
   2083 /// captured variables into the captured struct, and call the outlined function.
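        ///
        /// A rough sketch of the emitted sequence (assuming the default helper name
        /// and a single captured variable):
        ///   %agg.captured = alloca %struct.anon
        ///   ; ...store the captured value into %agg.captured...
        ///   call void @__captured_stmt(%struct.anon* %agg.captured)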
   2084 llvm::Function *
   2085 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
   2086   LValue CapStruct = InitCapturedStruct(*this, S);
   2087 
   2088   // Emit the CapturedDecl
   2089   CodeGenFunction CGF(CGM, true);
   2090   CGF.CapturedStmtInfo = new CGCapturedStmtInfo(S, K);
   2091   llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
   2092   delete CGF.CapturedStmtInfo;
   2093 
   2094   // Emit call to the helper function.
   2095   EmitCallOrInvoke(F, CapStruct.getAddress());
   2096 
   2097   return F;
   2098 }
   2099 
   2100 llvm::Value *
   2101 CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
   2102   LValue CapStruct = InitCapturedStruct(*this, S);
   2103   return CapStruct.getAddress();
   2104 }
   2105 
   2106 /// Creates the outlined function for a CapturedStmt.
   2107 llvm::Function *
   2108 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
   2109   assert(CapturedStmtInfo &&
   2110     "CapturedStmtInfo should be set when generating the captured function");
   2111   const CapturedDecl *CD = S.getCapturedDecl();
   2112   const RecordDecl *RD = S.getCapturedRecordDecl();
   2113   SourceLocation Loc = S.getLocStart();
   2114   assert(CD->hasBody() && "missing CapturedDecl body");
   2115 
   2116   // Build the argument list.
   2117   ASTContext &Ctx = CGM.getContext();
   2118   FunctionArgList Args;
   2119   Args.append(CD->param_begin(), CD->param_end());
   2120 
   2121   // Create the function declaration.
   2122   FunctionType::ExtInfo ExtInfo;
   2123   const CGFunctionInfo &FuncInfo =
   2124       CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
   2125                                                     /*IsVariadic=*/false);
   2126   llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
   2127 
   2128   llvm::Function *F =
   2129     llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
   2130                            CapturedStmtInfo->getHelperName(), &CGM.getModule());
   2131   CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
   2132 
   2133   // Generate the function.
   2134   StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args,
   2135                 CD->getLocation(),
   2136                 CD->getBody()->getLocStart());
   2137   // Set the context parameter in CapturedStmtInfo.
   2138   llvm::Value *DeclPtr = LocalDeclMap[CD->getContextParam()];
   2139   assert(DeclPtr && "missing context parameter for CapturedStmt");
   2140   CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
   2141 
   2142   // Initialize variable-length arrays.
   2143   InitVLACaptures(*this, S);
   2144 
   2145   // If 'this' is captured, load it into CXXThisValue.
   2146   if (CapturedStmtInfo->isCXXThisExprCaptured()) {
   2147     FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
   2148     LValue LV = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
   2149                                            Ctx.getTagDeclType(RD));
   2150     LValue ThisLValue = EmitLValueForField(LV, FD);
   2151     CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
   2152   }
   2153 
   2154   PGO.assignRegionCounters(CD, F);
   2155   CapturedStmtInfo->EmitBody(*this, CD->getBody());
   2156   FinishFunction(CD->getBodyRBrace());
   2157   PGO.emitInstrumentationData();
   2158   PGO.destroyRegionCounters();
   2159 
   2160   return F;
   2161 }
   2162