      1 //===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This contains code to emit Aggregate Expr nodes as LLVM code.
     11 //
     12 //===----------------------------------------------------------------------===//
     13 
     14 #include "CodeGenFunction.h"
     15 #include "CGObjCRuntime.h"
     16 #include "CodeGenModule.h"
     17 #include "clang/AST/ASTContext.h"
     18 #include "clang/AST/DeclCXX.h"
     19 #include "clang/AST/DeclTemplate.h"
     20 #include "clang/AST/StmtVisitor.h"
     21 #include "llvm/IR/Constants.h"
     22 #include "llvm/IR/Function.h"
     23 #include "llvm/IR/GlobalVariable.h"
     24 #include "llvm/IR/Intrinsics.h"
     25 using namespace clang;
     26 using namespace CodeGen;
     27 
     28 //===----------------------------------------------------------------------===//
     29 //                        Aggregate Expression Emitter
     30 //===----------------------------------------------------------------------===//
     31 
      32 namespace {
     33 class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     34   CodeGenFunction &CGF;
     35   CGBuilderTy &Builder;
     36   AggValueSlot Dest;
     37 
     38   /// We want to use 'dest' as the return slot except under two
     39   /// conditions:
     40   ///   - The destination slot requires garbage collection, so we
     41   ///     need to use the GC API.
     42   ///   - The destination slot is potentially aliased.
     43   bool shouldUseDestForReturnSlot() const {
     44     return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
     45   }
     46 
     47   ReturnValueSlot getReturnValueSlot() const {
     48     if (!shouldUseDestForReturnSlot())
     49       return ReturnValueSlot();
     50 
     51     return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
     52   }
     53 
     54   AggValueSlot EnsureSlot(QualType T) {
     55     if (!Dest.isIgnored()) return Dest;
     56     return CGF.CreateAggTemp(T, "agg.tmp.ensured");
     57   }
     58   void EnsureDest(QualType T) {
     59     if (!Dest.isIgnored()) return;
     60     Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
     61   }
     62 
     63 public:
     64   AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest)
     65     : CGF(cgf), Builder(CGF.Builder), Dest(Dest) {
     66   }
     67 
     68   //===--------------------------------------------------------------------===//
     69   //                               Utilities
     70   //===--------------------------------------------------------------------===//
     71 
     72   /// EmitAggLoadOfLValue - Given an expression with aggregate type that
      73   /// represents an lvalue, this method emits the address of the lvalue,
     74   /// then loads the result into DestPtr.
     75   void EmitAggLoadOfLValue(const Expr *E);
     76 
     77   /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
     78   void EmitFinalDestCopy(QualType type, const LValue &src);
     79   void EmitFinalDestCopy(QualType type, RValue src,
     80                          CharUnits srcAlignment = CharUnits::Zero());
     81   void EmitCopy(QualType type, const AggValueSlot &dest,
     82                 const AggValueSlot &src);
     83 
     84   void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
     85 
     86   void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
     87                      QualType elementType, InitListExpr *E);
     88 
     89   AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
     90     if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
     91       return AggValueSlot::NeedsGCBarriers;
     92     return AggValueSlot::DoesNotNeedGCBarriers;
     93   }
     94 
     95   bool TypeRequiresGCollection(QualType T);
     96 
     97   //===--------------------------------------------------------------------===//
     98   //                            Visitor Methods
     99   //===--------------------------------------------------------------------===//
    100 
    101   void VisitStmt(Stmt *S) {
    102     CGF.ErrorUnsupported(S, "aggregate expression");
    103   }
    104   void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
    105   void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    106     Visit(GE->getResultExpr());
    107   }
    108   void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
    109   void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    110     return Visit(E->getReplacement());
    111   }
    112 
    113   // l-values.
    114   void VisitDeclRefExpr(DeclRefExpr *E) {
    115     // For aggregates, we should always be able to emit the variable
    116     // as an l-value unless it's a reference.  This is due to the fact
    117     // that we can't actually ever see a normal l2r conversion on an
    118     // aggregate in C++, and in C there's no language standard
    119     // actively preventing us from listing variables in the captures
    120     // list of a block.
    121     if (E->getDecl()->getType()->isReferenceType()) {
    122       if (CodeGenFunction::ConstantEmission result
    123             = CGF.tryEmitAsConstant(E)) {
    124         EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
    125         return;
    126       }
    127     }
    128 
    129     EmitAggLoadOfLValue(E);
    130   }
    131 
    132   void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
    133   void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
    134   void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
    135   void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
    136   void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    137     EmitAggLoadOfLValue(E);
    138   }
    139   void VisitPredefinedExpr(const PredefinedExpr *E) {
    140     EmitAggLoadOfLValue(E);
    141   }
    142 
    143   // Operators.
    144   void VisitCastExpr(CastExpr *E);
    145   void VisitCallExpr(const CallExpr *E);
    146   void VisitStmtExpr(const StmtExpr *E);
    147   void VisitBinaryOperator(const BinaryOperator *BO);
    148   void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
    149   void VisitBinAssign(const BinaryOperator *E);
    150   void VisitBinComma(const BinaryOperator *E);
    151 
    152   void VisitObjCMessageExpr(ObjCMessageExpr *E);
    153   void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    154     EmitAggLoadOfLValue(E);
    155   }
    156 
    157   void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
    158   void VisitChooseExpr(const ChooseExpr *CE);
    159   void VisitInitListExpr(InitListExpr *E);
    160   void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
    161   void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    162     Visit(DAE->getExpr());
    163   }
    164   void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    165     CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    166     Visit(DIE->getExpr());
    167   }
    168   void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
    169   void VisitCXXConstructExpr(const CXXConstructExpr *E);
    170   void VisitLambdaExpr(LambdaExpr *E);
    171   void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
    172   void VisitExprWithCleanups(ExprWithCleanups *E);
    173   void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
    174   void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
    175   void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
    176   void VisitOpaqueValueExpr(OpaqueValueExpr *E);
    177 
    178   void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    179     if (E->isGLValue()) {
    180       LValue LV = CGF.EmitPseudoObjectLValue(E);
    181       return EmitFinalDestCopy(E->getType(), LV);
    182     }
    183 
    184     CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
    185   }
    186 
    187   void VisitVAArgExpr(VAArgExpr *E);
    188 
    189   void EmitInitializationToLValue(Expr *E, LValue Address);
    190   void EmitNullInitializationToLValue(LValue Address);
    191   //  case Expr::ChooseExprClass:
    192   void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
    193   void VisitAtomicExpr(AtomicExpr *E) {
    194     CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
    195   }
    196 };
    197 }  // end anonymous namespace.
    198 
    199 //===----------------------------------------------------------------------===//
    200 //                                Utilities
    201 //===----------------------------------------------------------------------===//
    202 
    203 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
     204 /// represents an lvalue, this method emits the address of the lvalue,
    205 /// then loads the result into DestPtr.
    206 void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
    207   LValue LV = CGF.EmitLValue(E);
    208 
    209   // If the type of the l-value is atomic, then do an atomic load.
    210   if (LV.getType()->isAtomicType()) {
    211     CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    212     return;
    213   }
    214 
    215   EmitFinalDestCopy(E->getType(), LV);
    216 }
    217 
    218 /// \brief True if the given aggregate type requires special GC API calls.
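         /// This only matters under Objective-C garbage collection (-fobjc-gc):
         /// a struct containing __strong object-pointer members must be copied
         /// with the collector-aware memmove rather than a plain memcpy.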
    219 bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
    220   // Only record types have members that might require garbage collection.
    221   const RecordType *RecordTy = T->getAs<RecordType>();
    222   if (!RecordTy) return false;
    223 
    224   // Don't mess with non-trivial C++ types.
    225   RecordDecl *Record = RecordTy->getDecl();
    226   if (isa<CXXRecordDecl>(Record) &&
    227       (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
    228        !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    229     return false;
    230 
    231   // Check whether the type has an object member.
    232   return Record->hasObjectMember();
    233 }
    234 
    235 /// \brief Perform the final move to DestPtr if for some reason
    236 /// getReturnValueSlot() didn't use it directly.
    237 ///
    238 /// The idea is that you do something like this:
    239 ///   RValue Result = EmitSomething(..., getReturnValueSlot());
    240 ///   EmitMoveFromReturnSlot(E, Result);
    241 ///
    242 /// If nothing interferes, this will cause the result to be emitted
    243 /// directly into the return value slot.  Otherwise, a final move
    244 /// will be performed.
    245 void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
    246   if (shouldUseDestForReturnSlot()) {
    247     // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    248     // The possibility of undef rvalues complicates that a lot,
    249     // though, so we can't really assert.
    250     return;
    251   }
    252 
    253   // Otherwise, copy from there to the destination.
    254   assert(Dest.getAddr() != src.getAggregateAddr());
    255   std::pair<CharUnits, CharUnits> typeInfo =
    256     CGF.getContext().getTypeInfoInChars(E->getType());
    257   EmitFinalDestCopy(E->getType(), src, typeInfo.second);
    258 }
    259 
    260 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
    261 void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
    262                                        CharUnits srcAlign) {
    263   assert(src.isAggregate() && "value must be aggregate value!");
    264   LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
    265   EmitFinalDestCopy(type, srcLV);
    266 }
    267 
    268 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
    269 void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
    270   // If Dest is ignored, then we're evaluating an aggregate expression
    271   // in a context that doesn't care about the result.  Note that loads
    272   // from volatile l-values force the existence of a non-ignored
    273   // destination.
    274   if (Dest.isIgnored())
    275     return;
    276 
    277   AggValueSlot srcAgg =
    278     AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
    279                             needsGC(type), AggValueSlot::IsAliased);
    280   EmitCopy(type, Dest, srcAgg);
    281 }
    282 
    283 /// Perform a copy from the source into the destination.
    284 ///
    285 /// \param type - the type of the aggregate being copied; qualifiers are
    286 ///   ignored
    287 void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
    288                               const AggValueSlot &src) {
    289   if (dest.requiresGCollection()) {
    290     CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
    291     llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    292     CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
    293                                                       dest.getAddr(),
    294                                                       src.getAddr(),
    295                                                       size);
    296     return;
    297   }
    298 
    299   // If the result of the assignment is used, copy the LHS there also.
    300   // It's volatile if either side is.  Use the minimum alignment of
    301   // the two sides.
    302   CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
    303                         dest.isVolatile() || src.isVolatile(),
    304                         std::min(dest.getAlignment(), src.getAlignment()));
    305 }
    306 
    307 /// \brief Emit the initializer for a std::initializer_list initialized with a
    308 /// real initializer list.
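         /// For example, for `std::initializer_list<int> il = {1, 2, 3};`, the
         /// backing array has already been materialized by the subexpression;
         /// here we fill in the list object's fields, which are either a
         /// {begin, end} pointer pair or a {begin, length} pair depending on
         /// the library implementation.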
    309 void
    310 AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
    311   // Emit an array containing the elements.  The array is externally destructed
    312   // if the std::initializer_list object is.
    313   ASTContext &Ctx = CGF.getContext();
    314   LValue Array = CGF.EmitLValue(E->getSubExpr());
    315   assert(Array.isSimple() && "initializer_list array not a simple lvalue");
    316   llvm::Value *ArrayPtr = Array.getAddress();
    317 
    318   const ConstantArrayType *ArrayType =
    319       Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
    320   assert(ArrayType && "std::initializer_list constructed from non-array");
    321 
    322   // FIXME: Perform the checks on the field types in SemaInit.
    323   RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
    324   RecordDecl::field_iterator Field = Record->field_begin();
    325   if (Field == Record->field_end()) {
    326     CGF.ErrorUnsupported(E, "weird std::initializer_list");
    327     return;
    328   }
    329 
    330   // Start pointer.
    331   if (!Field->getType()->isPointerType() ||
    332       !Ctx.hasSameType(Field->getType()->getPointeeType(),
    333                        ArrayType->getElementType())) {
    334     CGF.ErrorUnsupported(E, "weird std::initializer_list");
    335     return;
    336   }
    337 
    338   AggValueSlot Dest = EnsureSlot(E->getType());
    339   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
    340                                      Dest.getAlignment());
    341   LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
    342   llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
    343   llvm::Value *IdxStart[] = { Zero, Zero };
    344   llvm::Value *ArrayStart =
    345       Builder.CreateInBoundsGEP(ArrayPtr, IdxStart, "arraystart");
    346   CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
    347   ++Field;
    348 
    349   if (Field == Record->field_end()) {
    350     CGF.ErrorUnsupported(E, "weird std::initializer_list");
    351     return;
    352   }
    353 
    354   llvm::Value *Size = Builder.getInt(ArrayType->getSize());
    355   LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
    356   if (Field->getType()->isPointerType() &&
    357       Ctx.hasSameType(Field->getType()->getPointeeType(),
    358                       ArrayType->getElementType())) {
    359     // End pointer.
    360     llvm::Value *IdxEnd[] = { Zero, Size };
    361     llvm::Value *ArrayEnd =
    362         Builder.CreateInBoundsGEP(ArrayPtr, IdxEnd, "arrayend");
    363     CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
    364   } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    365     // Length.
    366     CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
    367   } else {
    368     CGF.ErrorUnsupported(E, "weird std::initializer_list");
    369     return;
    370   }
    371 }
    372 
    373 /// \brief Determine if E is a trivial array filler, that is, one that is
    374 /// equivalent to zero-initialization.
    375 static bool isTrivialFiller(Expr *E) {
    376   if (!E)
    377     return true;
    378 
    379   if (isa<ImplicitValueInitExpr>(E))
    380     return true;
    381 
    382   if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    383     if (ILE->getNumInits())
    384       return false;
    385     return isTrivialFiller(ILE->getArrayFiller());
    386   }
    387 
    388   if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    389     return Cons->getConstructor()->isDefaultConstructor() &&
    390            Cons->getConstructor()->isTrivial();
    391 
    392   // FIXME: Are there other cases where we can avoid emitting an initializer?
    393   return false;
    394 }
    395 
    396 /// \brief Emit initialization of an array from an initializer list.
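         /// For example, `int a[4] = {1, 2};` stores the two explicit
         /// initializers and then handles the remaining elements via the array
         /// filler: either a zero-fill (skipped if the destination is already
         /// zeroed) or an explicit loop applying the filler to each element.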
    397 void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
    398                                    QualType elementType, InitListExpr *E) {
    399   uint64_t NumInitElements = E->getNumInits();
    400 
    401   uint64_t NumArrayElements = AType->getNumElements();
    402   assert(NumInitElements <= NumArrayElements);
    403 
    404   // DestPtr is an array*.  Construct an elementType* by drilling
    405   // down a level.
    406   llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    407   llvm::Value *indices[] = { zero, zero };
    408   llvm::Value *begin =
    409     Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
    410 
    411   // Exception safety requires us to destroy all the
    412   // already-constructed members if an initializer throws.
    413   // For that, we'll need an EH cleanup.
    414   QualType::DestructionKind dtorKind = elementType.isDestructedType();
    415   llvm::AllocaInst *endOfInit = nullptr;
    416   EHScopeStack::stable_iterator cleanup;
    417   llvm::Instruction *cleanupDominator = nullptr;
    418   if (CGF.needsEHCleanup(dtorKind)) {
    419     // In principle we could tell the cleanup where we are more
    420     // directly, but the control flow can get so varied here that it
    421     // would actually be quite complex.  Therefore we go through an
    422     // alloca.
    423     endOfInit = CGF.CreateTempAlloca(begin->getType(),
    424                                      "arrayinit.endOfInit");
    425     cleanupDominator = Builder.CreateStore(begin, endOfInit);
    426     CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
    427                                          CGF.getDestroyer(dtorKind));
    428     cleanup = CGF.EHStack.stable_begin();
    429 
    430   // Otherwise, remember that we didn't need a cleanup.
    431   } else {
    432     dtorKind = QualType::DK_none;
    433   }
    434 
    435   llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
    436 
    437   // The 'current element to initialize'.  The invariants on this
    438   // variable are complicated.  Essentially, after each iteration of
    439   // the loop, it points to the last initialized element, except
    440   // that it points to the beginning of the array before any
    441   // elements have been initialized.
    442   llvm::Value *element = begin;
    443 
    444   // Emit the explicit initializers.
    445   for (uint64_t i = 0; i != NumInitElements; ++i) {
    446     // Advance to the next element.
    447     if (i > 0) {
    448       element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
    449 
    450       // Tell the cleanup that it needs to destroy up to this
    451       // element.  TODO: some of these stores can be trivially
    452       // observed to be unnecessary.
    453       if (endOfInit) Builder.CreateStore(element, endOfInit);
    454     }
    455 
    456     LValue elementLV = CGF.MakeAddrLValue(element, elementType);
    457     EmitInitializationToLValue(E->getInit(i), elementLV);
    458   }
    459 
    460   // Check whether there's a non-trivial array-fill expression.
    461   Expr *filler = E->getArrayFiller();
    462   bool hasTrivialFiller = isTrivialFiller(filler);
    463 
    464   // Any remaining elements need to be zero-initialized, possibly
     465   // using the filler expression.  We can skip this if we're
    466   // emitting to zeroed memory.
    467   if (NumInitElements != NumArrayElements &&
    468       !(Dest.isZeroed() && hasTrivialFiller &&
    469         CGF.getTypes().isZeroInitializable(elementType))) {
    470 
    471     // Use an actual loop.  This is basically
    472     //   do { *array++ = filler; } while (array != end);
    473 
    474     // Advance to the start of the rest of the array.
    475     if (NumInitElements) {
    476       element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
    477       if (endOfInit) Builder.CreateStore(element, endOfInit);
    478     }
    479 
    480     // Compute the end of the array.
    481     llvm::Value *end = Builder.CreateInBoundsGEP(begin,
    482                       llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
    483                                                  "arrayinit.end");
    484 
    485     llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    486     llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
    487 
    488     // Jump into the body.
    489     CGF.EmitBlock(bodyBB);
    490     llvm::PHINode *currentElement =
    491       Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    492     currentElement->addIncoming(element, entryBB);
    493 
    494     // Emit the actual filler expression.
    495     LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
    496     if (filler)
    497       EmitInitializationToLValue(filler, elementLV);
    498     else
    499       EmitNullInitializationToLValue(elementLV);
    500 
    501     // Move on to the next element.
    502     llvm::Value *nextElement =
    503       Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
    504 
    505     // Tell the EH cleanup that we finished with the last element.
    506     if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
    507 
    508     // Leave the loop if we're done.
    509     llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
    510                                              "arrayinit.done");
    511     llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    512     Builder.CreateCondBr(done, endBB, bodyBB);
    513     currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
    514 
    515     CGF.EmitBlock(endBB);
    516   }
    517 
    518   // Leave the partial-array cleanup if we entered one.
    519   if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
    520 }
    521 
    522 //===----------------------------------------------------------------------===//
    523 //                            Visitor Methods
    524 //===----------------------------------------------------------------------===//
    525 
    526 void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
    527   Visit(E->GetTemporaryExpr());
    528 }
    529 
    530 void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
    531   EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
    532 }
    533 
    534 void
    535 AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    536   if (Dest.isPotentiallyAliased() &&
    537       E->getType().isPODType(CGF.getContext())) {
    538     // For a POD type, just emit a load of the lvalue + a copy, because our
    539     // compound literal might alias the destination.
    540     EmitAggLoadOfLValue(E);
    541     return;
    542   }
    543 
    544   AggValueSlot Slot = EnsureSlot(E->getType());
    545   CGF.EmitAggExpr(E->getInitializer(), Slot);
    546 }
    547 
    548 /// Attempt to look through various unimportant expressions to find a
    549 /// cast of the given kind.
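         /// For example, when emitting an AtomicToNonAtomic cast whose operand
         /// is (behind parens and no-op casts) a NonAtomicToAtomic cast, the two
         /// conversions cancel and the inner operand can be emitted directly.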
    550 static Expr *findPeephole(Expr *op, CastKind kind) {
    551   while (true) {
    552     op = op->IgnoreParens();
    553     if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
    554       if (castE->getCastKind() == kind)
    555         return castE->getSubExpr();
    556       if (castE->getCastKind() == CK_NoOp)
    557         continue;
    558     }
    559     return nullptr;
    560   }
    561 }
    562 
    563 void AggExprEmitter::VisitCastExpr(CastExpr *E) {
    564   switch (E->getCastKind()) {
    565   case CK_Dynamic: {
    566     // FIXME: Can this actually happen? We have no test coverage for it.
    567     assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    568     LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
    569                                       CodeGenFunction::TCK_Load);
    570     // FIXME: Do we also need to handle property references here?
    571     if (LV.isSimple())
    572       CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    573     else
    574       CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
    575 
    576     if (!Dest.isIgnored())
    577       CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    578     break;
    579   }
    580 
    581   case CK_ToUnion: {
    582     if (Dest.isIgnored()) break;
    583 
    584     // GCC union extension
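             // For example, `(union U)x` initializes, in place in the
             // destination slot, the union member whose type matches `x`.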
    585     QualType Ty = E->getSubExpr()->getType();
    586     QualType PtrTy = CGF.getContext().getPointerType(Ty);
    587     llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
    588                                                  CGF.ConvertType(PtrTy));
    589     EmitInitializationToLValue(E->getSubExpr(),
    590                                CGF.MakeAddrLValue(CastPtr, Ty));
    591     break;
    592   }
    593 
    594   case CK_DerivedToBase:
    595   case CK_BaseToDerived:
    596   case CK_UncheckedDerivedToBase: {
    597     llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
    598                 "should have been unpacked before we got here");
    599   }
    600 
    601   case CK_NonAtomicToAtomic:
    602   case CK_AtomicToNonAtomic: {
    603     bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
    604 
    605     // Determine the atomic and value types.
    606     QualType atomicType = E->getSubExpr()->getType();
    607     QualType valueType = E->getType();
    608     if (isToAtomic) std::swap(atomicType, valueType);
    609 
    610     assert(atomicType->isAtomicType());
    611     assert(CGF.getContext().hasSameUnqualifiedType(valueType,
    612                           atomicType->castAs<AtomicType>()->getValueType()));
    613 
    614     // Just recurse normally if we're ignoring the result or the
    615     // atomic type doesn't change representation.
    616     if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
    617       return Visit(E->getSubExpr());
    618     }
    619 
    620     CastKind peepholeTarget =
    621       (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
    622 
    623     // These two cases are reverses of each other; try to peephole them.
    624     if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
    625       assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
    626                                                      E->getType()) &&
    627            "peephole significantly changed types?");
    628       return Visit(op);
    629     }
    630 
    631     // If we're converting an r-value of non-atomic type to an r-value
    632     // of atomic type, just emit directly into the relevant sub-object.
    633     if (isToAtomic) {
    634       AggValueSlot valueDest = Dest;
    635       if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
     636         // Zero-initialize.  (Strictly speaking, we only need to initialize
    637         // the padding at the end, but this is simpler.)
    638         if (!Dest.isZeroed())
    639           CGF.EmitNullInitialization(Dest.getAddr(), atomicType);
    640 
    641         // Build a GEP to refer to the subobject.
    642         llvm::Value *valueAddr =
    643             CGF.Builder.CreateStructGEP(valueDest.getAddr(), 0);
    644         valueDest = AggValueSlot::forAddr(valueAddr,
    645                                           valueDest.getAlignment(),
    646                                           valueDest.getQualifiers(),
    647                                           valueDest.isExternallyDestructed(),
    648                                           valueDest.requiresGCollection(),
    649                                           valueDest.isPotentiallyAliased(),
    650                                           AggValueSlot::IsZeroed);
    651       }
    652 
    653       CGF.EmitAggExpr(E->getSubExpr(), valueDest);
    654       return;
    655     }
    656 
    657     // Otherwise, we're converting an atomic type to a non-atomic type.
    658     // Make an atomic temporary, emit into that, and then copy the value out.
    659     AggValueSlot atomicSlot =
    660       CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    661     CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
    662 
    663     llvm::Value *valueAddr =
    664       Builder.CreateStructGEP(atomicSlot.getAddr(), 0);
    665     RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    666     return EmitFinalDestCopy(valueType, rvalue);
    667   }
    668 
    669   case CK_LValueToRValue:
    670     // If we're loading from a volatile type, force the destination
    671     // into existence.
    672     if (E->getSubExpr()->getType().isVolatileQualified()) {
    673       EnsureDest(E->getType());
    674       return Visit(E->getSubExpr());
    675     }
    676 
    677     // fallthrough
    678 
    679   case CK_NoOp:
    680   case CK_UserDefinedConversion:
    681   case CK_ConstructorConversion:
    682     assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
    683                                                    E->getType()) &&
    684            "Implicit cast types must be compatible");
    685     Visit(E->getSubExpr());
    686     break;
    687 
    688   case CK_LValueBitCast:
    689     llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
    690 
    691   case CK_Dependent:
    692   case CK_BitCast:
    693   case CK_ArrayToPointerDecay:
    694   case CK_FunctionToPointerDecay:
    695   case CK_NullToPointer:
    696   case CK_NullToMemberPointer:
    697   case CK_BaseToDerivedMemberPointer:
    698   case CK_DerivedToBaseMemberPointer:
    699   case CK_MemberPointerToBoolean:
    700   case CK_ReinterpretMemberPointer:
    701   case CK_IntegralToPointer:
    702   case CK_PointerToIntegral:
    703   case CK_PointerToBoolean:
    704   case CK_ToVoid:
    705   case CK_VectorSplat:
    706   case CK_IntegralCast:
    707   case CK_IntegralToBoolean:
    708   case CK_IntegralToFloating:
    709   case CK_FloatingToIntegral:
    710   case CK_FloatingToBoolean:
    711   case CK_FloatingCast:
    712   case CK_CPointerToObjCPointerCast:
    713   case CK_BlockPointerToObjCPointerCast:
    714   case CK_AnyPointerToBlockPointerCast:
    715   case CK_ObjCObjectLValueCast:
    716   case CK_FloatingRealToComplex:
    717   case CK_FloatingComplexToReal:
    718   case CK_FloatingComplexToBoolean:
    719   case CK_FloatingComplexCast:
    720   case CK_FloatingComplexToIntegralComplex:
    721   case CK_IntegralRealToComplex:
    722   case CK_IntegralComplexToReal:
    723   case CK_IntegralComplexToBoolean:
    724   case CK_IntegralComplexCast:
    725   case CK_IntegralComplexToFloatingComplex:
    726   case CK_ARCProduceObject:
    727   case CK_ARCConsumeObject:
    728   case CK_ARCReclaimReturnedObject:
    729   case CK_ARCExtendBlockObject:
    730   case CK_CopyAndAutoreleaseBlockObject:
    731   case CK_BuiltinFnToFnPtr:
    732   case CK_ZeroToOCLEvent:
    733   case CK_AddressSpaceConversion:
    734     llvm_unreachable("cast kind invalid for aggregate types");
    735   }
    736 }
    737 
    738 void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
    739   if (E->getCallReturnType()->isReferenceType()) {
    740     EmitAggLoadOfLValue(E);
    741     return;
    742   }
    743 
    744   RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
    745   EmitMoveFromReturnSlot(E, RV);
    746 }
    747 
    748 void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
    749   RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
    750   EmitMoveFromReturnSlot(E, RV);
    751 }
    752 
    753 void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
    754   CGF.EmitIgnoredExpr(E->getLHS());
    755   Visit(E->getRHS());
    756 }
    757 
    758 void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
    759   CodeGenFunction::StmtExprEvaluation eval(CGF);
    760   CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
    761 }
    762 
    763 void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
    764   if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    765     VisitPointerToDataMemberBinaryOperator(E);
    766   else
    767     CGF.ErrorUnsupported(E, "aggregate binary expression");
    768 }
    769 
    770 void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
    771                                                     const BinaryOperator *E) {
    772   LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
    773   EmitFinalDestCopy(E->getType(), LV);
    774 }
    775 
    776 /// Is the value of the given expression possibly a reference to or
    777 /// into a __block variable?
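         /// For example, given `__block Agg b;`, an assignment `b.field = f();`
         /// is risky: the call may cause the block holding `b` to be copied to
         /// the heap, moving `b`, so the RHS has to be evaluated before the
         /// address of the LHS is computed.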
    778 static bool isBlockVarRef(const Expr *E) {
    779   // Make sure we look through parens.
    780   E = E->IgnoreParens();
    781 
    782   // Check for a direct reference to a __block variable.
    783   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    784     const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    785     return (var && var->hasAttr<BlocksAttr>());
    786   }
    787 
    788   // More complicated stuff.
    789 
    790   // Binary operators.
    791   if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    792     // For an assignment or pointer-to-member operation, just care
    793     // about the LHS.
    794     if (op->isAssignmentOp() || op->isPtrMemOp())
    795       return isBlockVarRef(op->getLHS());
    796 
    797     // For a comma, just care about the RHS.
    798     if (op->getOpcode() == BO_Comma)
    799       return isBlockVarRef(op->getRHS());
    800 
    801     // FIXME: pointer arithmetic?
    802     return false;
    803 
    804   // Check both sides of a conditional operator.
    805   } else if (const AbstractConditionalOperator *op
    806                = dyn_cast<AbstractConditionalOperator>(E)) {
    807     return isBlockVarRef(op->getTrueExpr())
    808         || isBlockVarRef(op->getFalseExpr());
    809 
    810   // OVEs are required to support BinaryConditionalOperators.
    811   } else if (const OpaqueValueExpr *op
    812                = dyn_cast<OpaqueValueExpr>(E)) {
    813     if (const Expr *src = op->getSourceExpr())
    814       return isBlockVarRef(src);
    815 
    816   // Casts are necessary to get things like (*(int*)&var) = foo().
    817   // We don't really care about the kind of cast here, except
    818   // we don't want to look through l2r casts, because it's okay
    819   // to get the *value* in a __block variable.
    820   } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    821     if (cast->getCastKind() == CK_LValueToRValue)
    822       return false;
    823     return isBlockVarRef(cast->getSubExpr());
    824 
    825   // Handle unary operators.  Again, just aggressively look through
    826   // it, ignoring the operation.
    827   } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    828     return isBlockVarRef(uop->getSubExpr());
    829 
    830   // Look into the base of a field access.
    831   } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    832     return isBlockVarRef(mem->getBase());
    833 
    834   // Look into the base of a subscript.
    835   } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    836     return isBlockVarRef(sub->getBase());
    837   }
    838 
    839   return false;
    840 }
    841 
    842 void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
    843   // For an assignment to work, the value on the right has
    844   // to be compatible with the value on the left.
    845   assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
    846                                                  E->getRHS()->getType())
    847          && "Invalid assignment");
    848 
    849   // If the LHS might be a __block variable, and the RHS can
    850   // potentially cause a block copy, we need to evaluate the RHS first
    851   // so that the assignment goes the right place.
    852   // This is pretty semantically fragile.
    853   if (isBlockVarRef(E->getLHS()) &&
    854       E->getRHS()->HasSideEffects(CGF.getContext())) {
    855     // Ensure that we have a destination, and evaluate the RHS into that.
    856     EnsureDest(E->getRHS()->getType());
    857     Visit(E->getRHS());
    858 
    859     // Now emit the LHS and copy into it.
    860     LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    861 
    862     // That copy is an atomic copy if the LHS is atomic.
    863     if (LHS.getType()->isAtomicType()) {
    864       CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    865       return;
    866     }
    867 
    868     EmitCopy(E->getLHS()->getType(),
    869              AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
    870                                      needsGC(E->getLHS()->getType()),
    871                                      AggValueSlot::IsAliased),
    872              Dest);
    873     return;
    874   }
    875 
    876   LValue LHS = CGF.EmitLValue(E->getLHS());
    877 
    878   // If we have an atomic type, evaluate into the destination and then
    879   // do an atomic copy.
    880   if (LHS.getType()->isAtomicType()) {
    881     EnsureDest(E->getRHS()->getType());
    882     Visit(E->getRHS());
    883     CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    884     return;
    885   }
    886 
    887   // Codegen the RHS so that it stores directly into the LHS.
    888   AggValueSlot LHSSlot =
    889     AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
    890                             needsGC(E->getLHS()->getType()),
    891                             AggValueSlot::IsAliased);
     892   // A non-volatile aggregate destination might have a volatile member.
    893   if (!LHSSlot.isVolatile() &&
    894       CGF.hasVolatileMember(E->getLHS()->getType()))
    895     LHSSlot.setVolatile(true);
    896 
    897   CGF.EmitAggExpr(E->getRHS(), LHSSlot);
    898 
    899   // Copy into the destination if the assignment isn't ignored.
    900   EmitFinalDestCopy(E->getType(), LHS);
    901 }
    902 
    903 void AggExprEmitter::
    904 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
    905   llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
    906   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
    907   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
    908 
    909   // Bind the common expression if necessary.
    910   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
    911 
    912   RegionCounter Cnt = CGF.getPGORegionCounter(E);
    913   CodeGenFunction::ConditionalEvaluation eval(CGF);
    914   CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, Cnt.getCount());
    915 
    916   // Save whether the destination's lifetime is externally managed.
    917   bool isExternallyDestructed = Dest.isExternallyDestructed();
    918 
    919   eval.begin(CGF);
    920   CGF.EmitBlock(LHSBlock);
    921   Cnt.beginRegion(Builder);
    922   Visit(E->getTrueExpr());
    923   eval.end(CGF);
    924 
    925   assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
    926   CGF.Builder.CreateBr(ContBlock);
    927 
    928   // If the result of an agg expression is unused, then the emission
    929   // of the LHS might need to create a destination slot.  That's fine
    930   // with us, and we can safely emit the RHS into the same slot, but
    931   // we shouldn't claim that it's already being destructed.
    932   Dest.setExternallyDestructed(isExternallyDestructed);
    933 
    934   eval.begin(CGF);
    935   CGF.EmitBlock(RHSBlock);
    936   Visit(E->getFalseExpr());
    937   eval.end(CGF);
    938 
    939   CGF.EmitBlock(ContBlock);
    940 }
    941 
    942 void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
    943   Visit(CE->getChosenSubExpr());
    944 }
    945 
    946 void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
    947   llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
    948   llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
    949 
    950   if (!ArgPtr) {
    951     // If EmitVAArg fails, we fall back to the LLVM instruction.
    952     llvm::Value *Val =
    953         Builder.CreateVAArg(ArgValue, CGF.ConvertType(VE->getType()));
    954     if (!Dest.isIgnored())
    955       Builder.CreateStore(Val, Dest.getAddr());
    956     return;
    957   }
    958 
    959   EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
    960 }
    961 
    962 void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
    963   // Ensure that we have a slot, but if we already do, remember
    964   // whether it was externally destructed.
    965   bool wasExternallyDestructed = Dest.isExternallyDestructed();
    966   EnsureDest(E->getType());
    967 
    968   // We're going to push a destructor if there isn't already one.
    969   Dest.setExternallyDestructed();
    970 
    971   Visit(E->getSubExpr());
    972 
    973   // Push that destructor we promised.
    974   if (!wasExternallyDestructed)
    975     CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
    976 }
    977 
    978 void
    979 AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
    980   AggValueSlot Slot = EnsureSlot(E->getType());
    981   CGF.EmitCXXConstructExpr(E, Slot);
    982 }
    983 
    984 void
    985 AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
    986   AggValueSlot Slot = EnsureSlot(E->getType());
    987   CGF.EmitLambdaExpr(E, Slot);
    988 }
    989 
    990 void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
    991   CGF.enterFullExpression(E);
    992   CodeGenFunction::RunCleanupsScope cleanups(CGF);
    993   Visit(E->getSubExpr());
    994 }
    995 
    996 void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
    997   QualType T = E->getType();
    998   AggValueSlot Slot = EnsureSlot(T);
    999   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
   1000 }
   1001 
   1002 void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
   1003   QualType T = E->getType();
   1004   AggValueSlot Slot = EnsureSlot(T);
   1005   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
   1006 }
   1007 
   1008 /// isSimpleZero - If emitting this value will obviously just cause a store of
   1009 /// zero to memory, return true.  This can return false if uncertain, so it just
   1010 /// handles simple cases.
   1011 static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
   1012   E = E->IgnoreParens();
   1013 
   1014   // 0
   1015   if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
   1016     return IL->getValue() == 0;
   1017   // +0.0
   1018   if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
   1019     return FL->getValue().isPosZero();
   1020   // int()
   1021   if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
   1022       CGF.getTypes().isZeroInitializable(E->getType()))
   1023     return true;
   1024   // (int*)0 - Null pointer expressions.
   1025   if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
   1026     return ICE->getCastKind() == CK_NullToPointer;
   1027   // '\0'
   1028   if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
   1029     return CL->getValue() == 0;
   1030 
   1031   // Otherwise, hard case: conservatively return false.
   1032   return false;
   1033 }
   1034 
   1035 
   1036 void
   1037 AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
   1038   QualType type = LV.getType();
   1039   // FIXME: Ignore result?
   1040   // FIXME: Are initializers affected by volatile?
   1041   if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
   1042     // Storing "i32 0" to a zero'd memory location is a noop.
   1043     return;
   1044   } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
   1045     return EmitNullInitializationToLValue(LV);
   1046   } else if (type->isReferenceType()) {
   1047     RValue RV = CGF.EmitReferenceBindingToExpr(E);
   1048     return CGF.EmitStoreThroughLValue(RV, LV);
   1049   }
   1050 
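           // Otherwise dispatch on how the type is evaluated: complex and
           // scalar values are stored directly, while aggregates are emitted
           // into the lvalue's slot (propagating whether the memory is known
           // to be zeroed).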
   1051   switch (CGF.getEvaluationKind(type)) {
   1052   case TEK_Complex:
   1053     CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
   1054     return;
   1055   case TEK_Aggregate:
   1056     CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
   1057                                                AggValueSlot::IsDestructed,
   1058                                       AggValueSlot::DoesNotNeedGCBarriers,
   1059                                                AggValueSlot::IsNotAliased,
   1060                                                Dest.isZeroed()));
   1061     return;
   1062   case TEK_Scalar:
   1063     if (LV.isSimple()) {
   1064       CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
   1065     } else {
   1066       CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
   1067     }
   1068     return;
   1069   }
   1070   llvm_unreachable("bad evaluation kind");
   1071 }
   1072 
   1073 void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
   1074   QualType type = lv.getType();
   1075 
   1076   // If the destination slot is already zeroed out before the aggregate is
   1077   // copied into it, we don't have to emit any zeros here.
   1078   if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
   1079     return;
   1080 
   1081   if (CGF.hasScalarEvaluationKind(type)) {
   1082     // For non-aggregates, we can store the appropriate null constant.
   1083     llvm::Value *null = CGF.CGM.EmitNullConstant(type);
   1084     // Note that the following is not equivalent to
   1085     // EmitStoreThroughBitfieldLValue for ARC types.
   1086     if (lv.isBitField()) {
   1087       CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
   1088     } else {
   1089       assert(lv.isSimple());
   1090       CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
   1091     }
   1092   } else {
   1093     // There's a potential optimization opportunity in combining
   1094     // memsets; that would be easy for arrays, but relatively
   1095     // difficult for structures with the current code.
   1096     CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
   1097   }
   1098 }
   1099 
   1100 void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
   1101 #if 0
   1102   // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
   1103   // (Length of globals? Chunks of zeroed-out space?).
   1104   //
   1105   // If we can, prefer a copy from a global; this is a lot less code for long
   1106   // globals, and it's easier for the current optimizers to analyze.
   1107   if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
   1108     llvm::GlobalVariable* GV =
   1109     new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
   1110                              llvm::GlobalValue::InternalLinkage, C, "");
   1111     EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
   1112     return;
   1113   }
   1114 #endif
   1115   if (E->hadArrayRangeDesignator())
   1116     CGF.ErrorUnsupported(E, "GNU array range designator extension");
   1117 
   1118   AggValueSlot Dest = EnsureSlot(E->getType());
   1119 
   1120   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
   1121                                      Dest.getAlignment());
   1122 
   1123   // Handle initialization of an array.
   1124   if (E->getType()->isArrayType()) {
   1125     if (E->isStringLiteralInit())
   1126       return Visit(E->getInit(0));
   1127 
   1128     QualType elementType =
   1129         CGF.getContext().getAsArrayType(E->getType())->getElementType();
   1130 
   1131     llvm::PointerType *APType =
   1132       cast<llvm::PointerType>(Dest.getAddr()->getType());
   1133     llvm::ArrayType *AType =
   1134       cast<llvm::ArrayType>(APType->getElementType());
   1135 
   1136     EmitArrayInit(Dest.getAddr(), AType, elementType, E);
   1137     return;
   1138   }
   1139 
   1140   assert(E->getType()->isRecordType() && "Only support structs/unions here!");
   1141 
   1142   // Do struct initialization; this code just sets each individual member
    1143   // to the appropriate value.  This makes bitfield support automatic;
   1144   // the disadvantage is that the generated code is more difficult for
   1145   // the optimizer, especially with bitfields.
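           // For example, `struct S { int a; unsigned b : 3; } s = {1};` stores
           // 1 into 'a' and null-initializes the bit-field 'b', which has no
           // explicit initializer.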
   1146   unsigned NumInitElements = E->getNumInits();
   1147   RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
   1148 
   1149   // Prepare a 'this' for CXXDefaultInitExprs.
   1150   CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddr());
   1151 
   1152   if (record->isUnion()) {
   1153     // Only initialize one field of a union. The field itself is
   1154     // specified by the initializer list.
   1155     if (!E->getInitializedFieldInUnion()) {
   1156       // Empty union; we have nothing to do.
   1157 
   1158 #ifndef NDEBUG
    1159       // Make sure that it's really an empty union and not a failure of
   1160       // semantic analysis.
   1161       for (const auto *Field : record->fields())
   1162         assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
   1163 #endif
   1164       return;
   1165     }
   1166 
   1167     // FIXME: volatility
   1168     FieldDecl *Field = E->getInitializedFieldInUnion();
   1169 
   1170     LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
   1171     if (NumInitElements) {
   1172       // Store the initializer into the field
   1173       EmitInitializationToLValue(E->getInit(0), FieldLoc);
   1174     } else {
   1175       // Default-initialize to null.
   1176       EmitNullInitializationToLValue(FieldLoc);
   1177     }
   1178 
   1179     return;
   1180   }
   1181 
   1182   // We'll need to enter cleanup scopes in case any of the member
   1183   // initializers throw an exception.
   1184   SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
   1185   llvm::Instruction *cleanupDominator = nullptr;
   1186 
   1187   // Here we iterate over the fields; this makes it simpler to both
   1188   // default-initialize fields and skip over unnamed fields.
   1189   unsigned curInitIndex = 0;
   1190   for (const auto *field : record->fields()) {
   1191     // We're done once we hit the flexible array member.
   1192     if (field->getType()->isIncompleteArrayType())
   1193       break;
   1194 
   1195     // Always skip anonymous bitfields.
   1196     if (field->isUnnamedBitfield())
   1197       continue;
   1198 
   1199     // We're done if we reach the end of the explicit initializers, we
   1200     // have a zeroed object, and the rest of the fields are
   1201     // zero-initializable.
   1202     if (curInitIndex == NumInitElements && Dest.isZeroed() &&
   1203         CGF.getTypes().isZeroInitializable(E->getType()))
   1204       break;
   1205 
   1206 
   1207     LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    1208     // We never generate write-barriers for initialized fields.
   1209     LV.setNonGC(true);
   1210 
   1211     if (curInitIndex < NumInitElements) {
   1212       // Store the initializer into the field.
   1213       EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
   1214     } else {
    1215       // We're out of initializers; default-initialize to null.
   1216       EmitNullInitializationToLValue(LV);
   1217     }
   1218 
   1219     // Push a destructor if necessary.
   1220     // FIXME: if we have an array of structures, all explicitly
   1221     // initialized, we can end up pushing a linear number of cleanups.
   1222     bool pushedCleanup = false;
   1223     if (QualType::DestructionKind dtorKind
   1224           = field->getType().isDestructedType()) {
   1225       assert(LV.isSimple());
   1226       if (CGF.needsEHCleanup(dtorKind)) {
   1227         if (!cleanupDominator)
   1228           cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder
   1229 
   1230         CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
   1231                         CGF.getDestroyer(dtorKind), false);
   1232         cleanups.push_back(CGF.EHStack.stable_begin());
   1233         pushedCleanup = true;
   1234       }
   1235     }
   1236 
   1237     // If the GEP didn't get used because of a dead zero init or something
   1238     // else, clean it up for -O0 builds and general tidiness.
   1239     if (!pushedCleanup && LV.isSimple())
   1240       if (llvm::GetElementPtrInst *GEP =
   1241             dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
   1242         if (GEP->use_empty())
   1243           GEP->eraseFromParent();
   1244   }
   1245 
   1246   // Deactivate all the partial cleanups in reverse order, which
   1247   // generally means popping them.
   1248   for (unsigned i = cleanups.size(); i != 0; --i)
   1249     CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
   1250 
   1251   // Destroy the placeholder if we made one.
   1252   if (cleanupDominator)
   1253     cleanupDominator->eraseFromParent();
   1254 }
   1255 
   1256 //===----------------------------------------------------------------------===//
   1257 //                        Entry Points into this File
   1258 //===----------------------------------------------------------------------===//
   1259 
   1260 /// GetNumNonZeroBytesInInit - Get an approximate count of the number of
   1261 /// non-zero bytes that will be stored when outputting the initializer for the
   1262 /// specified initializer expression.
   1263 static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
   1264   E = E->IgnoreParens();
   1265 
   1266   // 0 and 0.0 won't require any non-zero stores!
   1267   if (isSimpleZero(E, CGF)) return CharUnits::Zero();
   1268 
    1269   // If this is an InitListExpr, sum up the sizes of the (present)
   1270   // elements.  If this is something weird, assume the whole thing is non-zero.
   1271   const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
   1272   if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
   1273     return CGF.getContext().getTypeSizeInChars(E->getType());
   1274 
   1275   // InitListExprs for structs have to be handled carefully.  If there are
   1276   // reference members, we need to consider the size of the reference, not the
    1277   // referent.  InitListExprs for unions and arrays can't have references.
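           // Illustrative example (hypothetical types, not from the original
           // source): for
           //
           //   struct R { const Big &ref; };
           //   R r = { someBig };
           //
           // we count pointer-width bytes for 'ref' rather than sizeof(Big).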
   1278   if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
   1279     if (!RT->isUnionType()) {
    1280       RecordDecl *SD = RT->getDecl();
   1281       CharUnits NumNonZeroBytes = CharUnits::Zero();
   1282 
   1283       unsigned ILEElement = 0;
   1284       for (const auto *Field : SD->fields()) {
   1285         // We're done once we hit the flexible array member or run out of
   1286         // InitListExpr elements.
   1287         if (Field->getType()->isIncompleteArrayType() ||
   1288             ILEElement == ILE->getNumInits())
   1289           break;
   1290         if (Field->isUnnamedBitfield())
   1291           continue;
   1292 
   1293         const Expr *E = ILE->getInit(ILEElement++);
   1294 
   1295         // Reference values are always non-null and have the width of a pointer.
   1296         if (Field->getType()->isReferenceType())
   1297           NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
   1298               CGF.getTarget().getPointerWidth(0));
   1299         else
   1300           NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
   1301       }
   1302 
   1303       return NumNonZeroBytes;
   1304     }
   1305   }
   1306 
   1307 
   1308   CharUnits NumNonZeroBytes = CharUnits::Zero();
   1309   for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
   1310     NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
   1311   return NumNonZeroBytes;
   1312 }
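         // Illustrative worked example (hypothetical type, typical target): for
         //
         //   struct P { int x[100]; };
         //   struct P p = { { 1 } };
         //
         // only the explicit '1' contributes, so the estimate is sizeof(int)
         // rather than sizeof(p), letting the caller below decide that an
         // initial memset of the whole object is worthwhile.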
   1313 
   1314 /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
   1315 /// zeros in it, emit a memset and avoid storing the individual zeros.
   1316 ///
   1317 static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
   1318                                      CodeGenFunction &CGF) {
   1319   // If the slot is already known to be zeroed, nothing to do.  Don't mess with
   1320   // volatile stores.
   1321   if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == nullptr)
   1322     return;
   1323 
    1324   // C++ objects with a user-declared constructor don't need zeroing.
   1325   if (CGF.getLangOpts().CPlusPlus)
   1326     if (const RecordType *RT = CGF.getContext()
   1327                        .getBaseElementType(E->getType())->getAs<RecordType>()) {
   1328       const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
   1329       if (RD->hasUserDeclaredConstructor())
   1330         return;
   1331     }
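           // Illustrative example (hypothetical type, not from the source): for
           //
           //   struct HasCtor { HasCtor(); int data[64]; };
           //
           // the constructor run by the initializer fills the object in itself,
           // so pre-zeroing the slot here would be wasted work.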
   1332 
    1333   // If the type is 16 bytes or smaller, prefer individual stores over memset.
   1334   std::pair<CharUnits, CharUnits> TypeInfo =
   1335     CGF.getContext().getTypeInfoInChars(E->getType());
   1336   if (TypeInfo.first <= CharUnits::fromQuantity(16))
   1337     return;
   1338 
    1339   // Check whether over 3/4 of the initializer is known to be zero.  If so,
    1340   // we prefer to emit a memset followed by individual stores for the rest.
   1341   CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
   1342   if (NumNonZeroBytes*4 > TypeInfo.first)
   1343     return;
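           // E.g. (illustrative numbers): roughly 4 non-zero bytes in a 400-byte
           // struct gives 4*4 = 16 <= 400, so we fall through to the memset below.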
   1344 
    1345   // Okay, it seems like a good idea to use an initial memset; emit the call.
   1346   llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
   1347   CharUnits Align = TypeInfo.second;
   1348 
   1349   llvm::Value *Loc = Slot.getAddr();
   1350 
   1351   Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
   1352   CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
   1353                            Align.getQuantity(), false);
   1354 
   1355   // Tell the AggExprEmitter that the slot is known zero.
   1356   Slot.setZeroed();
   1357 }
   1358 
   1359 
   1360 
   1361 
   1362 /// EmitAggExpr - Emit the computation of the specified expression of aggregate
    1363 /// type.  The result is computed into the given slot.  Note that if the slot
    1364 /// is ignored, the value of the aggregate expression is not needed; otherwise
    1365 /// the slot must provide a non-null address.
   1366 void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
   1367   assert(E && hasAggregateEvaluationKind(E->getType()) &&
   1368          "Invalid aggregate expression to emit");
   1369   assert((Slot.getAddr() != nullptr || Slot.isIgnored()) &&
   1370          "slot has bits but no address");
   1371 
   1372   // Optimize the slot if possible.
   1373   CheckAggExprForMemSetUse(Slot, E, *this);
   1374 
   1375   AggExprEmitter(*this, Slot).Visit(const_cast<Expr*>(E));
   1376 }
   1377 
   1378 LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
   1379   assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
   1380   llvm::Value *Temp = CreateMemTemp(E->getType());
   1381   LValue LV = MakeAddrLValue(Temp, E->getType());
   1382   EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
   1383                                          AggValueSlot::DoesNotNeedGCBarriers,
   1384                                          AggValueSlot::IsNotAliased));
   1385   return LV;
   1386 }
   1387 
   1388 void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
   1389                                         llvm::Value *SrcPtr, QualType Ty,
   1390                                         bool isVolatile,
   1391                                         CharUnits alignment,
   1392                                         bool isAssignment) {
   1393   assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
   1394 
   1395   if (getLangOpts().CPlusPlus) {
   1396     if (const RecordType *RT = Ty->getAs<RecordType>()) {
   1397       CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
   1398       assert((Record->hasTrivialCopyConstructor() ||
   1399               Record->hasTrivialCopyAssignment() ||
   1400               Record->hasTrivialMoveConstructor() ||
   1401               Record->hasTrivialMoveAssignment()) &&
   1402              "Trying to aggregate-copy a type without a trivial copy/move "
   1403              "constructor or assignment operator");
   1404       // Ignore empty classes in C++.
   1405       if (Record->isEmpty())
   1406         return;
   1407     }
   1408   }
   1409 
   1410   // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
   1411   // C99 6.5.16.1p3, which states "If the value being stored in an object is
    1412   // read from another object that overlaps in any way the storage of the first
   1413   // object, then the overlap shall be exact and the two objects shall have
   1414   // qualified or unqualified versions of a compatible type."
   1415   //
   1416   // memcpy is not defined if the source and destination pointers are exactly
   1417   // equal, but other compilers do this optimization, and almost every memcpy
   1418   // implementation handles this case safely.  If there is a libc that does not
   1419   // safely handle this, we can add a target hook.
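           // For illustration (not from the original source): a self-assignment
           //
           //   struct S s;  ...  s = s;
           //
           // produces a memcpy whose source and destination pointers are equal,
           // which is exactly the fully-overlapping case discussed above.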
   1420 
   1421   // Get data size and alignment info for this aggregate. If this is an
    1422   // assignment, don't copy the tail padding. Otherwise copying it is fine.
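           // Illustrative example (hypothetical type, typical 32/64-bit target):
           //
           //   struct T { int i; char c; };   // size 8, data size 5
           //
           // an assignment copies only the 5-byte data size, so storage that may
           // legitimately live in the destination's tail padding (e.g. members of
           // a derived class packed into a base subobject's padding) is untouched.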
   1423   std::pair<CharUnits, CharUnits> TypeInfo;
   1424   if (isAssignment)
   1425     TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
   1426   else
   1427     TypeInfo = getContext().getTypeInfoInChars(Ty);
   1428 
   1429   if (alignment.isZero())
   1430     alignment = TypeInfo.second;
   1431 
    1432   // FIXME: Handle variable-sized types.
   1433 
   1434   // FIXME: If we have a volatile struct, the optimizer can remove what might
   1435   // appear to be `extra' memory ops:
   1436   //
   1437   // volatile struct { int i; } a, b;
   1438   //
   1439   // int main() {
   1440   //   a = b;
   1441   //   a = b;
   1442   // }
   1443   //
   1444   // we need to use a different call here.  We use isVolatile to indicate when
   1445   // either the source or the destination is volatile.
   1446 
   1447   llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
   1448   llvm::Type *DBP =
   1449     llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
   1450   DestPtr = Builder.CreateBitCast(DestPtr, DBP);
   1451 
   1452   llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
   1453   llvm::Type *SBP =
   1454     llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
   1455   SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
   1456 
   1457   // Don't do any of the memmove_collectable tests if GC isn't set.
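           // Under Objective-C GC, copying an aggregate that contains object
           // pointers must go through the runtime (typically a call such as
           // objc_memmove_collectable) so the collector's write barriers are
           // honored; a plain memcpy would bypass them.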
   1458   if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
   1459     // fall through
   1460   } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
   1461     RecordDecl *Record = RecordTy->getDecl();
   1462     if (Record->hasObjectMember()) {
   1463       CharUnits size = TypeInfo.first;
   1464       llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
   1465       llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
   1466       CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
   1467                                                     SizeVal);
   1468       return;
   1469     }
   1470   } else if (Ty->isArrayType()) {
   1471     QualType BaseType = getContext().getBaseElementType(Ty);
   1472     if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
   1473       if (RecordTy->getDecl()->hasObjectMember()) {
   1474         CharUnits size = TypeInfo.first;
   1475         llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
   1476         llvm::Value *SizeVal =
   1477           llvm::ConstantInt::get(SizeTy, size.getQuantity());
   1478         CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
   1479                                                       SizeVal);
   1480         return;
   1481       }
   1482     }
   1483   }
   1484 
   1485   // Determine the metadata to describe the position of any padding in this
   1486   // memcpy, as well as the TBAA tags for the members of the struct, in case
    1487   // the optimizer wishes to expand it into scalar memory operations.
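           // Illustrative shape of the emitted IR (operand names, sizes, and
           // metadata ids are hypothetical):
           //
           //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 24,
           //                                        i32 8, i1 false), !tbaa.struct !4
           //
           // where !4 describes member offsets, sizes, and TBAA tags so the
           // optimizer can split the copy into scalar loads and stores.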
   1488   llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);
   1489 
   1490   Builder.CreateMemCpy(DestPtr, SrcPtr,
   1491                        llvm::ConstantInt::get(IntPtrTy,
   1492                                               TypeInfo.first.getQuantity()),
   1493                        alignment.getQuantity(), isVolatile,
   1494                        /*TBAATag=*/nullptr, TBAAStructTag);
   1495 }
   1496