      1 //===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This contains code to emit Aggregate Expr nodes as LLVM code.
     11 //
     12 //===----------------------------------------------------------------------===//
     13 
     14 #include "CodeGenFunction.h"
     15 #include "CGObjCRuntime.h"
     16 #include "CodeGenModule.h"
     17 #include "clang/AST/ASTContext.h"
     18 #include "clang/AST/DeclCXX.h"
     19 #include "clang/AST/DeclTemplate.h"
     20 #include "clang/AST/StmtVisitor.h"
     21 #include "llvm/IR/Constants.h"
     22 #include "llvm/IR/Function.h"
     23 #include "llvm/IR/GlobalVariable.h"
     24 #include "llvm/IR/Intrinsics.h"
     25 using namespace clang;
     26 using namespace CodeGen;
     27 
     28 //===----------------------------------------------------------------------===//
     29 //                        Aggregate Expression Emitter
     30 //===----------------------------------------------------------------------===//
     31 
     32 namespace  {
     33 class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     34   CodeGenFunction &CGF;
     35   CGBuilderTy &Builder;
     36   AggValueSlot Dest;
     37   bool IsResultUnused;
     38 
     39   /// We want to use 'dest' as the return slot except under two
     40   /// conditions:
     41   ///   - The destination slot requires garbage collection, so we
     42   ///     need to use the GC API.
     43   ///   - The destination slot is potentially aliased.
     44   bool shouldUseDestForReturnSlot() const {
     45     return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
     46   }
     47 
     48   ReturnValueSlot getReturnValueSlot() const {
     49     if (!shouldUseDestForReturnSlot())
     50       return ReturnValueSlot();
     51 
     52     return ReturnValueSlot(Dest.getAddress(), Dest.isVolatile(),
     53                            IsResultUnused);
     54   }
     55 
     56   AggValueSlot EnsureSlot(QualType T) {
     57     if (!Dest.isIgnored()) return Dest;
     58     return CGF.CreateAggTemp(T, "agg.tmp.ensured");
     59   }
     60   void EnsureDest(QualType T) {
     61     if (!Dest.isIgnored()) return;
     62     Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
     63   }
     64 
     65 public:
     66   AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
     67     : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
     68     IsResultUnused(IsResultUnused) { }
     69 
     70   //===--------------------------------------------------------------------===//
     71   //                               Utilities
     72   //===--------------------------------------------------------------------===//
     73 
     74   /// EmitAggLoadOfLValue - Given an expression with aggregate type that
     75   /// represents a value lvalue, this method emits the address of the lvalue,
     76   /// then loads the result into DestPtr.
     77   void EmitAggLoadOfLValue(const Expr *E);
     78 
     79   /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
     80   void EmitFinalDestCopy(QualType type, const LValue &src);
     81   void EmitFinalDestCopy(QualType type, RValue src);
     82   void EmitCopy(QualType type, const AggValueSlot &dest,
     83                 const AggValueSlot &src);
     84 
     85   void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
     86 
     87   void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
     88                      QualType elementType, InitListExpr *E);
     89 
     90   AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
     91     if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
     92       return AggValueSlot::NeedsGCBarriers;
     93     return AggValueSlot::DoesNotNeedGCBarriers;
     94   }
     95 
     96   bool TypeRequiresGCollection(QualType T);
     97 
     98   //===--------------------------------------------------------------------===//
     99   //                            Visitor Methods
    100   //===--------------------------------------------------------------------===//
    101 
    102   void Visit(Expr *E) {
    103     ApplyDebugLocation DL(CGF, E);
    104     StmtVisitor<AggExprEmitter>::Visit(E);
    105   }
    106 
    107   void VisitStmt(Stmt *S) {
    108     CGF.ErrorUnsupported(S, "aggregate expression");
    109   }
    110   void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
    111   void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    112     Visit(GE->getResultExpr());
    113   }
    114   void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
    115   void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    116     return Visit(E->getReplacement());
    117   }
    118 
    119   // l-values.
    120   void VisitDeclRefExpr(DeclRefExpr *E) {
    121     // For aggregates, we should always be able to emit the variable
    122     // as an l-value unless it's a reference.  This is due to the fact
    123     // that we can't actually ever see a normal l2r conversion on an
    124     // aggregate in C++, and in C there's no language standard
    125     // actively preventing us from listing variables in the captures
    126     // list of a block.
    127     if (E->getDecl()->getType()->isReferenceType()) {
    128       if (CodeGenFunction::ConstantEmission result
    129             = CGF.tryEmitAsConstant(E)) {
    130         EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
    131         return;
    132       }
    133     }
    134 
    135     EmitAggLoadOfLValue(E);
    136   }
    137 
    138   void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
    139   void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
    140   void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
    141   void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
    142   void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    143     EmitAggLoadOfLValue(E);
    144   }
    145   void VisitPredefinedExpr(const PredefinedExpr *E) {
    146     EmitAggLoadOfLValue(E);
    147   }
    148 
    149   // Operators.
    150   void VisitCastExpr(CastExpr *E);
    151   void VisitCallExpr(const CallExpr *E);
    152   void VisitStmtExpr(const StmtExpr *E);
    153   void VisitBinaryOperator(const BinaryOperator *BO);
    154   void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
    155   void VisitBinAssign(const BinaryOperator *E);
    156   void VisitBinComma(const BinaryOperator *E);
    157 
    158   void VisitObjCMessageExpr(ObjCMessageExpr *E);
    159   void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    160     EmitAggLoadOfLValue(E);
    161   }
    162 
    163   void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
    164   void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
    165   void VisitChooseExpr(const ChooseExpr *CE);
    166   void VisitInitListExpr(InitListExpr *E);
    167   void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
    168   void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
    169   void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    170     Visit(DAE->getExpr());
    171   }
    172   void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    173     CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    174     Visit(DIE->getExpr());
    175   }
    176   void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
    177   void VisitCXXConstructExpr(const CXXConstructExpr *E);
    178   void VisitLambdaExpr(LambdaExpr *E);
    179   void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
    180   void VisitExprWithCleanups(ExprWithCleanups *E);
    181   void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
    182   void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
    183   void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
    184   void VisitOpaqueValueExpr(OpaqueValueExpr *E);
    185 
    186   void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    187     if (E->isGLValue()) {
    188       LValue LV = CGF.EmitPseudoObjectLValue(E);
    189       return EmitFinalDestCopy(E->getType(), LV);
    190     }
    191 
    192     CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
    193   }
    194 
    195   void VisitVAArgExpr(VAArgExpr *E);
    196 
    197   void EmitInitializationToLValue(Expr *E, LValue Address);
    198   void EmitNullInitializationToLValue(LValue Address);
    199   //  case Expr::ChooseExprClass:
    200   void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
    201   void VisitAtomicExpr(AtomicExpr *E) {
    202     RValue Res = CGF.EmitAtomicExpr(E);
    203     EmitFinalDestCopy(E->getType(), Res);
    204   }
    205 };
    206 }  // end anonymous namespace.
    207 
    208 //===----------------------------------------------------------------------===//
    209 //                                Utilities
    210 //===----------------------------------------------------------------------===//
    211 
    212 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
    213 /// represents a value lvalue, this method emits the address of the lvalue,
    214 /// then loads the result into DestPtr.
    215 void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
    216   LValue LV = CGF.EmitLValue(E);
    217 
    218   // If the type of the l-value is atomic, then do an atomic load.
    219   if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    220     CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    221     return;
    222   }
    223 
    224   EmitFinalDestCopy(E->getType(), LV);
    225 }
    226 
    227 /// \brief True if the given aggregate type requires special GC API calls.
    228 bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
    229   // Only record types have members that might require garbage collection.
    230   const RecordType *RecordTy = T->getAs<RecordType>();
    231   if (!RecordTy) return false;
    232 
    233   // Don't mess with non-trivial C++ types.
    234   RecordDecl *Record = RecordTy->getDecl();
    235   if (isa<CXXRecordDecl>(Record) &&
    236       (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
    237        !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    238     return false;
    239 
    240   // Check whether the type has an object member.
    241   return Record->hasObjectMember();
    242 }
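
         // Illustrative sketch, not part of the original source: under -fobjc-gc,
         // a plain C aggregate that embeds an Objective-C object pointer, e.g.
         //   struct Pair { id object; int count; };
         // has an object member, so copying a Pair must go through the runtime's
         // collectable memmove (EmitGCMemmoveCollectable) instead of a plain
         // memcpy.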
    243 
    244 /// \brief Perform the final move to DestPtr if for some reason
    245 /// getReturnValueSlot() didn't use it directly.
    246 ///
    247 /// The idea is that you do something like this:
    248 ///   RValue Result = EmitSomething(..., getReturnValueSlot());
    249 ///   EmitMoveFromReturnSlot(E, Result);
    250 ///
    251 /// If nothing interferes, this will cause the result to be emitted
    252 /// directly into the return value slot.  Otherwise, a final move
    253 /// will be performed.
    254 void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
    255   if (shouldUseDestForReturnSlot()) {
    256     // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    257     // The possibility of undef rvalues complicates that a lot,
    258     // though, so we can't really assert.
    259     return;
    260   }
    261 
    262   // Otherwise, copy from there to the destination.
    263   assert(Dest.getPointer() != src.getAggregatePointer());
    264   EmitFinalDestCopy(E->getType(), src);
    265 }
    266 
    267 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
    268 void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
    269   assert(src.isAggregate() && "value must be aggregate value!");
    270   LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
    271   EmitFinalDestCopy(type, srcLV);
    272 }
    273 
    274 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
    275 void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
    276   // If Dest is ignored, then we're evaluating an aggregate expression
    277   // in a context that doesn't care about the result.  Note that loads
    278   // from volatile l-values force the existence of a non-ignored
    279   // destination.
    280   if (Dest.isIgnored())
    281     return;
    282 
    283   AggValueSlot srcAgg =
    284     AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
    285                             needsGC(type), AggValueSlot::IsAliased);
    286   EmitCopy(type, Dest, srcAgg);
    287 }
    288 
    289 /// Perform a copy from the source into the destination.
    290 ///
    291 /// \param type - the type of the aggregate being copied; qualifiers are
    292 ///   ignored
    293 void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
    294                               const AggValueSlot &src) {
    295   if (dest.requiresGCollection()) {
    296     CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
    297     llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    298     CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
    299                                                       dest.getAddress(),
    300                                                       src.getAddress(),
    301                                                       size);
    302     return;
    303   }
    304 
    305   // If the result of the assignment is used, copy the LHS there also.
    306   // It's volatile if either side is.  Use the minimum alignment of
    307   // the two sides.
    308   CGF.EmitAggregateCopy(dest.getAddress(), src.getAddress(), type,
    309                         dest.isVolatile() || src.isVolatile());
    310 }
    311 
    312 /// \brief Emit the initializer for a std::initializer_list initialized with a
    313 /// real initializer list.
    314 void
    315 AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
    316   // Emit an array containing the elements.  The array is externally destructed
    317   // if the std::initializer_list object is.
    318   ASTContext &Ctx = CGF.getContext();
    319   LValue Array = CGF.EmitLValue(E->getSubExpr());
    320   assert(Array.isSimple() && "initializer_list array not a simple lvalue");
    321   Address ArrayPtr = Array.getAddress();
    322 
    323   const ConstantArrayType *ArrayType =
    324       Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
    325   assert(ArrayType && "std::initializer_list constructed from non-array");
    326 
    327   // FIXME: Perform the checks on the field types in SemaInit.
    328   RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
    329   RecordDecl::field_iterator Field = Record->field_begin();
    330   if (Field == Record->field_end()) {
    331     CGF.ErrorUnsupported(E, "weird std::initializer_list");
    332     return;
    333   }
    334 
    335   // Start pointer.
    336   if (!Field->getType()->isPointerType() ||
    337       !Ctx.hasSameType(Field->getType()->getPointeeType(),
    338                        ArrayType->getElementType())) {
    339     CGF.ErrorUnsupported(E, "weird std::initializer_list");
    340     return;
    341   }
    342 
    343   AggValueSlot Dest = EnsureSlot(E->getType());
    344   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
    345   LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
    346   llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
    347   llvm::Value *IdxStart[] = { Zero, Zero };
    348   llvm::Value *ArrayStart =
    349       Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
    350   CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
    351   ++Field;
    352 
    353   if (Field == Record->field_end()) {
    354     CGF.ErrorUnsupported(E, "weird std::initializer_list");
    355     return;
    356   }
    357 
    358   llvm::Value *Size = Builder.getInt(ArrayType->getSize());
    359   LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
    360   if (Field->getType()->isPointerType() &&
    361       Ctx.hasSameType(Field->getType()->getPointeeType(),
    362                       ArrayType->getElementType())) {
    363     // End pointer.
    364     llvm::Value *IdxEnd[] = { Zero, Size };
    365     llvm::Value *ArrayEnd =
    366         Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    367     CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
    368   } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    369     // Length.
    370     CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
    371   } else {
    372     CGF.ErrorUnsupported(E, "weird std::initializer_list");
    373     return;
    374   }
    375 }
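
         // Illustrative example (added, not in the upstream comments): for
         //   std::initializer_list<int> il = { 1, 2, 3 };
         // the sub-expression is a materialized array of three ints; the code
         // above then fills in the two fields of the initializer_list object,
         // either as a {begin, end} pointer pair or as {begin, length},
         // depending on how the library declares its second field.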
    376 
    377 /// \brief Determine if E is a trivial array filler, that is, one that is
    378 /// equivalent to zero-initialization.
    379 static bool isTrivialFiller(Expr *E) {
    380   if (!E)
    381     return true;
    382 
    383   if (isa<ImplicitValueInitExpr>(E))
    384     return true;
    385 
    386   if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    387     if (ILE->getNumInits())
    388       return false;
    389     return isTrivialFiller(ILE->getArrayFiller());
    390   }
    391 
    392   if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    393     return Cons->getConstructor()->isDefaultConstructor() &&
    394            Cons->getConstructor()->isTrivial();
    395 
    396   // FIXME: Are there other cases where we can avoid emitting an initializer?
    397   return false;
    398 }
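
         // Illustrative example (added): in
         //   int a[8] = { 1, 2 };
         // the remaining six elements are described by an ImplicitValueInitExpr
         // filler, which is trivial here, so they can be left to zero
         // initialization of the destination instead of being emitted one by one.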
    399 
    400 /// \brief Emit initialization of an array from an initializer list.
    401 void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
    402                                    QualType elementType, InitListExpr *E) {
    403   uint64_t NumInitElements = E->getNumInits();
    404 
    405   uint64_t NumArrayElements = AType->getNumElements();
    406   assert(NumInitElements <= NumArrayElements);
    407 
    408   // DestPtr is an array*.  Construct an elementType* by drilling
    409   // down a level.
    410   llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    411   llvm::Value *indices[] = { zero, zero };
    412   llvm::Value *begin =
    413     Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");
    414 
    415   CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
    416   CharUnits elementAlign =
    417     DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
    418 
    419   // Exception safety requires us to destroy all the
    420   // already-constructed members if an initializer throws.
    421   // For that, we'll need an EH cleanup.
    422   QualType::DestructionKind dtorKind = elementType.isDestructedType();
    423   Address endOfInit = Address::invalid();
    424   EHScopeStack::stable_iterator cleanup;
    425   llvm::Instruction *cleanupDominator = nullptr;
    426   if (CGF.needsEHCleanup(dtorKind)) {
    427     // In principle we could tell the cleanup where we are more
    428     // directly, but the control flow can get so varied here that it
    429     // would actually be quite complex.  Therefore we go through an
    430     // alloca.
    431     endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
    432                                      "arrayinit.endOfInit");
    433     cleanupDominator = Builder.CreateStore(begin, endOfInit);
    434     CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
    435                                          elementAlign,
    436                                          CGF.getDestroyer(dtorKind));
    437     cleanup = CGF.EHStack.stable_begin();
    438 
    439   // Otherwise, remember that we didn't need a cleanup.
    440   } else {
    441     dtorKind = QualType::DK_none;
    442   }
    443 
    444   llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
    445 
    446   // The 'current element to initialize'.  The invariants on this
    447   // variable are complicated.  Essentially, after each iteration of
    448   // the loop, it points to the last initialized element, except
    449   // that it points to the beginning of the array before any
    450   // elements have been initialized.
    451   llvm::Value *element = begin;
    452 
    453   // Emit the explicit initializers.
    454   for (uint64_t i = 0; i != NumInitElements; ++i) {
    455     // Advance to the next element.
    456     if (i > 0) {
    457       element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
    458 
    459       // Tell the cleanup that it needs to destroy up to this
    460       // element.  TODO: some of these stores can be trivially
    461       // observed to be unnecessary.
    462       if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    463     }
    464 
    465     LValue elementLV =
    466       CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    467     EmitInitializationToLValue(E->getInit(i), elementLV);
    468   }
    469 
    470   // Check whether there's a non-trivial array-fill expression.
    471   Expr *filler = E->getArrayFiller();
    472   bool hasTrivialFiller = isTrivialFiller(filler);
    473 
    474   // Any remaining elements need to be zero-initialized, possibly
     475   // using the filler expression.  We can skip this if we're
    476   // emitting to zeroed memory.
    477   if (NumInitElements != NumArrayElements &&
    478       !(Dest.isZeroed() && hasTrivialFiller &&
    479         CGF.getTypes().isZeroInitializable(elementType))) {
    480 
    481     // Use an actual loop.  This is basically
    482     //   do { *array++ = filler; } while (array != end);
    483 
    484     // Advance to the start of the rest of the array.
    485     if (NumInitElements) {
    486       element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
    487       if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    488     }
    489 
    490     // Compute the end of the array.
    491     llvm::Value *end = Builder.CreateInBoundsGEP(begin,
    492                       llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
    493                                                  "arrayinit.end");
    494 
    495     llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    496     llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
    497 
    498     // Jump into the body.
    499     CGF.EmitBlock(bodyBB);
    500     llvm::PHINode *currentElement =
    501       Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    502     currentElement->addIncoming(element, entryBB);
    503 
    504     // Emit the actual filler expression.
    505     LValue elementLV =
    506       CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
    507     if (filler)
    508       EmitInitializationToLValue(filler, elementLV);
    509     else
    510       EmitNullInitializationToLValue(elementLV);
    511 
    512     // Move on to the next element.
    513     llvm::Value *nextElement =
    514       Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
    515 
    516     // Tell the EH cleanup that we finished with the last element.
    517     if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
    518 
    519     // Leave the loop if we're done.
    520     llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
    521                                              "arrayinit.done");
    522     llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    523     Builder.CreateCondBr(done, endBB, bodyBB);
    524     currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
    525 
    526     CGF.EmitBlock(endBB);
    527   }
    528 
    529   // Leave the partial-array cleanup if we entered one.
    530   if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
    531 }
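
         // Illustrative sketch (added, not from the original source): for
         //   struct S { S(int); ~S(); };
         //   S a[4] = { S(1) };
         // the first element comes from its explicit initializer and the other
         // three from the fill loop above, with the endOfInit alloca keeping the
         // partial-array EH cleanup informed of how far construction has gotten.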
    532 
    533 //===----------------------------------------------------------------------===//
    534 //                            Visitor Methods
    535 //===----------------------------------------------------------------------===//
    536 
    537 void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
    538   Visit(E->GetTemporaryExpr());
    539 }
    540 
    541 void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
    542   EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
    543 }
    544 
    545 void
    546 AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    547   if (Dest.isPotentiallyAliased() &&
    548       E->getType().isPODType(CGF.getContext())) {
    549     // For a POD type, just emit a load of the lvalue + a copy, because our
    550     // compound literal might alias the destination.
    551     EmitAggLoadOfLValue(E);
    552     return;
    553   }
    554 
    555   AggValueSlot Slot = EnsureSlot(E->getType());
    556   CGF.EmitAggExpr(E->getInitializer(), Slot);
    557 }
    558 
    559 /// Attempt to look through various unimportant expressions to find a
    560 /// cast of the given kind.
    561 static Expr *findPeephole(Expr *op, CastKind kind) {
    562   while (true) {
    563     op = op->IgnoreParens();
    564     if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
    565       if (castE->getCastKind() == kind)
    566         return castE->getSubExpr();
    567       if (castE->getCastKind() == CK_NoOp)
    568         continue;
    569     }
    570     return nullptr;
    571   }
    572 }
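
         // Illustrative note (added): findPeephole lets the atomic cast handling
         // below collapse a conversion that is immediately undone, e.g. an
         // r-value that Sema wraps as
         //   T -> _Atomic(T) -> T
         // so the innermost operand is emitted directly when the two casts are
         // exact reverses of each other and the representations match.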
    573 
    574 void AggExprEmitter::VisitCastExpr(CastExpr *E) {
    575   if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    576     CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
    577   switch (E->getCastKind()) {
    578   case CK_Dynamic: {
    579     // FIXME: Can this actually happen? We have no test coverage for it.
    580     assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    581     LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
    582                                       CodeGenFunction::TCK_Load);
    583     // FIXME: Do we also need to handle property references here?
    584     if (LV.isSimple())
    585       CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    586     else
    587       CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
    588 
    589     if (!Dest.isIgnored())
    590       CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    591     break;
    592   }
    593 
    594   case CK_ToUnion: {
    595     // Evaluate even if the destination is ignored.
    596     if (Dest.isIgnored()) {
    597       CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
    598                       /*ignoreResult=*/true);
    599       break;
    600     }
    601 
    602     // GCC union extension
    603     QualType Ty = E->getSubExpr()->getType();
    604     Address CastPtr =
    605       Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    606     EmitInitializationToLValue(E->getSubExpr(),
    607                                CGF.MakeAddrLValue(CastPtr, Ty));
    608     break;
    609   }
    610 
    611   case CK_DerivedToBase:
    612   case CK_BaseToDerived:
    613   case CK_UncheckedDerivedToBase: {
    614     llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
    615                 "should have been unpacked before we got here");
    616   }
    617 
    618   case CK_NonAtomicToAtomic:
    619   case CK_AtomicToNonAtomic: {
    620     bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
    621 
    622     // Determine the atomic and value types.
    623     QualType atomicType = E->getSubExpr()->getType();
    624     QualType valueType = E->getType();
    625     if (isToAtomic) std::swap(atomicType, valueType);
    626 
    627     assert(atomicType->isAtomicType());
    628     assert(CGF.getContext().hasSameUnqualifiedType(valueType,
    629                           atomicType->castAs<AtomicType>()->getValueType()));
    630 
    631     // Just recurse normally if we're ignoring the result or the
    632     // atomic type doesn't change representation.
    633     if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
    634       return Visit(E->getSubExpr());
    635     }
    636 
    637     CastKind peepholeTarget =
    638       (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
    639 
    640     // These two cases are reverses of each other; try to peephole them.
    641     if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
    642       assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
    643                                                      E->getType()) &&
    644            "peephole significantly changed types?");
    645       return Visit(op);
    646     }
    647 
    648     // If we're converting an r-value of non-atomic type to an r-value
    649     // of atomic type, just emit directly into the relevant sub-object.
    650     if (isToAtomic) {
    651       AggValueSlot valueDest = Dest;
    652       if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
     653         // Zero-initialize.  (Strictly speaking, we only need to initialize
    654         // the padding at the end, but this is simpler.)
    655         if (!Dest.isZeroed())
    656           CGF.EmitNullInitialization(Dest.getAddress(), atomicType);
    657 
    658         // Build a GEP to refer to the subobject.
    659         Address valueAddr =
    660             CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
    661                                         CharUnits());
    662         valueDest = AggValueSlot::forAddr(valueAddr,
    663                                           valueDest.getQualifiers(),
    664                                           valueDest.isExternallyDestructed(),
    665                                           valueDest.requiresGCollection(),
    666                                           valueDest.isPotentiallyAliased(),
    667                                           AggValueSlot::IsZeroed);
    668       }
    669 
    670       CGF.EmitAggExpr(E->getSubExpr(), valueDest);
    671       return;
    672     }
    673 
    674     // Otherwise, we're converting an atomic type to a non-atomic type.
    675     // Make an atomic temporary, emit into that, and then copy the value out.
    676     AggValueSlot atomicSlot =
    677       CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    678     CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
    679 
    680     Address valueAddr =
    681       Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
    682     RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    683     return EmitFinalDestCopy(valueType, rvalue);
    684   }
    685 
    686   case CK_LValueToRValue:
    687     // If we're loading from a volatile type, force the destination
    688     // into existence.
    689     if (E->getSubExpr()->getType().isVolatileQualified()) {
    690       EnsureDest(E->getType());
    691       return Visit(E->getSubExpr());
    692     }
    693 
    694     // fallthrough
    695 
    696   case CK_NoOp:
    697   case CK_UserDefinedConversion:
    698   case CK_ConstructorConversion:
    699     assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
    700                                                    E->getType()) &&
    701            "Implicit cast types must be compatible");
    702     Visit(E->getSubExpr());
    703     break;
    704 
    705   case CK_LValueBitCast:
    706     llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
    707 
    708   case CK_Dependent:
    709   case CK_BitCast:
    710   case CK_ArrayToPointerDecay:
    711   case CK_FunctionToPointerDecay:
    712   case CK_NullToPointer:
    713   case CK_NullToMemberPointer:
    714   case CK_BaseToDerivedMemberPointer:
    715   case CK_DerivedToBaseMemberPointer:
    716   case CK_MemberPointerToBoolean:
    717   case CK_ReinterpretMemberPointer:
    718   case CK_IntegralToPointer:
    719   case CK_PointerToIntegral:
    720   case CK_PointerToBoolean:
    721   case CK_ToVoid:
    722   case CK_VectorSplat:
    723   case CK_IntegralCast:
    724   case CK_IntegralToBoolean:
    725   case CK_IntegralToFloating:
    726   case CK_FloatingToIntegral:
    727   case CK_FloatingToBoolean:
    728   case CK_FloatingCast:
    729   case CK_CPointerToObjCPointerCast:
    730   case CK_BlockPointerToObjCPointerCast:
    731   case CK_AnyPointerToBlockPointerCast:
    732   case CK_ObjCObjectLValueCast:
    733   case CK_FloatingRealToComplex:
    734   case CK_FloatingComplexToReal:
    735   case CK_FloatingComplexToBoolean:
    736   case CK_FloatingComplexCast:
    737   case CK_FloatingComplexToIntegralComplex:
    738   case CK_IntegralRealToComplex:
    739   case CK_IntegralComplexToReal:
    740   case CK_IntegralComplexToBoolean:
    741   case CK_IntegralComplexCast:
    742   case CK_IntegralComplexToFloatingComplex:
    743   case CK_ARCProduceObject:
    744   case CK_ARCConsumeObject:
    745   case CK_ARCReclaimReturnedObject:
    746   case CK_ARCExtendBlockObject:
    747   case CK_CopyAndAutoreleaseBlockObject:
    748   case CK_BuiltinFnToFnPtr:
    749   case CK_ZeroToOCLEvent:
    750   case CK_AddressSpaceConversion:
    751     llvm_unreachable("cast kind invalid for aggregate types");
    752   }
    753 }
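
         // Illustrative example (added): the CK_ToUnion case above implements the
         // GCC cast-to-union extension, e.g.
         //   union U { int i; float f; };
         //   union U u = (union U)42;   // initializes u.i
         // by bitcasting the destination to the operand's type and initializing
         // that view of the union.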
    754 
    755 void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
    756   if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    757     EmitAggLoadOfLValue(E);
    758     return;
    759   }
    760 
    761   RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
    762   EmitMoveFromReturnSlot(E, RV);
    763 }
    764 
    765 void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
    766   RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
    767   EmitMoveFromReturnSlot(E, RV);
    768 }
    769 
    770 void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
    771   CGF.EmitIgnoredExpr(E->getLHS());
    772   Visit(E->getRHS());
    773 }
    774 
    775 void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
    776   CodeGenFunction::StmtExprEvaluation eval(CGF);
    777   CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
    778 }
    779 
    780 void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
    781   if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    782     VisitPointerToDataMemberBinaryOperator(E);
    783   else
    784     CGF.ErrorUnsupported(E, "aggregate binary expression");
    785 }
    786 
    787 void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
    788                                                     const BinaryOperator *E) {
    789   LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
    790   EmitFinalDestCopy(E->getType(), LV);
    791 }
    792 
    793 /// Is the value of the given expression possibly a reference to or
    794 /// into a __block variable?
    795 static bool isBlockVarRef(const Expr *E) {
    796   // Make sure we look through parens.
    797   E = E->IgnoreParens();
    798 
    799   // Check for a direct reference to a __block variable.
    800   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    801     const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    802     return (var && var->hasAttr<BlocksAttr>());
    803   }
    804 
    805   // More complicated stuff.
    806 
    807   // Binary operators.
    808   if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    809     // For an assignment or pointer-to-member operation, just care
    810     // about the LHS.
    811     if (op->isAssignmentOp() || op->isPtrMemOp())
    812       return isBlockVarRef(op->getLHS());
    813 
    814     // For a comma, just care about the RHS.
    815     if (op->getOpcode() == BO_Comma)
    816       return isBlockVarRef(op->getRHS());
    817 
    818     // FIXME: pointer arithmetic?
    819     return false;
    820 
    821   // Check both sides of a conditional operator.
    822   } else if (const AbstractConditionalOperator *op
    823                = dyn_cast<AbstractConditionalOperator>(E)) {
    824     return isBlockVarRef(op->getTrueExpr())
    825         || isBlockVarRef(op->getFalseExpr());
    826 
    827   // OVEs are required to support BinaryConditionalOperators.
    828   } else if (const OpaqueValueExpr *op
    829                = dyn_cast<OpaqueValueExpr>(E)) {
    830     if (const Expr *src = op->getSourceExpr())
    831       return isBlockVarRef(src);
    832 
    833   // Casts are necessary to get things like (*(int*)&var) = foo().
    834   // We don't really care about the kind of cast here, except
    835   // we don't want to look through l2r casts, because it's okay
    836   // to get the *value* in a __block variable.
    837   } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    838     if (cast->getCastKind() == CK_LValueToRValue)
    839       return false;
    840     return isBlockVarRef(cast->getSubExpr());
    841 
    842   // Handle unary operators.  Again, just aggressively look through
    843   // it, ignoring the operation.
    844   } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    845     return isBlockVarRef(uop->getSubExpr());
    846 
    847   // Look into the base of a field access.
    848   } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    849     return isBlockVarRef(mem->getBase());
    850 
    851   // Look into the base of a subscript.
    852   } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    853     return isBlockVarRef(sub->getBase());
    854   }
    855 
    856   return false;
    857 }
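
         // Illustrative sketch (added): isBlockVarRef is asking whether the LHS in
         //   __block struct Big b;
         //   b = compute();   // compute() may trigger a Block_copy that moves b
         // might live in a __block variable, in which case VisitBinAssign below
         // evaluates the RHS first so the store lands in the variable's final
         // home rather than a stale stack copy.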
    858 
    859 void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
    860   // For an assignment to work, the value on the right has
    861   // to be compatible with the value on the left.
    862   assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
    863                                                  E->getRHS()->getType())
    864          && "Invalid assignment");
    865 
    866   // If the LHS might be a __block variable, and the RHS can
    867   // potentially cause a block copy, we need to evaluate the RHS first
    868   // so that the assignment goes the right place.
    869   // This is pretty semantically fragile.
    870   if (isBlockVarRef(E->getLHS()) &&
    871       E->getRHS()->HasSideEffects(CGF.getContext())) {
    872     // Ensure that we have a destination, and evaluate the RHS into that.
    873     EnsureDest(E->getRHS()->getType());
    874     Visit(E->getRHS());
    875 
    876     // Now emit the LHS and copy into it.
    877     LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    878 
    879     // That copy is an atomic copy if the LHS is atomic.
    880     if (LHS.getType()->isAtomicType() ||
    881         CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    882       CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    883       return;
    884     }
    885 
    886     EmitCopy(E->getLHS()->getType(),
    887              AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
    888                                      needsGC(E->getLHS()->getType()),
    889                                      AggValueSlot::IsAliased),
    890              Dest);
    891     return;
    892   }
    893 
    894   LValue LHS = CGF.EmitLValue(E->getLHS());
    895 
    896   // If we have an atomic type, evaluate into the destination and then
    897   // do an atomic copy.
    898   if (LHS.getType()->isAtomicType() ||
    899       CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    900     EnsureDest(E->getRHS()->getType());
    901     Visit(E->getRHS());
    902     CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    903     return;
    904   }
    905 
    906   // Codegen the RHS so that it stores directly into the LHS.
    907   AggValueSlot LHSSlot =
    908     AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
    909                             needsGC(E->getLHS()->getType()),
    910                             AggValueSlot::IsAliased);
     911   // A non-volatile aggregate destination might have a volatile member.
    912   if (!LHSSlot.isVolatile() &&
    913       CGF.hasVolatileMember(E->getLHS()->getType()))
    914     LHSSlot.setVolatile(true);
    915 
    916   CGF.EmitAggExpr(E->getRHS(), LHSSlot);
    917 
    918   // Copy into the destination if the assignment isn't ignored.
    919   EmitFinalDestCopy(E->getType(), LHS);
    920 }
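
         // Illustrative example (added): for a plain aggregate assignment such as
         //   struct S { int x[16]; } a, b;
         //   a = b;
         // the RHS is emitted directly into an AggValueSlot wrapping the LHS
         // l-value, so no extra temporary is created unless atomics or __block
         // variables force one of the more careful paths above.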
    921 
    922 void AggExprEmitter::
    923 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
    924   llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
    925   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
    926   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
    927 
    928   // Bind the common expression if necessary.
    929   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
    930 
    931   CodeGenFunction::ConditionalEvaluation eval(CGF);
    932   CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
    933                            CGF.getProfileCount(E));
    934 
    935   // Save whether the destination's lifetime is externally managed.
    936   bool isExternallyDestructed = Dest.isExternallyDestructed();
    937 
    938   eval.begin(CGF);
    939   CGF.EmitBlock(LHSBlock);
    940   CGF.incrementProfileCounter(E);
    941   Visit(E->getTrueExpr());
    942   eval.end(CGF);
    943 
    944   assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
    945   CGF.Builder.CreateBr(ContBlock);
    946 
    947   // If the result of an agg expression is unused, then the emission
    948   // of the LHS might need to create a destination slot.  That's fine
    949   // with us, and we can safely emit the RHS into the same slot, but
    950   // we shouldn't claim that it's already being destructed.
    951   Dest.setExternallyDestructed(isExternallyDestructed);
    952 
    953   eval.begin(CGF);
    954   CGF.EmitBlock(RHSBlock);
    955   Visit(E->getFalseExpr());
    956   eval.end(CGF);
    957 
    958   CGF.EmitBlock(ContBlock);
    959 }
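
         // Illustrative example (added): in
         //   S s = cond ? makeA() : makeB();
         // both arms are emitted into the same destination slot along separate
         // basic blocks; only the branch on 'cond' selects which store runs, so
         // no phi of aggregate values is needed.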
    960 
    961 void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
    962   Visit(CE->getChosenSubExpr());
    963 }
    964 
    965 void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
    966   Address ArgValue = Address::invalid();
    967   Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
    968 
    969   if (!ArgPtr.isValid()) {
    970     // If EmitVAArg fails, we fall back to the LLVM instruction.
    971     llvm::Value *Val = Builder.CreateVAArg(ArgValue.getPointer(),
    972                                            CGF.ConvertType(VE->getType()));
    973     if (!Dest.isIgnored())
    974       Builder.CreateStore(Val, Dest.getAddress());
    975     return;
    976   }
    977 
    978   EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
    979 }
    980 
    981 void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
    982   // Ensure that we have a slot, but if we already do, remember
    983   // whether it was externally destructed.
    984   bool wasExternallyDestructed = Dest.isExternallyDestructed();
    985   EnsureDest(E->getType());
    986 
    987   // We're going to push a destructor if there isn't already one.
    988   Dest.setExternallyDestructed();
    989 
    990   Visit(E->getSubExpr());
    991 
    992   // Push that destructor we promised.
    993   if (!wasExternallyDestructed)
    994     CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
    995 }
    996 
    997 void
    998 AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
    999   AggValueSlot Slot = EnsureSlot(E->getType());
   1000   CGF.EmitCXXConstructExpr(E, Slot);
   1001 }
   1002 
   1003 void
   1004 AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
   1005   AggValueSlot Slot = EnsureSlot(E->getType());
   1006   CGF.EmitLambdaExpr(E, Slot);
   1007 }
   1008 
   1009 void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
   1010   CGF.enterFullExpression(E);
   1011   CodeGenFunction::RunCleanupsScope cleanups(CGF);
   1012   Visit(E->getSubExpr());
   1013 }
   1014 
   1015 void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
   1016   QualType T = E->getType();
   1017   AggValueSlot Slot = EnsureSlot(T);
   1018   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
   1019 }
   1020 
   1021 void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
   1022   QualType T = E->getType();
   1023   AggValueSlot Slot = EnsureSlot(T);
   1024   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
   1025 }
   1026 
   1027 /// isSimpleZero - If emitting this value will obviously just cause a store of
   1028 /// zero to memory, return true.  This can return false if uncertain, so it just
   1029 /// handles simple cases.
   1030 static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
   1031   E = E->IgnoreParens();
   1032 
   1033   // 0
   1034   if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
   1035     return IL->getValue() == 0;
   1036   // +0.0
   1037   if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
   1038     return FL->getValue().isPosZero();
   1039   // int()
   1040   if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
   1041       CGF.getTypes().isZeroInitializable(E->getType()))
   1042     return true;
   1043   // (int*)0 - Null pointer expressions.
   1044   if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
   1045     return ICE->getCastKind() == CK_NullToPointer;
   1046   // '\0'
   1047   if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
   1048     return CL->getValue() == 0;
   1049 
   1050   // Otherwise, hard case: conservatively return false.
   1051   return false;
   1052 }
   1053 
   1054 
   1055 void
   1056 AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
   1057   QualType type = LV.getType();
   1058   // FIXME: Ignore result?
   1059   // FIXME: Are initializers affected by volatile?
   1060   if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
   1061     // Storing "i32 0" to a zero'd memory location is a noop.
   1062     return;
   1063   } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
   1064     return EmitNullInitializationToLValue(LV);
   1065   } else if (isa<NoInitExpr>(E)) {
   1066     // Do nothing.
   1067     return;
   1068   } else if (type->isReferenceType()) {
   1069     RValue RV = CGF.EmitReferenceBindingToExpr(E);
   1070     return CGF.EmitStoreThroughLValue(RV, LV);
   1071   }
   1072 
   1073   switch (CGF.getEvaluationKind(type)) {
   1074   case TEK_Complex:
   1075     CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
   1076     return;
   1077   case TEK_Aggregate:
   1078     CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
   1079                                                AggValueSlot::IsDestructed,
   1080                                       AggValueSlot::DoesNotNeedGCBarriers,
   1081                                                AggValueSlot::IsNotAliased,
   1082                                                Dest.isZeroed()));
   1083     return;
   1084   case TEK_Scalar:
   1085     if (LV.isSimple()) {
   1086       CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
   1087     } else {
   1088       CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
   1089     }
   1090     return;
   1091   }
   1092   llvm_unreachable("bad evaluation kind");
   1093 }
   1094 
   1095 void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
   1096   QualType type = lv.getType();
   1097 
   1098   // If the destination slot is already zeroed out before the aggregate is
   1099   // copied into it, we don't have to emit any zeros here.
   1100   if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
   1101     return;
   1102 
   1103   if (CGF.hasScalarEvaluationKind(type)) {
   1104     // For non-aggregates, we can store the appropriate null constant.
   1105     llvm::Value *null = CGF.CGM.EmitNullConstant(type);
   1106     // Note that the following is not equivalent to
   1107     // EmitStoreThroughBitfieldLValue for ARC types.
   1108     if (lv.isBitField()) {
   1109       CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
   1110     } else {
   1111       assert(lv.isSimple());
   1112       CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
   1113     }
   1114   } else {
   1115     // There's a potential optimization opportunity in combining
   1116     // memsets; that would be easy for arrays, but relatively
   1117     // difficult for structures with the current code.
   1118     CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
   1119   }
   1120 }
   1121 
   1122 void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
   1123 #if 0
   1124   // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
   1125   // (Length of globals? Chunks of zeroed-out space?).
   1126   //
   1127   // If we can, prefer a copy from a global; this is a lot less code for long
   1128   // globals, and it's easier for the current optimizers to analyze.
   1129   if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
   1130     llvm::GlobalVariable* GV =
   1131     new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
   1132                              llvm::GlobalValue::InternalLinkage, C, "");
   1133     EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
   1134     return;
   1135   }
   1136 #endif
   1137   if (E->hadArrayRangeDesignator())
   1138     CGF.ErrorUnsupported(E, "GNU array range designator extension");
   1139 
   1140   AggValueSlot Dest = EnsureSlot(E->getType());
   1141 
   1142   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
   1143 
   1144   // Handle initialization of an array.
   1145   if (E->getType()->isArrayType()) {
   1146     if (E->isStringLiteralInit())
   1147       return Visit(E->getInit(0));
   1148 
   1149     QualType elementType =
   1150         CGF.getContext().getAsArrayType(E->getType())->getElementType();
   1151 
   1152     auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
   1153     EmitArrayInit(Dest.getAddress(), AType, elementType, E);
   1154     return;
   1155   }
   1156 
   1157   if (E->getType()->isAtomicType()) {
   1158     // An _Atomic(T) object can be list-initialized from an expression
   1159     // of the same type.
   1160     assert(E->getNumInits() == 1 &&
   1161            CGF.getContext().hasSameUnqualifiedType(E->getInit(0)->getType(),
   1162                                                    E->getType()) &&
   1163            "unexpected list initialization for atomic object");
   1164     return Visit(E->getInit(0));
   1165   }
   1166 
   1167   assert(E->getType()->isRecordType() && "Only support structs/unions here!");
   1168 
   1169   // Do struct initialization; this code just sets each individual member
    1170   // to the appropriate value.  This makes bitfield support automatic;
   1171   // the disadvantage is that the generated code is more difficult for
   1172   // the optimizer, especially with bitfields.
   1173   unsigned NumInitElements = E->getNumInits();
   1174   RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
   1175 
   1176   // Prepare a 'this' for CXXDefaultInitExprs.
   1177   CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
   1178 
   1179   if (record->isUnion()) {
   1180     // Only initialize one field of a union. The field itself is
   1181     // specified by the initializer list.
   1182     if (!E->getInitializedFieldInUnion()) {
   1183       // Empty union; we have nothing to do.
   1184 
   1185 #ifndef NDEBUG
    1186       // Make sure that it's really an empty union and not a failure of
   1187       // semantic analysis.
   1188       for (const auto *Field : record->fields())
   1189         assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
   1190 #endif
   1191       return;
   1192     }
   1193 
   1194     // FIXME: volatility
   1195     FieldDecl *Field = E->getInitializedFieldInUnion();
   1196 
   1197     LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
   1198     if (NumInitElements) {
   1199       // Store the initializer into the field
   1200       EmitInitializationToLValue(E->getInit(0), FieldLoc);
   1201     } else {
   1202       // Default-initialize to null.
   1203       EmitNullInitializationToLValue(FieldLoc);
   1204     }
   1205 
   1206     return;
   1207   }
   1208 
   1209   // We'll need to enter cleanup scopes in case any of the member
   1210   // initializers throw an exception.
   1211   SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
   1212   llvm::Instruction *cleanupDominator = nullptr;
   1213 
   1214   // Here we iterate over the fields; this makes it simpler to both
   1215   // default-initialize fields and skip over unnamed fields.
   1216   unsigned curInitIndex = 0;
   1217   for (const auto *field : record->fields()) {
   1218     // We're done once we hit the flexible array member.
   1219     if (field->getType()->isIncompleteArrayType())
   1220       break;
   1221 
   1222     // Always skip anonymous bitfields.
   1223     if (field->isUnnamedBitfield())
   1224       continue;
   1225 
   1226     // We're done if we reach the end of the explicit initializers, we
   1227     // have a zeroed object, and the rest of the fields are
   1228     // zero-initializable.
   1229     if (curInitIndex == NumInitElements && Dest.isZeroed() &&
   1230         CGF.getTypes().isZeroInitializable(E->getType()))
   1231       break;
   1232 
   1233 
   1234     LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    1235     // We never generate write-barriers for initialized fields.
   1236     LV.setNonGC(true);
   1237 
   1238     if (curInitIndex < NumInitElements) {
   1239       // Store the initializer into the field.
   1240       EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
   1241     } else {
    1242       // We're out of initializers; default-initialize to null
   1243       EmitNullInitializationToLValue(LV);
   1244     }
   1245 
   1246     // Push a destructor if necessary.
   1247     // FIXME: if we have an array of structures, all explicitly
   1248     // initialized, we can end up pushing a linear number of cleanups.
   1249     bool pushedCleanup = false;
   1250     if (QualType::DestructionKind dtorKind
   1251           = field->getType().isDestructedType()) {
   1252       assert(LV.isSimple());
   1253       if (CGF.needsEHCleanup(dtorKind)) {
   1254         if (!cleanupDominator)
   1255           cleanupDominator = CGF.Builder.CreateAlignedLoad(
   1256               CGF.Int8Ty,
   1257               llvm::Constant::getNullValue(CGF.Int8PtrTy),
   1258               CharUnits::One()); // placeholder
   1259 
   1260         CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
   1261                         CGF.getDestroyer(dtorKind), false);
   1262         cleanups.push_back(CGF.EHStack.stable_begin());
   1263         pushedCleanup = true;
   1264       }
   1265     }
   1266 
   1267     // If the GEP didn't get used because of a dead zero init or something
   1268     // else, clean it up for -O0 builds and general tidiness.
   1269     if (!pushedCleanup && LV.isSimple())
   1270       if (llvm::GetElementPtrInst *GEP =
   1271             dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
   1272         if (GEP->use_empty())
   1273           GEP->eraseFromParent();
   1274   }
   1275 
   1276   // Deactivate all the partial cleanups in reverse order, which
   1277   // generally means popping them.
   1278   for (unsigned i = cleanups.size(); i != 0; --i)
   1279     CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
   1280 
   1281   // Destroy the placeholder if we made one.
   1282   if (cleanupDominator)
   1283     cleanupDominator->eraseFromParent();
   1284 }
   1285 
   1286 void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
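          // A DesignatedInitUpdateExpr pairs a base initializer with an updater
          // InitListExpr that overrides selected subobjects, e.g. (illustrative)
          // `struct S s = { .inner = makeInner(), .inner.x = 3 };`.  Emit the base
          // into the destination first, then replay the updater on top of it.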
   1287   AggValueSlot Dest = EnsureSlot(E->getType());
   1288 
   1289   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
   1290   EmitInitializationToLValue(E->getBase(), DestLV);
   1291   VisitInitListExpr(E->getUpdater());
   1292 }
   1293 
   1294 //===----------------------------------------------------------------------===//
   1295 //                        Entry Points into this File
   1296 //===----------------------------------------------------------------------===//
   1297 
   1298 /// GetNumNonZeroBytesInInit - Get an approximate count of the number of
   1299 /// non-zero bytes that will be stored when outputting the initializer for the
   1300 /// specified initializer expression.
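        /// For example (illustrative): `int arr[100] = { 1, 2 };` yields roughly 8
        /// non-zero bytes out of 400, which lets the caller decide to memset first
        /// and then store only the non-zero elements.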
   1301 static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
   1302   E = E->IgnoreParens();
   1303 
   1304   // 0 and 0.0 won't require any non-zero stores!
   1305   if (isSimpleZero(E, CGF)) return CharUnits::Zero();
   1306 
   1307   // If this is an initlist expr, sum up the sizes of the (present)
   1308   // elements.  If this is something weird, assume the whole thing is non-zero.
   1309   const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
   1310   if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
   1311     return CGF.getContext().getTypeSizeInChars(E->getType());
   1312 
   1313   // InitListExprs for structs have to be handled carefully.  If there are
   1314   // reference members, we need to consider the size of the reference, not the
   1315   // referent.  InitListExprs for unions and arrays can't have references.
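          // For example (illustrative): in `struct S { char c; int &r; };` an
          // initializer for 'r' contributes pointer-width bytes, not sizeof(int).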
   1316   if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
   1317     if (!RT->isUnionType()) {
   1318       RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
   1319       CharUnits NumNonZeroBytes = CharUnits::Zero();
   1320 
   1321       unsigned ILEElement = 0;
   1322       for (const auto *Field : SD->fields()) {
   1323         // We're done once we hit the flexible array member or run out of
   1324         // InitListExpr elements.
   1325         if (Field->getType()->isIncompleteArrayType() ||
   1326             ILEElement == ILE->getNumInits())
   1327           break;
   1328         if (Field->isUnnamedBitfield())
   1329           continue;
   1330 
   1331         const Expr *E = ILE->getInit(ILEElement++);
   1332 
   1333         // Reference values are always non-null and have the width of a pointer.
   1334         if (Field->getType()->isReferenceType())
   1335           NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
   1336               CGF.getTarget().getPointerWidth(0));
   1337         else
   1338           NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
   1339       }
   1340 
   1341       return NumNonZeroBytes;
   1342     }
   1343   }
   1344 
   1345 
   1346   CharUnits NumNonZeroBytes = CharUnits::Zero();
   1347   for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
   1348     NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
   1349   return NumNonZeroBytes;
   1350 }
   1351 
   1352 /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
   1353 /// zeros in it, emit a memset and avoid storing the individual zeros.
   1354 ///
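        /// For example (illustrative): `int arr[1024] = { 1 };` is a 4096-byte object
        /// with only a few non-zero bytes; one memset of zero followed by a single
        /// scalar store is far cheaper than 1024 individual stores.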
   1355 static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
   1356                                      CodeGenFunction &CGF) {
   1357   // If the slot is already known to be zeroed, nothing to do.  Don't mess with
   1358   // volatile stores.
   1359   if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
   1360     return;
   1361 
   1362   // C++ objects with a user-declared constructor don't need zeroing.
   1363   if (CGF.getLangOpts().CPlusPlus)
   1364     if (const RecordType *RT = CGF.getContext()
   1365                        .getBaseElementType(E->getType())->getAs<RecordType>()) {
   1366       const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
   1367       if (RD->hasUserDeclaredConstructor())
   1368         return;
   1369     }
   1370 
   1371   // If the type is 16 bytes or smaller, prefer individual stores over memset.
   1372   CharUnits Size = CGF.getContext().getTypeSizeInChars(E->getType());
   1373   if (Size <= CharUnits::fromQuantity(16))
   1374     return;
   1375 
   1376   // Check whether at least 3/4 of the initializer is known to be zero.  If so,
   1377   // we prefer to emit a memset plus individual stores for the rest.
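          // For example (illustrative): a 400-byte aggregate with 8 non-zero bytes
          // passes this test (8*4 <= 400), so the memset below is emitted.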
   1378   CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
   1379   if (NumNonZeroBytes*4 > Size)
   1380     return;
   1381 
   1382   // Okay, it seems like a good idea to use an initial memset; emit the call.
   1383   llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
   1384 
   1385   Address Loc = Slot.getAddress();
   1386   Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
   1387   CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
   1388 
   1389   // Tell the AggExprEmitter that the slot is known zero.
   1390   Slot.setZeroed();
   1391 }
   1392 
   1393 
   1394 
   1395 
   1396 /// EmitAggExpr - Emit the computation of the specified expression of aggregate
   1397 /// type.  The result is computed into the given slot.  Note that if the slot
   1398 /// is ignored, the value of the aggregate expression is not needed; otherwise
   1399 /// the slot must provide a valid address.
   1400 void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
   1401   assert(E && hasAggregateEvaluationKind(E->getType()) &&
   1402          "Invalid aggregate expression to emit");
   1403   assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
   1404          "slot has bits but no address");
   1405 
   1406   // Optimize the slot if possible.
   1407   CheckAggExprForMemSetUse(Slot, E, *this);
   1408 
   1409   AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
   1410 }
   1411 
   1412 LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
   1413   assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
   1414   Address Temp = CreateMemTemp(E->getType());
   1415   LValue LV = MakeAddrLValue(Temp, E->getType());
   1416   EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
   1417                                          AggValueSlot::DoesNotNeedGCBarriers,
   1418                                          AggValueSlot::IsNotAliased));
   1419   return LV;
   1420 }
   1421 
   1422 void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
   1423                                         Address SrcPtr, QualType Ty,
   1424                                         bool isVolatile,
   1425                                         bool isAssignment) {
   1426   assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
   1427 
   1428   if (getLangOpts().CPlusPlus) {
   1429     if (const RecordType *RT = Ty->getAs<RecordType>()) {
   1430       CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
   1431       assert((Record->hasTrivialCopyConstructor() ||
   1432               Record->hasTrivialCopyAssignment() ||
   1433               Record->hasTrivialMoveConstructor() ||
   1434               Record->hasTrivialMoveAssignment() ||
   1435               Record->isUnion()) &&
   1436              "Trying to aggregate-copy a type without a trivial copy/move "
   1437              "constructor or assignment operator");
   1438       // Ignore empty classes in C++.
   1439       if (Record->isEmpty())
   1440         return;
   1441     }
   1442   }
   1443 
   1444   // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
   1445   // C99 6.5.16.1p3, which states "If the value being stored in an object is
   1446   // read from another object that overlaps in any way the storage of the first
   1447   // object, then the overlap shall be exact and the two objects shall have
   1448   // qualified or unqualified versions of a compatible type."
   1449   //
   1450   // memcpy is not defined if the source and destination pointers are exactly
   1451   // equal, but other compilers do this optimization, and almost every memcpy
   1452   // implementation handles this case safely.  If there is a libc that does not
   1453   // safely handle this, we can add a target hook.
   1454 
   1455   // Get data size info for this aggregate. If this is an assignment,
   1456   // don't copy the tail padding, because we might be assigning into a
   1457   // base subobject where the tail padding is claimed.  Otherwise,
   1458   // copying it is fine.
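          // For example (illustrative): a class whose data size is 5 bytes but whose
          // full size is 8 may have a derived class's fields laid out in its tail
          // padding, so an assignment into a base subobject copies only the 5 bytes.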
   1459   std::pair<CharUnits, CharUnits> TypeInfo;
   1460   if (isAssignment)
   1461     TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
   1462   else
   1463     TypeInfo = getContext().getTypeInfoInChars(Ty);
   1464 
   1465   llvm::Value *SizeVal = nullptr;
   1466   if (TypeInfo.first.isZero()) {
   1467     // But note that getTypeInfo returns 0 for a VLA.
   1468     if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
   1469             getContext().getAsArrayType(Ty))) {
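              // The copy size is (element count) * (element data size); for a full
              // copy (not an assignment) the last element keeps its full size, hence
              // the subtract/add adjustment below.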
   1470       QualType BaseEltTy;
   1471       SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
   1472       TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy);
   1473       std::pair<CharUnits, CharUnits> LastElementTypeInfo;
   1474       if (!isAssignment)
   1475         LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
   1476       assert(!TypeInfo.first.isZero());
   1477       SizeVal = Builder.CreateNUWMul(
   1478           SizeVal,
   1479           llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
   1480       if (!isAssignment) {
   1481         SizeVal = Builder.CreateNUWSub(
   1482             SizeVal,
   1483             llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
   1484         SizeVal = Builder.CreateNUWAdd(
   1485             SizeVal, llvm::ConstantInt::get(
   1486                          SizeTy, LastElementTypeInfo.first.getQuantity()));
   1487       }
   1488     }
   1489   }
   1490   if (!SizeVal) {
   1491     SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
   1492   }
   1493 
   1494   // FIXME: If we have a volatile struct, the optimizer can remove what might
   1495   // appear to be `extra' memory ops:
   1496   //
   1497   // volatile struct { int i; } a, b;
   1498   //
   1499   // int main() {
   1500   //   a = b;
   1501   //   a = b;
   1502   // }
   1503   //
   1504   // we need to use a different call here.  We use isVolatile to indicate when
   1505   // either the source or the destination is volatile.
   1506 
   1507   DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
   1508   SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
   1509 
   1510   // Don't do any of the memmove_collectable tests if GC isn't set.
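          // Under Objective-C GC, aggregates containing __strong object pointers are
          // copied with the collector-aware memmove so the required write barriers
          // are not lost.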
   1511   if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
   1512     // fall through
   1513   } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
   1514     RecordDecl *Record = RecordTy->getDecl();
   1515     if (Record->hasObjectMember()) {
   1516       CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
   1517                                                     SizeVal);
   1518       return;
   1519     }
   1520   } else if (Ty->isArrayType()) {
   1521     QualType BaseType = getContext().getBaseElementType(Ty);
   1522     if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
   1523       if (RecordTy->getDecl()->hasObjectMember()) {
   1524         CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
   1525                                                       SizeVal);
   1526         return;
   1527       }
   1528     }
   1529   }
   1530 
   1531   auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
   1532 
   1533   // Determine the metadata to describe the position of any padding in this
   1534   // memcpy, as well as the TBAA tags for the members of the struct, in case
   1535   // the optimizer wishes to expand it into scalar memory operations.
   1536   if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
   1537     Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
   1538 }
   1539