//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IgnoreResult;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
                 bool ignore)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IgnoreResult(ignore) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
                         unsigned Alignment = 0);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitStdInitializerList(llvm::Value *DestPtr, InitListExpr *InitList);
  void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
                     QualType elementType, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) {
    // For aggregates, we should always be able to emit the variable
    // as an l-value unless it's a reference.  This is due to the fact
    // that we can't actually ever see a normal l2r conversion on an
    // aggregate in C++, and in C there's no language standard
    // actively preventing us from listing variables in the captures
    // list of a block.
    if (E->getDecl()->getType()->isReferenceType()) {
      if (CodeGenFunction::ConstantEmission result
            = CGF.tryEmitAsConstant(E)) {
        EmitFinalDestCopy(E, result.getReferenceLValue(CGF, E));
        return;
      }
    }

    EmitAggLoadOfLValue(E);
  }

  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E, LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);
  EmitFinalDestCopy(E, LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, do a final copy.
  assert(Dest.getAddr() != Src.getAggregateAddr());
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  CharUnits Alignment = std::min(TypeInfo.second, Dest.getAlignment());
  EmitFinalDestCopy(E, Src, /*Ignore*/ true, Alignment.getQuantity());
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
                                       unsigned Alignment) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context (like an expression statement) that doesn't care
  // about the result.  C says that an lvalue-to-rvalue conversion is
  // performed in these cases; C++ says that it is not.  In either
  // case, we don't actually need to do anything unless the value is
  // volatile.
  if (Dest.isIgnored()) {
    if (!Src.isVolatileQualified() ||
        CGF.CGM.getLangOpts().CPlusPlus ||
        (IgnoreResult && Ignore))
      return;

    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
  }

  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
    return;
  }
  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand
  // is volatile, unless the copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                        Dest.isVolatile()|Src.isVolatileQualified(),
                        Alignment);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
  EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore, Alignment.getQuantity());
}

static QualType GetStdInitializerListElementType(QualType T) {
  // Just assume that this is really std::initializer_list.
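  // That is, treat T as a ClassTemplateSpecializationDecl and take its first
  // template argument as the element type; e.g. for std::initializer_list<int>
  // this yields 'int'.  The callers below check the expected layout (a start
  // pointer followed by either an end pointer or a size_t length).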
  ClassTemplateSpecializationDecl *specialization =
      cast<ClassTemplateSpecializationDecl>(T->castAs<RecordType>()->getDecl());
  return specialization->getTemplateArgs()[0].getAsType();
}

/// \brief Prepare cleanup for the temporary array.
static void EmitStdInitializerListCleanup(CodeGenFunction &CGF,
                                          QualType arrayType,
                                          llvm::Value *addr,
                                          const InitListExpr *initList) {
  QualType::DestructionKind dtorKind = arrayType.isDestructedType();
  if (!dtorKind)
    return; // Type doesn't need destroying.
  if (dtorKind != QualType::DK_cxx_destructor) {
    CGF.ErrorUnsupported(initList, "ObjC ARC type in initializer_list");
    return;
  }

  CodeGenFunction::Destroyer *destroyer = CGF.getDestroyer(dtorKind);
  CGF.pushDestroy(NormalAndEHCleanup, addr, arrayType, destroyer,
                  /*EHCleanup=*/true);
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void AggExprEmitter::EmitStdInitializerList(llvm::Value *destPtr,
                                            InitListExpr *initList) {
  // We emit an array containing the elements, then have the init list point
  // at the array.
  ASTContext &ctx = CGF.getContext();
  unsigned numInits = initList->getNumInits();
  QualType element = GetStdInitializerListElementType(initList->getType());
  llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
  QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
  llvm::Type *LTy = CGF.ConvertTypeForMem(array);
  llvm::AllocaInst *alloc = CGF.CreateTempAlloca(LTy);
  alloc->setAlignment(ctx.getTypeAlignInChars(array).getQuantity());
  alloc->setName(".initlist.");

  EmitArrayInit(alloc, cast<llvm::ArrayType>(LTy), element, initList);

  // FIXME: The diagnostics are somewhat out of place here.
  RecordDecl *record = initList->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator field = record->field_begin();
  if (field == record->field_end()) {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }

  QualType elementPtr = ctx.getPointerType(element.withConst());

  // Start pointer.
  if (!ctx.hasSameType(field->getType(), elementPtr)) {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }
  LValue DestLV = CGF.MakeNaturalAlignAddrLValue(destPtr, initList->getType());
  LValue start = CGF.EmitLValueForFieldInitialization(DestLV, *field);
  llvm::Value *arrayStart = Builder.CreateStructGEP(alloc, 0, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(arrayStart), start);
  ++field;

  if (field == record->field_end()) {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }
  LValue endOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *field);
  if (ctx.hasSameType(field->getType(), elementPtr)) {
    // End pointer.
    llvm::Value *arrayEnd = Builder.CreateStructGEP(alloc, numInits, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
  } else if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Builder.getInt(size)), endOrLength);
  } else {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }

  if (!Dest.isExternallyDestructed())
    EmitStdInitializerListCleanup(CGF, array, alloc, initList);
}

/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
                                   QualType elementType, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  llvm::AllocaInst *endOfInit = 0;
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(element, endOfInit);
    }

    // If these are nested std::initializer_list inits, do them directly,
    // because they are conceptually the same "location".
    InitListExpr *initList = dyn_cast<InitListExpr>(E->getInit(i));
    if (initList && initList->initializesStdInitializerList()) {
      EmitStdInitializerList(element, initList);
    } else {
      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }
  }

  // Check whether there's a non-trivial array-fill expression.
  // Note that this will be a CXXConstructExpr even if the element
  // type is an array (or array of array, etc.) of class type.
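  // For example, given 'S arr[4] = { S(1) };', the filler is the implicit
  // default-construction used for elements 1 through 3.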
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = true;
  if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
    assert(cons->getConstructor()->isDefaultConstructor());
    hasTrivialFiller = cons->getConstructor()->isTrivial();
  }

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
    if (filler)
      EmitInitializationToLValue(filler, elementLV);
    else
      EmitNullInitializationToLValue(elementLV);

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    // FIXME: This is a band-aid; the real problem appears to be in our handling
    // of assignments, where we store directly into the LHS without checking
    // whether anything in the RHS aliases.
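    // (e.g. 'pt = (struct Point){ pt.y, pt.x };' reads the destination, so
    // emitting the literal straight into 'pt' would be wrong.)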
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}


void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_LValueToRValue: // hope for downstream optimization
  case CK_NoOp:
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
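  // If the call couldn't be emitted directly into Dest (see
  // shouldUseDestForReturnSlot), this performs the final copy.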
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E, LV);
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasAttr<BlocksAttr>() &&
          E->getRHS()->HasSideEffects(CGF.getContext())) {
        // When __block variable on LHS, the RHS must be evaluated first
        // as it may change the 'forwarding' field via call to Block_copy.
        LValue RHS = CGF.EmitLValue(E->getRHS());
        LValue LHS = CGF.EmitLValue(E->getLHS());
        Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                       needsGC(E->getLHS()->getType()),
                                       AggValueSlot::IsAliased);
        EmitFinalDestCopy(E, RHS, true);
        return;
      }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
  EmitFinalDestCopy(E, LHS, true);
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  Dest = EnsureSlot(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (!CGF.hasAggregateLLVMType(type)) {
    // For non-aggregates, we can store zero.
    llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
      new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->initializesStdInitializerList()) {
    EmitStdInitializerList(Dest.getAddr(), E);
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
                                     Dest.getAlignment());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    if (E->isStringLiteralInit())
      return Visit(E->getInit(0));

    QualType elementType =
        CGF.getContext().getAsArrayType(E->getType())->getElementType();

    llvm::PointerType *APType =
      cast<llvm::PointerType>(Dest.getAddr()->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    EmitArrayInit(Dest.getAddr(), AType, elementType, E);
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = 0;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                                fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, *field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present) elements.
  // If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getContext().getTargetInfo().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zero'ing.
  if (CGF.getContext().getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                   .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16-bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();

  Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}


/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
///
/// \param IsInitializer - true if this evaluation is initializing an
/// object whose lifetime is already being managed.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
                                  bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
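  // For large, mostly-zero initializers this may emit an up-front memset and
  // mark the slot as zeroed, so the visitor can skip the individual zero
  // stores (e.g. 'struct { int x; int pad[100]; } s = { 1 };').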
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile, unsigned Alignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);

  if (!Alignment)
    Alignment = TypeInfo.second.getQuantity();

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  //   volatile struct { int i; } a, b;
  //
  //   int main() {
  //     a = b;
  //     a = b;
  //   }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       Alignment, isVolatile);
}

void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
                                                         const Expr *init) {
  const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(init);
  if (cleanups)
    init = cleanups->getSubExpr();

  if (isa<InitListExpr>(init) &&
      cast<InitListExpr>(init)->initializesStdInitializerList()) {
    // We initialized this std::initializer_list with an initializer list.
    // A backing array was created. Push a cleanup for it.
    EmitStdInitializerListCleanup(loc, cast<InitListExpr>(init));
  }
}

static void EmitRecursiveStdInitializerListCleanup(CodeGenFunction &CGF,
                                                   llvm::Value *arrayStart,
                                                   const InitListExpr *init) {
  // Check if there are any recursive cleanups to do, i.e. if we have
  //   std::initializer_list<std::initializer_list<obj>> list = {{obj()}};
  // then we need to destroy the inner array as well.
  for (unsigned i = 0, e = init->getNumInits(); i != e; ++i) {
    const InitListExpr *subInit = dyn_cast<InitListExpr>(init->getInit(i));
    if (!subInit || !subInit->initializesStdInitializerList())
      continue;

    // This one needs to be destroyed. Get the address of the std::init_list.
    llvm::Value *offset = llvm::ConstantInt::get(CGF.SizeTy, i);
    llvm::Value *loc = CGF.Builder.CreateInBoundsGEP(arrayStart, offset,
                                                     "std.initlist");
    CGF.EmitStdInitializerListCleanup(loc, subInit);
  }
}

void CodeGenFunction::EmitStdInitializerListCleanup(llvm::Value *loc,
                                                    const InitListExpr *init) {
  ASTContext &ctx = getContext();
  QualType element = GetStdInitializerListElementType(init->getType());
  unsigned numInits = init->getNumInits();
  llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
  QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
  QualType arrayPtr = ctx.getPointerType(array);
  llvm::Type *arrayPtrType = ConvertType(arrayPtr);

  // lvalue is the location of a std::initializer_list, which as its first
  // element has a pointer to the array we want to destroy.
  llvm::Value *startPointer = Builder.CreateStructGEP(loc, 0, "startPointer");
  llvm::Value *startAddress = Builder.CreateLoad(startPointer, "startAddress");

  ::EmitRecursiveStdInitializerListCleanup(*this, startAddress, init);

  llvm::Value *arrayAddress =
      Builder.CreateBitCast(startAddress, arrayPtrType, "arrayAddress");
  ::EmitStdInitializerListCleanup(*this, array, arrayAddress, init);
}