//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {

/// CGRecordLayoutBuilder - Builds the LLVM struct type and the field/bitfield
/// number maps for a RecordDecl from its ASTRecordLayout.  If laying out with
/// natural alignment fails, Layout() retries with a packed LLVM struct.
class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Maps each FieldDecl to its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to
  /// avoid laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will layout a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// Lay out a sequence of contiguous bitfields.
  bool LayoutBitfields(const ASTRecordLayout &Layout,
                       unsigned &FirstFieldNo,
                       RecordDecl::field_iterator &FI,
                       RecordDecl::field_iterator FE);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// Layout a single base, virtual or non-virtual
  bool LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - layout a single virtual base.
  bool LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - layout the virtual bases of a record decl.
  bool LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// MSLayoutVirtualBases - layout the virtual bases of a record decl,
  /// like MSVC.
  bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  bool LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation failed
  /// because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the previous
  /// base needs to be changed to an array of i8. Returns true if the last
  /// laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), Types(Types) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to layout the struct. Try again with a packed struct
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        uint64_t StorageAlignment) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageAlignment);
}

/// \brief Layout the range of bitfields from BFI to BFE as contiguous storage.
bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
                                            unsigned &FirstFieldNo,
                                            RecordDecl::field_iterator &FI,
                                            RecordDecl::field_iterator FE) {
  assert(FI != FE);
  uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
  uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);

  unsigned CharAlign = Types.getTarget().getCharAlign();
  assert(FirstFieldOffset % CharAlign == 0 &&
         "First field offset is misaligned");
  CharUnits FirstFieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

  unsigned StorageAlignment
    = llvm::MinAlign(Alignment.getQuantity(),
                     FirstFieldOffsetInBytes.getQuantity());

  if (FirstFieldOffset < NextFieldOffsetInBits) {
    CharUnits FieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

    // Try to resize the last base field.
    if (!ResizeLastBaseFieldIfNecessary(FieldOffsetInCharUnits))
      llvm_unreachable("We must be able to resize the last base if we need to "
                       "pack bits into it.");

    NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
    assert(FirstFieldOffset >= NextFieldOffsetInBits);
  }

  // Append padding if necessary.
  AppendPadding(Types.getContext().toCharUnitsFromBits(FirstFieldOffset),
                CharUnits::One());

  // Find the last bitfield in a contiguous run of bitfields.
  RecordDecl::field_iterator BFI = FI;
  unsigned LastFieldNo = FirstFieldNo;
  uint64_t NextContiguousFieldOffset = FirstFieldOffset;
  for (RecordDecl::field_iterator FJ = FI;
       (FJ != FE && (*FJ)->isBitField() &&
        NextContiguousFieldOffset == Layout.getFieldOffset(LastFieldNo) &&
        (*FJ)->getBitWidthValue(Types.getContext()) != 0); FI = FJ++) {
    NextContiguousFieldOffset += (*FJ)->getBitWidthValue(Types.getContext());
    ++LastFieldNo;

    // We must use packed structs for packed fields, and also unnamed bit
    // fields since they don't affect the struct alignment.
    if (!Packed && ((*FJ)->hasAttr<PackedAttr>() || !(*FJ)->getDeclName()))
      return false;
  }
  RecordDecl::field_iterator BFE = llvm::next(FI);
  // The loop above advanced LastFieldNo one past the last bitfield in the run;
  // step back so it names the last bitfield itself.
  --LastFieldNo;
  assert(LastFieldNo >= FirstFieldNo && "Empty run of contiguous bitfields");
  FieldDecl *LastFD = *FI;

  // Find the last bitfield's offset, add its size, and round it up to the
  // character alignment to compute the storage required.
  uint64_t LastFieldOffset = Layout.getFieldOffset(LastFieldNo);
  uint64_t LastFieldSize = LastFD->getBitWidthValue(Types.getContext());
  uint64_t TotalBits = (LastFieldOffset + LastFieldSize) - FirstFieldOffset;
  CharUnits StorageBytes = Types.getContext().toCharUnitsFromBits(
    llvm::RoundUpToAlignment(TotalBits, CharAlign));
  uint64_t StorageBits = Types.getContext().toBits(StorageBytes);

  // Grow the storage to encompass any known padding in the layout when doing
  // so will make the storage a power-of-two. There are two cases when we can
  // do this. The first is when we have a subsequent field and can widen up to
  // its offset. The second is when the data size of the AST record layout is
  // past the end of the current storage. The latter is true when there is tail
  // padding on a struct and no members of a super class can be packed into it.
  //
  // Note that we widen the storage as much as possible here to express the
  // maximum latitude the language provides, and rely on the backend to lower
  // these in conjunction with shifts and masks to narrower operations where
  // beneficial.
  uint64_t EndOffset = Types.getContext().toBits(Layout.getDataSize());
  if (BFE != FE)
    // If there are more fields to be laid out, the offset at the end of the
    // bitfield is the offset of the next field in the record.
    EndOffset = Layout.getFieldOffset(LastFieldNo + 1);
  assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
         "End offset is not past the end of the known storage bits.");
  uint64_t SpaceBits = EndOffset - FirstFieldOffset;
  uint64_t LongBits = Types.getTarget().getLongWidth();
  uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
                         llvm::NextPowerOf2(StorageBits % LongBits - 1);
  assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
  if (WidenedBits <= SpaceBits) {
    StorageBits = WidenedBits;
    StorageBytes = Types.getContext().toCharUnitsFromBits(StorageBits);
    assert(StorageBits == (uint64_t)Types.getContext().toBits(StorageBytes));
  }

  unsigned FieldIndex = FieldTypes.size();
  AppendBytes(StorageBytes);

  // Now walk the bitfields associating them with this field of storage and
  // building up the bitfield specific info.
  unsigned FieldNo = FirstFieldNo;
  for (; BFI != BFE; ++BFI, ++FieldNo) {
    FieldDecl *FD = *BFI;
    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo) - FirstFieldOffset;
    uint64_t FieldSize = FD->getBitWidthValue(Types.getContext());
    Fields[FD] = FieldIndex;
    BitFields[FD] = CGBitFieldInfo::MakeInfo(Types, FD, FieldOffset, FieldSize,
                                             StorageBits, StorageAlignment);
  }
  FirstFieldNo = LastFieldNo;
  return true;
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  assert(!D->isBitField() && "Bitfields should be laid out seperately.");

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  Fields[Field] = 0;
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    unsigned StorageBits = llvm::RoundUpToAlignment(
      FieldSize, Types.getTarget().getCharAlign());
    CharUnits NumBytesToAppend
      = Types.getContext().toCharUnitsFromBits(StorageBits);

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields[Field] = CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize,
                                                StorageBits,
                                                Alignment.getQuantity());
    return FieldTy;
  }

  // This is a regular union field.
  return Types.ConvertTypeForMem(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
      Types.getDataLayout().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
      Types.getDataLayout().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    // Pick the most-aligned (and, at equal alignment, largest) member as the
    // single field that represents the union.
    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    (void)hasOnlyZeroSizedBitFields;
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}

bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  // Remember this base so a later field/base laid out in its tail padding can
  // shrink it to an i8 array (see ResizeLastBaseFieldIfNecessary).
  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  if (getTypeAlignment(subobjectType) > Alignment)
    return false;

  AppendField(baseOffset, subobjectType);
  return true;
}

bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  NonVirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  VirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
                                            const ASTRecordLayout &Layout) {
  if (!RD->getNumVBases())
    return true;

  // The vbases list is uniqued and ordered by a depth-first
  // traversal, which is what we need here.
  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
       E = RD->vbases_end(); I != E; ++I) {

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
    if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
      return false;
  }
  return true;
}

/// LayoutVirtualBases - layout the virtual bases of a record decl.
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}

bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

    // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Layout.hasOwnVFPtr()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    if (getTypeAlignment(VTableTy) > Alignment) {
      // FIXME: Should we allow this to happen in Sema?
      assert(!Packed && "Alignment is wrong even with packed struct!");
      return false;
    }

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    // The field is a pointer to the vtable, i.e. i32 (...)** (see the
    // BaseSubobjectType example in the class comment).
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    if (!LayoutNonVirtualBases(RD, Layout))
      return false;

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator FI = D->field_begin(), FE = D->field_end();
       FI != FE; ++FI, ++FieldNo) {
    FieldDecl *FD = *FI;

    // If this field is a bitfield, layout all of the consecutive
    // non-zero-length bitfields and the last zero-length bitfield; these will
    // all share storage.
    if (FD->isBitField()) {
      // If all we have is a zero-width bitfield, skip it.
      if (FD->getBitWidthValue(Types.getContext()) == 0)
        continue;

      // Layout this range of bitfields.
      if (!LayoutBitfields(Layout, FieldNo, FI, FE)) {
        assert(!Packed &&
               "Could not layout bitfields even with a packed LLVM struct!");
        return false;
      }
      assert(FI != FE && "Advanced past the last bitfield");
      continue;
    }

    if (!LayoutField(FD, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases. The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getTarget().getCXXABI().hasPrimaryVBases()) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Do nothing if we're already at the right offset.
  if (fieldOffset == NextFieldOffset) return;

  // If we're not emitting a packed LLVM type, try to avoid adding
  // unnecessary padding fields.
  if (!Packed) {
    // Round up the field offset to the alignment of the field type.
    CharUnits alignedNextFieldOffset =
      NextFieldOffset.RoundUpToAlignment(fieldAlignment);
    assert(alignedNextFieldOffset <= fieldOffset);

    // If that's the right offset, we're done.
    if (alignedNextFieldOffset == fieldOffset) return;
  }

  // Otherwise we need explicit padding.
  CharUnits padding = fieldOffset - NextFieldOffset;
  AppendBytes(padding);
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}

llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  // In a packed struct every field is 1-byte aligned.
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getDataLayout().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

/// Merge in whether a field of the given type is zero-initializable.
/// CheckZeroInitializable - Merge in whether a field of type \p T is
/// zero-initializable. Clears IsZeroInitializable/IsZeroInitializableAsBase
/// when the field's (array element) type is a member pointer that the C++
/// ABI does not represent as all-zero bits, or a record that is itself not
/// zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOpts().CPlusPlus)
    return;

  // Strip array types down to the underlying element type.
  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    // The ABI decides whether the null member pointer is all-zero bits.
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    // In C++ (checked above) every record is a CXXRecordDecl, so the cast
    // is safe. Defer to the nested record's own computed layout.
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

/// ComputeRecordLayout - Run the layout builder over record \p D, fill in
/// the body of the pre-created LLVM struct type \p Ty, and return a
/// heap-allocated CGRecordLayout describing the mapping from AST fields and
/// bases to LLVM struct indices. NOTE(review): the caller presumably takes
/// ownership of the returned object — confirm against CodeGenTypes.
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type. When the builder did
  // not produce a distinct base type, the complete-object type doubles as
  // the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  // Steal the builder's maps via swap rather than copying them.
  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Assert-only cross-checks of the computed LLVM layout against the AST
  // layout; compiled out entirely in release builds.

  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    // Don't inspect zero-length bitfields.
    if (FD->getBitWidthValue(getContext()) == 0)
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert(Info.StorageSize ==
             getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

/// print - Print this record layout (LLVM types plus per-bit-field info, in
/// field declaration order) to \p OS for debugging.
void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << " LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << " BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    // DenseMap iteration order is unspecified, so recover each bit-field's
    // declaration index by scanning its parent record, then sort on it.
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

/// dump - Print this record layout to stderr.
void CGRecordLayout::dump() const {
  print(llvm::errs());
}

/// print - Print this bit-field's location and size info to \p OS on a
/// single line.
void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset
     << " Size:" << Size
     << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageAlignment:" << StorageAlignment << ">";
}

/// dump - Print this bit-field info to stderr.
void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}