//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  llvm::SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Holds a field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not.
  bool IsMsStruct;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
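  /// For instance (an illustrative scenario, not specific values from this
  /// file): if a base with non-virtual size 5 occupies 8 bytes of LLVM
  /// storage and the next field lands at offset 5, the base's struct type is
  /// swapped for [5 x i8] so the field can be placed in its tail padding.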
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will layout a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - layout a single base, virtual or non-virtual.
  void LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - layout a single virtual base.
  void LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - layout the virtual bases of a record decl.
  void LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the
  /// previous base needs to be changed to an array of i8. Returns true if the
  /// last laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
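  /// (Illustratively: a 12-byte record whose fields occupy 9 bytes gets a
  /// trailing [3 x i8], unless rounding NextFieldOffset up to the struct's
  /// LLVM alignment already reaches 12.)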
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  IsMsStruct = D->hasAttr<MsStructAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to layout the struct. Try again with a packed struct.
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions, so
  // reverse the bit offset here. The byte offsets are reversed back later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ((ContainingTypeSizeInBits)-FieldOffset-FieldSize);
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always
  // access at aligned indices of that type. If such an access would fail
  // because it extends past the bound of the type, then we reduce the size to
  // the next smaller power of two and retry. The current algorithm assumes
  // pow2 sized types, although this is easy to fix.
  //
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

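  // As a worked example of this policy (illustrative, assuming a
  // little-endian target and no register-sized widening): a 10-bit field of
  // type i16 at bit offset 12 in a 32-bit record decomposes into two
  // components. The first i16 access covers bits [0,16) and contributes
  // target bits [12,16), i.e. TargetBitWidth 4 at FieldBitStart 12; the
  // second i16 access covers bits [16,32) and contributes the remaining
  // 6 bits at FieldByteOffset 2, FieldBitStart 0.
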
  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the
  // same structure to be coalesced, and that the backend should be smart
  // enough to narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle aligning these accesses to common
  // boundaries and guaranteeing that we do not access past the end of the
  // structure.
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset above because the
    // first fields are in higher bits. But this also reverses the bytes, so
    // fix that here by reversing the byte offset on big-endian machines.
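    // For instance (illustrative numbers): with a 32-bit container and an
    // 8-bit access at AccessStart 0, the big-endian byte offset is
    // (32 - 0 - 8) / 8 = 3, mirroring little-endian byte 0.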
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
        ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
      llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().Target.getCharAlign();

  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                               charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
    CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}
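
// Illustrative example of the above (assuming 8-bit chars): for
// 'struct { int a : 3; int b : 6; }', laying out 'a' appends one byte and
// leaves BitsAvailableInLastField = 5; 'b' then begins inside that byte at
// bit offset 3, so only RoundUpToAlignment(6 - 5, 8) = 8 more bits (one
// byte) are appended for it.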

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldSize,
                               Types.getContext().Target.getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
      CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }
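
  // (Illustratively, a union bit-field such as 'int f : 20' is thus given
  //  the storage type [3 x i8]: 20 bits rounded up to 24 bits is 3 bytes.)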

  // This is a regular union field.
  Fields[Field] = 0;
  return Types.ConvertTypeForMem(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
      Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
      Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}
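
// For illustration: in 'union { char c; double d; }' the double field has
// the greater alignment, so the loop above selects it and the union is
// lowered as { double }, with tail padding appended only if the AST record
// is larger than that field.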

void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  // Fields and bases can be laid out in the tail padding of previous
  // bases. If this happens, we need to allocate the base as an i8
  // array; otherwise, we can use the subobject type. However,
  // actually doing that would require knowledge of what immediately
  // follows this base in the layout, so instead we do a conservative
  // approximation, which is to use the base subobject type if it
  // has the same LLVM storage size as the nvsize.

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  AppendField(baseOffset, subobjectType);
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  LayoutBase(base, baseLayout, baseOffset);
  NonVirtualBases[base] = (FieldTypes.size() - 1);
}

void
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  LayoutBase(base, baseLayout, baseOffset);
  VirtualBases[base] = (FieldTypes.size() - 1);
}

/// LayoutVirtualBases - layout the virtual bases of a record decl.
void
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other direct or indirect base class.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      LayoutVirtualBase(BaseDecl, vbaseOffset);
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    LayoutVirtualBases(BaseDecl, Layout);
  }
}

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffset.isZero() &&
             "VTable pointer must come first!");
      AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
    } else {
      if (!Layout.isPrimaryBaseVirtual())
        LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero());
      else
        LayoutVirtualBase(PrimaryBase, CharUnits::Zero());
    }
  }

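  // (When the vtable pointer is synthesized above, its type is a pointer to
  //  a pointer-to-vararg-function, i.e. the 'i32 (...)**' seen in the
  //  %struct.C example near the top of this file.)
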
  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
  }
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::createNamed(Types.getLLVMContext(), "",
                                                    FieldTypes, Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}
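
// For example, for the struct C sketched at the top of this file, the
// complete-object type and the non-virtual base type differ, so this
// function creates and names a separate '%struct.C.base' type here.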

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      const FieldDecl *FD = (*Field);
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // And lay out the virtual bases.
    RD->getIndirectPrimaryBases(IndirectPrimaryBases);
    if (Layout.isPrimaryBaseVirtual())
      IndirectPrimaryBases.insert(Layout.getPrimaryBase());
    LayoutVirtualBases(RD, Layout);
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(fieldAlignment);

  if (alignedNextFieldOffset < fieldOffset) {
    // Even with alignment, the field offset is not at the right place;
    // insert padding.
    CharUnits padding = fieldOffset - NextFieldOffset;

    AppendBytes(padding);
  }
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}

llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field.
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

/// Merge in whether a field of the given type is zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
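  // (Illustrative background: in the Itanium C++ ABI a null data member
  //  pointer is represented as -1 rather than 0, so a class containing one,
  //  e.g. 'struct S { int S::*p; };', cannot be zero-initialized with a
  //  plain LLVM zeroinitializer.)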
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(llvm::raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}