      1 // Copyright 2011 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 #ifndef V8_SPACES_H_
     29 #define V8_SPACES_H_
     30 
     31 #include "allocation.h"
     32 #include "hashmap.h"
     33 #include "list.h"
     34 #include "log.h"
     35 #include "v8utils.h"
     36 
     37 namespace v8 {
     38 namespace internal {
     39 
     40 class Isolate;
     41 
     42 // -----------------------------------------------------------------------------
     43 // Heap structures:
     44 //
     45 // A JS heap consists of a young generation, an old generation, and a large
     46 // object space. The young generation is divided into two semispaces. A
     47 // scavenger implements Cheney's copying algorithm. The old generation is
     48 // separated into a map space and an old object space. The map space contains
      49 // all (and only) map objects; the rest of the old objects go into the old space.
     50 // The old generation is collected by a mark-sweep-compact collector.
     51 //
     52 // The semispaces of the young generation are contiguous.  The old and map
      53 // spaces consist of a list of pages. A page has a page header and an object
     54 // area.
     55 //
     56 // There is a separate large object space for objects larger than
     57 // Page::kMaxHeapObjectSize, so that they do not have to move during
     58 // collection. The large object space is paged. Pages in large object space
     59 // may be larger than the page size.
     60 //
     61 // A store-buffer based write barrier is used to keep track of intergenerational
     62 // references.  See store-buffer.h.
     63 //
     64 // During scavenges and mark-sweep collections we sometimes (after a store
     65 // buffer overflow) iterate intergenerational pointers without decoding heap
      66 // object maps, so if the page belongs to old pointer space or large object
      67 // space it is essential to guarantee that the page does not contain any
      68 // garbage pointers to new space: every pointer-aligned word that satisfies
     69 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
     70 // new space. Thus objects in old pointer and large object spaces should have a
     71 // special layout (e.g. no bare integer fields). This requirement does not
     72 // apply to map space which is iterated in a special fashion. However we still
     73 // require pointer fields of dead maps to be cleaned.
     74 //
     75 // To enable lazy cleaning of old space pages we can mark chunks of the page
     76 // as being garbage.  Garbage sections are marked with a special map.  These
     77 // sections are skipped when scanning the page, even if we are otherwise
     78 // scanning without regard for object boundaries.  Garbage sections are chained
     79 // together to form a free list after a GC.  Garbage sections created outside
      80 // of GCs by object truncation, etc. may not be in the free list chain.  Very
      81 // small free spaces are ignored; they need only be cleaned of bogus pointers
     82 // into new space.
     83 //
     84 // Each page may have up to one special garbage section.  The start of this
     85 // section is denoted by the top field in the space.  The end of the section
     86 // is denoted by the limit field in the space.  This special garbage section
     87 // is not marked with a free space map in the data.  The point of this section
     88 // is to enable linear allocation without having to constantly update the byte
     89 // array every time the top field is updated and a new object is created.  The
     90 // special garbage section is not in the chain of garbage sections.
     91 //
     92 // Since the top and limit fields are in the space, not the page, only one page
     93 // has a special garbage section, and if the top and limit are equal then there
     94 // is no special garbage section.
     95 
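         // A minimal sketch of the invariant described above (illustrative only, not
         // part of this header): after a store buffer overflow, a page in old pointer
         // space can be scanned word by word without decoding object maps:
         //
         //   for (Address slot = page->area_start();
         //        slot < page->area_end();
         //        slot += kPointerSize) {
         //     Object* value = *reinterpret_cast<Object**>(slot);
         //     if (heap->InNewSpace(value)) {
         //       // Guaranteed to be a pointer to a live new-space object, never a
         //       // stale integer that merely looks like one.
         //     }
         //   }
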
      96 // Some assertion macros used in debug mode.
     97 
     98 #define ASSERT_PAGE_ALIGNED(address)                                           \
     99   ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
    100 
    101 #define ASSERT_OBJECT_ALIGNED(address)                                         \
    102   ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
    103 
    104 #define ASSERT_OBJECT_SIZE(size)                                               \
    105   ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
    106 
    107 #define ASSERT_PAGE_OFFSET(offset)                                             \
    108   ASSERT((Page::kObjectStartOffset <= offset)                                  \
    109       && (offset <= Page::kPageSize))
    110 
    111 #define ASSERT_MAP_PAGE_INDEX(index)                                           \
    112   ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
    113 
    114 
    115 class PagedSpace;
    116 class MemoryAllocator;
    117 class AllocationInfo;
    118 class Space;
    119 class FreeList;
    120 class MemoryChunk;
    121 
    122 class MarkBit {
    123  public:
    124   typedef uint32_t CellType;
    125 
    126   inline MarkBit(CellType* cell, CellType mask, bool data_only)
    127       : cell_(cell), mask_(mask), data_only_(data_only) { }
    128 
    129   inline CellType* cell() { return cell_; }
    130   inline CellType mask() { return mask_; }
    131 
    132 #ifdef DEBUG
    133   bool operator==(const MarkBit& other) {
    134     return cell_ == other.cell_ && mask_ == other.mask_;
    135   }
    136 #endif
    137 
    138   inline void Set() { *cell_ |= mask_; }
    139   inline bool Get() { return (*cell_ & mask_) != 0; }
    140   inline void Clear() { *cell_ &= ~mask_; }
    141 
    142   inline bool data_only() { return data_only_; }
    143 
    144   inline MarkBit Next() {
    145     CellType new_mask = mask_ << 1;
    146     if (new_mask == 0) {
    147       return MarkBit(cell_ + 1, 1, data_only_);
    148     } else {
    149       return MarkBit(cell_, new_mask, data_only_);
    150     }
    151   }
    152 
    153  private:
    154   CellType* cell_;
    155   CellType mask_;
    156   // This boolean indicates that the object is in a data-only space with no
    157   // pointers.  This enables some optimizations when marking.
    158   // It is expected that this field is inlined and turned into control flow
    159   // at the place where the MarkBit object is created.
    160   bool data_only_;
    161 };
    162 
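         // Illustrative sketch (an assumption of this commentary, not code from this
         // header): MarkBit::Next() shifts the mask within the 32-bit cell and only
         // advances cell_ when the mask wraps, so consecutive mark bits can be
         // visited without recomputing cell/mask pairs:
         //
         //   MarkBit bit = bitmap->MarkBitFromIndex(start_index);
         //   for (uint32_t i = start_index; i < end_index; i++) {
         //     if (bit.Get()) { /* the word at index i is marked */ }
         //     bit = bit.Next();  // same cell while the shifted mask is non-zero
         //   }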
    163 
     164 // Bitmap is a sequence of cells each containing a fixed number of bits.
    165 class Bitmap {
    166  public:
    167   static const uint32_t kBitsPerCell = 32;
    168   static const uint32_t kBitsPerCellLog2 = 5;
    169   static const uint32_t kBitIndexMask = kBitsPerCell - 1;
    170   static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
    171   static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
    172 
    173   static const size_t kLength =
    174     (1 << kPageSizeBits) >> (kPointerSizeLog2);
    175 
    176   static const size_t kSize =
    177     (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
    178 
    179 
    180   static int CellsForLength(int length) {
    181     return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
    182   }
    183 
    184   int CellsCount() {
    185     return CellsForLength(kLength);
    186   }
    187 
    188   static int SizeFor(int cells_count) {
    189     return sizeof(MarkBit::CellType) * cells_count;
    190   }
    191 
    192   INLINE(static uint32_t IndexToCell(uint32_t index)) {
    193     return index >> kBitsPerCellLog2;
    194   }
    195 
    196   INLINE(static uint32_t CellToIndex(uint32_t index)) {
    197     return index << kBitsPerCellLog2;
    198   }
    199 
    200   INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    201     return (index + kBitIndexMask) & ~kBitIndexMask;
    202   }
    203 
    204   INLINE(MarkBit::CellType* cells()) {
    205     return reinterpret_cast<MarkBit::CellType*>(this);
    206   }
    207 
    208   INLINE(Address address()) {
    209     return reinterpret_cast<Address>(this);
    210   }
    211 
    212   INLINE(static Bitmap* FromAddress(Address addr)) {
    213     return reinterpret_cast<Bitmap*>(addr);
    214   }
    215 
    216   inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    217     MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    218     MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    219     return MarkBit(cell, mask, data_only);
    220   }
    221 
    222   static inline void Clear(MemoryChunk* chunk);
    223 
    224   static void PrintWord(uint32_t word, uint32_t himask = 0) {
    225     for (uint32_t mask = 1; mask != 0; mask <<= 1) {
    226       if ((mask & himask) != 0) PrintF("[");
    227       PrintF((mask & word) ? "1" : "0");
    228       if ((mask & himask) != 0) PrintF("]");
    229     }
    230   }
    231 
    232   class CellPrinter {
    233    public:
    234     CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
    235 
    236     void Print(uint32_t pos, uint32_t cell) {
    237       if (cell == seq_type) {
    238         seq_length++;
    239         return;
    240       }
    241 
    242       Flush();
    243 
    244       if (IsSeq(cell)) {
    245         seq_start = pos;
    246         seq_length = 0;
    247         seq_type = cell;
    248         return;
    249       }
    250 
    251       PrintF("%d: ", pos);
    252       PrintWord(cell);
    253       PrintF("\n");
    254     }
    255 
    256     void Flush() {
    257       if (seq_length > 0) {
    258         PrintF("%d: %dx%d\n",
    259                seq_start,
    260                seq_type == 0 ? 0 : 1,
    261                seq_length * kBitsPerCell);
    262         seq_length = 0;
    263       }
    264     }
    265 
    266     static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
    267 
    268    private:
    269     uint32_t seq_start;
    270     uint32_t seq_type;
    271     uint32_t seq_length;
    272   };
    273 
    274   void Print() {
    275     CellPrinter printer;
    276     for (int i = 0; i < CellsCount(); i++) {
    277       printer.Print(i, cells()[i]);
    278     }
    279     printer.Flush();
    280     PrintF("\n");
    281   }
    282 
    283   bool IsClean() {
    284     for (int i = 0; i < CellsCount(); i++) {
    285       if (cells()[i] != 0) {
    286         return false;
    287       }
    288     }
    289     return true;
    290   }
    291 };
    292 
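         // Illustrative sketch (assumption, not part of this header): with 32 bits
         // per cell, the cell number is index >> kBitsPerCellLog2 and the in-cell
         // mask is 1 << (index & kBitIndexMask).  For example:
         //
         //   uint32_t index = 70;                               // 70th tagged word
         //   uint32_t cell  = Bitmap::IndexToCell(index);       // 70 >> 5 == 2
         //   uint32_t mask  = 1u << (index & Bitmap::kBitIndexMask);  // 1 << 6
         //   MarkBit bit    = bitmap->MarkBitFromIndex(index);  // same cell and mask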
    293 
    294 class SkipList;
    295 class SlotsBuffer;
    296 
    297 // MemoryChunk represents a memory region owned by a specific space.
    298 // It is divided into the header and the body. Chunk start is always
    299 // 1MB aligned. Start of the body is aligned so it can accommodate
    300 // any heap object.
    301 class MemoryChunk {
    302  public:
    303   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
    304   static MemoryChunk* FromAddress(Address a) {
    305     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
    306   }
    307 
    308   // Only works for addresses in pointer spaces, not data or code spaces.
    309   static inline MemoryChunk* FromAnyPointerAddress(Address addr);
    310 
    311   Address address() { return reinterpret_cast<Address>(this); }
    312 
    313   bool is_valid() { return address() != NULL; }
    314 
    315   MemoryChunk* next_chunk() const { return next_chunk_; }
    316   MemoryChunk* prev_chunk() const { return prev_chunk_; }
    317 
    318   void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
    319   void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
    320 
    321   Space* owner() const {
    322     if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
    323         kFailureTag) {
    324       return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
    325                                       kFailureTag);
    326     } else {
    327       return NULL;
    328     }
    329   }
    330 
    331   void set_owner(Space* space) {
    332     ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
    333     owner_ = reinterpret_cast<Address>(space) + kFailureTag;
    334     ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
    335            kFailureTag);
    336   }
    337 
    338   VirtualMemory* reserved_memory() {
    339     return &reservation_;
    340   }
    341 
    342   void InitializeReservedMemory() {
    343     reservation_.Reset();
    344   }
    345 
    346   void set_reserved_memory(VirtualMemory* reservation) {
    347     ASSERT_NOT_NULL(reservation);
    348     reservation_.TakeControl(reservation);
    349   }
    350 
    351   bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
    352   void initialize_scan_on_scavenge(bool scan) {
    353     if (scan) {
    354       SetFlag(SCAN_ON_SCAVENGE);
    355     } else {
    356       ClearFlag(SCAN_ON_SCAVENGE);
    357     }
    358   }
    359   inline void set_scan_on_scavenge(bool scan);
    360 
    361   int store_buffer_counter() { return store_buffer_counter_; }
    362   void set_store_buffer_counter(int counter) {
    363     store_buffer_counter_ = counter;
    364   }
    365 
    366   bool Contains(Address addr) {
    367     return addr >= area_start() && addr < area_end();
    368   }
    369 
    370   // Checks whether addr can be a limit of addresses in this page.
    371   // It's a limit if it's in the page, or if it's just after the
    372   // last byte of the page.
    373   bool ContainsLimit(Address addr) {
    374     return addr >= area_start() && addr <= area_end();
    375   }
    376 
    377   // Every n write barrier invocations we go to runtime even though
    378   // we could have handled it in generated code.  This lets us check
    379   // whether we have hit the limit and should do some more marking.
    380   static const int kWriteBarrierCounterGranularity = 500;
    381 
    382   enum MemoryChunkFlags {
    383     IS_EXECUTABLE,
    384     ABOUT_TO_BE_FREED,
    385     POINTERS_TO_HERE_ARE_INTERESTING,
    386     POINTERS_FROM_HERE_ARE_INTERESTING,
    387     SCAN_ON_SCAVENGE,
    388     IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
     389     IN_TO_SPACE,    // All pages in new space have one of these two set.
    390     NEW_SPACE_BELOW_AGE_MARK,
    391     CONTAINS_ONLY_DATA,
    392     EVACUATION_CANDIDATE,
    393     RESCAN_ON_EVACUATION,
    394 
     395     // Pages swept precisely can be iterated, hitting only the live objects,
     396     // whereas those swept conservatively cannot be iterated over. Both flags
     397     // indicate that marking bits have been cleared by the sweeper; otherwise
     398     // marking bits are still intact.
    399     WAS_SWEPT_PRECISELY,
    400     WAS_SWEPT_CONSERVATIVELY,
    401 
     402     // Large objects can have a progress bar in their page header. These objects
     403     // are scanned in increments and will be kept black while being scanned.
     404     // Even if the mutator writes to them, they will be kept black and a
     405     // white-to-grey transition is performed on the value.
    406     HAS_PROGRESS_BAR,
    407 
    408     // Last flag, keep at bottom.
    409     NUM_MEMORY_CHUNK_FLAGS
    410   };
    411 
    412 
    413   static const int kPointersToHereAreInterestingMask =
    414       1 << POINTERS_TO_HERE_ARE_INTERESTING;
    415 
    416   static const int kPointersFromHereAreInterestingMask =
    417       1 << POINTERS_FROM_HERE_ARE_INTERESTING;
    418 
    419   static const int kEvacuationCandidateMask =
    420       1 << EVACUATION_CANDIDATE;
    421 
    422   static const int kSkipEvacuationSlotsRecordingMask =
    423       (1 << EVACUATION_CANDIDATE) |
    424       (1 << RESCAN_ON_EVACUATION) |
    425       (1 << IN_FROM_SPACE) |
    426       (1 << IN_TO_SPACE);
    427 
    428 
    429   void SetFlag(int flag) {
    430     flags_ |= static_cast<uintptr_t>(1) << flag;
    431   }
    432 
    433   void ClearFlag(int flag) {
    434     flags_ &= ~(static_cast<uintptr_t>(1) << flag);
    435   }
    436 
    437   void SetFlagTo(int flag, bool value) {
    438     if (value) {
    439       SetFlag(flag);
    440     } else {
    441       ClearFlag(flag);
    442     }
    443   }
    444 
    445   bool IsFlagSet(int flag) {
    446     return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
    447   }
    448 
    449   // Set or clear multiple flags at a time. The flags in the mask
     450   // are set to the value in "flags"; the rest retain the current value
    451   // in flags_.
    452   void SetFlags(intptr_t flags, intptr_t mask) {
    453     flags_ = (flags_ & ~mask) | (flags & mask);
    454   }
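
           // Illustrative sketch (assumption, not code from this header): SetFlags()
           // only touches the bits selected by the mask, so a page can move from
           // from-space to to-space in one masked update while all other flags keep
           // their values:
           //
           //   intptr_t new_space_mask = (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
           //   chunk->SetFlags(1 << IN_TO_SPACE, new_space_mask);
           //   // IN_TO_SPACE is now set, IN_FROM_SPACE is cleared.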
    455 
    456   // Return all current flags.
    457   intptr_t GetFlags() { return flags_; }
    458 
    459   intptr_t parallel_sweeping() const {
    460     return parallel_sweeping_;
    461   }
    462 
    463   void set_parallel_sweeping(intptr_t state) {
    464     parallel_sweeping_ = state;
    465   }
    466 
    467   bool TryParallelSweeping() {
    468     return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
    469   }
    470 
    471   // Manage live byte count (count of bytes known to be live,
    472   // because they are marked black).
    473   void ResetLiveBytes() {
    474     if (FLAG_gc_verbose) {
    475       PrintF("ResetLiveBytes:%p:%x->0\n",
    476              static_cast<void*>(this), live_byte_count_);
    477     }
    478     live_byte_count_ = 0;
    479   }
    480   void IncrementLiveBytes(int by) {
    481     if (FLAG_gc_verbose) {
     482       PrintF("UpdateLiveBytes:%p:%x%c=%x->%x\n",
    483              static_cast<void*>(this), live_byte_count_,
    484              ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
    485              live_byte_count_ + by);
    486     }
    487     live_byte_count_ += by;
    488     ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
    489   }
    490   int LiveBytes() {
    491     ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
    492     return live_byte_count_;
    493   }
    494 
    495   int write_barrier_counter() {
    496     return static_cast<int>(write_barrier_counter_);
    497   }
    498 
    499   void set_write_barrier_counter(int counter) {
    500     write_barrier_counter_ = counter;
    501   }
    502 
    503   int progress_bar() {
    504     ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
    505     return progress_bar_;
    506   }
    507 
    508   void set_progress_bar(int progress_bar) {
    509     ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
    510     progress_bar_ = progress_bar;
    511   }
    512 
    513   void ResetProgressBar() {
    514     if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
    515       set_progress_bar(0);
    516       ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    517     }
    518   }
    519 
    520   bool IsLeftOfProgressBar(Object** slot) {
    521     Address slot_address = reinterpret_cast<Address>(slot);
    522     ASSERT(slot_address > this->address());
    523     return (slot_address - (this->address() + kObjectStartOffset)) <
    524            progress_bar();
    525   }
    526 
    527   static void IncrementLiveBytesFromGC(Address address, int by) {
    528     MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
    529   }
    530 
    531   static void IncrementLiveBytesFromMutator(Address address, int by);
    532 
    533   static const intptr_t kAlignment =
    534       (static_cast<uintptr_t>(1) << kPageSizeBits);
    535 
    536   static const intptr_t kAlignmentMask = kAlignment - 1;
    537 
    538   static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
    539 
    540   static const intptr_t kLiveBytesOffset =
    541      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
    542      kPointerSize + kPointerSize +
    543      kPointerSize + kPointerSize + kPointerSize + kIntSize;
    544 
    545   static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
    546 
    547   static const size_t kWriteBarrierCounterOffset =
    548       kSlotsBufferOffset + kPointerSize + kPointerSize;
    549 
    550   static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
    551                                     kIntSize + kIntSize + kPointerSize +
    552                                     5 * kPointerSize;
    553 
    554   static const int kBodyOffset =
    555       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
    556 
     557   // The start offset of the object area in a page.  Aligned to both map and
     558   // code alignment to be suitable for both.  Also aligned to 32 words because
     559   // the marking bitmap is arranged in 32-bit chunks.
    560   static const int kObjectStartAlignment = 32 * kPointerSize;
    561   static const int kObjectStartOffset = kBodyOffset - 1 +
    562       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
    563 
    564   size_t size() const { return size_; }
    565 
    566   void set_size(size_t size) {
    567     size_ = size;
    568   }
    569 
    570   void SetArea(Address area_start, Address area_end) {
    571     area_start_ = area_start;
    572     area_end_ = area_end;
    573   }
    574 
    575   Executability executable() {
    576     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
    577   }
    578 
    579   bool ContainsOnlyData() {
    580     return IsFlagSet(CONTAINS_ONLY_DATA);
    581   }
    582 
    583   bool InNewSpace() {
    584     return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
    585   }
    586 
    587   bool InToSpace() {
    588     return IsFlagSet(IN_TO_SPACE);
    589   }
    590 
    591   bool InFromSpace() {
    592     return IsFlagSet(IN_FROM_SPACE);
    593   }
    594 
    595   // ---------------------------------------------------------------------
    596   // Markbits support
    597 
    598   inline Bitmap* markbits() {
    599     return Bitmap::FromAddress(address() + kHeaderSize);
    600   }
    601 
    602   void PrintMarkbits() { markbits()->Print(); }
    603 
    604   inline uint32_t AddressToMarkbitIndex(Address addr) {
    605     return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
    606   }
    607 
    608   inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    609     const intptr_t offset =
    610         reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
    611 
    612     return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
    613   }
    614 
    615   inline Address MarkbitIndexToAddress(uint32_t index) {
    616     return this->address() + (index << kPointerSizeLog2);
    617   }
    618 
    619   void InsertAfter(MemoryChunk* other);
    620   void Unlink();
    621 
    622   inline Heap* heap() { return heap_; }
    623 
    624   static const int kFlagsOffset = kPointerSize * 3;
    625 
    626   bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
    627 
    628   bool ShouldSkipEvacuationSlotRecording() {
    629     return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
    630   }
    631 
    632   inline SkipList* skip_list() {
    633     return skip_list_;
    634   }
    635 
    636   inline void set_skip_list(SkipList* skip_list) {
    637     skip_list_ = skip_list;
    638   }
    639 
    640   inline SlotsBuffer* slots_buffer() {
    641     return slots_buffer_;
    642   }
    643 
    644   inline SlotsBuffer** slots_buffer_address() {
    645     return &slots_buffer_;
    646   }
    647 
    648   void MarkEvacuationCandidate() {
    649     ASSERT(slots_buffer_ == NULL);
    650     SetFlag(EVACUATION_CANDIDATE);
    651   }
    652 
    653   void ClearEvacuationCandidate() {
    654     ASSERT(slots_buffer_ == NULL);
    655     ClearFlag(EVACUATION_CANDIDATE);
    656   }
    657 
    658   Address area_start() { return area_start_; }
    659   Address area_end() { return area_end_; }
    660   int area_size() {
    661     return static_cast<int>(area_end() - area_start());
    662   }
    663   bool CommitArea(size_t requested);
    664 
    665   // Approximate amount of physical memory committed for this chunk.
    666   size_t CommittedPhysicalMemory() {
    667     return high_water_mark_;
    668   }
    669 
    670   static inline void UpdateHighWaterMark(Address mark);
    671 
    672  protected:
    673   MemoryChunk* next_chunk_;
    674   MemoryChunk* prev_chunk_;
    675   size_t size_;
    676   intptr_t flags_;
    677 
    678   // Start and end of allocatable memory on this chunk.
    679   Address area_start_;
    680   Address area_end_;
    681 
    682   // If the chunk needs to remember its memory reservation, it is stored here.
    683   VirtualMemory reservation_;
    684   // The identity of the owning space.  This is tagged as a failure pointer, but
    685   // no failure can be in an object, so this can be distinguished from any entry
    686   // in a fixed array.
    687   Address owner_;
    688   Heap* heap_;
    689   // Used by the store buffer to keep track of which pages to mark scan-on-
    690   // scavenge.
    691   int store_buffer_counter_;
    692   // Count of bytes marked black on page.
    693   int live_byte_count_;
    694   SlotsBuffer* slots_buffer_;
    695   SkipList* skip_list_;
    696   intptr_t write_barrier_counter_;
    697   // Used by the incremental marker to keep track of the scanning progress in
    698   // large objects that have a progress bar and are scanned in increments.
    699   int progress_bar_;
    700   // Assuming the initial allocation on a page is sequential,
    701   // count highest number of bytes ever allocated on the page.
    702   int high_water_mark_;
    703 
    704   intptr_t parallel_sweeping_;
    705 
    706   // PagedSpace free-list statistics.
    707   intptr_t available_in_small_free_list_;
    708   intptr_t available_in_medium_free_list_;
    709   intptr_t available_in_large_free_list_;
    710   intptr_t available_in_huge_free_list_;
    711   intptr_t non_available_small_blocks_;
    712 
    713   static MemoryChunk* Initialize(Heap* heap,
    714                                  Address base,
    715                                  size_t size,
    716                                  Address area_start,
    717                                  Address area_end,
    718                                  Executability executable,
    719                                  Space* owner);
    720 
    721   friend class MemoryAllocator;
    722 };
    723 
    724 
    725 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
    726 
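         // Illustrative sketch (assumption, not part of this header): chunks are
         // aligned to kAlignment (1 << kPageSizeBits), so the owning chunk of an
         // interior address is recovered by masking off the low bits:
         //
         //   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
         //   ASSERT(chunk->Contains(obj->address()));
         //   chunk->IncrementLiveBytes(obj->Size());  // e.g. when marking obj black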
    727 
    728 // -----------------------------------------------------------------------------
     729 // A page is a memory chunk of size 1 MB. Large object pages may be larger.
    730 //
    731 // The only way to get a page pointer is by calling factory methods:
    732 //   Page* p = Page::FromAddress(addr); or
    733 //   Page* p = Page::FromAllocationTop(top);
    734 class Page : public MemoryChunk {
    735  public:
    736   // Returns the page containing a given address. The address ranges
    737   // from [page_addr .. page_addr + kPageSize[
     738   // This only works if the object is in fact in a page.  See also
     739   // MemoryChunk::FromAddress() and MemoryChunk::FromAnyPointerAddress().
    740   INLINE(static Page* FromAddress(Address a)) {
    741     return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
    742   }
    743 
    744   // Returns the page containing an allocation top. Because an allocation
    745   // top address can be the upper bound of the page, we need to subtract
     746   // kPointerSize from it first. The address ranges from
    747   // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
    748   INLINE(static Page* FromAllocationTop(Address top)) {
    749     Page* p = FromAddress(top - kPointerSize);
    750     return p;
    751   }
    752 
    753   // Returns the next page in the chain of pages owned by a space.
    754   inline Page* next_page();
    755   inline Page* prev_page();
    756   inline void set_next_page(Page* page);
    757   inline void set_prev_page(Page* page);
    758 
    759   // Checks whether an address is page aligned.
    760   static bool IsAlignedToPageSize(Address a) {
    761     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
    762   }
    763 
    764   // Returns the offset of a given address to this page.
    765   INLINE(int Offset(Address a)) {
    766     int offset = static_cast<int>(a - address());
    767     return offset;
    768   }
    769 
     770   // Returns the address for a given offset into this page.
    771   Address OffsetToAddress(int offset) {
    772     ASSERT_PAGE_OFFSET(offset);
    773     return address() + offset;
    774   }
    775 
    776   // ---------------------------------------------------------------------
    777 
    778   // Page size in bytes.  This must be a multiple of the OS page size.
    779   static const int kPageSize = 1 << kPageSizeBits;
    780 
    781   // Object area size in bytes.
    782   static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
    783 
    784   // Maximum object size that fits in a page. Objects larger than that size
    785   // are allocated in large object space and are never moved in memory. This
    786   // also applies to new space allocation, since objects are never migrated
    787   // from new space to large object space.  Takes double alignment into account.
    788   static const int kMaxNonCodeHeapObjectSize =
    789       kNonCodeObjectAreaSize - kPointerSize;
    790 
    791   // Page size mask.
    792   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
    793 
    794   inline void ClearGCFields();
    795 
    796   static inline Page* Initialize(Heap* heap,
    797                                  MemoryChunk* chunk,
    798                                  Executability executable,
    799                                  PagedSpace* owner);
    800 
    801   void InitializeAsAnchor(PagedSpace* owner);
    802 
    803   bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
    804   bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
    805   bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
    806 
    807   void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
    808   void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
    809 
    810   void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
    811   void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
    812 
    813   void ResetFreeListStatistics();
    814 
    815 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
    816   type name() { return name##_; }                 \
    817   void set_##name(type name) { name##_ = name; }  \
    818   void add_##name(type name) { name##_ += name; }
    819 
    820   FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
    821   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
    822   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
    823   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
    824   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
    825 
    826 #undef FRAGMENTATION_STATS_ACCESSORS
    827 
    828 #ifdef DEBUG
    829   void Print();
    830 #endif  // DEBUG
    831 
    832   friend class MemoryAllocator;
    833 };
    834 
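         // Illustrative sketch (assumption, not part of this header): every object in
         // a paged space lives entirely inside one page, so its page and page-relative
         // offset can be recovered from its address alone:
         //
         //   Page* p = Page::FromAddress(obj->address());
         //   int offset = p->Offset(obj->address());  // >= Page::kObjectStartOffset
         //   ASSERT(obj->Size() <= Page::kMaxNonCodeHeapObjectSize);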
    835 
    836 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
    837 
    838 
    839 class LargePage : public MemoryChunk {
    840  public:
    841   HeapObject* GetObject() {
    842     return HeapObject::FromAddress(area_start());
    843   }
    844 
    845   inline LargePage* next_page() const {
    846     return static_cast<LargePage*>(next_chunk());
    847   }
    848 
    849   inline void set_next_page(LargePage* page) {
    850     set_next_chunk(page);
    851   }
    852  private:
    853   static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
    854 
    855   friend class MemoryAllocator;
    856 };
    857 
    858 STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
    859 
    860 // ----------------------------------------------------------------------------
    861 // Space is the abstract superclass for all allocation spaces.
    862 class Space : public Malloced {
    863  public:
    864   Space(Heap* heap, AllocationSpace id, Executability executable)
    865       : heap_(heap), id_(id), executable_(executable) {}
    866 
    867   virtual ~Space() {}
    868 
    869   Heap* heap() const { return heap_; }
    870 
    871   // Does the space need executable memory?
    872   Executability executable() { return executable_; }
    873 
    874   // Identity used in error reporting.
    875   AllocationSpace identity() { return id_; }
    876 
    877   // Returns allocated size.
    878   virtual intptr_t Size() = 0;
    879 
    880   // Returns size of objects. Can differ from the allocated size
    881   // (e.g. see LargeObjectSpace).
    882   virtual intptr_t SizeOfObjects() { return Size(); }
    883 
    884   virtual int RoundSizeDownToObjectAlignment(int size) {
    885     if (id_ == CODE_SPACE) {
    886       return RoundDown(size, kCodeAlignment);
    887     } else {
    888       return RoundDown(size, kPointerSize);
    889     }
    890   }
    891 
    892 #ifdef DEBUG
    893   virtual void Print() = 0;
    894 #endif
    895 
    896  private:
    897   Heap* heap_;
    898   AllocationSpace id_;
    899   Executability executable_;
    900 };
    901 
    902 
    903 // ----------------------------------------------------------------------------
    904 // All heap objects containing executable code (code objects) must be allocated
    905 // from a 2 GB range of memory, so that they can call each other using 32-bit
    906 // displacements.  This happens automatically on 32-bit platforms, where 32-bit
     907 // displacements cover the entire 4 GB virtual address space.  On 64-bit
    908 // platforms, we support this using the CodeRange object, which reserves and
    909 // manages a range of virtual memory.
    910 class CodeRange {
    911  public:
    912   explicit CodeRange(Isolate* isolate);
    913   ~CodeRange() { TearDown(); }
    914 
    915   // Reserves a range of virtual memory, but does not commit any of it.
    916   // Can only be called once, at heap initialization time.
    917   // Returns false on failure.
    918   bool SetUp(const size_t requested_size);
    919 
    920   // Frees the range of virtual memory, and frees the data structures used to
    921   // manage it.
    922   void TearDown();
    923 
    924   bool exists() { return this != NULL && code_range_ != NULL; }
    925   Address start() {
    926     if (this == NULL || code_range_ == NULL) return NULL;
    927     return static_cast<Address>(code_range_->address());
    928   }
    929   bool contains(Address address) {
    930     if (this == NULL || code_range_ == NULL) return false;
    931     Address start = static_cast<Address>(code_range_->address());
    932     return start <= address && address < start + code_range_->size();
    933   }
    934 
    935   // Allocates a chunk of memory from the large-object portion of
    936   // the code range.  On platforms with no separate code range, should
    937   // not be called.
    938   MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
    939                                             const size_t commit_size,
    940                                             size_t* allocated);
    941   bool CommitRawMemory(Address start, size_t length);
    942   bool UncommitRawMemory(Address start, size_t length);
    943   void FreeRawMemory(Address buf, size_t length);
    944 
    945  private:
    946   Isolate* isolate_;
    947 
    948   // The reserved range of virtual memory that all code objects are put in.
    949   VirtualMemory* code_range_;
    950   // Plain old data class, just a struct plus a constructor.
    951   class FreeBlock {
    952    public:
    953     FreeBlock(Address start_arg, size_t size_arg)
    954         : start(start_arg), size(size_arg) {
    955       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
    956       ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    957     }
    958     FreeBlock(void* start_arg, size_t size_arg)
    959         : start(static_cast<Address>(start_arg)), size(size_arg) {
    960       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
    961       ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    962     }
    963 
    964     Address start;
    965     size_t size;
    966   };
    967 
    968   // Freed blocks of memory are added to the free list.  When the allocation
    969   // list is exhausted, the free list is sorted and merged to make the new
    970   // allocation list.
    971   List<FreeBlock> free_list_;
    972   // Memory is allocated from the free blocks on the allocation list.
    973   // The block at current_allocation_block_index_ is the current block.
    974   List<FreeBlock> allocation_list_;
    975   int current_allocation_block_index_;
    976 
    977   // Finds a block on the allocation list that contains at least the
    978   // requested amount of memory.  If none is found, sorts and merges
    979   // the existing free memory blocks, and searches again.
    980   // If none can be found, terminates V8 with FatalProcessOutOfMemory.
    981   void GetNextAllocationBlock(size_t requested);
    982   // Compares the start addresses of two free blocks.
    983   static int CompareFreeBlockAddress(const FreeBlock* left,
    984                                      const FreeBlock* right);
    985 
    986   DISALLOW_COPY_AND_ASSIGN(CodeRange);
    987 };
    988 
    989 
    990 class SkipList {
    991  public:
    992   SkipList() {
    993     Clear();
    994   }
    995 
    996   void Clear() {
    997     for (int idx = 0; idx < kSize; idx++) {
    998       starts_[idx] = reinterpret_cast<Address>(-1);
    999     }
   1000   }
   1001 
   1002   Address StartFor(Address addr) {
   1003     return starts_[RegionNumber(addr)];
   1004   }
   1005 
   1006   void AddObject(Address addr, int size) {
   1007     int start_region = RegionNumber(addr);
   1008     int end_region = RegionNumber(addr + size - kPointerSize);
   1009     for (int idx = start_region; idx <= end_region; idx++) {
   1010       if (starts_[idx] > addr) starts_[idx] = addr;
   1011     }
   1012   }
   1013 
   1014   static inline int RegionNumber(Address addr) {
   1015     return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
   1016   }
   1017 
   1018   static void Update(Address addr, int size) {
   1019     Page* page = Page::FromAddress(addr);
   1020     SkipList* list = page->skip_list();
   1021     if (list == NULL) {
   1022       list = new SkipList();
   1023       page->set_skip_list(list);
   1024     }
   1025 
   1026     list->AddObject(addr, size);
   1027   }
   1028 
   1029  private:
   1030   static const int kRegionSizeLog2 = 13;
   1031   static const int kRegionSize = 1 << kRegionSizeLog2;
   1032   static const int kSize = Page::kPageSize / kRegionSize;
   1033 
   1034   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
   1035 
   1036   Address starts_[kSize];
   1037 };
   1038 
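         // Illustrative sketch (assumption, not part of this header): the skip list
         // records, per 8 KB region of a page, the lowest address at which an object
         // starts, so a scan can begin near an arbitrary slot instead of at the
         // start of the page:
         //
         //   SkipList::Update(object->address(), object->Size());  // at allocation
         //   ...
         //   Address scan_start = page->skip_list()->StartFor(slot_address);
         //   // Linear scan forward from scan_start instead of page->area_start().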
   1039 
   1040 // ----------------------------------------------------------------------------
   1041 // A space acquires chunks of memory from the operating system. The memory
    1042 // allocator allocates and deallocates pages for the paged heap spaces and large
   1043 // pages for large object space.
   1044 //
    1045 // Each space has to manage its own pages.
   1046 //
   1047 class MemoryAllocator {
   1048  public:
   1049   explicit MemoryAllocator(Isolate* isolate);
   1050 
   1051   // Initializes its internal bookkeeping structures.
   1052   // Max capacity of the total space and executable memory limit.
   1053   bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
   1054 
   1055   void TearDown();
   1056 
   1057   Page* AllocatePage(
   1058       intptr_t size, PagedSpace* owner, Executability executable);
   1059 
   1060   LargePage* AllocateLargePage(
   1061       intptr_t object_size, Space* owner, Executability executable);
   1062 
   1063   void Free(MemoryChunk* chunk);
   1064 
   1065   // Returns the maximum available bytes of heaps.
   1066   intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
   1067 
   1068   // Returns allocated spaces in bytes.
   1069   intptr_t Size() { return size_; }
   1070 
   1071   // Returns the maximum available executable bytes of heaps.
   1072   intptr_t AvailableExecutable() {
   1073     if (capacity_executable_ < size_executable_) return 0;
   1074     return capacity_executable_ - size_executable_;
   1075   }
   1076 
   1077   // Returns allocated executable spaces in bytes.
   1078   intptr_t SizeExecutable() { return size_executable_; }
   1079 
   1080   // Returns maximum available bytes that the old space can have.
   1081   intptr_t MaxAvailable() {
   1082     return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
   1083   }
   1084 
   1085 #ifdef DEBUG
   1086   // Reports statistic info of the space.
   1087   void ReportStatistics();
   1088 #endif
   1089 
   1090   // Returns a MemoryChunk in which the memory region from commit_area_size to
    1091   // reserve_area_size of the chunk area is reserved but not committed; it
    1092   // can be committed later by calling MemoryChunk::CommitArea.
   1093   MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
   1094                              intptr_t commit_area_size,
   1095                              Executability executable,
   1096                              Space* space);
   1097 
   1098   Address ReserveAlignedMemory(size_t requested,
   1099                                size_t alignment,
   1100                                VirtualMemory* controller);
   1101   Address AllocateAlignedMemory(size_t reserve_size,
   1102                                 size_t commit_size,
   1103                                 size_t alignment,
   1104                                 Executability executable,
   1105                                 VirtualMemory* controller);
   1106 
   1107   void FreeMemory(VirtualMemory* reservation, Executability executable);
   1108   void FreeMemory(Address addr, size_t size, Executability executable);
   1109 
   1110   // Commit a contiguous block of memory from the initial chunk.  Assumes that
   1111   // the address is not NULL, the size is greater than zero, and that the
   1112   // block is contained in the initial chunk.  Returns true if it succeeded
   1113   // and false otherwise.
   1114   bool CommitBlock(Address start, size_t size, Executability executable);
   1115 
   1116   // Uncommit a contiguous block of memory [start..(start+size)[.
   1117   // start is not NULL, the size is greater than zero, and the
   1118   // block is contained in the initial chunk.  Returns true if it succeeded
   1119   // and false otherwise.
   1120   bool UncommitBlock(Address start, size_t size);
   1121 
   1122   // Zaps a contiguous block of memory [start..(start+size)[ thus
   1123   // filling it up with a recognizable non-NULL bit pattern.
   1124   void ZapBlock(Address start, size_t size);
   1125 
   1126   void PerformAllocationCallback(ObjectSpace space,
   1127                                  AllocationAction action,
   1128                                  size_t size);
   1129 
   1130   void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
    1131                                    ObjectSpace space,
    1132                                    AllocationAction action);
   1133 
   1134   void RemoveMemoryAllocationCallback(
   1135       MemoryAllocationCallback callback);
   1136 
   1137   bool MemoryAllocationCallbackRegistered(
   1138       MemoryAllocationCallback callback);
   1139 
   1140   static int CodePageGuardStartOffset();
   1141 
   1142   static int CodePageGuardSize();
   1143 
   1144   static int CodePageAreaStartOffset();
   1145 
   1146   static int CodePageAreaEndOffset();
   1147 
   1148   static int CodePageAreaSize() {
   1149     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
   1150   }
   1151 
   1152   MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
   1153                                                      Address start,
   1154                                                      size_t commit_size,
   1155                                                      size_t reserved_size);
   1156 
   1157  private:
   1158   Isolate* isolate_;
   1159 
   1160   // Maximum space size in bytes.
   1161   size_t capacity_;
   1162   // Maximum subset of capacity_ that can be executable
   1163   size_t capacity_executable_;
   1164 
   1165   // Allocated space size in bytes.
   1166   size_t size_;
   1167   // Allocated executable space size in bytes.
   1168   size_t size_executable_;
   1169 
   1170   struct MemoryAllocationCallbackRegistration {
   1171     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
   1172                                          ObjectSpace space,
   1173                                          AllocationAction action)
   1174         : callback(callback), space(space), action(action) {
   1175     }
   1176     MemoryAllocationCallback callback;
   1177     ObjectSpace space;
   1178     AllocationAction action;
   1179   };
   1180 
    1181   // A list of callbacks that are triggered when memory is allocated or freed.
   1182   List<MemoryAllocationCallbackRegistration>
   1183       memory_allocation_callbacks_;
   1184 
   1185   // Initializes pages in a chunk. Returns the first page address.
   1186   // This function and GetChunkId() are provided for the mark-compact
   1187   // collector to rebuild page headers in the from space, which is
   1188   // used as a marking stack and its page headers are destroyed.
   1189   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
   1190                                PagedSpace* owner);
   1191 
   1192   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
   1193 };
   1194 
   1195 
   1196 // -----------------------------------------------------------------------------
   1197 // Interface for heap object iterator to be implemented by all object space
   1198 // object iterators.
   1199 //
    1200 // NOTE: The space-specific object iterators also implement their own next()
    1201 //       method, which is used to avoid virtual function calls when
    1202 //       iterating a specific space.
   1203 
   1204 class ObjectIterator : public Malloced {
   1205  public:
   1206   virtual ~ObjectIterator() { }
   1207 
   1208   virtual HeapObject* next_object() = 0;
   1209 };
   1210 
   1211 
   1212 // -----------------------------------------------------------------------------
   1213 // Heap object iterator in new/old/map spaces.
   1214 //
   1215 // A HeapObjectIterator iterates objects from the bottom of the given space
   1216 // to its top or from the bottom of the given page to its top.
   1217 //
    1218 // If objects are allocated in the page during iteration, the iterator may
   1219 // or may not iterate over those objects.  The caller must create a new
   1220 // iterator in order to be sure to visit these new objects.
   1221 class HeapObjectIterator: public ObjectIterator {
   1222  public:
   1223   // Creates a new object iterator in a given space.
   1224   // If the size function is not given, the iterator calls the default
   1225   // Object::Size().
   1226   explicit HeapObjectIterator(PagedSpace* space);
   1227   HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
   1228   HeapObjectIterator(Page* page, HeapObjectCallback size_func);
   1229 
   1230   // Advance to the next object, skipping free spaces and other fillers and
   1231   // skipping the special garbage section of which there is one per space.
   1232   // Returns NULL when the iteration has ended.
   1233   inline HeapObject* Next() {
   1234     do {
   1235       HeapObject* next_obj = FromCurrentPage();
   1236       if (next_obj != NULL) return next_obj;
   1237     } while (AdvanceToNextPage());
   1238     return NULL;
   1239   }
   1240 
   1241   virtual HeapObject* next_object() {
   1242     return Next();
   1243   }
   1244 
   1245  private:
   1246   enum PageMode { kOnePageOnly, kAllPagesInSpace };
   1247 
   1248   Address cur_addr_;  // Current iteration point.
   1249   Address cur_end_;   // End iteration point.
   1250   HeapObjectCallback size_func_;  // Size function or NULL.
   1251   PagedSpace* space_;
   1252   PageMode page_mode_;
   1253 
   1254   // Fast (inlined) path of next().
   1255   inline HeapObject* FromCurrentPage();
   1256 
   1257   // Slow path of next(), goes into the next page.  Returns false if the
   1258   // iteration has ended.
   1259   bool AdvanceToNextPage();
   1260 
   1261   // Initializes fields.
   1262   inline void Initialize(PagedSpace* owner,
   1263                          Address start,
   1264                          Address end,
   1265                          PageMode mode,
   1266                          HeapObjectCallback size_func);
   1267 };
   1268 
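         // Illustrative sketch (assumption, not part of this header): typical use of
         // the iterator to visit every live object in a paged space, with fillers and
         // the special garbage section skipped automatically:
         //
         //   HeapObjectIterator it(space);  // 'space' is some PagedSpace*
         //   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
         //     // ... visit obj ...
         //   }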
   1269 
   1270 // -----------------------------------------------------------------------------
   1271 // A PageIterator iterates the pages in a paged space.
   1272 
   1273 class PageIterator BASE_EMBEDDED {
   1274  public:
   1275   explicit inline PageIterator(PagedSpace* space);
   1276 
   1277   inline bool has_next();
   1278   inline Page* next();
   1279 
   1280  private:
   1281   PagedSpace* space_;
   1282   Page* prev_page_;  // Previous page returned.
   1283   // Next page that will be returned.  Cached here so that we can use this
   1284   // iterator for operations that deallocate pages.
   1285   Page* next_page_;
   1286 };
   1287 
   1288 
   1289 // -----------------------------------------------------------------------------
   1290 // A space has a circular list of pages. The next page can be accessed via
   1291 // Page::next_page() call.
   1292 
   1293 // An abstraction of allocation and relocation pointers in a page-structured
   1294 // space.
   1295 class AllocationInfo {
   1296  public:
   1297   AllocationInfo() : top(NULL), limit(NULL) {
   1298   }
   1299 
   1300   Address top;  // Current allocation top.
   1301   Address limit;  // Current allocation limit.
   1302 
   1303 #ifdef DEBUG
   1304   bool VerifyPagedAllocation() {
   1305     return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
   1306         && (top <= limit);
   1307   }
   1308 #endif
   1309 };
   1310 
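         // Illustrative sketch (assumption, not part of this header): linear
         // allocation bumps 'top' toward 'limit'; when the bump would cross 'limit'
         // a slow path has to refill the allocation info from the owning space:
         //
         //   Address new_top = info.top + size_in_bytes;
         //   if (new_top <= info.limit) {
         //     HeapObject* result = HeapObject::FromAddress(info.top);
         //     info.top = new_top;               // fast path: bump the pointer
         //   } else {
         //     // slow path: ask the space for a fresh linear allocation area
         //   }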
   1311 
   1312 // An abstraction of the accounting statistics of a page-structured space.
   1313 // The 'capacity' of a space is the number of object-area bytes (i.e., not
   1314 // including page bookkeeping structures) currently in the space. The 'size'
   1315 // of a space is the number of allocated bytes, the 'waste' in the space is
   1316 // the number of bytes that are not allocated and not available to
   1317 // allocation without reorganizing the space via a GC (e.g. small blocks due
    1318 // to internal fragmentation, top of page areas in map space), and the
    1319 // 'available' bytes are the unallocated bytes that are not waste.  The
   1320 // capacity is the sum of size, waste, and available.
   1321 //
   1322 // The stats are only set by functions that ensure they stay balanced. These
   1323 // functions increase or decrease one of the non-capacity stats in
   1324 // conjunction with capacity, or else they always balance increases and
   1325 // decreases to the non-capacity stats.
   1326 class AllocationStats BASE_EMBEDDED {
   1327  public:
   1328   AllocationStats() { Clear(); }
   1329 
   1330   // Zero out all the allocation statistics (i.e., no capacity).
   1331   void Clear() {
   1332     capacity_ = 0;
   1333     size_ = 0;
   1334     waste_ = 0;
   1335   }
   1336 
   1337   void ClearSizeWaste() {
   1338     size_ = capacity_;
   1339     waste_ = 0;
   1340   }
   1341 
   1342   // Reset the allocation statistics (i.e., available = capacity with no
   1343   // wasted or allocated bytes).
   1344   void Reset() {
   1345     size_ = 0;
   1346     waste_ = 0;
   1347   }
   1348 
   1349   // Accessors for the allocation statistics.
   1350   intptr_t Capacity() { return capacity_; }
   1351   intptr_t Size() { return size_; }
   1352   intptr_t Waste() { return waste_; }
   1353 
   1354   // Grow the space by adding available bytes.  They are initially marked as
   1355   // being in use (part of the size), but will normally be immediately freed,
   1356   // putting them on the free list and removing them from size_.
   1357   void ExpandSpace(int size_in_bytes) {
   1358     capacity_ += size_in_bytes;
   1359     size_ += size_in_bytes;
   1360     ASSERT(size_ >= 0);
   1361   }
   1362 
   1363   // Shrink the space by removing available bytes.  Since shrinking is done
   1364   // during sweeping, bytes have been marked as being in use (part of the size)
   1365   // and are hereby freed.
   1366   void ShrinkSpace(int size_in_bytes) {
   1367     capacity_ -= size_in_bytes;
   1368     size_ -= size_in_bytes;
   1369     ASSERT(size_ >= 0);
   1370   }
   1371 
   1372   // Allocate from available bytes (available -> size).
   1373   void AllocateBytes(intptr_t size_in_bytes) {
   1374     size_ += size_in_bytes;
   1375     ASSERT(size_ >= 0);
   1376   }
   1377 
   1378   // Free allocated bytes, making them available (size -> available).
   1379   void DeallocateBytes(intptr_t size_in_bytes) {
   1380     size_ -= size_in_bytes;
   1381     ASSERT(size_ >= 0);
   1382   }
   1383 
   1384   // Waste free bytes (available -> waste).
   1385   void WasteBytes(int size_in_bytes) {
   1386     size_ -= size_in_bytes;
   1387     waste_ += size_in_bytes;
   1388     ASSERT(size_ >= 0);
   1389   }
   1390 
   1391  private:
   1392   intptr_t capacity_;
   1393   intptr_t size_;
   1394   intptr_t waste_;
   1395 };
   1396 
   1397 
   1398 // -----------------------------------------------------------------------------
   1399 // Free lists for old object spaces
   1400 //
   1401 // Free-list nodes are free blocks in the heap.  They look like heap objects
   1402 // (free-list node pointers have the heap object tag, and they have a map like
   1403 // a heap object).  They have a size and a next pointer.  The next pointer is
   1404 // the raw address of the next free list node (or NULL).
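        //
        // Sketch of how a raw free block is turned into something heap iteration
        // can step over ('heap', 'start' and 'size_in_bytes' are assumed to be
        // supplied by the caller):
        //
        //   FreeListNode* node = FreeListNode::FromAddress(start);
        //   node->set_size(heap, size_in_bytes);  // writes a map word, so the
        //                                         // block looks like a heap object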
   1405 class FreeListNode: public HeapObject {
   1406  public:
   1407   // Obtain a free-list node from a raw address.  This is not a cast because
   1408   // it does not check nor require that the first word at the address is a map
   1409   // pointer.
   1410   static FreeListNode* FromAddress(Address address) {
   1411     return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
   1412   }
   1413 
   1414   static inline bool IsFreeListNode(HeapObject* object);
   1415 
   1416   // Set the size in bytes, which can be read with HeapObject::Size().  This
   1417   // function also writes a map to the first word of the block so that it
   1418   // looks like a heap object to the garbage collector and heap iteration
   1419   // functions.
   1420   void set_size(Heap* heap, int size_in_bytes);
   1421 
   1422   // Accessors for the next field.
   1423   inline FreeListNode* next();
   1424   inline FreeListNode** next_address();
   1425   inline void set_next(FreeListNode* next);
   1426 
   1427   inline void Zap();
   1428 
   1429   static inline FreeListNode* cast(MaybeObject* maybe) {
   1430     ASSERT(!maybe->IsFailure());
   1431     return reinterpret_cast<FreeListNode*>(maybe);
   1432   }
   1433 
   1434  private:
   1435   static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
   1436 
   1437   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
   1438 };
   1439 
   1440 
   1441 // The free list category holds a pointer to the top element and a pointer to
   1442 // the end element of the linked list of free memory blocks.
   1443 class FreeListCategory {
   1444  public:
   1445   FreeListCategory() :
   1446       top_(NULL),
   1447       end_(NULL),
   1448       mutex_(OS::CreateMutex()),
   1449       available_(0) {}
   1450 
   1451   ~FreeListCategory() {
   1452     delete mutex_;
   1453   }
   1454 
   1455   intptr_t Concatenate(FreeListCategory* category);
   1456 
   1457   void Reset();
   1458 
   1459   void Free(FreeListNode* node, int size_in_bytes);
   1460 
   1461   FreeListNode* PickNodeFromList(int* node_size);
   1462   FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
   1463 
   1464   intptr_t EvictFreeListItemsInList(Page* p);
   1465 
   1466   void RepairFreeList(Heap* heap);
   1467 
   1468   FreeListNode** GetTopAddress() { return &top_; }
   1469   FreeListNode* top() const { return top_; }
   1470   void set_top(FreeListNode* top) { top_ = top; }
   1471 
   1472   FreeListNode** GetEndAddress() { return &end_; }
   1473   FreeListNode* end() const { return end_; }
   1474   void set_end(FreeListNode* end) { end_ = end; }
   1475 
   1476   int* GetAvailableAddress() { return &available_; }
   1477   int available() const { return available_; }
   1478   void set_available(int available) { available_ = available; }
   1479 
   1480   Mutex* mutex() { return mutex_; }
   1481 
   1482 #ifdef DEBUG
   1483   intptr_t SumFreeList();
   1484   int FreeListLength();
   1485 #endif
   1486 
   1487  private:
   1488   FreeListNode* top_;
   1489   FreeListNode* end_;
   1490   Mutex* mutex_;
   1491 
   1492   // Total available bytes in all blocks of this free list category.
   1493   int available_;
   1494 };
   1495 
   1496 
   1497 // The free list for the old space.  The free list is organized in such a way
   1498 // as to encourage objects allocated around the same time to be near each
   1499 // other.  The normal way to allocate is intended to be by bumping a 'top'
   1500 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
   1501 // find a new space to allocate from.  This is done with the free list, which
   1502 // is divided up into rough categories to cut down on waste.  Having finer
   1503 // categories would scatter allocation more.
   1504 
   1505 // The old space free list is organized in categories.
   1506 // 1-31 words:  Such small free areas are discarded for efficiency reasons.
   1507 //     They can be reclaimed by the compactor.  However the distance between top
   1508 //     and limit may be this small.
   1509 // 32-255 words: There is a list of spaces this large.  It is used for top and
   1510 //     limit when the object we need to allocate is 1-31 words in size.  These
   1511 //     spaces are called small.
   1512 // 256-2047 words: There is a list of spaces this large.  It is used for top and
   1513 //     limit when the object we need to allocate is 32-255 words in size.  These
   1514 //     spaces are called medium.
   1515 // 2048-16383 words: There is a list of spaces this large.  It is used for top
   1516 //     and limit when the object we need to allocate is 256-2047 words in size.
   1517 //     These spaces are called large.
   1518 // At least 16384 words: This list is used for objects of 2048 words or larger.
   1519 //     Empty pages are added to this list.  These spaces are called huge.
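        //
        // For example (sizes in words; see the k*List{Min,Max} constants below):
        //   freeing a 20-word block   -> below 32 words, dropped and counted as
        //                                waste;
        //   freeing a 100-word block  -> linked into the small list (32-255 words);
        //   freeing a 3000-word block -> linked into the large list
        //                                (2048-16383 words);
        //   allocating 40 words       -> larger than kSmallAllocationMax (31 words),
        //                                so the medium list is consulted first, then
        //                                the larger lists.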
   1520 class FreeList BASE_EMBEDDED {
   1521  public:
   1522   explicit FreeList(PagedSpace* owner);
   1523 
   1524   intptr_t Concatenate(FreeList* free_list);
   1525 
   1526   // Clear the free list.
   1527   void Reset();
   1528 
   1529   // Return the number of bytes available on the free list.
   1530   intptr_t available() {
   1531     return small_list_.available() + medium_list_.available() +
   1532            large_list_.available() + huge_list_.available();
   1533   }
   1534 
   1535   // Place a node on the free list.  The block of size 'size_in_bytes'
   1536   // starting at 'start' is placed on the free list.  The return value is the
   1537   // number of bytes that have been lost due to internal fragmentation by
   1538   // freeing the block.  Bookkeeping information will be written to the block,
   1539   // i.e., its contents will be destroyed.  The start address should be word
   1540   // aligned, and the size should be a non-zero multiple of the word size.
   1541   int Free(Address start, int size_in_bytes);
   1542 
   1543   // Allocate a block of size 'size_in_bytes' from the free list.  The block
   1544   // is uninitialized.  A failure is returned if no block is available.
   1545   // The size should be a non-zero multiple of the word size.
   1547   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
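          // Illustrative use of the Free()/Allocate() contract above (normally the
          // free list is driven by PagedSpace rather than used directly):
          //
          //   int wasted = free_list->Free(start, 24 * kPointerSize);
          //   // A 24-word block is too small for any category, so all of it is
          //   // reported back as waste (wasted == 24 * kPointerSize).
          //
          //   HeapObject* block = free_list->Allocate(64 * kPointerSize);
          //   // On success 'block' is an uninitialized area of at least 64 words
          //   // taken from one of the category lists.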
   1548 
   1549 #ifdef DEBUG
   1550   void Zap();
   1551   intptr_t SumFreeLists();
   1552   bool IsVeryLong();
   1553 #endif
   1554 
   1555   // Used after booting the VM.
   1556   void RepairLists(Heap* heap);
   1557 
   1558   intptr_t EvictFreeListItems(Page* p);
   1559 
   1560   FreeListCategory* small_list() { return &small_list_; }
   1561   FreeListCategory* medium_list() { return &medium_list_; }
   1562   FreeListCategory* large_list() { return &large_list_; }
   1563   FreeListCategory* huge_list() { return &huge_list_; }
   1564 
   1565  private:
   1566   // The size range of blocks, in bytes.
   1567   static const int kMinBlockSize = 3 * kPointerSize;
   1568   static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
   1569 
   1570   FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
   1571 
   1572   PagedSpace* owner_;
   1573   Heap* heap_;
   1574 
   1575   static const int kSmallListMin = 0x20 * kPointerSize;
   1576   static const int kSmallListMax = 0xff * kPointerSize;
   1577   static const int kMediumListMax = 0x7ff * kPointerSize;
   1578   static const int kLargeListMax = 0x3fff * kPointerSize;
   1579   static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
   1580   static const int kMediumAllocationMax = kSmallListMax;
   1581   static const int kLargeAllocationMax = kMediumListMax;
   1582   FreeListCategory small_list_;
   1583   FreeListCategory medium_list_;
   1584   FreeListCategory large_list_;
   1585   FreeListCategory huge_list_;
   1586 
   1587   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
   1588 };
   1589 
   1590 
   1591 class PagedSpace : public Space {
   1592  public:
   1593   // Creates a space with a maximum capacity, and an id.
   1594   PagedSpace(Heap* heap,
   1595              intptr_t max_capacity,
   1596              AllocationSpace id,
   1597              Executability executable);
   1598 
   1599   virtual ~PagedSpace() {}
   1600 
   1601   // Set up the space using the given address range of virtual memory (from
   1602   // the memory allocator's initial chunk) if possible.  If the block of
   1603   // addresses is not big enough to contain a single page-aligned page, a
   1604   // fresh chunk will be allocated.
   1605   bool SetUp();
   1606 
   1607   // Returns true if the space has been successfully set up and not
   1608   // subsequently torn down.
   1609   bool HasBeenSetUp();
   1610 
   1611   // Cleans up the space, frees all pages in this space except those belonging
   1612   // to the initial chunk, uncommits addresses in the initial chunk.
   1613   void TearDown();
   1614 
   1615   // Checks whether an object/address is in this space.
   1616   inline bool Contains(Address a);
   1617   bool Contains(HeapObject* o) { return Contains(o->address()); }
   1618 
   1619   // Given an address occupied by a live object, return that object if it is
   1620   // in this space, or Failure::Exception() if it is not. The implementation
   1621   // iterates over objects in the page containing the address, the cost is
   1622   // linear in the number of objects in the page. It may be slow.
   1623   MUST_USE_RESULT MaybeObject* FindObject(Address addr);
   1624 
   1625   // During boot the free_space_map is created, and afterwards we may need
   1626   // to write it into the free list nodes that were already created.
   1627   virtual void RepairFreeListsAfterBoot();
   1628 
   1629   // Prepares for a mark-compact GC.
   1630   virtual void PrepareForMarkCompact();
   1631 
   1632   // Current capacity without growing (Size() + Available()).
   1633   intptr_t Capacity() { return accounting_stats_.Capacity(); }
   1634 
   1635   // Total amount of memory committed for this space.  For paged
   1636   // spaces this equals the capacity.
   1637   intptr_t CommittedMemory() { return Capacity(); }
   1638 
   1639   // Approximate amount of physical memory committed for this space.
   1640   size_t CommittedPhysicalMemory();
   1641 
   1642   struct SizeStats {
   1643     intptr_t Total() {
   1644       return small_size_ + medium_size_ + large_size_ + huge_size_;
   1645     }
   1646 
   1647     intptr_t small_size_;
   1648     intptr_t medium_size_;
   1649     intptr_t large_size_;
   1650     intptr_t huge_size_;
   1651   };
   1652 
   1653   void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
   1654   void ResetFreeListStatistics();
   1655 
   1656   // Sets the capacity, the available space and the wasted space to zero.
   1657   // The stats are rebuilt during sweeping by adding each page to the
   1658   // capacity and the size when it is encountered.  As free spaces are
   1659   // discovered during the sweeping they are subtracted from the size and added
   1660   // to the available and wasted totals.
   1661   void ClearStats() {
   1662     accounting_stats_.ClearSizeWaste();
   1663     ResetFreeListStatistics();
   1664   }
   1665 
   1666   // Increases the number of available bytes of that space.
   1667   void AddToAccountingStats(intptr_t bytes) {
   1668     accounting_stats_.DeallocateBytes(bytes);
   1669   }
   1670 
   1671   // Available bytes without growing.  These are the bytes on the free list.
   1672   // The bytes in the linear allocation area are not included in this total
   1673   // because updating the stats would slow down allocation.  New pages are
   1674   // immediately added to the free list so they show up here.
   1675   intptr_t Available() { return free_list_.available(); }
   1676 
   1677   // Allocated bytes in this space.  Garbage bytes that were not found due to
   1678   // lazy sweeping are counted as being allocated!  The bytes in the current
   1679   // linear allocation area (between top and limit) are also counted here.
   1680   virtual intptr_t Size() { return accounting_stats_.Size(); }
   1681 
   1682   // As Size(), but the bytes in lazily swept pages are estimated and the bytes
   1683   // in the current linear allocation area are not included.
   1684   virtual intptr_t SizeOfObjects();
   1685 
   1686   // Wasted bytes in this space.  These are just the bytes that were thrown away
   1687   // due to being too small to use for allocation.  They do not include the
   1688   // free bytes that were not found at all due to lazy sweeping.
   1689   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
   1690 
   1691   // Returns the allocation pointer in this space.
   1692   Address top() { return allocation_info_.top; }
   1693   Address limit() { return allocation_info_.limit; }
   1694 
   1695   // The allocation top and limit addresses.
   1696   Address* allocation_top_address() { return &allocation_info_.top; }
   1697   Address* allocation_limit_address() { return &allocation_info_.limit; }
   1698 
   1699   // Allocate the requested number of bytes in the space if possible, return a
   1700   // failure object if not.
   1701   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
   1702 
   1703   virtual bool ReserveSpace(int bytes);
   1704 
   1705   // Give a block of memory to the space's free list.  It might be added to
   1706   // the free list or accounted as waste.
   1707   // The return value is the number of bytes that were freed, i.e.
   1708   // size_in_bytes minus the bytes that were recorded as waste.
   1709   int Free(Address start, int size_in_bytes) {
   1710     int wasted = free_list_.Free(start, size_in_bytes);
   1711     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
   1712     return size_in_bytes - wasted;
   1713   }
   1714 
   1715   void ResetFreeList() {
   1716     free_list_.Reset();
   1717   }
   1718 
   1719   // Set space allocation info.
   1720   void SetTop(Address top, Address limit) {
   1721     ASSERT(top == limit ||
   1722            Page::FromAddress(top) == Page::FromAddress(limit - 1));
   1723     MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
   1724     allocation_info_.top = top;
   1725     allocation_info_.limit = limit;
   1726   }
   1727 
   1728   void Allocate(int bytes) {
   1729     accounting_stats_.AllocateBytes(bytes);
   1730   }
   1731 
   1732   void IncreaseCapacity(int size) {
   1733     accounting_stats_.ExpandSpace(size);
   1734   }
   1735 
   1736   // Releases an unused page and shrinks the space.
   1737   void ReleasePage(Page* page, bool unlink);
   1738 
   1739   // The dummy page that anchors the linked list of pages.
   1740   Page* anchor() { return &anchor_; }
   1741 
   1742 #ifdef VERIFY_HEAP
   1743   // Verify integrity of this space.
   1744   virtual void Verify(ObjectVisitor* visitor);
   1745 
   1746   // Overridden by subclasses to verify space-specific object
   1747   // properties (e.g., only maps or free-list nodes are in map space).
   1748   virtual void VerifyObject(HeapObject* obj) {}
   1749 #endif
   1750 
   1751 #ifdef DEBUG
   1752   // Print meta info and objects in this space.
   1753   virtual void Print();
   1754 
   1755   // Reports statistics for the space
   1756   void ReportStatistics();
   1757 
   1758   // Report code object related statistics
   1759   void CollectCodeStatistics();
   1760   static void ReportCodeStatistics();
   1761   static void ResetCodeStatistics();
   1762 #endif
   1763 
   1764   bool was_swept_conservatively() { return was_swept_conservatively_; }
   1765   void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
   1766 
   1767   // Evacuation candidates are swept by the evacuator.  Needs to return a valid
   1768   // result before _and_ after evacuation has finished.
   1769   static bool ShouldBeSweptLazily(Page* p) {
   1770     return !p->IsEvacuationCandidate() &&
   1771            !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
   1772            !p->WasSweptPrecisely();
   1773   }
   1774 
   1775   void SetPagesToSweep(Page* first) {
   1776     ASSERT(unswept_free_bytes_ == 0);
   1777     if (first == &anchor_) first = NULL;
   1778     first_unswept_page_ = first;
   1779   }
   1780 
   1781   void IncrementUnsweptFreeBytes(intptr_t by) {
   1782     unswept_free_bytes_ += by;
   1783   }
   1784 
   1785   void IncreaseUnsweptFreeBytes(Page* p) {
   1786     ASSERT(ShouldBeSweptLazily(p));
   1787     unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
   1788   }
   1789 
   1790   void DecrementUnsweptFreeBytes(intptr_t by) {
   1791     unswept_free_bytes_ -= by;
   1792   }
   1793 
   1794   void DecreaseUnsweptFreeBytes(Page* p) {
   1795     ASSERT(ShouldBeSweptLazily(p));
   1796     unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
   1797   }
   1798 
   1799   void ResetUnsweptFreeBytes() {
   1800     unswept_free_bytes_ = 0;
   1801   }
   1802 
   1803   bool AdvanceSweeper(intptr_t bytes_to_sweep);
   1804 
   1805   // When parallel sweeper threads are active and the main thread has finished
   1806   // its sweeping phase, this function waits for them to complete; otherwise
   1807   // AdvanceSweeper with size_in_bytes is called.
   1808   bool EnsureSweeperProgress(intptr_t size_in_bytes);
   1809 
   1810   bool IsLazySweepingComplete() {
   1811     return !first_unswept_page_->is_valid();
   1812   }
   1813 
   1814   Page* FirstPage() { return anchor_.next_page(); }
   1815   Page* LastPage() { return anchor_.prev_page(); }
   1816 
   1817   void EvictEvacuationCandidatesFromFreeLists();
   1818 
   1819   bool CanExpand();
   1820 
   1821   // Returns the number of total pages in this space.
   1822   int CountTotalPages();
   1823 
   1824   // Return size of allocatable area on a page in this space.
   1825   inline int AreaSize() {
   1826     return area_size_;
   1827   }
   1828 
   1829  protected:
   1830   FreeList* free_list() { return &free_list_; }
   1831 
   1832   int area_size_;
   1833 
   1834   // Maximum capacity of this space.
   1835   intptr_t max_capacity_;
   1836 
   1837   intptr_t SizeOfFirstPage();
   1838 
   1839   // Accounting information for this space.
   1840   AllocationStats accounting_stats_;
   1841 
   1842   // The dummy page that anchors the double linked list of pages.
   1843   Page anchor_;
   1844 
   1845   // The space's free list.
   1846   FreeList free_list_;
   1847 
   1848   // Normal allocation information.
   1849   AllocationInfo allocation_info_;
   1850 
   1851   // Bytes of each page that cannot be allocated.  Possibly non-zero
   1852   // for pages in spaces with only fixed-size objects.  Always zero
   1853   // for pages in spaces with variable sized objects (those pages are
   1854   // padded with free-list nodes).
   1855   int page_extra_;
   1856 
   1857   bool was_swept_conservatively_;
   1858 
   1859   // The first page to be swept when the lazy sweeper advances. Is set
   1860   // to NULL when all pages have been swept.
   1861   Page* first_unswept_page_;
   1862 
   1863   // The number of free bytes which could be reclaimed by advancing the
   1864   // lazy sweeper.  This is only an estimation because lazy sweeping is
   1865   // done conservatively.
   1866   intptr_t unswept_free_bytes_;
   1867 
   1868   // Expands the space by allocating a fixed number of pages. Returns false if
   1869   // it cannot allocate the requested number of pages from the OS, or if the
   1870   // hard heap size limit has been hit.
   1871   bool Expand();
   1872 
   1873   // Generic fast case allocation function that tries linear allocation at the
   1874   // address denoted by top in allocation_info_.
   1875   inline HeapObject* AllocateLinearly(int size_in_bytes);
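          // Conceptually, the fast path is a bump-pointer allocation (simplified
          // sketch only; see the inline definition for the real code):
          //
          //   Address top = allocation_info_.top;
          //   if (top + size_in_bytes <= allocation_info_.limit) {
          //     allocation_info_.top = top + size_in_bytes;
          //     return HeapObject::FromAddress(top);
          //   }
          //   // Otherwise AllocateRaw() falls back to SlowAllocateRaw(), which
          //   // consults the free list and may add a new page.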
   1876 
   1877   // Slow path of AllocateRaw.  This function is space-dependent.
   1878   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
   1879 
   1880   friend class PageIterator;
   1881   friend class SweeperThread;
   1882 };
   1883 
   1884 
   1885 class NumberAndSizeInfo BASE_EMBEDDED {
   1886  public:
   1887   NumberAndSizeInfo() : number_(0), bytes_(0) {}
   1888 
   1889   int number() const { return number_; }
   1890   void increment_number(int num) { number_ += num; }
   1891 
   1892   int bytes() const { return bytes_; }
   1893   void increment_bytes(int size) { bytes_ += size; }
   1894 
   1895   void clear() {
   1896     number_ = 0;
   1897     bytes_ = 0;
   1898   }
   1899 
   1900  private:
   1901   int number_;
   1902   int bytes_;
   1903 };
   1904 
   1905 
   1906 // HistogramInfo class for recording a single "bar" of a histogram.  This
   1907 // class is used for collecting statistics to print to the log file.
   1908 class HistogramInfo: public NumberAndSizeInfo {
   1909  public:
   1910   HistogramInfo() : NumberAndSizeInfo() {}
   1911 
   1912   const char* name() { return name_; }
   1913   void set_name(const char* name) { name_ = name; }
   1914 
   1915  private:
   1916   const char* name_;
   1917 };
   1918 
   1919 
   1920 enum SemiSpaceId {
   1921   kFromSpace = 0,
   1922   kToSpace = 1
   1923 };
   1924 
   1925 
   1926 class SemiSpace;
   1927 
   1928 
   1929 class NewSpacePage : public MemoryChunk {
   1930  public:
   1931   // GC related flags copied from from-space to to-space when
   1932   // flipping semispaces.
   1933   static const intptr_t kCopyOnFlipFlagsMask =
   1934     (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
   1935     (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
   1936     (1 << MemoryChunk::SCAN_ON_SCAVENGE);
   1937 
   1938   static const int kAreaSize = Page::kNonCodeObjectAreaSize;
   1939 
   1940   inline NewSpacePage* next_page() const {
   1941     return static_cast<NewSpacePage*>(next_chunk());
   1942   }
   1943 
   1944   inline void set_next_page(NewSpacePage* page) {
   1945     set_next_chunk(page);
   1946   }
   1947 
   1948   inline NewSpacePage* prev_page() const {
   1949     return static_cast<NewSpacePage*>(prev_chunk());
   1950   }
   1951 
   1952   inline void set_prev_page(NewSpacePage* page) {
   1953     set_prev_chunk(page);
   1954   }
   1955 
   1956   SemiSpace* semi_space() {
   1957     return reinterpret_cast<SemiSpace*>(owner());
   1958   }
   1959 
   1960   bool is_anchor() { return !this->InNewSpace(); }
   1961 
   1962   static bool IsAtStart(Address addr) {
   1963     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
   1964         == kObjectStartOffset;
   1965   }
   1966 
   1967   static bool IsAtEnd(Address addr) {
   1968     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
   1969   }
   1970 
   1971   Address address() {
   1972     return reinterpret_cast<Address>(this);
   1973   }
   1974 
   1975   // Finds the NewSpacePage containing the given address.
   1976   static inline NewSpacePage* FromAddress(Address address_in_page) {
   1977     Address page_start =
   1978         reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
   1979                                   ~Page::kPageAlignmentMask);
   1980     NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
   1981     return page;
   1982   }
   1983 
   1984   // Find the page for a limit address. A limit address is either an address
   1985   // inside a page, or the address right after the last byte of a page.
   1986   static inline NewSpacePage* FromLimit(Address address_limit) {
   1987     return NewSpacePage::FromAddress(address_limit - 1);
   1988   }
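          // Worked example of the address arithmetic above, assuming (purely for
          // illustration) 1 MB pages, i.e. kPageAlignmentMask == 0xfffff:
          // FromAddress(0x2345678) masks off the low bits and yields the page
          // header at 0x2300000.  FromLimit(0x2400000) first backs up one byte,
          // so a limit lying exactly on a page boundary still maps to the page it
          // terminates (0x2300000) rather than to the following page.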
   1989 
   1990  private:
   1991   // Create a NewSpacePage object that is only used as anchor
   1992   // for the doubly-linked list of real pages.
   1993   explicit NewSpacePage(SemiSpace* owner) {
   1994     InitializeAsAnchor(owner);
   1995   }
   1996 
   1997   static NewSpacePage* Initialize(Heap* heap,
   1998                                   Address start,
   1999                                   SemiSpace* semi_space);
   2000 
   2001   // Initialize a fake NewSpacePage used as a sentinel at the ends
   2002   // of a doubly-linked list of real NewSpacePages.
   2003   // Only uses the prev/next links, and sets flags to not be in new-space.
   2004   void InitializeAsAnchor(SemiSpace* owner);
   2005 
   2006   friend class SemiSpace;
   2007   friend class SemiSpaceIterator;
   2008 };
   2009 
   2010 
   2011 // -----------------------------------------------------------------------------
   2012 // SemiSpace in young generation
   2013 //
   2014 // A semispace is a contiguous chunk of memory holding page-like memory
   2015 // chunks.  The mark-compact collector uses the memory of the first page in
   2016 // the from space as a marking stack when tracing live objects.
   2017 
   2018 class SemiSpace : public Space {
   2019  public:
   2020   // Constructor.
   2021   SemiSpace(Heap* heap, SemiSpaceId semispace)
   2022     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
   2023       start_(NULL),
   2024       age_mark_(NULL),
   2025       id_(semispace),
   2026       anchor_(this),
   2027       current_page_(NULL) { }
   2028 
   2029   // Sets up the semispace using the given chunk.
   2030   void SetUp(Address start, int initial_capacity, int maximum_capacity);
   2031 
   2032   // Tear down the space.  Heap memory was not allocated by the space, so it
   2033   // is not deallocated here.
   2034   void TearDown();
   2035 
   2036   // True if the space has been set up but not torn down.
   2037   bool HasBeenSetUp() { return start_ != NULL; }
   2038 
   2039   // Grow the semispace to the new capacity.  The new capacity
   2040   // requested must be larger than the current capacity and less than
   2041   // the maximum capacity.
   2042   bool GrowTo(int new_capacity);
   2043 
   2044   // Shrinks the semispace to the new capacity.  The new capacity
   2045   // requested must be more than the amount of used memory in the
   2046   // semispace and less than the current capacity.
   2047   bool ShrinkTo(int new_capacity);
   2048 
   2049   // Returns the start address of the first page of the space.
   2050   Address space_start() {
   2051     ASSERT(anchor_.next_page() != &anchor_);
   2052     return anchor_.next_page()->area_start();
   2053   }
   2054 
   2055   // Returns the start address of the current page of the space.
   2056   Address page_low() {
   2057     return current_page_->area_start();
   2058   }
   2059 
   2060   // Returns one past the end address of the space.
   2061   Address space_end() {
   2062     return anchor_.prev_page()->area_end();
   2063   }
   2064 
   2065   // Returns one past the end address of the current page of the space.
   2066   Address page_high() {
   2067     return current_page_->area_end();
   2068   }
   2069 
   2070   bool AdvancePage() {
   2071     NewSpacePage* next_page = current_page_->next_page();
   2072     if (next_page == anchor()) return false;
   2073     current_page_ = next_page;
   2074     return true;
   2075   }
   2076 
   2077   // Resets the space to using the first page.
   2078   void Reset();
   2079 
   2080   // Age mark accessors.
   2081   Address age_mark() { return age_mark_; }
   2082   void set_age_mark(Address mark);
   2083 
   2084   // True if the address is in the address range of this semispace (not
   2085   // necessarily below the allocation pointer).
   2086   bool Contains(Address a) {
   2087     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
   2088            == reinterpret_cast<uintptr_t>(start_);
   2089   }
   2090 
   2091   // True if the object is a heap object in the address range of this
   2092   // semispace (not necessarily below the allocation pointer).
   2093   bool Contains(Object* o) {
   2094     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
   2095   }
   2096 
   2097   // If we didn't have these here then SemiSpace would be abstract.  However
   2098   // they should never be called.
   2099   virtual intptr_t Size() {
   2100     UNREACHABLE();
   2101     return 0;
   2102   }
   2103 
   2104   virtual bool ReserveSpace(int bytes) {
   2105     UNREACHABLE();
   2106     return false;
   2107   }
   2108 
   2109   bool is_committed() { return committed_; }
   2110   bool Commit();
   2111   bool Uncommit();
   2112 
   2113   NewSpacePage* first_page() { return anchor_.next_page(); }
   2114   NewSpacePage* current_page() { return current_page_; }
   2115 
   2116 #ifdef VERIFY_HEAP
   2117   virtual void Verify();
   2118 #endif
   2119 
   2120 #ifdef DEBUG
   2121   virtual void Print();
   2122   // Validate a range of addresses in a SemiSpace.
   2123   // The "from" address must be on a page prior to the "to" address,
   2124   // in the linked page order, or it must be earlier on the same page.
   2125   static void AssertValidRange(Address from, Address to);
   2126 #else
   2127   // Do nothing.
   2128   inline static void AssertValidRange(Address from, Address to) {}
   2129 #endif
   2130 
   2131   // Returns the current capacity of the semi space.
   2132   int Capacity() { return capacity_; }
   2133 
   2134   // Returns the maximum capacity of the semi space.
   2135   int MaximumCapacity() { return maximum_capacity_; }
   2136 
   2137   // Returns the initial capacity of the semi space.
   2138   int InitialCapacity() { return initial_capacity_; }
   2139 
   2140   SemiSpaceId id() { return id_; }
   2141 
   2142   static void Swap(SemiSpace* from, SemiSpace* to);
   2143 
   2144   // Approximate amount of physical memory committed for this space.
   2145   size_t CommittedPhysicalMemory();
   2146 
   2147  private:
   2148   // Flips the semispace between being from-space and to-space.
   2149   // Copies the flags into the masked positions on all pages in the space.
   2150   void FlipPages(intptr_t flags, intptr_t flag_mask);
   2151 
   2152   NewSpacePage* anchor() { return &anchor_; }
   2153 
   2154   // The current and maximum capacity of the space.
   2155   int capacity_;
   2156   int maximum_capacity_;
   2157   int initial_capacity_;
   2158 
   2159   // The start address of the space.
   2160   Address start_;
   2161   // Used to govern object promotion during mark-compact collection.
   2162   Address age_mark_;
   2163 
   2164   // Masks and comparison values to test for containment in this semispace.
   2165   uintptr_t address_mask_;
   2166   uintptr_t object_mask_;
   2167   uintptr_t object_expected_;
   2168 
   2169   bool committed_;
   2170   SemiSpaceId id_;
   2171 
   2172   NewSpacePage anchor_;
   2173   NewSpacePage* current_page_;
   2174 
   2175   friend class SemiSpaceIterator;
   2176   friend class NewSpacePageIterator;
   2177  public:
   2178   TRACK_MEMORY("SemiSpace")
   2179 };
   2180 
   2181 
   2182 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
   2183 // semispace of the heap's new space.  It iterates over the objects in the
   2184 // semispace from a given start address (defaulting to the bottom of the
   2185 // semispace) to the top of the semispace.  New objects allocated after the
   2186 // iterator is created are not iterated.
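        //
        // Typical use (sketch; 'heap' is assumed to point at a Heap whose new
        // space has been set up):
        //
        //   SemiSpaceIterator it(heap->new_space());
        //   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
        //     // ... visit 'object' ...
        //   }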
   2187 class SemiSpaceIterator : public ObjectIterator {
   2188  public:
   2189   // Create an iterator over the objects in the given space.  If no start
   2190   // address is given, the iterator starts from the bottom of the space.  If
   2191   // no size function is given, the iterator calls Object::Size().
   2192 
   2193   // Iterate over all of allocated to-space.
   2194   explicit SemiSpaceIterator(NewSpace* space);
   2195   // Iterate over all of allocated to-space, with a custom size function.
   2196   SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
   2197   // Iterate over part of allocated to-space, from start to the end
   2198   // of allocation.
   2199   SemiSpaceIterator(NewSpace* space, Address start);
   2200   // Iterate from one address to another in the same semi-space.
   2201   SemiSpaceIterator(Address from, Address to);
   2202 
   2203   HeapObject* Next() {
   2204     if (current_ == limit_) return NULL;
   2205     if (NewSpacePage::IsAtEnd(current_)) {
   2206       NewSpacePage* page = NewSpacePage::FromLimit(current_);
   2207       page = page->next_page();
   2208       ASSERT(!page->is_anchor());
   2209       current_ = page->area_start();
   2210       if (current_ == limit_) return NULL;
   2211     }
   2212 
   2213     HeapObject* object = HeapObject::FromAddress(current_);
   2214     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
   2215 
   2216     current_ += size;
   2217     return object;
   2218   }
   2219 
   2220   // Implementation of the ObjectIterator functions.
   2221   virtual HeapObject* next_object() { return Next(); }
   2222 
   2223  private:
   2224   void Initialize(Address start,
   2225                   Address end,
   2226                   HeapObjectCallback size_func);
   2227 
   2228   // The current iteration point.
   2229   Address current_;
   2230   // The end of iteration.
   2231   Address limit_;
   2232   // The callback function.
   2233   HeapObjectCallback size_func_;
   2234 };
   2235 
   2236 
   2237 // -----------------------------------------------------------------------------
   2238 // A PageIterator iterates the pages in a semi-space.
   2239 class NewSpacePageIterator BASE_EMBEDDED {
   2240  public:
   2241   // Make an iterator that runs over all pages in to-space.
   2242   explicit inline NewSpacePageIterator(NewSpace* space);
   2243 
   2244   // Make an iterator that runs over all pages in the given semispace,
   2245   // even those not used in allocation.
   2246   explicit inline NewSpacePageIterator(SemiSpace* space);
   2247 
   2248   // Make iterator that iterates from the page containing start
   2249   // to the page that contains limit in the same semispace.
   2250   inline NewSpacePageIterator(Address start, Address limit);
   2251 
   2252   inline bool has_next();
   2253   inline NewSpacePage* next();
   2254 
   2255  private:
   2256   NewSpacePage* prev_page_;  // Previous page returned.
   2257   // Next page that will be returned.  Cached here so that we can use this
   2258   // iterator for operations that deallocate pages.
   2259   NewSpacePage* next_page_;
   2260   // Last page returned.
   2261   NewSpacePage* last_page_;
   2262 };
   2263 
   2264 
   2265 // -----------------------------------------------------------------------------
   2266 // The young generation space.
   2267 //
   2268 // The new space consists of a contiguous pair of semispaces.  It simply
   2269 // forwards most functions to the appropriate semispace.
   2270 
   2271 class NewSpace : public Space {
   2272  public:
   2273   // Constructor.
   2274   explicit NewSpace(Heap* heap)
   2275     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
   2276       to_space_(heap, kToSpace),
   2277       from_space_(heap, kFromSpace),
   2278       reservation_(),
   2279       inline_allocation_limit_step_(0) {}
   2280 
   2281   // Sets up the new space using the given chunk.
   2282   bool SetUp(int reserved_semispace_size_, int max_semispace_size);
   2283 
   2284   // Tears down the space.  Heap memory was not allocated by the space, so it
   2285   // is not deallocated here.
   2286   void TearDown();
   2287 
   2288   // True if the space has been set up but not torn down.
   2289   bool HasBeenSetUp() {
   2290     return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
   2291   }
   2292 
   2293   // Flip the pair of spaces.
   2294   void Flip();
   2295 
   2296   // Grow the capacity of the semispaces.  Assumes that they are not at
   2297   // their maximum capacity.
   2298   void Grow();
   2299 
   2300   // Shrink the capacity of the semispaces.
   2301   void Shrink();
   2302 
   2303   // True if the address or object lies in the address range of either
   2304   // semispace (not necessarily below the allocation pointer).
   2305   bool Contains(Address a) {
   2306     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
   2307         == reinterpret_cast<uintptr_t>(start_);
   2308   }
   2309 
   2310   bool Contains(Object* o) {
   2311     Address a = reinterpret_cast<Address>(o);
   2312     return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
   2313   }
   2314 
   2315   // Return the allocated bytes in the active semispace.
   2316   virtual intptr_t Size() {
   2317     return pages_used_ * NewSpacePage::kAreaSize +
   2318         static_cast<int>(top() - to_space_.page_low());
   2319   }
   2320 
   2321   // The same as Size(), but returning an int.  The intptr_t version is
   2322   // inherited; this one is useful because we know the new space cannot grow
   2323   // as large as the other spaces:
   2324   int SizeAsInt() { return static_cast<int>(Size()); }
   2325 
   2326   // Return the current capacity of a semispace.
   2327   intptr_t EffectiveCapacity() {
   2328     SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
   2329     return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
   2330   }
   2331 
   2332   // Return the current capacity of a semispace.
   2333   intptr_t Capacity() {
   2334     ASSERT(to_space_.Capacity() == from_space_.Capacity());
   2335     return to_space_.Capacity();
   2336   }
   2337 
   2338   // Return the total amount of memory committed for new space.
   2339   intptr_t CommittedMemory() {
   2340     if (from_space_.is_committed()) return 2 * Capacity();
   2341     return Capacity();
   2342   }
   2343 
   2344   // Approximate amount of physical memory committed for this space.
   2345   size_t CommittedPhysicalMemory();
   2346 
   2347   // Return the available bytes without growing.
   2348   intptr_t Available() {
   2349     return Capacity() - Size();
   2350   }
   2351 
   2352   // Return the maximum capacity of a semispace.
   2353   int MaximumCapacity() {
   2354     ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
   2355     return to_space_.MaximumCapacity();
   2356   }
   2357 
   2358   // Returns the initial capacity of a semispace.
   2359   int InitialCapacity() {
   2360     ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
   2361     return to_space_.InitialCapacity();
   2362   }
   2363 
   2364   // Return the address of the allocation pointer in the active semispace.
   2365   Address top() {
   2366     ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
   2367     return allocation_info_.top;
   2368   }
   2369   // Return the address of the first object in the active semispace.
   2370   Address bottom() { return to_space_.space_start(); }
   2371 
   2372   // Get the age mark of the inactive semispace.
   2373   Address age_mark() { return from_space_.age_mark(); }
   2374   // Set the age mark in the active semispace.
   2375   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
   2376 
   2377   // The start address of the space and a bit mask. Anding an address in the
   2378   // new space with the mask will result in the start address.
   2379   Address start() { return start_; }
   2380   uintptr_t mask() { return address_mask_; }
   2381 
   2382   INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
   2383     ASSERT(Contains(addr));
   2384     ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
   2385            IsAligned(OffsetFrom(addr) - 1, kPointerSize));
   2386     return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
   2387   }
   2388 
   2389   INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
   2390     return reinterpret_cast<Address>(index << kPointerSizeLog2);
   2391   }
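          // For example, on a 64-bit target (kPointerSizeLog2 == 3) an address
          // 0x40 bytes past start_ maps to mark-bit index 0x40 >> 3 == 8, and
          // MarkbitIndexToAddress(8) gives back 8 << 3 == 0x40, i.e. the offset
          // from start_ reinterpreted as an Address.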
   2392 
   2393   // The allocation top and limit addresses.
   2394   Address* allocation_top_address() { return &allocation_info_.top; }
   2395   Address* allocation_limit_address() { return &allocation_info_.limit; }
   2396 
   2397   MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
   2398 
   2399   // Reset the allocation pointer to the beginning of the active semispace.
   2400   void ResetAllocationInfo();
   2401 
   2402   void LowerInlineAllocationLimit(intptr_t step) {
   2403     inline_allocation_limit_step_ = step;
   2404     if (step == 0) {
   2405       allocation_info_.limit = to_space_.page_high();
   2406     } else {
   2407       allocation_info_.limit = Min(
   2408           allocation_info_.top + inline_allocation_limit_step_,
   2409           allocation_info_.limit);
   2410     }
   2411     top_on_previous_step_ = allocation_info_.top;
   2412   }
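          // Sketch of how a client such as the incremental marker might use this
          // (illustrative only):
          //
          //   // Drop into the runtime at least every 1 KB of new-space
          //   // allocation so a marking step can be performed:
          //   new_space->LowerInlineAllocationLimit(1024);
          //   // Restore the normal, page-sized limit afterwards:
          //   new_space->LowerInlineAllocationLimit(0);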
   2413 
   2414   // Get the extent of the inactive semispace (for use as a marking stack,
   2415   // or to zap it). Notice: space-addresses are not necessarily on the
   2416   // same page, so FromSpaceStart() might be above FromSpaceEnd().
   2417   Address FromSpacePageLow() { return from_space_.page_low(); }
   2418   Address FromSpacePageHigh() { return from_space_.page_high(); }
   2419   Address FromSpaceStart() { return from_space_.space_start(); }
   2420   Address FromSpaceEnd() { return from_space_.space_end(); }
   2421 
   2422   // Get the extent of the active semispace's pages' memory.
   2423   Address ToSpaceStart() { return to_space_.space_start(); }
   2424   Address ToSpaceEnd() { return to_space_.space_end(); }
   2425 
   2426   inline bool ToSpaceContains(Address address) {
   2427     return to_space_.Contains(address);
   2428   }
   2429   inline bool FromSpaceContains(Address address) {
   2430     return from_space_.Contains(address);
   2431   }
   2432 
   2433   // True if the object is a heap object in the address range of the
   2434   // respective semispace (not necessarily below the allocation pointer of the
   2435   // semispace).
   2436   inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
   2437   inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
   2438 
   2439   // Try to switch the active semispace to a new, empty, page.
   2440   // Returns false if this isn't possible or reasonable (i.e., there
   2441   // are no pages, or the current page is already empty), or true
   2442   // if successful.
   2443   bool AddFreshPage();
   2444 
   2445   virtual bool ReserveSpace(int bytes);
   2446 
   2447 #ifdef VERIFY_HEAP
   2448   // Verify the active semispace.
   2449   virtual void Verify();
   2450 #endif
   2451 
   2452 #ifdef DEBUG
   2453   // Print the active semispace.
   2454   virtual void Print() { to_space_.Print(); }
   2455 #endif
   2456 
   2457   // Iterates the active semispace to collect statistics.
   2458   void CollectStatistics();
   2459   // Reports previously collected statistics of the active semispace.
   2460   void ReportStatistics();
   2461   // Clears previously collected statistics.
   2462   void ClearHistograms();
   2463 
   2464   // Record the allocation or promotion of a heap object.  Note that we don't
   2465   // record every single allocation, but only those that happen in the
   2466   // to space during a scavenge GC.
   2467   void RecordAllocation(HeapObject* obj);
   2468   void RecordPromotion(HeapObject* obj);
   2469 
   2470   // Return whether the operation succeeded.
   2471   bool CommitFromSpaceIfNeeded() {
   2472     if (from_space_.is_committed()) return true;
   2473     return from_space_.Commit();
   2474   }
   2475 
   2476   bool UncommitFromSpace() {
   2477     if (!from_space_.is_committed()) return true;
   2478     return from_space_.Uncommit();
   2479   }
   2480 
   2481   inline intptr_t inline_allocation_limit_step() {
   2482     return inline_allocation_limit_step_;
   2483   }
   2484 
   2485   SemiSpace* active_space() { return &to_space_; }
   2486 
   2487  private:
   2488   // Update allocation info to match the current to-space page.
   2489   void UpdateAllocationInfo();
   2490 
   2491   Address chunk_base_;
   2492   uintptr_t chunk_size_;
   2493 
   2494   // The semispaces.
   2495   SemiSpace to_space_;
   2496   SemiSpace from_space_;
   2497   VirtualMemory reservation_;
   2498   int pages_used_;
   2499 
   2500   // Start address and bit mask for containment testing.
   2501   Address start_;
   2502   uintptr_t address_mask_;
   2503   uintptr_t object_mask_;
   2504   uintptr_t object_expected_;
   2505 
   2506   // Allocation pointer and limit for normal allocation and allocation during
   2507   // mark-compact collection.
   2508   AllocationInfo allocation_info_;
   2509 
   2510   // When incremental marking is active we will set allocation_info_.limit
   2511   // to be lower than the actual limit and then gradually increase it
   2512   // in steps to guarantee that we do incremental marking steps even
   2513   // when all allocation is performed from inlined generated code.
   2514   intptr_t inline_allocation_limit_step_;
   2515 
   2516   Address top_on_previous_step_;
   2517 
   2518   HistogramInfo* allocated_histogram_;
   2519   HistogramInfo* promoted_histogram_;
   2520 
   2521   MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
   2522 
   2523   friend class SemiSpaceIterator;
   2524 
   2525  public:
   2526   TRACK_MEMORY("NewSpace")
   2527 };
   2528 
   2529 
   2530 // -----------------------------------------------------------------------------
   2531 // Old object space (excluding map objects)
   2532 
   2533 class OldSpace : public PagedSpace {
   2534  public:
   2535   // Creates an old space object with a given maximum capacity.
   2536   // The constructor does not allocate pages from OS.
   2537   OldSpace(Heap* heap,
   2538            intptr_t max_capacity,
   2539            AllocationSpace id,
   2540            Executability executable)
   2541       : PagedSpace(heap, max_capacity, id, executable) {
   2542     page_extra_ = 0;
   2543   }
   2544 
   2545   // The limit of allocation for a page in this space.
   2546   virtual Address PageAllocationLimit(Page* page) {
   2547     return page->area_end();
   2548   }
   2549 
   2550  public:
   2551   TRACK_MEMORY("OldSpace")
   2552 };
   2553 
   2554 
   2555 // For contiguous spaces, top should be in the space (or at the end) and limit
   2556 // should be the end of the space.
   2557 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
   2558   SLOW_ASSERT((space).page_low() <= (info).top             \
   2559               && (info).top <= (space).page_high()         \
   2560               && (info).limit <= (space).page_high())
   2561 
   2562 
   2563 // -----------------------------------------------------------------------------
   2564 // Old space for objects of a fixed size
   2565 
   2566 class FixedSpace : public PagedSpace {
   2567  public:
   2568   FixedSpace(Heap* heap,
   2569              intptr_t max_capacity,
   2570              AllocationSpace id,
   2571              int object_size_in_bytes)
   2572       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
   2573         object_size_in_bytes_(object_size_in_bytes) {
   2574     page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
   2575   }
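          // For example, with a (hypothetical) 8000-byte object area and 96-byte
          // objects, page_extra_ == 8000 % 96 == 32: the last 32 bytes of each
          // page can never hold another object and are treated as wasted.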
   2576 
   2577   // The limit of allocation for a page in this space.
   2578   virtual Address PageAllocationLimit(Page* page) {
   2579     return page->area_end() - page_extra_;
   2580   }
   2581 
   2582   int object_size_in_bytes() { return object_size_in_bytes_; }
   2583 
   2584   // Prepares for a mark-compact GC.
   2585   virtual void PrepareForMarkCompact();
   2586 
   2587  private:
   2588   // The size of objects in this space.
   2589   int object_size_in_bytes_;
   2590 };
   2591 
   2592 
   2593 // -----------------------------------------------------------------------------
   2594 // Old space for all map objects
   2595 
   2596 class MapSpace : public FixedSpace {
   2597  public:
   2598   // Creates a map space object with a maximum capacity.
   2599   MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
   2600       : FixedSpace(heap, max_capacity, id, Map::kSize),
   2601         max_map_space_pages_(kMaxMapPageIndex - 1) {
   2602   }
   2603 
   2604   // Given an index, returns the page address.
   2605   // TODO(1600): this limit is artificial, just to keep the code compilable
   2606   static const int kMaxMapPageIndex = 1 << 16;
   2607 
   2608   virtual int RoundSizeDownToObjectAlignment(int size) {
   2609     if (IsPowerOf2(Map::kSize)) {
   2610       return RoundDown(size, Map::kSize);
   2611     } else {
   2612       return (size / Map::kSize) * Map::kSize;
   2613     }
   2614   }
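          // For example, if Map::kSize were 88 bytes (not a power of two), then
          // RoundSizeDownToObjectAlignment(1000) == (1000 / 88) * 88 == 968, the
          // largest multiple of the map size that fits in 1000 bytes.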
   2615 
   2616  protected:
   2617   virtual void VerifyObject(HeapObject* obj);
   2618 
   2619  private:
   2620   static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
   2621 
   2622   // Do map space compaction if there is a page gap.
   2623   int CompactionThreshold() {
   2624     return kMapsPerPage * (max_map_space_pages_ - 1);
   2625   }
   2626 
   2627   const int max_map_space_pages_;
   2628 
   2629  public:
   2630   TRACK_MEMORY("MapSpace")
   2631 };
   2632 
   2633 
   2634 // -----------------------------------------------------------------------------
   2635 // Old space for simple property cell objects
   2636 
   2637 class CellSpace : public FixedSpace {
   2638  public:
   2639   // Creates a property cell space object with a maximum capacity.
   2640   CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
   2641       : FixedSpace(heap, max_capacity, id, Cell::kSize)
   2642   {}
   2643 
   2644   virtual int RoundSizeDownToObjectAlignment(int size) {
   2645     if (IsPowerOf2(Cell::kSize)) {
   2646       return RoundDown(size, Cell::kSize);
   2647     } else {
   2648       return (size / Cell::kSize) * Cell::kSize;
   2649     }
   2650   }
   2651 
   2652  protected:
   2653   virtual void VerifyObject(HeapObject* obj);
   2654 
   2655  public:
   2656   TRACK_MEMORY("CellSpace")
   2657 };
   2658 
   2659 
   2660 // -----------------------------------------------------------------------------
   2661 // Old space for all global object property cell objects
   2662 
   2663 class PropertyCellSpace : public FixedSpace {
   2664  public:
   2665   // Creates a property cell space object with a maximum capacity.
   2666   PropertyCellSpace(Heap* heap, intptr_t max_capacity,
   2667                     AllocationSpace id)
   2668       : FixedSpace(heap, max_capacity, id, PropertyCell::kSize)
   2669   {}
   2670 
   2671   virtual int RoundSizeDownToObjectAlignment(int size) {
   2672     if (IsPowerOf2(PropertyCell::kSize)) {
   2673       return RoundDown(size, PropertyCell::kSize);
   2674     } else {
   2675       return (size / PropertyCell::kSize) * PropertyCell::kSize;
   2676     }
   2677   }
   2678 
   2679  protected:
   2680   virtual void VerifyObject(HeapObject* obj);
   2681 
   2682  public:
   2683   TRACK_MEMORY("PropertyCellSpace")
   2684 };
   2685 
   2686 
   2687 // -----------------------------------------------------------------------------
   2688 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
   2689 // the large object space.  A large object is allocated from the OS heap with
   2690 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
   2691 // A large object always starts at offset Page::kObjectStartOffset in its page.
   2692 // Large objects do not move during garbage collections.
   2693 
   2694 class LargeObjectSpace : public Space {
   2695  public:
   2696   LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
   2697   virtual ~LargeObjectSpace() {}
   2698 
   2699   // Initializes internal data structures.
   2700   bool SetUp();
   2701 
   2702   // Releases internal resources, frees objects in this space.
   2703   void TearDown();
   2704 
   2705   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
   2706     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
   2707     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
   2708   }
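          // For example, a chunk of 3 * Page::kPageSize bytes can hold one large
          // object of at most 2 * Page::kPageSize - Page::kObjectStartOffset
          // bytes, while a chunk no bigger than one page plus the header yields 0.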
   2709 
   2710   // Shared implementation of AllocateRaw, AllocateRawCode and
   2711   // AllocateRawFixedArray.
   2712   MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
   2713                                            Executability executable);
   2714 
   2715   // Available bytes for objects in this space.
   2716   inline intptr_t Available();
   2717 
   2718   virtual intptr_t Size() {
   2719     return size_;
   2720   }
   2721 
   2722   virtual intptr_t SizeOfObjects() {
   2723     return objects_size_;
   2724   }
   2725 
   2726   intptr_t CommittedMemory() {
   2727     return Size();
   2728   }
   2729 
   2730   // Approximate amount of physical memory committed for this space.
   2731   size_t CommittedPhysicalMemory();
   2732 
   2733   int PageCount() {
   2734     return page_count_;
   2735   }
   2736 
   2737   // Finds an object for a given address, returns Failure::Exception()
   2738   // if it is not found. The function iterates through all objects in this
   2739   // space, may be slow.
   2740   MaybeObject* FindObject(Address a);
   2741 
   2742   // Finds a large object page containing the given address, returns NULL
   2743   // if such a page doesn't exist.
   2744   LargePage* FindPage(Address a);
   2745 
   2746   // Frees unmarked objects.
   2747   void FreeUnmarkedObjects();
   2748 
   2749   // Checks whether a heap object is in this space; O(1).
   2750   bool Contains(HeapObject* obj);
   2751 
   2752   // Checks whether the space is empty.
   2753   bool IsEmpty() { return first_page_ == NULL; }
   2754 
   2755   // See the comments for ReserveSpace in the Space class.  This has to be
   2756   // called after ReserveSpace has been called on the paged spaces, since they
   2757   // may use some memory, leaving less for large objects.
   2758   virtual bool ReserveSpace(int bytes);
   2759 
   2760   LargePage* first_page() { return first_page_; }
   2761 
   2762 #ifdef VERIFY_HEAP
   2763   virtual void Verify();
   2764 #endif
   2765 
   2766 #ifdef DEBUG
   2767   virtual void Print();
   2768   void ReportStatistics();
   2769   void CollectCodeStatistics();
   2770 #endif
   2771   // Checks whether an address is in the object area in this space.  It
   2772   // iterates all objects in the space. May be slow.
   2773   bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
   2774 
   2775  private:
   2776   intptr_t max_capacity_;
   2777   // The head of the linked list of large object chunks.
   2778   LargePage* first_page_;
   2779   intptr_t size_;  // allocated bytes
   2780   int page_count_;  // number of chunks
   2781   intptr_t objects_size_;  // size of objects
   2782   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
   2783   HashMap chunk_map_;
   2784 
   2785   friend class LargeObjectIterator;
   2786 
   2787  public:
   2788   TRACK_MEMORY("LargeObjectSpace")
   2789 };
   2790 
   2791 
   2792 class LargeObjectIterator: public ObjectIterator {
   2793  public:
   2794   explicit LargeObjectIterator(LargeObjectSpace* space);
   2795   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
   2796 
   2797   HeapObject* Next();
   2798 
   2799   // Implementation of ObjectIterator.
   2800   virtual HeapObject* next_object() { return Next(); }
   2801 
   2802  private:
   2803   LargePage* current_;
   2804   HeapObjectCallback size_func_;
   2805 };
   2806 
   2807 
   2808 // Iterates over the chunks (pages and large object pages) that can contain
   2809 // pointers to new space.
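        //
        // Typical use (sketch; 'heap' is assumed to be a Heap*):
        //
        //   PointerChunkIterator it(heap);
        //   MemoryChunk* chunk;
        //   while ((chunk = it.next()) != NULL) {
        //     // ... process a chunk that may contain pointers to new space ...
        //   }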
   2810 class PointerChunkIterator BASE_EMBEDDED {
   2811  public:
   2812   inline explicit PointerChunkIterator(Heap* heap);
   2813 
   2814   // Return NULL when the iterator is done.
   2815   MemoryChunk* next() {
   2816     switch (state_) {
   2817       case kOldPointerState: {
   2818         if (old_pointer_iterator_.has_next()) {
   2819           return old_pointer_iterator_.next();
   2820         }
   2821         state_ = kMapState;
   2822         // Fall through.
   2823       }
   2824       case kMapState: {
   2825         if (map_iterator_.has_next()) {
   2826           return map_iterator_.next();
   2827         }
   2828         state_ = kLargeObjectState;
   2829         // Fall through.
   2830       }
   2831       case kLargeObjectState: {
   2832         HeapObject* heap_object;
   2833         do {
   2834           heap_object = lo_iterator_.Next();
   2835           if (heap_object == NULL) {
   2836             state_ = kFinishedState;
   2837             return NULL;
   2838           }
   2839           // Fixed arrays are the only pointer-containing objects in large
   2840           // object space.
   2841         } while (!heap_object->IsFixedArray());
   2842         MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
   2843         return answer;
   2844       }
   2845       case kFinishedState:
   2846         return NULL;
   2847       default:
   2848         break;
   2849     }
   2850     UNREACHABLE();
   2851     return NULL;
   2852   }
   2853 
   2854 
   2855  private:
   2856   enum State {
   2857     kOldPointerState,
   2858     kMapState,
   2859     kLargeObjectState,
   2860     kFinishedState
   2861   };
   2862   State state_;
   2863   PageIterator old_pointer_iterator_;
   2864   PageIterator map_iterator_;
   2865   LargeObjectIterator lo_iterator_;
   2866 };
   2867 
   2868 
   2869 #ifdef DEBUG
   2870 struct CommentStatistic {
   2871   const char* comment;
   2872   int size;
   2873   int count;
   2874   void Clear() {
   2875     comment = NULL;
   2876     size = 0;
   2877     count = 0;
   2878   }
   2879   // Must be small, since an iteration is used for lookup.
   2880   static const int kMaxComments = 64;
   2881 };
   2882 #endif
   2883 
   2884 
   2885 } }  // namespace v8::internal
   2886 
   2887 #endif  // V8_SPACES_H_
   2888