// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "allocation.h"
#include "hashmap.h"
#include "list.h"
#include "log.h"
#include "platform/mutex.h"
#include "v8utils.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; all other old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous.  The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references.  See store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage.  Garbage sections are marked with a special map.  These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries.  Garbage sections are chained
// together to form a free list after a GC.  Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain.  Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section.  The start of this
// section is denoted by the top field in the space.  The end of the section
// is denoted by the limit field in the space.  This special garbage section
// is not marked with a free space map in the data.  The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created.  The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.

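// Illustrative sketch (not part of the original header): allocation in the
// special garbage section is a bump of the space's top field towards limit,
// and top == limit means there is no section left:
//
//   // before:  | objects ... | top ............. limit |
//   // allocate n bytes (n <= limit - top): object starts at the old top
//   // after:   | objects ... new object | top ... limit |
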
// Some assertion macros used in debug mode.

#define ASSERT_PAGE_ALIGNED(address)                                           \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                                         \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                                               \
  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                                             \
  ASSERT((Page::kObjectStartOffset <= offset)                                  \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                                           \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
class Space;
class FreeList;
class MemoryChunk;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }

 private:
  CellType* cell_;
  CellType mask_;
  // This boolean indicates that the object is in a data-only space with no
  // pointers.  This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
  bool data_only_;
};
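
// Illustrative sketch (not part of the original header): advancing a mark
// bit across a cell boundary.  When the shifted mask overflows to zero,
// Next() moves to the first bit of the following cell:
//
//   MarkBit::CellType cells[2] = { 0, 0 };
//   MarkBit high(&cells[0], 1u << 31, false);
//   high.Set();                  // cells[0] == 0x80000000
//   MarkBit next = high.Next();  // refers to cells[1] with mask == 1
//   next.Set();                  // cells[1] == 1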


// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength =
    (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() {
    return CellsForLength(kLength);
  }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) {
    return reinterpret_cast<Address>(this);
  }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n",
               seq_start,
               seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }
};
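
// Illustrative sketch (example values, not from the original header): with
// kBitsPerCell == 32, mark bit index 70 lands in cell 70 >> 5 == 2 with mask
// 1 << (70 & 31) == 0x40:
//
//   // bitmap_start is a hypothetical, suitably aligned address.
//   Bitmap* bitmap = Bitmap::FromAddress(bitmap_start);
//   MarkBit bit = bitmap->MarkBitFromIndex(70);
//   // bit.cell() == bitmap->cells() + 2 and bit.mask() == 0x40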


class SkipList;
class SlotsBuffer;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
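
  // Illustrative sketch (not part of the original header): because chunks
  // are kAlignment (1MB) aligned, masking off the low bits of an address in
  // the first kPageSize of the chunk recovers the chunk header:
  //
  //   Address addr = chunk_start + offset;  // hypothetical, offset < kPageSize
  //   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  //   // chunk == reinterpret_cast<MemoryChunk*>(chunk_start)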

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() const { return next_chunk_; }
  MemoryChunk* prev_chunk() const { return prev_chunk_; }

  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
        kFailureTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kFailureTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
           kFailureTag);
  }

  VirtualMemory* reserved_memory() {
    return &reservation_;
  }

  void InitializeReservedMemory() {
    reservation_.Reset();
  }

  void set_reserved_memory(VirtualMemory* reservation) {
    ASSERT_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page.
  // It's a limit if it's in the page, or if it's just after the
  // last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code.  This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    CONTAINS_ONLY_DATA,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,

    // Pages swept precisely can be iterated, hitting only the live objects,
    // whereas those swept conservatively cannot be iterated over.  Both flags
    // indicate that marking bits have been cleared by the sweeper; otherwise
    // marking bits are still intact.
    WAS_SWEPT_PRECISELY,
    WAS_SWEPT_CONSERVATIVELY,

    // Large objects can have a progress bar in their page header.  These
    // objects are scanned in increments and will be kept black while being
    // scanned.  Even if the mutator writes to them they will be kept black,
    // and a white-to-grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };


  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask =
      1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) |
      (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) |
      (1 << IN_TO_SPACE);


  void SetFlag(int flag) {
    flags_ |= static_cast<uintptr_t>(1) << flag;
  }

  void ClearFlag(int flag) {
    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
  }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask
  // are set to the value in "flags", the rest retain the current value
  // in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }
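
  // Illustrative sketch (not part of the original header): moving a
  // hypothetical page from from-space to to-space flips both flags in one
  // call by masking exactly the bits being rewritten:
  //
  //   chunk->SetFlags(1 << MemoryChunk::IN_TO_SPACE,
  //                   (1 << MemoryChunk::IN_FROM_SPACE) |
  //                   (1 << MemoryChunk::IN_TO_SPACE));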

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }

  intptr_t parallel_sweeping() const {
    return parallel_sweeping_;
  }

  void set_parallel_sweeping(intptr_t state) {
    parallel_sweeping_ = state;
  }

  bool TryParallelSweeping() {
    return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
  }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n",
             static_cast<void*>(this), live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
             static_cast<void*>(this), live_byte_count_,
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  int progress_bar() {
    ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  bool IsLeftOfProgressBar(Object** slot) {
    Address slot_address = reinterpret_cast<Address>(slot);
    ASSERT(slot_address > this->address());
    return (slot_address - (this->address() + kObjectStartOffset)) <
           progress_bar();
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;

  static const intptr_t kLiveBytesOffset =
     kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
     kPointerSize + kPointerSize +
     kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize + kPointerSize;

  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
                                    kIntSize + kIntSize + kPointerSize +
                                    5 * kPointerSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both map and
  // code alignment to be suitable for both.  Also aligned to 32 words because
  // the marking bitmap is arranged in 32-bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset = kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
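
  // Illustrative arithmetic (example numbers, not the actual constants): the
  // expression above rounds kBodyOffset up to the next multiple of
  // kObjectStartAlignment.  E.g. with alignment 128 and kBodyOffset 130:
  // 129 + (128 - 129 % 128) == 129 + 127 == 256.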

  size_t size() const { return size_; }

  void set_size(size_t size) {
    size_ = size;
  }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool ContainsOnlyData() {
    return IsFlagSet(CONTAINS_ONLY_DATA);
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() {
    return IsFlagSet(IN_TO_SPACE);
  }

  bool InFromSpace() {
    return IsFlagSet(IN_FROM_SPACE);
  }

  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset =
        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;

    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }
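
  // Illustrative sketch (assuming 32-bit pointers, i.e. kPointerSizeLog2 ==
  // 2): an object 64 bytes into a hypothetical chunk maps to mark bit index
  // 64 >> 2 == 16, and MarkbitIndexToAddress() inverts the mapping:
  //
  //   uint32_t index = chunk->AddressToMarkbitIndex(chunk->address() + 64);
  //   ASSERT(index == 16);
  //   ASSERT(chunk->MarkbitIndexToAddress(index) == chunk->address() + 64);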

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() { return heap_; }

  static const int kFlagsOffset = kPointerSize * 3;

  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() {
    return skip_list_;
  }

  inline void set_skip_list(SkipList* skip_list) {
    skip_list_ = skip_list;
  }

  inline SlotsBuffer* slots_buffer() {
    return slots_buffer_;
  }

  inline SlotsBuffer** slots_buffer_address() {
    return &slots_buffer_;
  }

  void MarkEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() {
    return static_cast<int>(area_end() - area_start());
  }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() {
    return high_water_mark_;
  }

  static inline void UpdateHighWaterMark(Address mark);

 protected:
  MemoryChunk* next_chunk_;
  MemoryChunk* prev_chunk_;
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;
  // The identity of the owning space.  This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from any
  // entry in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential, this counts the
  // highest number of bytes ever allocated on the page.
  int high_water_mark_;

  intptr_t parallel_sweeping_;

  // PagedSpace free-list statistics.
  intptr_t available_in_small_free_list_;
  intptr_t available_in_medium_free_list_;
  intptr_t available_in_large_free_list_;
  intptr_t available_in_huge_free_list_;
  intptr_t non_available_small_blocks_;

  static MemoryChunk* Initialize(Heap* heap,
                                 Address base,
                                 size_t size,
                                 Address area_start,
                                 Address area_end,
                                 Executability executable,
                                 Space* owner);

  friend class MemoryAllocator;
};


STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);


// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  // This only works if the object is in fact in a page.  See also
  // MemoryChunk::FromAddress() and MemoryChunk::FromAnyPointerAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }
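
  // Illustrative sketch (not part of the original header): when allocation
  // fills a page exactly, top == page_addr + kPageSize, which masks to the
  // *next* page; subtracting kPointerSize first yields the page that was
  // actually allocated into:
  //
  //   Address top = page->address() + Page::kPageSize;
  //   ASSERT(Page::FromAllocationTop(top) == page);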

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page();
  inline Page* prev_page();
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address within this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset within this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes.  This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Object area size in bytes.
  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page. Objects larger than that size are
  // allocated in large object space and are never moved in memory. This also
  // applies to new space allocation, since objects are never migrated from new
  // space to large object space.  Takes double alignment into account.
  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;

  // Maximum non-code object size that fits in a page. Objects larger than
  // that size are allocated in large object space and are never moved in
  // memory. This also applies to new space allocation, since objects are
  // never migrated from new space to large object space.  Takes double
  // alignment into account.
  static const int kMaxNonCodeHeapObjectSize =
      kNonCodeObjectAreaSize - kPointerSize;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap,
                                 MemoryChunk* chunk,
                                 Executability executable,
                                 PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }

  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }

  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }

  void ResetFreeListStatistics();

#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
  type name() { return name##_; }                 \
  void set_##name(type name) { name##_ = name; }  \
  void add_##name(type name) { name##_ += name; }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() {
    return HeapObject::FromAddress(area_start());
  }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) {
    set_next_chunk(page);
  }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements.  This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space.  On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool exists() { return this != NULL && code_range_ != NULL; }
  Address start() {
    if (this == NULL || code_range_ == NULL) return NULL;
    return static_cast<Address>(code_range_->address());
  }
  bool contains(Address address) {
    if (this == NULL || code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range.  On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list.  When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory.  If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() {
    Clear();
  }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) {
    return starts_[RegionNumber(addr)];
  }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
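
// Illustrative sketch (not part of the original header): the skip list
// keeps, for each 8KB region of a page (kRegionSizeLog2 == 13), the lowest
// start address of any object intersecting that region, so iteration can
// begin at an object boundary instead of scanning from the page start.
// object_start, object_size and interior_address are hypothetical:
//
//   SkipList::Update(object_start, object_size);  // record on allocation
//   Address start = page->skip_list()->StartFor(interior_address);
//   // start <= interior_address if the containing object was recorded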


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(
      intptr_t size, PagedSpace* owner, Executability executable);

  LargePage* AllocateLargePage(
      intptr_t object_size, Space* owner, Executability executable);

  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
    return address < lowest_ever_allocated_ ||
        address >= highest_ever_allocated_;
  }

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed; it can
  // be committed later by calling MemoryChunk::CommitArea.
  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                             intptr_t commit_area_size,
                             Executability executable,
                             Space* space);

  Address ReserveAlignedMemory(size_t requested,
                               size_t alignment,
                               VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t reserve_size,
                                size_t commit_size,
                                size_t alignment,
                                Executability executable,
                                VirtualMemory* controller);

  bool CommitMemory(Address addr, size_t size, Executability executable);

  void FreeMemory(VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk.  Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk.  Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk.  Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space,
                                   AllocationAction action);

  void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
                                              Address start,
                                              size_t commit_size,
                                              size_t reserved_size);

 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  void* lowest_ever_allocated_;
  void* highest_ever_allocated_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(void* low, void* high) {
    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
  }

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterators, to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
//       method, which is used to avoid virtual function calls when iterating
//       over a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects.  The caller must create a new
// iterator in order to be sure to visit these new objects.
class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() {
    return Next();
  }

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page.  Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner,
                         Address start,
                         Address end,
                         PageMode mode,
                         HeapObjectCallback size_func);
};


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned.  Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};


// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// the Page::next_page() call.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top_(NULL), limit_(NULL) {
  }

  INLINE(void set_top(Address top)) {
    SLOW_ASSERT(top == NULL ||
        (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_ASSERT(top_ == NULL ||
        (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
    return top_;
  }

  Address* top_address() {
    return &top_;
  }

  INLINE(void set_limit(Address limit)) {
    SLOW_ASSERT(limit == NULL ||
        (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    SLOW_ASSERT(limit_ == NULL ||
        (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
    return limit_;
  }

  Address* limit_address() {
    return &limit_;
  }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
        && (top_ <= limit_);
  }
#endif

 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};
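
// Illustrative sketch (simplified; the real fast paths live in the space
// classes and in generated code): linear allocation bumps top towards limit.
// info is a hypothetical AllocationInfo, size_in_bytes a hypothetical size:
//
//   Address result = info.top();
//   if (result + size_in_bytes <= info.limit()) {
//     info.set_top(result + size_in_bytes);  // fast path: object at result
//   } else {
//     // fall back to the owning space's slow-path allocation
//   }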
   1374 
   1375 
   1376 // An abstraction of the accounting statistics of a page-structured space.
   1377 // The 'capacity' of a space is the number of object-area bytes (i.e., not
   1378 // including page bookkeeping structures) currently in the space. The 'size'
   1379 // of a space is the number of allocated bytes, the 'waste' in the space is
   1380 // the number of bytes that are not allocated and not available to
   1381 // allocation without reorganizing the space via a GC (e.g. small blocks due
   1382 // to internal fragmentation, top of page areas in map space), and the
   1383 // 'available' bytes are the unallocated bytes that are not waste.  The
   1384 // capacity is the sum of size, waste, and available.
   1385 //
   1386 // The stats are only set by functions that ensure they stay balanced. These
   1387 // functions increase or decrease one of the non-capacity stats in
   1388 // conjunction with capacity, or else they always balance increases and
   1389 // decreases to the non-capacity stats.
   1390 class AllocationStats BASE_EMBEDDED {
   1391  public:
   1392   AllocationStats() { Clear(); }
   1393 
   1394   // Zero out all the allocation statistics (i.e., no capacity).
   1395   void Clear() {
   1396     capacity_ = 0;
   1397     max_capacity_ = 0;
   1398     size_ = 0;
   1399     waste_ = 0;
   1400   }
   1401 
   1402   void ClearSizeWaste() {
   1403     size_ = capacity_;
   1404     waste_ = 0;
   1405   }
   1406 
   1407   // Reset the allocation statistics (i.e., available = capacity with no
   1408   // wasted or allocated bytes).
   1409   void Reset() {
   1410     size_ = 0;
   1411     waste_ = 0;
   1412   }
   1413 
   1414   // Accessors for the allocation statistics.
   1415   intptr_t Capacity() { return capacity_; }
   1416   intptr_t MaxCapacity() { return max_capacity_; }
   1417   intptr_t Size() { return size_; }
   1418   intptr_t Waste() { return waste_; }
   1419 
   1420   // Grow the space by adding available bytes.  They are initially marked as
   1421   // being in use (part of the size), but will normally be immediately freed,
   1422   // putting them on the free list and removing them from size_.
   1423   void ExpandSpace(int size_in_bytes) {
   1424     capacity_ += size_in_bytes;
   1425     size_ += size_in_bytes;
   1426     if (capacity_ > max_capacity_) {
   1427       max_capacity_ = capacity_;
   1428     }
   1429     ASSERT(size_ >= 0);
   1430   }
   1431 
   1432   // Shrink the space by removing available bytes.  Since shrinking is done
   1433   // during sweeping, bytes have been marked as being in use (part of the size)
   1434   // and are hereby freed.
   1435   void ShrinkSpace(int size_in_bytes) {
   1436     capacity_ -= size_in_bytes;
   1437     size_ -= size_in_bytes;
   1438     ASSERT(size_ >= 0);
   1439   }
   1440 
   1441   // Allocate from available bytes (available -> size).
   1442   void AllocateBytes(intptr_t size_in_bytes) {
   1443     size_ += size_in_bytes;
   1444     ASSERT(size_ >= 0);
   1445   }
   1446 
   1447   // Free allocated bytes, making them available (size -> available).
   1448   void DeallocateBytes(intptr_t size_in_bytes) {
   1449     size_ -= size_in_bytes;
   1450     ASSERT(size_ >= 0);
   1451   }
   1452 
   1453   // Waste bytes that were counted as allocated (size -> waste).
   1454   void WasteBytes(int size_in_bytes) {
   1455     size_ -= size_in_bytes;
   1456     waste_ += size_in_bytes;
   1457     ASSERT(size_ >= 0);
   1458   }
   1459 
   1460  private:
   1461   intptr_t capacity_;
   1462   intptr_t max_capacity_;
   1463   intptr_t size_;
   1464   intptr_t waste_;
   1465 };
   1466 
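        // Worked example (illustrative): how the stats stay balanced, where
        // available == capacity - size - waste at every step.
        //
        //   AllocationStats stats;
        //   stats.ExpandSpace(4096);      // capacity = 4096, size = 4096.
        //   stats.DeallocateBytes(4096);  // size = 0, available = 4096.
        //   stats.AllocateBytes(100);     // size = 100, available = 3996.
        //   stats.WasteBytes(8);          // size = 92, waste = 8.
        //   // Invariant: size + waste + available == capacity
        //   //            (92 + 8 + 3996 == 4096).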
   1467 
   1468 // -----------------------------------------------------------------------------
   1469 // Free lists for old object spaces
   1470 //
   1471 // Free-list nodes are free blocks in the heap.  They look like heap objects
   1472 // (free-list node pointers have the heap object tag, and they have a map like
   1473 // a heap object).  They have a size and a next pointer.  The next pointer is
   1474 // the raw address of the next free list node (or NULL).
   1475 class FreeListNode: public HeapObject {
   1476  public:
   1477   // Obtain a free-list node from a raw address.  This is not a cast because
   1478   // it does not check nor require that the first word at the address is a map
   1479   // pointer.
   1480   static FreeListNode* FromAddress(Address address) {
   1481     return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
   1482   }
   1483 
   1484   static inline bool IsFreeListNode(HeapObject* object);
   1485 
   1486   // Set the size in bytes, which can be read with HeapObject::Size().  This
   1487   // function also writes a map to the first word of the block so that it
   1488   // looks like a heap object to the garbage collector and heap iteration
   1489   // functions.
   1490   void set_size(Heap* heap, int size_in_bytes);
   1491 
   1492   // Accessors for the next field.
   1493   inline FreeListNode* next();
   1494   inline FreeListNode** next_address();
   1495   inline void set_next(FreeListNode* next);
   1496 
   1497   inline void Zap();
   1498 
   1499   static inline FreeListNode* cast(MaybeObject* maybe) {
   1500     ASSERT(!maybe->IsFailure());
   1501     return reinterpret_cast<FreeListNode*>(maybe);
   1502   }
   1503 
   1504  private:
   1505   static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
   1506 
   1507   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
   1508 };
   1509 
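        // Illustrative sketch (not part of this header): turning a freed block
        // into a free-list node.  set_size() writes a map into the first word,
        // so the block scans like an ordinary heap object afterwards.
        //
        //   FreeListNode* node = FreeListNode::FromAddress(start);
        //   node->set_size(heap, size_in_bytes);
        //   node->set_next(old_top);  // Push onto an intrusive singly-linked list.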
   1510 
   1511 // The free list category holds a pointer to the top element and a pointer to
   1512 // the end element of the linked list of free memory blocks.
   1513 class FreeListCategory {
   1514  public:
   1515   FreeListCategory() :
   1516       top_(NULL),
   1517       end_(NULL),
   1518       available_(0) {}
   1519 
   1520   intptr_t Concatenate(FreeListCategory* category);
   1521 
   1522   void Reset();
   1523 
   1524   void Free(FreeListNode* node, int size_in_bytes);
   1525 
   1526   FreeListNode* PickNodeFromList(int* node_size);
   1527   FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
   1528 
   1529   intptr_t EvictFreeListItemsInList(Page* p);
   1530 
   1531   void RepairFreeList(Heap* heap);
   1532 
   1533   FreeListNode** GetTopAddress() { return &top_; }
   1534   FreeListNode* top() const { return top_; }
   1535   void set_top(FreeListNode* top) { top_ = top; }
   1536 
   1537   FreeListNode** GetEndAddress() { return &end_; }
   1538   FreeListNode* end() const { return end_; }
   1539   void set_end(FreeListNode* end) { end_ = end; }
   1540 
   1541   int* GetAvailableAddress() { return &available_; }
   1542   int available() const { return available_; }
   1543   void set_available(int available) { available_ = available; }
   1544 
   1545   Mutex* mutex() { return &mutex_; }
   1546 
   1547 #ifdef DEBUG
   1548   intptr_t SumFreeList();
   1549   int FreeListLength();
   1550 #endif
   1551 
   1552  private:
   1553   FreeListNode* top_;
   1554   FreeListNode* end_;
   1555   Mutex mutex_;
   1556 
   1557   // Total available bytes in all blocks of this free list category.
   1558   int available_;
   1559 };
   1560 
   1561 
   1562 // The free list for the old space.  The free list is organized in such a way
   1563 // as to encourage objects allocated around the same time to be near each
   1564 // other.  The normal way to allocate is intended to be by bumping a 'top'
   1565 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
   1566 // find a new space to allocate from.  This is done with the free list, which
   1567 // is divided up into rough categories to cut down on waste.  Having finer
   1568 // categories would scatter allocation more.
   1569 
   1570 // The old space free list is organized in categories.
   1571 // 1-31 words:  Such small free areas are discarded for efficiency reasons.
   1572 //     They can be reclaimed by the compactor.  However, the distance between top
   1573 //     and limit may be this small.
   1574 // 32-255 words: There is a list of spaces this large.  It is used for top and
   1575 //     limit when the object we need to allocate is 1-31 words in size.  These
   1576 //     spaces are called small.
   1577 // 256-2047 words: There is a list of spaces this large.  It is used for top and
   1578 //     limit when the object we need to allocate is 32-255 words in size.  These
   1579 //     spaces are called medium.
   1580 // 2048-16383 words: There is a list of spaces this large.  It is used for top
   1581 //     and limit when the object we need to allocate is 256-2047 words in size.
   1582 //     These spaces are called large.
   1583 // At least 16384 words: This list is for objects of 2048 words or larger.
   1584 //     Empty pages are added to this list.  These spaces are called huge.
   1585 class FreeList BASE_EMBEDDED {
   1586  public:
   1587   explicit FreeList(PagedSpace* owner);
   1588 
   1589   intptr_t Concatenate(FreeList* free_list);
   1590 
   1591   // Clear the free list.
   1592   void Reset();
   1593 
   1594   // Return the number of bytes available on the free list.
   1595   intptr_t available() {
   1596     return small_list_.available() + medium_list_.available() +
   1597            large_list_.available() + huge_list_.available();
   1598   }
   1599 
   1600   // Place a node on the free list.  The block of size 'size_in_bytes'
   1601   // starting at 'start' is placed on the free list.  The return value is the
   1602   // number of bytes that have been lost due to internal fragmentation by
   1603   // freeing the block.  Bookkeeping information will be written to the block,
   1604   // i.e., its contents will be destroyed.  The start address should be word
   1605   // aligned, and the size should be a non-zero multiple of the word size.
   1606   int Free(Address start, int size_in_bytes);
   1607 
   1608   // Allocate a block of size 'size_in_bytes' from the free list.  The block
   1609   // is uninitialized.  NULL is returned if no block is available.  The
   1610   // size should be a non-zero multiple of the word size.
   1612   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
   1613 
   1614 #ifdef DEBUG
   1615   void Zap();
   1616   intptr_t SumFreeLists();
   1617   bool IsVeryLong();
   1618 #endif
   1619 
   1620   // Used after booting the VM.
   1621   void RepairLists(Heap* heap);
   1622 
   1623   intptr_t EvictFreeListItems(Page* p);
   1624 
   1625   FreeListCategory* small_list() { return &small_list_; }
   1626   FreeListCategory* medium_list() { return &medium_list_; }
   1627   FreeListCategory* large_list() { return &large_list_; }
   1628   FreeListCategory* huge_list() { return &huge_list_; }
   1629 
   1630  private:
   1631   // The size range of blocks, in bytes.
   1632   static const int kMinBlockSize = 3 * kPointerSize;
   1633   static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
   1634 
   1635   FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
   1636 
   1637   PagedSpace* owner_;
   1638   Heap* heap_;
   1639 
   1640   static const int kSmallListMin = 0x20 * kPointerSize;
   1641   static const int kSmallListMax = 0xff * kPointerSize;
   1642   static const int kMediumListMax = 0x7ff * kPointerSize;
   1643   static const int kLargeListMax = 0x3fff * kPointerSize;
   1644   static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
   1645   static const int kMediumAllocationMax = kSmallListMax;
   1646   static const int kLargeAllocationMax = kMediumListMax;
   1647   FreeListCategory small_list_;
   1648   FreeListCategory medium_list_;
   1649   FreeListCategory large_list_;
   1650   FreeListCategory huge_list_;
   1651 
   1652   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
   1653 };
   1654 
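        // Illustrative sketch (an assumption about FindNodeFor, whose
        // definition lives elsewhere): mapping a request size to a category
        // using the thresholds declared above.  The constants are private, so
        // this is conceptual only.
        //
        //   FreeListCategory* CategoryFor(FreeList* free_list, int size_in_bytes) {
        //     if (size_in_bytes <= kSmallAllocationMax) return free_list->small_list();
        //     if (size_in_bytes <= kMediumAllocationMax) return free_list->medium_list();
        //     if (size_in_bytes <= kLargeAllocationMax) return free_list->large_list();
        //     return free_list->huge_list();
        //   }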
   1655 
   1656 class PagedSpace : public Space {
   1657  public:
   1658   // Creates a space with a maximum capacity and an id.
   1659   PagedSpace(Heap* heap,
   1660              intptr_t max_capacity,
   1661              AllocationSpace id,
   1662              Executability executable);
   1663 
   1664   virtual ~PagedSpace() {}
   1665 
   1666   // Set up the space using the given address range of virtual memory (from
   1667   // the memory allocator's initial chunk) if possible.  If the block of
   1668   // addresses is not big enough to contain a single page-aligned page, a
   1669   // fresh chunk will be allocated.
   1670   bool SetUp();
   1671 
   1672   // Returns true if the space has been successfully set up and not
   1673   // subsequently torn down.
   1674   bool HasBeenSetUp();
   1675 
   1676   // Cleans up the space, frees all pages in this space except those belonging
   1677   // to the initial chunk, uncommits addresses in the initial chunk.
   1678   void TearDown();
   1679 
   1680   // Checks whether an object/address is in this space.
   1681   inline bool Contains(Address a);
   1682   bool Contains(HeapObject* o) { return Contains(o->address()); }
   1683 
   1684   // Given an address occupied by a live object, return that object if it is
   1685   // in this space, or Failure::Exception() if it is not. The implementation
   1686   // iterates over objects in the page containing the address, so the cost
   1687   // is linear in the number of objects in the page.  It may be slow.
   1688   MUST_USE_RESULT MaybeObject* FindObject(Address addr);
   1689 
   1690   // During boot the free_space_map is created, and afterwards we may need
   1691   // to write it into the free list nodes that were already created.
   1692   void RepairFreeListsAfterBoot();
   1693 
   1694   // Prepares for a mark-compact GC.
   1695   void PrepareForMarkCompact();
   1696 
   1697   // Current capacity without growing (Size() + Available()).
   1698   intptr_t Capacity() { return accounting_stats_.Capacity(); }
   1699 
   1700   // Total amount of memory committed for this space.  For paged
   1701   // spaces this equals the capacity.
   1702   intptr_t CommittedMemory() { return Capacity(); }
   1703 
   1704   // The maximum amount of memory ever committed for this space.
   1705   intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
   1706 
   1707   // Approximate amount of physical memory committed for this space.
   1708   size_t CommittedPhysicalMemory();
   1709 
   1710   struct SizeStats {
   1711     intptr_t Total() {
   1712       return small_size_ + medium_size_ + large_size_ + huge_size_;
   1713     }
   1714 
   1715     intptr_t small_size_;
   1716     intptr_t medium_size_;
   1717     intptr_t large_size_;
   1718     intptr_t huge_size_;
   1719   };
   1720 
   1721   void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
   1722   void ResetFreeListStatistics();
   1723 
   1724   // Sets the available space and the wasted space to zero, and the size to
   1725   // the capacity, i.e. everything is initially counted as allocated.  As
   1726   // free spaces are discovered during sweeping they are subtracted from the
   1727   // size and added to the available and wasted totals.
   1729   void ClearStats() {
   1730     accounting_stats_.ClearSizeWaste();
   1731     ResetFreeListStatistics();
   1732   }
   1733 
   1734   // Increases the number of available bytes of that space.
   1735   void AddToAccountingStats(intptr_t bytes) {
   1736     accounting_stats_.DeallocateBytes(bytes);
   1737   }
   1738 
   1739   // Available bytes without growing.  These are the bytes on the free list.
   1740   // The bytes in the linear allocation area are not included in this total
   1741   // because updating the stats would slow down allocation.  New pages are
   1742   // immediately added to the free list so they show up here.
   1743   intptr_t Available() { return free_list_.available(); }
   1744 
   1745   // Allocated bytes in this space.  Garbage bytes that were not found due to
   1746   // lazy sweeping are counted as being allocated!  The bytes in the current
   1747   // linear allocation area (between top and limit) are also counted here.
   1748   virtual intptr_t Size() { return accounting_stats_.Size(); }
   1749 
   1750   // As Size(), but the bytes in lazily swept pages are estimated and the
   1751   // bytes in the current linear allocation area are not included.
   1752   virtual intptr_t SizeOfObjects();
   1753 
   1754   // Wasted bytes in this space.  These are just the bytes that were thrown away
   1755   // due to being too small to use for allocation.  They do not include the
   1756   // free bytes that were not found at all due to lazy sweeping.
   1757   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
   1758 
   1759   // Returns the allocation pointer in this space.
   1760   Address top() { return allocation_info_.top(); }
   1761   Address limit() { return allocation_info_.limit(); }
   1762 
   1763   // The allocation top address.
   1764   Address* allocation_top_address() {
   1765     return allocation_info_.top_address();
   1766   }
   1767 
   1768   // The allocation limit address.
   1769   Address* allocation_limit_address() {
   1770     return allocation_info_.limit_address();
   1771   }
   1772 
   1773   // Allocate the requested number of bytes in the space if possible, return a
   1774   // failure object if not.
   1775   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
   1776 
   1777   // Give a block of memory to the space's free list.  It might be added to
   1778   // the free list or accounted as waste.
   1779   // Free areas smaller than the smallest free-list category are not linked
   1780   // into the list; they are accounted as wasted bytes instead.
   1781   int Free(Address start, int size_in_bytes) {
   1782     int wasted = free_list_.Free(start, size_in_bytes);
   1783     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
   1784     return size_in_bytes - wasted;
   1785   }
   1786 
   1787   void ResetFreeList() {
   1788     free_list_.Reset();
   1789   }
   1790 
   1791   // Set space allocation info.
   1792   void SetTopAndLimit(Address top, Address limit) {
   1793     ASSERT(top == limit ||
   1794            Page::FromAddress(top) == Page::FromAddress(limit - 1));
   1795     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   1796     allocation_info_.set_top(top);
   1797     allocation_info_.set_limit(limit);
   1798   }
   1799 
   1800   // Empty space allocation info, returning unused area to free list.
   1801   void EmptyAllocationInfo() {
   1802     // Mark the old linear allocation area with a free space map so it can be
   1803     // skipped when scanning the heap.
   1804     int old_linear_size = static_cast<int>(limit() - top());
   1805     Free(top(), old_linear_size);
   1806     SetTopAndLimit(NULL, NULL);
   1807   }
   1808 
   1809   void Allocate(int bytes) {
   1810     accounting_stats_.AllocateBytes(bytes);
   1811   }
   1812 
   1813   void IncreaseCapacity(int size);
   1814 
   1815   // Releases an unused page and shrinks the space.
   1816   void ReleasePage(Page* page, bool unlink);
   1817 
   1818   // The dummy page that anchors the linked list of pages.
   1819   Page* anchor() { return &anchor_; }
   1820 
   1821 #ifdef VERIFY_HEAP
   1822   // Verify integrity of this space.
   1823   virtual void Verify(ObjectVisitor* visitor);
   1824 
   1825   // Overridden by subclasses to verify space-specific object
   1826   // properties (e.g., only maps or free-list nodes are in map space).
   1827   virtual void VerifyObject(HeapObject* obj) {}
   1828 #endif
   1829 
   1830 #ifdef DEBUG
   1831   // Print meta info and objects in this space.
   1832   virtual void Print();
   1833 
   1834   // Reports statistics for the space
   1835   void ReportStatistics();
   1836 
   1837   // Report code object related statistics
   1838   void CollectCodeStatistics();
   1839   static void ReportCodeStatistics(Isolate* isolate);
   1840   static void ResetCodeStatistics(Isolate* isolate);
   1841 #endif
   1842 
   1843   bool was_swept_conservatively() { return was_swept_conservatively_; }
   1844   void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
   1845 
   1846   // Evacuation candidates are swept by the evacuator.  Needs to return a
   1847   // valid result before _and_ after evacuation has finished.
   1848   static bool ShouldBeSweptLazily(Page* p) {
   1849     return !p->IsEvacuationCandidate() &&
   1850            !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
   1851            !p->WasSweptPrecisely();
   1852   }
   1853 
   1854   void SetPagesToSweep(Page* first) {
   1855     ASSERT(unswept_free_bytes_ == 0);
   1856     if (first == &anchor_) first = NULL;
   1857     first_unswept_page_ = first;
   1858   }
   1859 
   1860   void IncrementUnsweptFreeBytes(intptr_t by) {
   1861     unswept_free_bytes_ += by;
   1862   }
   1863 
   1864   void IncreaseUnsweptFreeBytes(Page* p) {
   1865     ASSERT(ShouldBeSweptLazily(p));
   1866     unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
   1867   }
   1868 
   1869   void DecrementUnsweptFreeBytes(intptr_t by) {
   1870     unswept_free_bytes_ -= by;
   1871   }
   1872 
   1873   void DecreaseUnsweptFreeBytes(Page* p) {
   1874     ASSERT(ShouldBeSweptLazily(p));
   1875     unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
   1876   }
   1877 
   1878   void ResetUnsweptFreeBytes() {
   1879     unswept_free_bytes_ = 0;
   1880   }
   1881 
   1882   bool AdvanceSweeper(intptr_t bytes_to_sweep);
   1883 
   1884   // When parallel sweeper threads are active and the main thread has
   1885   // finished its sweeping phase, this function waits for them to complete;
   1886   // otherwise AdvanceSweeper(size_in_bytes) is called.
   1887   bool EnsureSweeperProgress(intptr_t size_in_bytes);
   1888 
   1889   bool IsLazySweepingComplete() {
   1890     return !first_unswept_page_->is_valid();
   1891   }
   1892 
   1893   Page* FirstPage() { return anchor_.next_page(); }
   1894   Page* LastPage() { return anchor_.prev_page(); }
   1895 
   1896   void EvictEvacuationCandidatesFromFreeLists();
   1897 
   1898   bool CanExpand();
   1899 
   1900   // Returns the total number of pages in this space.
   1901   int CountTotalPages();
   1902 
   1903   // Returns the size of the allocatable area on a page in this space.
   1904   inline int AreaSize() {
   1905     return area_size_;
   1906   }
   1907 
   1908  protected:
   1909   FreeList* free_list() { return &free_list_; }
   1910 
   1911   int area_size_;
   1912 
   1913   // Maximum capacity of this space.
   1914   intptr_t max_capacity_;
   1915 
   1916   intptr_t SizeOfFirstPage();
   1917 
   1918   // Accounting information for this space.
   1919   AllocationStats accounting_stats_;
   1920 
   1921   // The dummy page that anchors the double linked list of pages.
   1922   Page anchor_;
   1923 
   1924   // The space's free list.
   1925   FreeList free_list_;
   1926 
   1927   // Normal allocation information.
   1928   AllocationInfo allocation_info_;
   1929 
   1930   bool was_swept_conservatively_;
   1931 
   1932   // The first page to be swept when the lazy sweeper advances. Is set
   1933   // to NULL when all pages have been swept.
   1934   Page* first_unswept_page_;
   1935 
   1936   // The number of free bytes which could be reclaimed by advancing the
   1937   // lazy sweeper.  This is only an estimation because lazy sweeping is
   1938   // done conservatively.
   1939   intptr_t unswept_free_bytes_;
   1940 
   1941   // Expands the space by allocating a fixed number of pages. Returns false if
   1942   // it cannot allocate the requested number of pages from the OS, or if
   1943   // the hard heap size limit has been hit.
   1944   bool Expand();
   1945 
   1946   // Generic fast case allocation function that tries linear allocation at the
   1947   // address denoted by top in allocation_info_.
   1948   inline HeapObject* AllocateLinearly(int size_in_bytes);
   1949 
   1950   // Slow path of AllocateRaw.  This function is space-dependent.
   1951   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
   1952 
   1953   friend class PageIterator;
   1954   friend class SweeperThread;
   1955 };
   1956 
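        // Illustrative sketch (an assumption about the inline definition of
        // AllocateRaw, which lives elsewhere): try the linear bump-pointer path
        // first, then fall back to the space-dependent slow path, and finally
        // signal a retry-after-GC failure.
        //
        //   MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
        //     HeapObject* object = AllocateLinearly(size_in_bytes);  // Fast path.
        //     if (object == NULL) object = SlowAllocateRaw(size_in_bytes);
        //     if (object == NULL) return Failure::RetryAfterGC(identity());
        //     return object;
        //   }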
   1957 
   1958 class NumberAndSizeInfo BASE_EMBEDDED {
   1959  public:
   1960   NumberAndSizeInfo() : number_(0), bytes_(0) {}
   1961 
   1962   int number() const { return number_; }
   1963   void increment_number(int num) { number_ += num; }
   1964 
   1965   int bytes() const { return bytes_; }
   1966   void increment_bytes(int size) { bytes_ += size; }
   1967 
   1968   void clear() {
   1969     number_ = 0;
   1970     bytes_ = 0;
   1971   }
   1972 
   1973  private:
   1974   int number_;
   1975   int bytes_;
   1976 };
   1977 
   1978 
   1979 // HistogramInfo class for recording a single "bar" of a histogram.  This
   1980 // class is used for collecting statistics to print to the log file.
   1981 class HistogramInfo: public NumberAndSizeInfo {
   1982  public:
   1983   HistogramInfo() : NumberAndSizeInfo() {}
   1984 
   1985   const char* name() { return name_; }
   1986   void set_name(const char* name) { name_ = name; }
   1987 
   1988  private:
   1989   const char* name_;
   1990 };
   1991 
   1992 
   1993 enum SemiSpaceId {
   1994   kFromSpace = 0,
   1995   kToSpace = 1
   1996 };
   1997 
   1998 
   1999 class SemiSpace;
   2000 
   2001 
   2002 class NewSpacePage : public MemoryChunk {
   2003  public:
   2004   // GC-related flags copied from from-space to to-space when
   2005   // flipping semispaces.
   2006   static const intptr_t kCopyOnFlipFlagsMask =
   2007     (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
   2008     (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
   2009     (1 << MemoryChunk::SCAN_ON_SCAVENGE);
   2010 
   2011   static const int kAreaSize = Page::kNonCodeObjectAreaSize;
   2012 
   2013   inline NewSpacePage* next_page() const {
   2014     return static_cast<NewSpacePage*>(next_chunk());
   2015   }
   2016 
   2017   inline void set_next_page(NewSpacePage* page) {
   2018     set_next_chunk(page);
   2019   }
   2020 
   2021   inline NewSpacePage* prev_page() const {
   2022     return static_cast<NewSpacePage*>(prev_chunk());
   2023   }
   2024 
   2025   inline void set_prev_page(NewSpacePage* page) {
   2026     set_prev_chunk(page);
   2027   }
   2028 
   2029   SemiSpace* semi_space() {
   2030     return reinterpret_cast<SemiSpace*>(owner());
   2031   }
   2032 
   2033   bool is_anchor() { return !this->InNewSpace(); }
   2034 
   2035   static bool IsAtStart(Address addr) {
   2036     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
   2037         == kObjectStartOffset;
   2038   }
   2039 
   2040   static bool IsAtEnd(Address addr) {
   2041     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
   2042   }
   2043 
   2044   Address address() {
   2045     return reinterpret_cast<Address>(this);
   2046   }
   2047 
   2048   // Finds the NewSpacePage containing the given address.
   2049   static inline NewSpacePage* FromAddress(Address address_in_page) {
   2050     Address page_start =
   2051         reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
   2052                                   ~Page::kPageAlignmentMask);
   2053     NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
   2054     return page;
   2055   }
   2056 
   2057   // Find the page for a limit address. A limit address is either an address
   2058   // inside a page, or the address right after the last byte of a page.
   2059   static inline NewSpacePage* FromLimit(Address address_limit) {
   2060     return NewSpacePage::FromAddress(address_limit - 1);
   2061   }
   2062 
   2063  private:
   2064   // Create a NewSpacePage object that is only used as anchor
   2065   // for the doubly-linked list of real pages.
   2066   explicit NewSpacePage(SemiSpace* owner) {
   2067     InitializeAsAnchor(owner);
   2068   }
   2069 
   2070   static NewSpacePage* Initialize(Heap* heap,
   2071                                   Address start,
   2072                                   SemiSpace* semi_space);
   2073 
   2074   // Initialize a fake NewSpacePage used as a sentinel at the ends
   2075   // of a doubly-linked list of real NewSpacePages.
   2076   // Only uses the prev/next links, and sets flags to not be in new-space.
   2077   void InitializeAsAnchor(SemiSpace* owner);
   2078 
   2079   friend class SemiSpace;
   2080   friend class SemiSpaceIterator;
   2081 };
   2082 
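        // Worked example (illustrative, assuming 1 MB pages so that
        // Page::kPageAlignmentMask == 0xfffff): FromAddress(0x40212340) masks
        // the low bits away and yields the page starting at 0x40200000, while
        // FromLimit(0x40300000) first steps back one byte so that a
        // one-past-the-end limit maps to the page it terminates.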
   2083 
   2084 // -----------------------------------------------------------------------------
   2085 // SemiSpace in young generation
   2086 //
   2087 // A semispace is a contiguous chunk of memory holding page-like memory
   2088 // chunks.  The mark-compact collector uses the memory of the first page in
   2089 // the from space as a marking stack when tracing live objects.
   2090 
   2091 class SemiSpace : public Space {
   2092  public:
   2093   // Constructor.
   2094   SemiSpace(Heap* heap, SemiSpaceId semispace)
   2095     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
   2096       start_(NULL),
   2097       age_mark_(NULL),
   2098       id_(semispace),
   2099       anchor_(this),
   2100       current_page_(NULL) { }
   2101 
   2102   // Sets up the semispace using the given chunk.
   2103   void SetUp(Address start, int initial_capacity, int maximum_capacity);
   2104 
   2105   // Tear down the space.  Heap memory was not allocated by the space, so it
   2106   // is not deallocated here.
   2107   void TearDown();
   2108 
   2109   // True if the space has been set up but not torn down.
   2110   bool HasBeenSetUp() { return start_ != NULL; }
   2111 
   2112   // Grow the semispace to the new capacity.  The new capacity
   2113   // requested must be larger than the current capacity and less than
   2114   // the maximum capacity.
   2115   bool GrowTo(int new_capacity);
   2116 
   2117   // Shrinks the semispace to the new capacity.  The new capacity
   2118   // requested must be more than the amount of used memory in the
   2119   // semispace and less than the current capacity.
   2120   bool ShrinkTo(int new_capacity);
   2121 
   2122   // Returns the start address of the first page of the space.
   2123   Address space_start() {
   2124     ASSERT(anchor_.next_page() != &anchor_);
   2125     return anchor_.next_page()->area_start();
   2126   }
   2127 
   2128   // Returns the start address of the current page of the space.
   2129   Address page_low() {
   2130     return current_page_->area_start();
   2131   }
   2132 
   2133   // Returns one past the end address of the space.
   2134   Address space_end() {
   2135     return anchor_.prev_page()->area_end();
   2136   }
   2137 
   2138   // Returns one past the end address of the current page of the space.
   2139   Address page_high() {
   2140     return current_page_->area_end();
   2141   }
   2142 
   2143   bool AdvancePage() {
   2144     NewSpacePage* next_page = current_page_->next_page();
   2145     if (next_page == anchor()) return false;
   2146     current_page_ = next_page;
   2147     return true;
   2148   }
   2149 
   2150   // Resets the space to using the first page.
   2151   void Reset();
   2152 
   2153   // Age mark accessors.
   2154   Address age_mark() { return age_mark_; }
   2155   void set_age_mark(Address mark);
   2156 
   2157   // True if the address is in the address range of this semispace (not
   2158   // necessarily below the allocation pointer).
   2159   bool Contains(Address a) {
   2160     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
   2161            == reinterpret_cast<uintptr_t>(start_);
   2162   }
   2163 
   2164   // True if the object is a heap object in the address range of this
   2165   // semispace (not necessarily below the allocation pointer).
   2166   bool Contains(Object* o) {
   2167     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
   2168   }
   2169 
   2170   // If we don't have these here then SemiSpace will be abstract.  However
   2171   // they should never be called.
   2172   virtual intptr_t Size() {
   2173     UNREACHABLE();
   2174     return 0;
   2175   }
   2176 
   2177   bool is_committed() { return committed_; }
   2178   bool Commit();
   2179   bool Uncommit();
   2180 
   2181   NewSpacePage* first_page() { return anchor_.next_page(); }
   2182   NewSpacePage* current_page() { return current_page_; }
   2183 
   2184 #ifdef VERIFY_HEAP
   2185   virtual void Verify();
   2186 #endif
   2187 
   2188 #ifdef DEBUG
   2189   virtual void Print();
   2190   // Validate a range of addresses in a SemiSpace.
   2191   // The "from" address must be on a page prior to the "to" address,
   2192   // in the linked page order, or it must be earlier on the same page.
   2193   static void AssertValidRange(Address from, Address to);
   2194 #else
   2195   // Do nothing.
   2196   inline static void AssertValidRange(Address from, Address to) {}
   2197 #endif
   2198 
   2199   // Returns the current capacity of the semi space.
   2200   int Capacity() { return capacity_; }
   2201 
   2202   // Returns the maximum capacity of the semi space.
   2203   int MaximumCapacity() { return maximum_capacity_; }
   2204 
   2205   // Returns the initial capacity of the semi space.
   2206   int InitialCapacity() { return initial_capacity_; }
   2207 
   2208   SemiSpaceId id() { return id_; }
   2209 
   2210   static void Swap(SemiSpace* from, SemiSpace* to);
   2211 
   2212   // Returns the maximum amount of memory ever committed by the semi space.
   2213   size_t MaximumCommittedMemory() { return maximum_committed_; }
   2214 
   2215   // Approximate amount of physical memory committed for this space.
   2216   size_t CommittedPhysicalMemory();
   2217 
   2218  private:
   2219   // Flips the semispace between being from-space and to-space.
   2220   // Copies the flags into the masked positions on all pages in the space.
   2221   void FlipPages(intptr_t flags, intptr_t flag_mask);
   2222 
   2223   // Updates Capacity and MaximumCommitted based on new capacity.
   2224   void SetCapacity(int new_capacity);
   2225 
   2226   NewSpacePage* anchor() { return &anchor_; }
   2227 
   2228   // The current and maximum capacity of the space.
   2229   int capacity_;
   2230   int maximum_capacity_;
   2231   int initial_capacity_;
   2232 
   2233   intptr_t maximum_committed_;
   2234 
   2235   // The start address of the space.
   2236   Address start_;
   2237   // Used to govern object promotion during mark-compact collection.
   2238   Address age_mark_;
   2239 
   2240   // Masks and comparison values to test for containment in this semispace.
   2241   uintptr_t address_mask_;
   2242   uintptr_t object_mask_;
   2243   uintptr_t object_expected_;
   2244 
   2245   bool committed_;
   2246   SemiSpaceId id_;
   2247 
   2248   NewSpacePage anchor_;
   2249   NewSpacePage* current_page_;
   2250 
   2251   friend class SemiSpaceIterator;
   2252   friend class NewSpacePageIterator;
   2253  public:
   2254   TRACK_MEMORY("SemiSpace")
   2255 };
   2256 
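        // Worked example (illustrative): the Contains(Address) test above.  For
        // a semispace reserved at 0x20000000 with an 8 MB address range on a
        // 32-bit build, address_mask_ == ~(8 MB - 1) == 0xff800000, so
        //
        //   Contains(a)  <=>  (a & 0xff800000) == 0x20000000
        //
        // and membership costs one AND plus one compare, with no page walk.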
   2257 
   2258 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
   2259 // semispace of the heap's new space.  It iterates over the objects in the
   2260 // semispace from a given start address (defaulting to the bottom of the
   2261 // semispace) to the top of the semispace.  New objects allocated after the
   2262 // iterator is created are not iterated.
   2263 class SemiSpaceIterator : public ObjectIterator {
   2264  public:
   2265   // Create an iterator over the objects in the given space.  If no start
   2266   // address is given, the iterator starts from the bottom of the space.  If
   2267   // no size function is given, the iterator calls Object::Size().
   2268 
   2269   // Iterate over all of allocated to-space.
   2270   explicit SemiSpaceIterator(NewSpace* space);
   2271   // Iterate over all of allocated to-space, with a custom size function.
   2272   SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
   2273   // Iterate over part of allocated to-space, from start to the end
   2274   // of allocation.
   2275   SemiSpaceIterator(NewSpace* space, Address start);
   2276   // Iterate from one address to another in the same semi-space.
   2277   SemiSpaceIterator(Address from, Address to);
   2278 
   2279   HeapObject* Next() {
   2280     if (current_ == limit_) return NULL;
   2281     if (NewSpacePage::IsAtEnd(current_)) {
   2282       NewSpacePage* page = NewSpacePage::FromLimit(current_);
   2283       page = page->next_page();
   2284       ASSERT(!page->is_anchor());
   2285       current_ = page->area_start();
   2286       if (current_ == limit_) return NULL;
   2287     }
   2288 
   2289     HeapObject* object = HeapObject::FromAddress(current_);
   2290     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
   2291 
   2292     current_ += size;
   2293     return object;
   2294   }
   2295 
   2296   // Implementation of the ObjectIterator functions.
   2297   virtual HeapObject* next_object() { return Next(); }
   2298 
   2299  private:
   2300   void Initialize(Address start,
   2301                   Address end,
   2302                   HeapObjectCallback size_func);
   2303 
   2304   // The current iteration point.
   2305   Address current_;
   2306   // The end of iteration.
   2307   Address limit_;
   2308   // The callback function.
   2309   HeapObjectCallback size_func_;
   2310 };
   2311 
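        // Usage sketch (illustrative): walking every object in the active
        // semispace.  Objects allocated after the iterator is created are not
        // visited.
        //
        //   SemiSpaceIterator it(heap->new_space());
        //   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        //     // ... visit obj ...
        //   }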
   2312 
   2313 // -----------------------------------------------------------------------------
   2314 // A PageIterator iterates the pages in a semi-space.
   2315 class NewSpacePageIterator BASE_EMBEDDED {
   2316  public:
   2317   // Make an iterator that runs over all pages in to-space.
   2318   explicit inline NewSpacePageIterator(NewSpace* space);
   2319 
   2320   // Make an iterator that runs over all pages in the given semispace,
   2321   // even those not used in allocation.
   2322   explicit inline NewSpacePageIterator(SemiSpace* space);
   2323 
   2324   // Make iterator that iterates from the page containing start
   2325   // to the page that contains limit in the same semispace.
   2326   inline NewSpacePageIterator(Address start, Address limit);
   2327 
   2328   inline bool has_next();
   2329   inline NewSpacePage* next();
   2330 
   2331  private:
   2332   NewSpacePage* prev_page_;  // Previous page returned.
   2333   // Next page that will be returned.  Cached here so that we can use this
   2334   // iterator for operations that deallocate pages.
   2335   NewSpacePage* next_page_;
   2336   // Last page returned.
   2337   NewSpacePage* last_page_;
   2338 };
   2339 
   2340 
   2341 // -----------------------------------------------------------------------------
   2342 // The young generation space.
   2343 //
   2344 // The new space consists of a contiguous pair of semispaces.  It simply
   2345 // forwards most functions to the appropriate semispace.
   2346 
   2347 class NewSpace : public Space {
   2348  public:
   2349   // Constructor.
   2350   explicit NewSpace(Heap* heap)
   2351     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
   2352       to_space_(heap, kToSpace),
   2353       from_space_(heap, kFromSpace),
   2354       reservation_(),
   2355       inline_allocation_limit_step_(0) {}
   2356 
   2357   // Sets up the new space using the given chunk.
   2358   bool SetUp(int reserved_semispace_size, int max_semispace_size);
   2359 
   2360   // Tears down the space.  Heap memory was not allocated by the space, so it
   2361   // is not deallocated here.
   2362   void TearDown();
   2363 
   2364   // True if the space has been set up but not torn down.
   2365   bool HasBeenSetUp() {
   2366     return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
   2367   }
   2368 
   2369   // Flip the pair of spaces.
   2370   void Flip();
   2371 
   2372   // Grow the capacity of the semispaces.  Assumes that they are not at
   2373   // their maximum capacity.
   2374   void Grow();
   2375 
   2376   // Shrink the capacity of the semispaces.
   2377   void Shrink();
   2378 
   2379   // True if the address or object lies in the address range of either
   2380   // semispace (not necessarily below the allocation pointer).
   2381   bool Contains(Address a) {
   2382     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
   2383         == reinterpret_cast<uintptr_t>(start_);
   2384   }
   2385 
   2386   bool Contains(Object* o) {
   2387     Address a = reinterpret_cast<Address>(o);
   2388     return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
   2389   }
   2390 
   2391   // Return the allocated bytes in the active semispace.
   2392   virtual intptr_t Size() {
   2393     return pages_used_ * NewSpacePage::kAreaSize +
   2394         static_cast<int>(top() - to_space_.page_low());
   2395   }
   2396 
   2397   // The same, but returning an int.  The intptr_t version must be kept
   2398   // because it is inherited, but when we know we are dealing with the new
   2399   // space, which cannot grow as large as the other spaces, this is useful:
   2400   int SizeAsInt() { return static_cast<int>(Size()); }
   2401 
   2402   // Return the current capacity of a semispace, in object-area bytes.
   2403   intptr_t EffectiveCapacity() {
   2404     SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
   2405     return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
   2406   }
   2407 
   2408   // Return the current capacity of a semispace.
   2409   intptr_t Capacity() {
   2410     ASSERT(to_space_.Capacity() == from_space_.Capacity());
   2411     return to_space_.Capacity();
   2412   }
   2413 
   2414   // Return the total amount of memory committed for new space.
   2415   intptr_t CommittedMemory() {
   2416     if (from_space_.is_committed()) return 2 * Capacity();
   2417     return Capacity();
   2418   }
   2419 
   2420   // Return the maximum amount of memory ever committed for new space.
   2421   intptr_t MaximumCommittedMemory() {
   2422     return to_space_.MaximumCommittedMemory() +
   2423         from_space_.MaximumCommittedMemory();
   2424   }
   2425 
   2426   // Approximate amount of physical memory committed for this space.
   2427   size_t CommittedPhysicalMemory();
   2428 
   2429   // Return the available bytes without growing.
   2430   intptr_t Available() {
   2431     return Capacity() - Size();
   2432   }
   2433 
   2434   // Return the maximum capacity of a semispace.
   2435   int MaximumCapacity() {
   2436     ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
   2437     return to_space_.MaximumCapacity();
   2438   }
   2439 
   2440   // Returns the initial capacity of a semispace.
   2441   int InitialCapacity() {
   2442     ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
   2443     return to_space_.InitialCapacity();
   2444   }
   2445 
   2446   // Return the address of the allocation pointer in the active semispace.
   2447   Address top() {
   2448     ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
   2449     return allocation_info_.top();
   2450   }
   2451 
   2452   void set_top(Address top) {
   2453     ASSERT(to_space_.current_page()->ContainsLimit(top));
   2454     allocation_info_.set_top(top);
   2455   }
   2456 
   2457   // Return the address of the first object in the active semispace.
   2458   Address bottom() { return to_space_.space_start(); }
   2459 
   2460   // Get the age mark of the inactive semispace.
   2461   Address age_mark() { return from_space_.age_mark(); }
   2462   // Set the age mark in the active semispace.
   2463   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
   2464 
   2465   // The start address of the space and a bit mask.  ANDing an address in
   2466   // the new space with the mask yields the start address.
   2467   Address start() { return start_; }
   2468   uintptr_t mask() { return address_mask_; }
   2469 
   2470   INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
   2471     ASSERT(Contains(addr));
   2472     ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
   2473            IsAligned(OffsetFrom(addr) - 1, kPointerSize));
   2474     return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
   2475   }
   2476 
   2477   INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
   2478     return reinterpret_cast<Address>(index << kPointerSizeLog2);
   2479   }
   2480 
   2481   // The allocation top and limit address.
   2482   Address* allocation_top_address() {
   2483     return allocation_info_.top_address();
   2484   }
   2485 
   2486   // The allocation limit address.
   2487   Address* allocation_limit_address() {
   2488     return allocation_info_.limit_address();
   2489   }
   2490 
   2491   MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
   2492 
   2493   // Reset the allocation pointer to the beginning of the active semispace.
   2494   void ResetAllocationInfo();
   2495 
   2496   void UpdateInlineAllocationLimit(int size_in_bytes);
   2497   void LowerInlineAllocationLimit(intptr_t step) {
   2498     inline_allocation_limit_step_ = step;
   2499     UpdateInlineAllocationLimit(0);
   2500     top_on_previous_step_ = allocation_info_.top();
   2501   }
   2502 
   2503   // Get the extent of the inactive semispace (for use as a marking stack,
   2504   // or to zap it). Notice: space-addresses are not necessarily on the
   2505   // same page, so FromSpaceStart() might be above FromSpaceEnd().
   2506   Address FromSpacePageLow() { return from_space_.page_low(); }
   2507   Address FromSpacePageHigh() { return from_space_.page_high(); }
   2508   Address FromSpaceStart() { return from_space_.space_start(); }
   2509   Address FromSpaceEnd() { return from_space_.space_end(); }
   2510 
   2511   // Get the extent of the active semispace's pages' memory.
   2512   Address ToSpaceStart() { return to_space_.space_start(); }
   2513   Address ToSpaceEnd() { return to_space_.space_end(); }
   2514 
   2515   inline bool ToSpaceContains(Address address) {
   2516     return to_space_.Contains(address);
   2517   }
   2518   inline bool FromSpaceContains(Address address) {
   2519     return from_space_.Contains(address);
   2520   }
   2521 
   2522   // True if the object is a heap object in the address range of the
   2523   // respective semispace (not necessarily below the allocation pointer of the
   2524   // semispace).
   2525   inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
   2526   inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
   2527 
   2528   // Try to switch the active semispace to a new, empty page.
   2529   // Returns false if this isn't possible or reasonable (i.e., there
   2530   // are no pages, or the current page is already empty), or true
   2531   // if successful.
   2532   bool AddFreshPage();
   2533 
   2534 #ifdef VERIFY_HEAP
   2535   // Verify the active semispace.
   2536   virtual void Verify();
   2537 #endif
   2538 
   2539 #ifdef DEBUG
   2540   // Print the active semispace.
   2541   virtual void Print() { to_space_.Print(); }
   2542 #endif
   2543 
   2544   // Iterates the active semispace to collect statistics.
   2545   void CollectStatistics();
   2546   // Reports previously collected statistics of the active semispace.
   2547   void ReportStatistics();
   2548   // Clears previously collected statistics.
   2549   void ClearHistograms();
   2550 
   2551   // Record the allocation or promotion of a heap object.  Note that we don't
   2552   // record every single allocation, but only those that happen in the
   2553   // to space during a scavenge GC.
   2554   void RecordAllocation(HeapObject* obj);
   2555   void RecordPromotion(HeapObject* obj);
   2556 
   2557   // Return whether the operation succeeded.
   2558   bool CommitFromSpaceIfNeeded() {
   2559     if (from_space_.is_committed()) return true;
   2560     return from_space_.Commit();
   2561   }
   2562 
   2563   bool UncommitFromSpace() {
   2564     if (!from_space_.is_committed()) return true;
   2565     return from_space_.Uncommit();
   2566   }
   2567 
   2568   inline intptr_t inline_allocation_limit_step() {
   2569     return inline_allocation_limit_step_;
   2570   }
   2571 
   2572   SemiSpace* active_space() { return &to_space_; }
   2573 
   2574  private:
   2575   // Update allocation info to match the current to-space page.
   2576   void UpdateAllocationInfo();
   2577 
   2578   Address chunk_base_;
   2579   uintptr_t chunk_size_;
   2580 
   2581   // The semispaces.
   2582   SemiSpace to_space_;
   2583   SemiSpace from_space_;
   2584   VirtualMemory reservation_;
   2585   int pages_used_;
   2586 
   2587   // Start address and bit mask for containment testing.
   2588   Address start_;
   2589   uintptr_t address_mask_;
   2590   uintptr_t object_mask_;
   2591   uintptr_t object_expected_;
   2592 
   2593   // Allocation pointer and limit for normal allocation and allocation during
   2594   // mark-compact collection.
   2595   AllocationInfo allocation_info_;
   2596 
   2597   // When incremental marking is active we set allocation_info_.limit below
   2598   // the actual limit and then gradually increase it in steps, to guarantee
   2599   // that incremental marking steps are performed even when all allocation
   2600   // is done from inlined generated code.
   2601   intptr_t inline_allocation_limit_step_;
   2602 
   2603   Address top_on_previous_step_;
   2604 
   2605   HistogramInfo* allocated_histogram_;
   2606   HistogramInfo* promoted_histogram_;
   2607 
   2608   MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
   2609 
   2610   friend class SemiSpaceIterator;
   2611 
   2612  public:
   2613   TRACK_MEMORY("NewSpace")
   2614 };
   2615 
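        // Illustrative note (simplified): the inline allocation limit step is
        // what ties generated code to incremental marking.  After
        // LowerInlineAllocationLimit(step), the inline fast path fails as soon
        // as top crosses the artificially lowered limit, giving the runtime a
        // chance to do an incremental marking step for the bytes allocated
        // since top_on_previous_step_, before raising the limit again with
        // UpdateInlineAllocationLimit().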
   2616 
   2617 // -----------------------------------------------------------------------------
   2618 // Old object space (excluding map objects)
   2619 
   2620 class OldSpace : public PagedSpace {
   2621  public:
   2622   // Creates an old space object with a given maximum capacity.
   2623   // The constructor does not allocate pages from the OS.
   2624   OldSpace(Heap* heap,
   2625            intptr_t max_capacity,
   2626            AllocationSpace id,
   2627            Executability executable)
   2628       : PagedSpace(heap, max_capacity, id, executable) {
   2629   }
   2630 
   2631  public:
   2632   TRACK_MEMORY("OldSpace")
   2633 };
   2634 
   2635 
   2636 // For contiguous spaces, top should be in the space (or at the end) and limit
   2637 // should be the end of the space.
   2638 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
   2639   SLOW_ASSERT((space).page_low() <= (info).top() \
   2640               && (info).top() <= (space).page_high() \
   2641               && (info).limit() <= (space).page_high())
   2642 
   2643 
   2644 // -----------------------------------------------------------------------------
   2645 // Old space for all map objects
   2646 
   2647 class MapSpace : public PagedSpace {
   2648  public:
   2649   // Creates a map space object with a maximum capacity.
   2650   MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
   2651       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
   2652         max_map_space_pages_(kMaxMapPageIndex - 1) {
   2653   }
   2654 
   2655   // An upper bound on the number of pages in the map space.
   2656   // TODO(1600): this limit is artificial just to keep the code compilable.
   2657   static const int kMaxMapPageIndex = 1 << 16;
   2658 
   2659   virtual int RoundSizeDownToObjectAlignment(int size) {
   2660     if (IsPowerOf2(Map::kSize)) {
   2661       return RoundDown(size, Map::kSize);
   2662     } else {
   2663       return (size / Map::kSize) * Map::kSize;
   2664     }
   2665   }
   2666 
   2667  protected:
   2668   virtual void VerifyObject(HeapObject* obj);
   2669 
   2670  private:
   2671   static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
   2672 
   2673   // Do map space compaction if there is a page gap.
   2674   int CompactionThreshold() {
   2675     return kMapsPerPage * (max_map_space_pages_ - 1);
   2676   }
   2677 
   2678   const int max_map_space_pages_;
   2679 
   2680  public:
   2681   TRACK_MEMORY("MapSpace")
   2682 };
   2683 
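        // Worked example (illustrative, with a hypothetical Map::kSize of 88
        // bytes): 88 is not a power of two, so RoundSizeDownToObjectAlignment()
        // computes (1000 / 88) * 88 == 968, the largest multiple of the object
        // size not exceeding 1000.  For power-of-two object sizes the cheaper
        // masking RoundDown() path is used instead.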
   2684 
   2685 // -----------------------------------------------------------------------------
   2686 // Old space for simple property cell objects
   2687 
   2688 class CellSpace : public PagedSpace {
   2689  public:
   2690   // Creates a cell space object with a maximum capacity.
   2691   CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
   2692       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
   2693   }
   2694 
   2695   virtual int RoundSizeDownToObjectAlignment(int size) {
   2696     if (IsPowerOf2(Cell::kSize)) {
   2697       return RoundDown(size, Cell::kSize);
   2698     } else {
   2699       return (size / Cell::kSize) * Cell::kSize;
   2700     }
   2701   }
   2702 
   2703  protected:
   2704   virtual void VerifyObject(HeapObject* obj);
   2705 
   2706  public:
   2707   TRACK_MEMORY("CellSpace")
   2708 };
   2709 
   2710 
   2711 // -----------------------------------------------------------------------------
   2712 // Old space for all global object property cell objects
   2713 
   2714 class PropertyCellSpace : public PagedSpace {
   2715  public:
   2716   // Creates a property cell space object with a maximum capacity.
   2717   PropertyCellSpace(Heap* heap, intptr_t max_capacity,
   2718                     AllocationSpace id)
   2719       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
   2720   }
   2721 
   2722   virtual int RoundSizeDownToObjectAlignment(int size) {
   2723     if (IsPowerOf2(PropertyCell::kSize)) {
   2724       return RoundDown(size, PropertyCell::kSize);
   2725     } else {
   2726       return (size / PropertyCell::kSize) * PropertyCell::kSize;
   2727     }
   2728   }
   2729 
   2730  protected:
   2731   virtual void VerifyObject(HeapObject* obj);
   2732 
   2733  public:
   2734   TRACK_MEMORY("PropertyCellSpace")
   2735 };
   2736 
   2737 
   2738 // -----------------------------------------------------------------------------
   2739 // Large objects (> Page::kMaxHeapObjectSize) are allocated and managed by
   2740 // the large object space.  A large object is allocated from the OS heap
   2741 // with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
   2742 // A large object always starts at offset Page::kObjectStartOffset within
   2743 // a page.  Large objects do not move during garbage collections.
   2744 
   2745 class LargeObjectSpace : public Space {
   2746  public:
   2747   LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
   2748   virtual ~LargeObjectSpace() {}
   2749 
   2750   // Initializes internal data structures.
   2751   bool SetUp();
   2752 
   2753   // Releases internal resources, frees objects in this space.
   2754   void TearDown();
   2755 
   2756   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
   2757     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
   2758     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
   2759   }
   2760 
   2761   // Shared implementation of AllocateRaw, AllocateRawCode and
   2762   // AllocateRawFixedArray.
   2763   MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
   2764                                            Executability executable);
   2765 
   2766   // Available bytes for objects in this space.
   2767   inline intptr_t Available();
   2768 
   2769   virtual intptr_t Size() {
   2770     return size_;
   2771   }
   2772 
   2773   virtual intptr_t SizeOfObjects() {
   2774     return objects_size_;
   2775   }
   2776 
   2777   intptr_t MaximumCommittedMemory() {
   2778     return maximum_committed_;
   2779   }
   2780 
   2781   intptr_t CommittedMemory() {
   2782     return Size();
   2783   }
   2784 
   2785   // Approximate amount of physical memory committed for this space.
   2786   size_t CommittedPhysicalMemory();
   2787 
   2788   int PageCount() {
   2789     return page_count_;
   2790   }
   2791 
   2792   // Finds an object for a given address; returns Failure::Exception()
   2793   // if it is not found.  The function iterates through all objects in this
   2794   // space, so it may be slow.
   2795   MaybeObject* FindObject(Address a);
   2796 
   2797   // Finds a large object page containing the given address, returns NULL
   2798   // if such a page doesn't exist.
   2799   LargePage* FindPage(Address a);
   2800 
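  // Example (a sketch; "lo_space" is assumed to point at this space):
  //   LargePage* page = lo_space->FindPage(addr);
  //   if (page != NULL) {
  //     HeapObject* object = page->GetObject();
  //   }
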
  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == NULL; }

  LargePage* first_page() { return first_page_; }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
#endif

  // Checks whether an address is in the object area of this space. It
  // iterates over all objects in the space and may be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  intptr_t max_capacity_;
  intptr_t maximum_committed_;
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  intptr_t size_;  // Allocated bytes.
  int page_count_;  // Number of chunks.
  intptr_t objects_size_;  // Total size of objects.
  // Maps MemoryChunk::kAlignment-aligned chunks to the large pages covering
  // them; this map is what makes Contains() and FindPage() O(1).
  HashMap chunk_map_;

  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};


class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  HeapObject* Next();

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return Next(); }

 private:
  LargePage* current_;
  HeapObjectCallback size_func_;
};
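
// Example usage (a sketch; "heap" is assumed to point to the owning Heap):
//   LargeObjectIterator it(heap->lo_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }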


// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space.
class PointerChunkIterator BASE_EMBEDDED {
 public:
  inline explicit PointerChunkIterator(Heap* heap);

  // Returns NULL when the iterator is done.
  MemoryChunk* next() {
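    // The switch forms a three-stage cascade: old pointer space pages first,
    // then map space pages, then large object pages. Each exhausted stage
    // deliberately falls through to the next, so NULL is returned only when
    // every source is exhausted.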
    switch (state_) {
      case kOldPointerState: {
        if (old_pointer_iterator_.has_next()) {
          return old_pointer_iterator_.next();
        }
        state_ = kMapState;
        // Fall through.
      }
      case kMapState: {
        if (map_iterator_.has_next()) {
          return map_iterator_.next();
        }
        state_ = kLargeObjectState;
        // Fall through.
      }
      case kLargeObjectState: {
        HeapObject* heap_object;
        do {
          heap_object = lo_iterator_.Next();
          if (heap_object == NULL) {
            state_ = kFinishedState;
            return NULL;
          }
          // Fixed arrays are the only pointer-containing objects in large
          // object space.
        } while (!heap_object->IsFixedArray());
        return MemoryChunk::FromAddress(heap_object->address());
      }
      case kFinishedState:
        return NULL;
      default:
        break;
    }
    UNREACHABLE();
    return NULL;
  }

 private:
  enum State {
    kOldPointerState,
    kMapState,
    kLargeObjectState,
    kFinishedState
  };
  State state_;
  PageIterator old_pointer_iterator_;
  PageIterator map_iterator_;
  LargeObjectIterator lo_iterator_;
};
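
// Example usage (a sketch; "heap" is assumed to point to the owning Heap):
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... scan chunk for pointers to new space ...
//   }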


#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif


} }  // namespace v8::internal

#endif  // V8_SPACES_H_