      1 // Copyright 2006-2010 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 #ifndef V8_SPACES_H_
     29 #define V8_SPACES_H_
     30 
     31 #include "list-inl.h"
     32 #include "log.h"
     33 
     34 namespace v8 {
     35 namespace internal {
     36 
     37 class Isolate;
     38 
     39 // -----------------------------------------------------------------------------
     40 // Heap structures:
     41 //
     42 // A JS heap consists of a young generation, an old generation, and a large
     43 // object space. The young generation is divided into two semispaces. A
     44 // scavenger implements Cheney's copying algorithm. The old generation is
     45 // separated into a map space and an old object space. The map space contains
     46 // all (and only) map objects; all other old objects go into the old space.
     47 // The old generation is collected by a mark-sweep-compact collector.
     48 //
     49 // The semispaces of the young generation are contiguous.  The old and map
     50 // spaces each consist of a list of pages. A page has a page header and an
     51 // object area. The page size is deliberately chosen to be 8K bytes.
     52 // The first word of a page is an opaque page header that holds the
     53 // address of the next page and its ownership information. The second word may
     54 // contain the allocation top address of this page. Heap objects are aligned to
     55 // the pointer size.
     56 //
     57 // There is a separate large object space for objects larger than
     58 // Page::kMaxHeapObjectSize, so that they do not have to move during
     59 // collection. The large object space is paged. Pages in large object space
     60 // may be larger than 8K.
     61 //
     62 // A card marking write barrier is used to keep track of intergenerational
     63 // references. Old space pages are divided into regions of Page::kRegionSize
     64 // size. Each region has a corresponding dirty bit in the page header which is
     65 // set if the region might contain pointers to new space. For details about
     66 // dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
     67 // method body.
     68 //
     69 // During scavenges and mark-sweep collections we iterate intergenerational
     70 // pointers without decoding heap object maps, so if a page belongs to the old
     71 // pointer space or the large object space it is essential to guarantee that
     72 // the page does not contain any garbage pointers to new space: every pointer-
     73 // aligned word which satisfies the Heap::InNewSpace() predicate must be a
     74 // pointer to a live heap object in new space. Thus objects in the old pointer
     75 // and large object spaces should have a special layout (e.g. no bare integer
     76 // fields). This requirement does not apply to the map space, which is iterated
     77 // in a special fashion. However, we still require the pointer fields of dead
     78 // maps to be cleared.
     79 //
     80 // To enable lazy cleaning of old space pages we use the notion of an
     81 // allocation watermark. Every pointer under the watermark is considered to be
     82 // well formed. The page allocation watermark is not necessarily equal to the
     83 // page allocation top, but all live objects on a page reside under its
     84 // allocation watermark. During a scavenge the allocation watermark might be
     85 // bumped and invalid pointers might appear below it. To avoid following them
     86 // we store a valid watermark in a special field in the page header and set the
     87 // page's WATERMARK_INVALIDATED flag. For details see comments in the
     88 // Page::SetAllocationWatermark() method body.
     89 //
     90 
     91 // Some assertion macros used in debug mode.
     92 
     93 #define ASSERT_PAGE_ALIGNED(address)                                           \
     94   ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
     95 
     96 #define ASSERT_OBJECT_ALIGNED(address)                                         \
     97   ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
     98 
     99 #define ASSERT_MAP_ALIGNED(address)                                            \
    100   ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
    101 
    102 #define ASSERT_OBJECT_SIZE(size)                                               \
    103   ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
    104 
    105 #define ASSERT_PAGE_OFFSET(offset)                                             \
    106   ASSERT((Page::kObjectStartOffset <= offset)                                  \
    107       && (offset <= Page::kPageSize))
    108 
    109 #define ASSERT_MAP_PAGE_INDEX(index)                                           \
    110   ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
    111 
    112 
    113 class PagedSpace;
    114 class MemoryAllocator;
    115 class AllocationInfo;
    116 
    117 // -----------------------------------------------------------------------------
    118 // A page normally has 8K bytes. Large object pages may be larger.  A page
    119 // address is always aligned to the 8K page size.
    120 //
    121 // Each page starts with a header of Page::kPageHeaderSize size which contains
    122 // bookkeeping data.
    123 //
    124 // The mark-compact collector transforms a map pointer into a page index and a
    125 // page offset. The exact encoding is described in the comments for
    126 // class MapWord in objects.h.
    127 //
    128 // The only way to get a page pointer is by calling factory methods:
    129 //   Page* p = Page::FromAddress(addr); or
    130 //   Page* p = Page::FromAllocationTop(top);
    131 class Page {
    132  public:
    133   // Returns the page containing a given address. The address ranges
    134   // from [page_addr .. page_addr + kPageSize[
    135   //
    136   // Note that this function only works for addresses in normal paged
    137   // spaces and addresses in the first 8K of large object pages (i.e.,
    138   // the start of large objects but not necessarily derived pointers
    139   // within them).
    140   INLINE(static Page* FromAddress(Address a)) {
    141     return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
    142   }
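  // Illustrative sketch (not part of the original header): with 8K pages the
  // low kPageSizeBits bits of an address are its offset within the page, so
  // masking them off yields the page start.  Assuming kPageSizeBits == 13:
  //
  //   Address a = reinterpret_cast<Address>(0x12345678);
  //   Page* p = Page::FromAddress(a);   // page starts at 0x12344000
  //   int offset = p->Offset(a);        // 0x1678, i.e. a - p->address()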
    143 
    144   // Returns the page containing an allocation top. Because an allocation
    145   // top address can be the upper bound of the page, we need to subtract
    146   // kPointerSize from it first. The address ranges from
    147   // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
    148   INLINE(static Page* FromAllocationTop(Address top)) {
    149     Page* p = FromAddress(top - kPointerSize);
    150     ASSERT_PAGE_OFFSET(p->Offset(top));
    151     return p;
    152   }
    153 
    154   // Returns the start address of this page.
    155   Address address() { return reinterpret_cast<Address>(this); }
    156 
    157   // Checks whether this is a valid page address.
    158   bool is_valid() { return address() != NULL; }
    159 
    160   // Returns the next page of this page.
    161   inline Page* next_page();
    162 
    163   // Return the end of allocation in this page. Undefined for unused pages.
    164   inline Address AllocationTop();
    165 
    166   // Return the allocation watermark for the page.
    167   // For old space pages it is guaranteed that the area under the watermark
    168   // does not contain any garbage pointers to new space.
    169   inline Address AllocationWatermark();
    170 
    171   // Return the allocation watermark offset from the beginning of the page.
    172   inline uint32_t AllocationWatermarkOffset();
    173 
    174   inline void SetAllocationWatermark(Address allocation_watermark);
    175 
    176   inline void SetCachedAllocationWatermark(Address allocation_watermark);
    177   inline Address CachedAllocationWatermark();
    178 
    179   // Returns the start address of the object area in this page.
    180   Address ObjectAreaStart() { return address() + kObjectStartOffset; }
    181 
    182   // Returns the end address (exclusive) of the object area in this page.
    183   Address ObjectAreaEnd() { return address() + Page::kPageSize; }
    184 
    185   // Checks whether an address is page aligned.
    186   static bool IsAlignedToPageSize(Address a) {
    187     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
    188   }
    189 
    190   // True if this page was in use before current compaction started.
    191   // Result is valid only for pages owned by paged spaces and
    192   // only after PagedSpace::PrepareForMarkCompact was called.
    193   inline bool WasInUseBeforeMC();
    194 
    195   inline void SetWasInUseBeforeMC(bool was_in_use);
    196 
    197   // True if this page is a large object page.
    198   inline bool IsLargeObjectPage();
    199 
    200   inline void SetIsLargeObjectPage(bool is_large_object_page);
    201 
    202   inline bool IsPageExecutable();
    203 
    204   inline void SetIsPageExecutable(bool is_page_executable);
    205 
    206   // Returns the offset of a given address to this page.
    207   INLINE(int Offset(Address a)) {
    208     int offset = static_cast<int>(a - address());
    209     ASSERT_PAGE_OFFSET(offset);
    210     return offset;
    211   }
    212 
    213   // Returns the address for a given offset into this page.
    214   Address OffsetToAddress(int offset) {
    215     ASSERT_PAGE_OFFSET(offset);
    216     return address() + offset;
    217   }
    218 
    219   // ---------------------------------------------------------------------
    220   // Card marking support
    221 
    222   static const uint32_t kAllRegionsCleanMarks = 0x0;
    223   static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
    224 
    225   inline uint32_t GetRegionMarks();
    226   inline void SetRegionMarks(uint32_t dirty);
    227 
    228   inline uint32_t GetRegionMaskForAddress(Address addr);
    229   inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
    230   inline int GetRegionNumberForAddress(Address addr);
    231 
    232   inline void MarkRegionDirty(Address addr);
    233   inline bool IsRegionDirty(Address addr);
    234 
    235   inline void ClearRegionMarks(Address start,
    236                                Address end,
    237                                bool reaches_limit);
    238 
    239   // Page size in bytes.  This must be a multiple of the OS page size.
    240   static const int kPageSize = 1 << kPageSizeBits;
    241 
    242   // Page size mask.
    243   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
    244 
    245   static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
    246     kIntSize + kPointerSize + kPointerSize;
    247 
    248   // The start offset of the object area in a page. Aligned to both maps and
    249   // code alignment to be suitable for both.
    250   static const int kObjectStartOffset =
    251       CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
    252 
    253   // Object area size in bytes.
    254   static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
    255 
    256   // Maximum object size that fits in a page.
    257   static const int kMaxHeapObjectSize = kObjectAreaSize;
    258 
    259   static const int kDirtyFlagOffset = 2 * kPointerSize;
    260   static const int kRegionSizeLog2 = 8;
    261   static const int kRegionSize = 1 << kRegionSizeLog2;
    262   static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
    263 
    264   STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
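  // Illustrative sketch (not part of the original header), assuming
  // kPageSizeBits == 13 and kBitsPerInt == 32: a page spans 8192 bytes that
  // are covered by 8192 / 32 = 256-byte regions, so there are exactly 32
  // regions per page and each region owns one bit of the 32-bit dirty marks
  // word.  Conceptually, for a normal page, a region number and its bit are
  // derived from the in-page offset:
  //
  //   int region = (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
  //   uint32_t mask = 1u << region;   // the bit tested/set by the barrier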
    265 
    266   enum PageFlag {
    267     IS_NORMAL_PAGE = 0,
    268     WAS_IN_USE_BEFORE_MC,
    269 
    270     // Page allocation watermark was bumped by preallocation during scavenge.
    271     // The correct watermark can be retrieved via CachedAllocationWatermark().
    272     WATERMARK_INVALIDATED,
    273     IS_EXECUTABLE,
    274     NUM_PAGE_FLAGS  // Must be last
    275   };
    276   static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
    277 
    278   // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
    279   // scavenge we simply invalidate the watermark on each old space page after
    280   // processing it. At the beginning of the next scavenge we then flip the
    281   // meaning of the WATERMARK_INVALIDATED flag, so every page is again marked
    282   // as having a valid watermark.
    283   //
    284   // The following invariant must hold for pages in old pointer and map spaces:
    285   //     If a page is in use then it is marked as having an invalid watermark
    286   //     at the beginning and at the end of any GC.
    287   //
    288   // This invariant guarantees that after flipping the flag's meaning at the
    289   // beginning of a scavenge all pages in use will be marked as having a valid
    290   // watermark.
    291   static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
    292 
    293   // Returns true if the page allocation watermark was not altered during
    294   // scavenge.
    295   inline bool IsWatermarkValid();
    296 
    297   inline void InvalidateWatermark(bool value);
    298 
    299   inline bool GetPageFlag(PageFlag flag);
    300   inline void SetPageFlag(PageFlag flag, bool value);
    301   inline void ClearPageFlags();
    302 
    303   inline void ClearGCFields();
    304 
    305   static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
    306   static const int kAllocationWatermarkOffsetBits  = kPageSizeBits + 1;
    307   static const uint32_t kAllocationWatermarkOffsetMask =
    308       ((1 << kAllocationWatermarkOffsetBits) - 1) <<
    309       kAllocationWatermarkOffsetShift;
    310 
    311   static const uint32_t kFlagsMask =
    312     ((1 << kAllocationWatermarkOffsetShift) - 1);
    313 
    314   STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
    315                kAllocationWatermarkOffsetBits);
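  // Illustrative sketch (not part of the original header): flags_ keeps the
  // page flag bits in its low bits (covered by kFlagsMask) and stores the
  // allocation watermark offset in the field above
  // kAllocationWatermarkOffsetShift, roughly:
  //
  //   uint32_t word = static_cast<uint32_t>(flags_);
  //   uint32_t watermark_offset =
  //       (word & kAllocationWatermarkOffsetMask) >> kAllocationWatermarkOffsetShift;
  //   uint32_t flag_bits = word & kFlagsMask;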
    316 
    317   //---------------------------------------------------------------------------
    318   // Page header description.
    319   //
    320   // If a page is not in the large object space, the first word,
    321   // opaque_header, encodes the next page address (aligned to the 8K kPageSize)
    322   // and the chunk number (0..8K-1).  Only MemoryAllocator should use
    323   // opaque_header. The value range of the opaque_header is [0..kPageSize[,
    324   // or [next_page_start, next_page_end[. It cannot point to a valid address
    325   // in the current page.  If a page is in the large object space, the first
    326   // word *may* (if the page start and large object chunk start are the
    327   // same) contain the address of the next large object chunk.
    328   intptr_t opaque_header;
    329 
    330   // If the page is not in the large object space, the low-order bit of the
    331   // second word is set. If the page is in the large object space, the
    332   // second word *may* (if the page start and large object chunk start are
    333   // the same) contain the large object chunk size.  In either case, the
    334   // low-order bit for large object pages will be cleared.
    335   // For normal pages this word is used to store page flags and
    336   // the offset of the allocation top.
    337   intptr_t flags_;
    338 
    339   // This field contains dirty marks for regions covering the page. Only dirty
    340   // regions might contain intergenerational references.
    341   // Only 32 dirty marks are supported, so for large object pages several
    342   // regions might be mapped to a single dirty mark.
    343   uint32_t dirty_regions_;
    344 
    345   // The index of the page in its owner space.
    346   int mc_page_index;
    347 
    348   // During mark-compact collections this field contains the forwarding address
    349   // of the first live object in this page.
    350   // During scavenge collections this field is used to store the allocation
    351   // watermark if it is altered during the scavenge.
    352   Address mc_first_forwarded;
    353 
    354   Heap* heap_;
    355 };
    356 
    357 
    358 // ----------------------------------------------------------------------------
    359 // Space is the abstract superclass for all allocation spaces.
    360 class Space : public Malloced {
    361  public:
    362   Space(Heap* heap, AllocationSpace id, Executability executable)
    363       : heap_(heap), id_(id), executable_(executable) {}
    364 
    365   virtual ~Space() {}
    366 
    367   Heap* heap() const { return heap_; }
    368 
    369   // Does the space need executable memory?
    370   Executability executable() { return executable_; }
    371 
    372   // Identity used in error reporting.
    373   AllocationSpace identity() { return id_; }
    374 
    375   // Returns allocated size.
    376   virtual intptr_t Size() = 0;
    377 
    378   // Returns size of objects. Can differ from the allocated size
    379   // (e.g. see LargeObjectSpace).
    380   virtual intptr_t SizeOfObjects() { return Size(); }
    381 
    382 #ifdef ENABLE_HEAP_PROTECTION
    383   // Protect/unprotect the space by marking it read-only/writable.
    384   virtual void Protect() = 0;
    385   virtual void Unprotect() = 0;
    386 #endif
    387 
    388 #ifdef DEBUG
    389   virtual void Print() = 0;
    390 #endif
    391 
    392   // After calling this we can allocate a certain number of bytes using only
    393   // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
    394   // without using freelists or causing a GC.  This is used by partial
    395   // snapshots.  It returns true if space was reserved or false if a GC is
    396   // needed.  For paged spaces the requested space must include the space wasted
    397   // at the end of each page when allocating linearly.
    398   virtual bool ReserveSpace(int bytes) = 0;
    399 
    400  private:
    401   Heap* heap_;
    402   AllocationSpace id_;
    403   Executability executable_;
    404 };
    405 
    406 
    407 // ----------------------------------------------------------------------------
    408 // All heap objects containing executable code (code objects) must be allocated
    409 // from a 2 GB range of memory, so that they can call each other using 32-bit
    410 // displacements.  This happens automatically on 32-bit platforms, where 32-bit
    411 // displacements cover the entire 4GB virtual address space.  On 64-bit
    412 // platforms, we support this using the CodeRange object, which reserves and
    413 // manages a range of virtual memory.
    414 class CodeRange {
    415  public:
    416   explicit CodeRange(Isolate* isolate);
    417 
    418   // Reserves a range of virtual memory, but does not commit any of it.
    419   // Can only be called once, at heap initialization time.
    420   // Returns false on failure.
    421   bool Setup(const size_t requested_size);
    422 
    423   // Frees the range of virtual memory, and frees the data structures used to
    424   // manage it.
    425   void TearDown();
    426 
    427   bool exists() { return this != NULL && code_range_ != NULL; }
    428   bool contains(Address address) {
    429     if (this == NULL || code_range_ == NULL) return false;
    430     Address start = static_cast<Address>(code_range_->address());
    431     return start <= address && address < start + code_range_->size();
    432   }
    433 
    434   // Allocates a chunk of memory from the large-object portion of
    435   // the code range.  On platforms with no separate code range, should
    436   // not be called.
    437   MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
    438                                           size_t* allocated);
    439   void FreeRawMemory(void* buf, size_t length);
    440 
    441  private:
    442   Isolate* isolate_;
    443 
    444   // The reserved range of virtual memory that all code objects are put in.
    445   VirtualMemory* code_range_;
    446   // Plain old data class, just a struct plus a constructor.
    447   class FreeBlock {
    448    public:
    449     FreeBlock(Address start_arg, size_t size_arg)
    450         : start(start_arg), size(size_arg) {}
    451     FreeBlock(void* start_arg, size_t size_arg)
    452         : start(static_cast<Address>(start_arg)), size(size_arg) {}
    453 
    454     Address start;
    455     size_t size;
    456   };
    457 
    458   // Freed blocks of memory are added to the free list.  When the allocation
    459   // list is exhausted, the free list is sorted and merged to make the new
    460   // allocation list.
    461   List<FreeBlock> free_list_;
    462   // Memory is allocated from the free blocks on the allocation list.
    463   // The block at current_allocation_block_index_ is the current block.
    464   List<FreeBlock> allocation_list_;
    465   int current_allocation_block_index_;
    466 
    467   // Finds a block on the allocation list that contains at least the
    468   // requested amount of memory.  If none is found, sorts and merges
    469   // the existing free memory blocks, and searches again.
    470   // If none can be found, terminates V8 with FatalProcessOutOfMemory.
    471   void GetNextAllocationBlock(size_t requested);
    472   // Compares the start addresses of two free blocks.
    473   static int CompareFreeBlockAddress(const FreeBlock* left,
    474                                      const FreeBlock* right);
    475 
    476   DISALLOW_COPY_AND_ASSIGN(CodeRange);
    477 };
    478 
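// Illustrative sketch (not part of the original header) of how a CodeRange
// might be driven on a 64-bit platform.  The 512 MB figure and the
// isolate->code_range() accessor are assumptions used only for illustration:
//
//   CodeRange* range = isolate->code_range();
//   if (range->Setup(512 * MB)) {             // reserve, but do not commit
//     size_t allocated = 0;
//     void* mem = range->AllocateRawMemory(Page::kPageSize, &allocated);
//     // ... hand the block over to the code space ...
//     range->FreeRawMemory(mem, allocated);
//   }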
    479 
    480 // ----------------------------------------------------------------------------
    481 // A space acquires chunks of memory from the operating system. The memory
    482 // allocator manages chunks for the paged heap spaces (old space and map
    483 // space).  A paged chunk consists of pages. Pages in a chunk have contiguous
    484 // addresses and are linked as a list.
    485 //
    486 // The allocator keeps an initial chunk which is used for the new space.  The
    487 // leftover regions of the initial chunk are used for the initial chunks of
    488 // old space and map space if they are big enough to hold at least one page.
    489 // The allocator assumes that there is one old space and one map space; each
    490 // expansion grows the space by kPagesPerChunk pages, except possibly the last
    491 // expansion before running out of space.  The first chunk may also contain
    492 // fewer than kPagesPerChunk pages.
    493 //
    494 // The memory allocator also allocates chunks for the large object space, but
    495 // they are managed by the space itself.  The new space does not expand.
    496 //
    497 // The fact that pages for paged spaces are allocated and deallocated in chunks
    498 // induces a constraint on the order of pages in a linked list. We say that
    499 // pages are linked in chunk-order if and only if every two consecutive
    500 // pages from the same chunk are consecutive in the linked list.
    501 //
    502 
    503 
    504 class MemoryAllocator {
    505  public:
    506   explicit MemoryAllocator(Isolate* isolate);
    507 
    508   // Initializes its internal bookkeeping structures.
    509   // Max capacity of the total space and executable memory limit.
    510   bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
    511 
    512   // Deletes valid chunks.
    513   void TearDown();
    514 
    515   // Reserves an initial address range of virtual memory to be split between
    516   // the two new space semispaces, the old space, and the map space.  The
    517   // memory is not yet committed or assigned to spaces and split into pages.
    518   // The initial chunk is unmapped when the memory allocator is torn down.
    519   // This function should only be called when there is not already a reserved
    520   // initial chunk (initial_chunk_ should be NULL).  It returns the start
    521   // address of the initial chunk if successful, with the side effect of
    522   // setting the initial chunk, or else returns NULL if unsuccessful, leaving
    523   // the initial chunk NULL.
    524   void* ReserveInitialChunk(const size_t requested);
    525 
    526   // Commits pages from an as-yet-unmanaged block of virtual memory into a
    527   // paged space.  The block should be part of the initial chunk reserved via
    528   // a call to ReserveInitialChunk.  The number of pages is always returned in
    529   // the output parameter num_pages.  This function assumes that the start
    530   // address is non-null and that it is big enough to hold at least one
    531   // page-aligned page.  The call always succeeds, and num_pages is always
    532   // greater than zero.
    533   Page* CommitPages(Address start, size_t size, PagedSpace* owner,
    534                     int* num_pages);
    535 
    536   // Commit a contiguous block of memory from the initial chunk.  Assumes that
    537   // the address is not NULL, the size is greater than zero, and that the
    538   // block is contained in the initial chunk.  Returns true if it succeeded
    539   // and false otherwise.
    540   bool CommitBlock(Address start, size_t size, Executability executable);
    541 
    542   // Uncommit a contiguous block of memory [start..(start+size)[.
    543   // start is not NULL, the size is greater than zero, and the
    544   // block is contained in the initial chunk.  Returns true if it succeeded
    545   // and false otherwise.
    546   bool UncommitBlock(Address start, size_t size);
    547 
    548   // Zaps a contiguous block of memory [start..(start+size)[ thus
    549   // filling it up with a recognizable non-NULL bit pattern.
    550   void ZapBlock(Address start, size_t size);
    551 
    552   // Attempts to allocate the requested (non-zero) number of pages from the
    553   // OS.  Fewer pages might be allocated than requested. If it fails to
    554   // allocate memory from the OS or cannot allocate a single page, this
    555   // function returns an invalid page pointer (NULL). The caller must check
    556   // whether the returned page is valid (by calling Page::is_valid()).  It is
    557   // guaranteed that allocated pages have contiguous addresses.  The actual
    558   // number of allocated pages is returned in the output parameter
    559   // allocated_pages.  If the PagedSpace owner is executable and there is
    560   // a code range, the pages are allocated from the code range.
    561   Page* AllocatePages(int requested_pages, int* allocated_pages,
    562                       PagedSpace* owner);
    563 
    564   // Frees pages from a given page and after. Requires pages to be
    565   // linked in chunk-order (see comment for class).
    566   // If 'p' is the first page of a chunk, pages from 'p' are freed
    567   // and this function returns an invalid page pointer.
    568   // Otherwise, the function searches a page after 'p' that is
    569   // the first page of a chunk. Pages after the found page
    570   // are freed and the function returns 'p'.
    571   Page* FreePages(Page* p);
    572 
    573   // Frees all pages owned by given space.
    574   void FreeAllPages(PagedSpace* space);
    575 
    576   // Allocates and frees raw memory of certain size.
    577   // These are just thin wrappers around OS::Allocate and OS::Free,
    578   // but keep track of allocated bytes as part of heap.
    579   // If the flag is EXECUTABLE and a code range exists, the requested
    580   // memory is allocated from the code range.  If a code range exists
    581   // and the freed memory is in it, the code range manages the freed memory.
    582   MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
    583                                           size_t* allocated,
    584                                           Executability executable);
    585   void FreeRawMemory(void* buf,
    586                      size_t length,
    587                      Executability executable);
    588   void PerformAllocationCallback(ObjectSpace space,
    589                                  AllocationAction action,
    590                                  size_t size);
    591 
    592   void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
    593                                    ObjectSpace space,
    594                                    AllocationAction action);
    595   void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
    596   bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
    597 
    598   // Returns the maximum available bytes of the heap.
    599   intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
    600 
    601   // Returns allocated spaces in bytes.
    602   intptr_t Size() { return size_; }
    603 
    604   // Returns the maximum available executable bytes of the heap.
    605   intptr_t AvailableExecutable() {
    606     if (capacity_executable_ < size_executable_) return 0;
    607     return capacity_executable_ - size_executable_;
    608   }
    609 
    610   // Returns allocated executable spaces in bytes.
    611   intptr_t SizeExecutable() { return size_executable_; }
    612 
    613   // Returns maximum available bytes that the old space can have.
    614   intptr_t MaxAvailable() {
    615     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
    616   }
    617 
    618   // Links two pages.
    619   inline void SetNextPage(Page* prev, Page* next);
    620 
    621   // Returns the next page of a given page.
    622   inline Page* GetNextPage(Page* p);
    623 
    624   // Checks whether a page belongs to a space.
    625   inline bool IsPageInSpace(Page* p, PagedSpace* space);
    626 
    627   // Returns the space that owns the given page.
    628   inline PagedSpace* PageOwner(Page* page);
    629 
    630   // Finds the first/last page in the same chunk as a given page.
    631   Page* FindFirstPageInSameChunk(Page* p);
    632   Page* FindLastPageInSameChunk(Page* p);
    633 
    634   // Relinks the list of pages owned by the space to make it chunk-ordered.
    635   // Returns the new first and last pages of the space.
    636   // Also returns the last page in the relinked list which has the
    637   // WasInUseBeforeMC flag set.
    638   void RelinkPageListInChunkOrder(PagedSpace* space,
    639                                   Page** first_page,
    640                                   Page** last_page,
    641                                   Page** last_page_in_use);
    642 
    643 #ifdef ENABLE_HEAP_PROTECTION
    644   // Protect/unprotect a block of memory by marking it read-only/writable.
    645   inline void Protect(Address start, size_t size);
    646   inline void Unprotect(Address start, size_t size,
    647                         Executability executable);
    648 
    649   // Protect/unprotect a chunk given a page in the chunk.
    650   inline void ProtectChunkFromPage(Page* page);
    651   inline void UnprotectChunkFromPage(Page* page);
    652 #endif
    653 
    654 #ifdef DEBUG
    655   // Reports statistic info of the space.
    656   void ReportStatistics();
    657 #endif
    658 
    659   // Due to an encoding limitation, we can have at most 8K chunks.
    660   static const int kMaxNofChunks = 1 << kPageSizeBits;
    661   // If a chunk has at least 16 pages, the maximum heap size is about
    662   // 8K * 8K * 16 = 1G bytes.
    663 #ifdef V8_TARGET_ARCH_X64
    664   static const int kPagesPerChunk = 32;
    665   // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
    666   static const int kPagesPerChunkLog2 = 5;
    667   static const int kChunkTableLevels = 4;
    668   static const int kChunkTableBitsPerLevel = 12;
    669 #else
    670   static const int kPagesPerChunk = 16;
    671   // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
    672   static const int kPagesPerChunkLog2 = 4;
    673   static const int kChunkTableLevels = 2;
    674   static const int kChunkTableBitsPerLevel = 8;
    675 #endif
    676 
    677  private:
    678   static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
    679   static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
    680 
    681   Isolate* isolate_;
    682 
    683   // Maximum space size in bytes.
    684   intptr_t capacity_;
    685   // Maximum subset of capacity_ that can be executable
    686   intptr_t capacity_executable_;
    687 
    688   // Allocated space size in bytes.
    689   intptr_t size_;
    690 
    691   // Allocated executable space size in bytes.
    692   intptr_t size_executable_;
    693 
    694   struct MemoryAllocationCallbackRegistration {
    695     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
    696                                          ObjectSpace space,
    697                                          AllocationAction action)
    698         : callback(callback), space(space), action(action) {
    699     }
    700     MemoryAllocationCallback callback;
    701     ObjectSpace space;
    702     AllocationAction action;
    703   };
    704   // A list of callbacks that are triggered when memory is allocated or freed.
    705   List<MemoryAllocationCallbackRegistration>
    706       memory_allocation_callbacks_;
    707 
    708   // The initial chunk of virtual memory.
    709   VirtualMemory* initial_chunk_;
    710 
    711   // Allocated chunk info: chunk start address, chunk size, and owning space.
    712   class ChunkInfo BASE_EMBEDDED {
    713    public:
    714     ChunkInfo() : address_(NULL),
    715                   size_(0),
    716                   owner_(NULL),
    717                   executable_(NOT_EXECUTABLE),
    718                   owner_identity_(FIRST_SPACE) {}
    719     inline void init(Address a, size_t s, PagedSpace* o);
    720     Address address() { return address_; }
    721     size_t size() { return size_; }
    722     PagedSpace* owner() { return owner_; }
    723     // We save executability of the owner to allow using it
    724     // when collecting stats after the owner has been destroyed.
    725     Executability executable() const { return executable_; }
    726     AllocationSpace owner_identity() const { return owner_identity_; }
    727 
    728    private:
    729     Address address_;
    730     size_t size_;
    731     PagedSpace* owner_;
    732     Executability executable_;
    733     AllocationSpace owner_identity_;
    734   };
    735 
    736   // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
    737   List<ChunkInfo> chunks_;
    738   List<int> free_chunk_ids_;
    739   int max_nof_chunks_;
    740   int top_;
    741 
    742   // Push/pop a free chunk id onto/from the stack.
    743   void Push(int free_chunk_id);
    744   int Pop();
    745   bool OutOfChunkIds() { return top_ == 0; }
    746 
    747   // Frees a chunk.
    748   void DeleteChunk(int chunk_id);
    749 
    750   // Basic check whether a chunk id is in the valid range.
    751   inline bool IsValidChunkId(int chunk_id);
    752 
    753   // Checks whether a chunk id identifies an allocated chunk.
    754   inline bool IsValidChunk(int chunk_id);
    755 
    756   // Returns the chunk id that a page belongs to.
    757   inline int GetChunkId(Page* p);
    758 
    759   // True if the address lies in the initial chunk.
    760   inline bool InInitialChunk(Address address);
    761 
    762   // Initializes pages in a chunk. Returns the first page address.
    763   // This function and GetChunkId() are provided for the mark-compact
    764   // collector to rebuild page headers in the from space, which is
    765   // used as a marking stack and whose page headers are destroyed.
    766   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
    767                                PagedSpace* owner);
    768 
    769   Page* RelinkPagesInChunk(int chunk_id,
    770                            Address chunk_start,
    771                            size_t chunk_size,
    772                            Page* prev,
    773                            Page** last_page_in_use);
    774 
    775   DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
    776 };
    777 
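// Illustrative sketch (not part of the original header) of the rough setup
// flow the comments above describe.  The capacities, the
// isolate->memory_allocator() accessor and the old_space variable are
// assumptions used only for illustration:
//
//   MemoryAllocator* allocator = isolate->memory_allocator();
//   allocator->Setup(64 * MB, 16 * MB);       // total / executable capacity
//   void* chunk = allocator->ReserveInitialChunk(4 * MB);
//   int pages = 0;
//   Page* first = allocator->CommitPages(static_cast<Address>(chunk),
//                                        4 * MB, old_space, &pages);
//   // first now heads a linked, chunk-ordered list of pages owned by
//   // old_space.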
    778 
    779 // -----------------------------------------------------------------------------
    780 // Interface for heap object iterator to be implemented by all object space
    781 // object iterators.
    782 //
    783 // NOTE: The space-specific object iterators also implement their own next()
    784 //       method, which is used to avoid virtual function calls when
    785 //       iterating a specific space.
    786 
    787 class ObjectIterator : public Malloced {
    788  public:
    789   virtual ~ObjectIterator() { }
    790 
    791   virtual HeapObject* next_object() = 0;
    792 };
    793 
    794 
    795 // -----------------------------------------------------------------------------
    796 // Heap object iterator in new/old/map spaces.
    797 //
    798 // A HeapObjectIterator iterates objects from a given address to the
    799 // top of a space. The given address must be below the current
    800 // allocation pointer (space top). There are some caveats.
    801 //
    802 // (1) If the space top changes upward during iteration (because of
    803 //     allocating new objects), the iterator does not iterate objects
    804 //     above the original space top. The caller must create a new
    805 //     iterator starting from the old top in order to visit these new
    806 //     objects.
    807 //
    808 // (2) If new objects are allocated below the original allocation top
    809 //     (e.g., free-list allocation in paged spaces), the new objects
    810 //     may or may not be iterated depending on their position with
    811 //     respect to the current point of iteration.
    812 //
    813 // (3) The space top should not change downward during iteration,
    814 //     otherwise the iterator will return not-necessarily-valid
    815 //     objects.
    816 
    817 class HeapObjectIterator: public ObjectIterator {
    818  public:
    819   // Creates a new object iterator in a given space. If a start
    820   // address is not given, the iterator starts from the space bottom.
    821   // If the size function is not given, the iterator calls the default
    822   // Object::Size().
    823   explicit HeapObjectIterator(PagedSpace* space);
    824   HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
    825   HeapObjectIterator(PagedSpace* space, Address start);
    826   HeapObjectIterator(PagedSpace* space,
    827                      Address start,
    828                      HeapObjectCallback size_func);
    829   HeapObjectIterator(Page* page, HeapObjectCallback size_func);
    830 
    831   inline HeapObject* next() {
    832     return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
    833   }
    834 
    835   // Implementation of ObjectIterator.
    836   virtual HeapObject* next_object() { return next(); }
    837 
    838  private:
    839   Address cur_addr_;  // current iteration point
    840   Address end_addr_;  // end iteration point
    841   Address cur_limit_;  // current page limit
    842   HeapObjectCallback size_func_;  // size function
    843   Page* end_page_;  // caches the page of the end address
    844 
    845   HeapObject* FromCurrentPage() {
    846     ASSERT(cur_addr_ < cur_limit_);
    847 
    848     HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    849     int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    850     ASSERT_OBJECT_SIZE(obj_size);
    851 
    852     cur_addr_ += obj_size;
    853     ASSERT(cur_addr_ <= cur_limit_);
    854 
    855     return obj;
    856   }
    857 
    858   // Slow path of next, goes into the next page.
    859   HeapObject* FromNextPage();
    860 
    861   // Initializes fields.
    862   void Initialize(Address start, Address end, HeapObjectCallback size_func);
    863 
    864 #ifdef DEBUG
    865   // Verifies whether fields have valid values.
    866   void Verify();
    867 #endif
    868 };
    869 
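// Illustrative sketch (not part of the original header), assuming next()
// returns NULL once iteration reaches the space top:
//
//   HeapObjectIterator it(heap->old_pointer_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... inspect obj; do not allocate while iterating ...
//   }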
    870 
    871 // -----------------------------------------------------------------------------
    872 // A PageIterator iterates the pages in a paged space.
    873 //
    874 // The PageIterator class provides three modes for iterating pages in a space:
    875 //   PAGES_IN_USE iterates pages containing allocated objects.
    876 //   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
    877 //                    mark-compact collection.
    878 //   ALL_PAGES iterates all pages in the space.
    879 //
    880 // There are some caveats.
    881 //
    882 // (1) If the space expands during iteration, new pages will not be
    883 //     returned by the iterator in any mode.
    884 //
    885 // (2) If new objects are allocated during iteration, they will appear
    886 //     in pages returned by the iterator.  Allocation may cause the
    887 //     allocation pointer or MC allocation pointer in the last page to
    888 //     change between constructing the iterator and iterating the last
    889 //     page.
    890 //
    891 // (3) The space should not shrink during iteration, otherwise the
    892 //     iterator will return deallocated pages.
    893 
    894 class PageIterator BASE_EMBEDDED {
    895  public:
    896   enum Mode {
    897     PAGES_IN_USE,
    898     PAGES_USED_BY_MC,
    899     ALL_PAGES
    900   };
    901 
    902   PageIterator(PagedSpace* space, Mode mode);
    903 
    904   inline bool has_next();
    905   inline Page* next();
    906 
    907  private:
    908   PagedSpace* space_;
    909   Page* prev_page_;  // Previous page returned.
    910   Page* stop_page_;  // Page to stop at (last page returned by the iterator).
    911 };
    912 
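// Illustrative sketch (not part of the original header): visiting every page
// of a paged space in one of the three modes declared above:
//
//   PageIterator it(space, PageIterator::ALL_PAGES);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... e.g. p->GetRegionMarks(), p->AllocationWatermark() ...
//   }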
    913 
    914 // -----------------------------------------------------------------------------
    915 // A space has a list of pages. The next page can be accessed via
    916 // Page::next_page() call. The next page of the last page is an
    917 // invalid page pointer. A space can expand and shrink dynamically.
    918 
    919 // An abstraction of allocation and relocation pointers in a page-structured
    920 // space.
    921 class AllocationInfo {
    922  public:
    923   Address top;  // current allocation top
    924   Address limit;  // current allocation limit
    925 
    926 #ifdef DEBUG
    927   bool VerifyPagedAllocation() {
    928     return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
    929         && (top <= limit);
    930   }
    931 #endif
    932 };
    933 
    934 
    935 // An abstraction of the accounting statistics of a page-structured space.
    936 // The 'capacity' of a space is the number of object-area bytes (ie, not
    937 // including page bookkeeping structures) currently in the space. The 'size'
    938 // of a space is the number of allocated bytes, the 'waste' in the space is
    939 // the number of bytes that are not allocated and not available to
    940 // allocation without reorganizing the space via a GC (eg, small blocks due
    941 // to internal fragmentation, top of page areas in map space), and the bytes
    942 // 'available' is the number of unallocated bytes that are not waste.  The
    943 // capacity is the sum of size, waste, and available.
    944 //
    945 // The stats are only set by functions that ensure they stay balanced. These
    946 // functions increase or decrease one of the non-capacity stats in
    947 // conjunction with capacity, or else they always balance increases and
    948 // decreases to the non-capacity stats.
    949 class AllocationStats BASE_EMBEDDED {
    950  public:
    951   AllocationStats() { Clear(); }
    952 
    953   // Zero out all the allocation statistics (ie, no capacity).
    954   void Clear() {
    955     capacity_ = 0;
    956     available_ = 0;
    957     size_ = 0;
    958     waste_ = 0;
    959   }
    960 
    961   // Reset the allocation statistics (ie, available = capacity with no
    962   // wasted or allocated bytes).
    963   void Reset() {
    964     available_ = capacity_;
    965     size_ = 0;
    966     waste_ = 0;
    967   }
    968 
    969   // Accessors for the allocation statistics.
    970   intptr_t Capacity() { return capacity_; }
    971   intptr_t Available() { return available_; }
    972   intptr_t Size() { return size_; }
    973   intptr_t Waste() { return waste_; }
    974 
    975   // Grow the space by adding available bytes.
    976   void ExpandSpace(int size_in_bytes) {
    977     capacity_ += size_in_bytes;
    978     available_ += size_in_bytes;
    979   }
    980 
    981   // Shrink the space by removing available bytes.
    982   void ShrinkSpace(int size_in_bytes) {
    983     capacity_ -= size_in_bytes;
    984     available_ -= size_in_bytes;
    985   }
    986 
    987   // Allocate from available bytes (available -> size).
    988   void AllocateBytes(intptr_t size_in_bytes) {
    989     available_ -= size_in_bytes;
    990     size_ += size_in_bytes;
    991   }
    992 
    993   // Free allocated bytes, making them available (size -> available).
    994   void DeallocateBytes(intptr_t size_in_bytes) {
    995     size_ -= size_in_bytes;
    996     available_ += size_in_bytes;
    997   }
    998 
    999   // Waste free bytes (available -> waste).
   1000   void WasteBytes(int size_in_bytes) {
   1001     available_ -= size_in_bytes;
   1002     waste_ += size_in_bytes;
   1003   }
   1004 
   1005   // Consider the wasted bytes to be allocated, as they contain filler
   1006   // objects (waste -> size).
   1007   void FillWastedBytes(intptr_t size_in_bytes) {
   1008     waste_ -= size_in_bytes;
   1009     size_ += size_in_bytes;
   1010   }
   1011 
   1012  private:
   1013   intptr_t capacity_;
   1014   intptr_t available_;
   1015   intptr_t size_;
   1016   intptr_t waste_;
   1017 };
   1018 
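// Illustrative sketch (not part of the original header) of the invariant
// described above, capacity == size + waste + available:
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kObjectAreaSize);  // capacity = available = N
//   stats.AllocateBytes(128);                  // available -= 128, size += 128
//   stats.WasteBytes(32);                      // available -= 32,  waste += 32
//   // stats.Capacity() == stats.Size() + stats.Waste() + stats.Available()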
   1019 
   1020 class PagedSpace : public Space {
   1021  public:
   1022   // Creates a space with a maximum capacity, and an id.
   1023   PagedSpace(Heap* heap,
   1024              intptr_t max_capacity,
   1025              AllocationSpace id,
   1026              Executability executable);
   1027 
   1028   virtual ~PagedSpace() {}
   1029 
   1030   // Set up the space using the given address range of virtual memory (from
   1031   // the memory allocator's initial chunk) if possible.  If the block of
   1032   // addresses is not big enough to contain a single page-aligned page, a
   1033   // fresh chunk will be allocated.
   1034   bool Setup(Address start, size_t size);
   1035 
   1036   // Returns true if the space has been successfully set up and not
   1037   // subsequently torn down.
   1038   bool HasBeenSetup();
   1039 
   1040   // Cleans up the space, frees all pages in this space except those belonging
   1041   // to the initial chunk, uncommits addresses in the initial chunk.
   1042   void TearDown();
   1043 
   1044   // Checks whether an object/address is in this space.
   1045   inline bool Contains(Address a);
   1046   bool Contains(HeapObject* o) { return Contains(o->address()); }
   1047   // Never crashes even if a is not a valid pointer.
   1048   inline bool SafeContains(Address a);
   1049 
   1050   // Given an address occupied by a live object, return that object if it is
   1051   // in this space, or Failure::Exception() if it is not. The implementation
   1052   // iterates over objects in the page containing the address, the cost is
   1053   // linear in the number of objects in the page. It may be slow.
   1054   MUST_USE_RESULT MaybeObject* FindObject(Address addr);
   1055 
   1056   // Checks whether page is currently in use by this space.
   1057   bool IsUsed(Page* page);
   1058 
   1059   void MarkAllPagesClean();
   1060 
   1061   // Prepares for a mark-compact GC.
   1062   virtual void PrepareForMarkCompact(bool will_compact);
   1063 
   1064   // The top of allocation in a page in this space. Undefined if page is unused.
   1065   Address PageAllocationTop(Page* page) {
   1066     return page == TopPageOf(allocation_info_) ? top()
   1067         : PageAllocationLimit(page);
   1068   }
   1069 
   1070   // The limit of allocation for a page in this space.
   1071   virtual Address PageAllocationLimit(Page* page) = 0;
   1072 
   1073   void FlushTopPageWatermark() {
   1074     AllocationTopPage()->SetCachedAllocationWatermark(top());
   1075     AllocationTopPage()->InvalidateWatermark(true);
   1076   }
   1077 
   1078   // Current capacity without growing (Size() + Available() + Waste()).
   1079   intptr_t Capacity() { return accounting_stats_.Capacity(); }
   1080 
   1081   // Total amount of memory committed for this space.  For paged
   1082   // spaces this equals the capacity.
   1083   intptr_t CommittedMemory() { return Capacity(); }
   1084 
   1085   // Available bytes without growing.
   1086   intptr_t Available() { return accounting_stats_.Available(); }
   1087 
   1088   // Allocated bytes in this space.
   1089   virtual intptr_t Size() { return accounting_stats_.Size(); }
   1090 
   1091   // Wasted bytes due to fragmentation and not recoverable until the
   1092   // next GC of this space.
   1093   intptr_t Waste() { return accounting_stats_.Waste(); }
   1094 
   1095   // Returns the address of the first object in this space.
   1096   Address bottom() { return first_page_->ObjectAreaStart(); }
   1097 
   1098   // Returns the allocation pointer in this space.
   1099   Address top() { return allocation_info_.top; }
   1100 
   1101   // Allocate the requested number of bytes in the space if possible, return a
   1102   // failure object if not.
   1103   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
   1104 
   1105   // Allocate the requested number of bytes for relocation during mark-compact
   1106   // collection.
   1107   MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
   1108 
   1109   virtual bool ReserveSpace(int bytes);
   1110 
   1111   // Used by ReserveSpace.
   1112   virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
   1113 
   1114   // Free all pages in range from prev (exclusive) to last (inclusive).
   1115   // Freed pages are moved to the end of page list.
   1116   void FreePages(Page* prev, Page* last);
   1117 
   1118   // Deallocates a block.
   1119   virtual void DeallocateBlock(Address start,
   1120                                int size_in_bytes,
   1121                                bool add_to_freelist) = 0;
   1122 
   1123   // Set space allocation info.
   1124   void SetTop(Address top) {
   1125     allocation_info_.top = top;
   1126     allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
   1127   }
   1128 
   1129   // ---------------------------------------------------------------------------
   1130   // Mark-compact collection support functions
   1131 
   1132   // Set the relocation point to the beginning of the space.
   1133   void MCResetRelocationInfo();
   1134 
   1135   // Writes relocation info to the top page.
   1136   void MCWriteRelocationInfoToPage() {
   1137     TopPageOf(mc_forwarding_info_)->
   1138         SetAllocationWatermark(mc_forwarding_info_.top);
   1139   }
   1140 
   1141   // Computes the offset of a given address in this space to the beginning
   1142   // of the space.
   1143   int MCSpaceOffsetForAddress(Address addr);
   1144 
   1145   // Updates the allocation pointer to the relocation top after a mark-compact
   1146   // collection.
   1147   virtual void MCCommitRelocationInfo() = 0;
   1148 
   1149   // Releases half of unused pages.
   1150   void Shrink();
   1151 
   1152   // Ensures that the capacity is at least 'capacity'. Returns false on failure.
   1153   bool EnsureCapacity(int capacity);
   1154 
   1155 #ifdef ENABLE_HEAP_PROTECTION
   1156   // Protect/unprotect the space by marking it read-only/writable.
   1157   void Protect();
   1158   void Unprotect();
   1159 #endif
   1160 
   1161 #ifdef DEBUG
   1162   // Print meta info and objects in this space.
   1163   virtual void Print();
   1164 
   1165   // Verify integrity of this space.
   1166   virtual void Verify(ObjectVisitor* visitor);
   1167 
   1168   // Overridden by subclasses to verify space-specific object
   1169   // properties (e.g., only maps or free-list nodes are in map space).
   1170   virtual void VerifyObject(HeapObject* obj) {}
   1171 
   1172   // Report code object related statistics
   1173   void CollectCodeStatistics();
   1174   static void ReportCodeStatistics();
   1175   static void ResetCodeStatistics();
   1176 #endif
   1177 
   1178   // Returns the page of the allocation pointer.
   1179   Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
   1180 
   1181   void RelinkPageListInChunkOrder(bool deallocate_blocks);
   1182 
   1183  protected:
   1184   // Maximum capacity of this space.
   1185   intptr_t max_capacity_;
   1186 
   1187   // Accounting information for this space.
   1188   AllocationStats accounting_stats_;
   1189 
   1190   // The first page in this space.
   1191   Page* first_page_;
   1192 
   1193   // The last page in this space.  Initially set in Setup, updated in
   1194   // Expand and Shrink.
   1195   Page* last_page_;
   1196 
   1197   // True if pages owned by this space are linked in chunk-order.
   1198   // See comment for class MemoryAllocator for definition of chunk-order.
   1199   bool page_list_is_chunk_ordered_;
   1200 
   1201   // Normal allocation information.
   1202   AllocationInfo allocation_info_;
   1203 
   1204   // Relocation information during mark-compact collections.
   1205   AllocationInfo mc_forwarding_info_;
   1206 
   1207   // Bytes of each page that cannot be allocated.  Possibly non-zero
   1208   // for pages in spaces with only fixed-size objects.  Always zero
   1209   // for pages in spaces with variable sized objects (those pages are
   1210   // padded with free-list nodes).
   1211   int page_extra_;
   1212 
   1213   // Sets allocation pointer to a page bottom.
   1214   static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
   1215 
   1216   // Returns the top page specified by an allocation info structure.
   1217   static Page* TopPageOf(AllocationInfo alloc_info) {
   1218     return Page::FromAllocationTop(alloc_info.limit);
   1219   }
   1220 
   1221   int CountPagesToTop() {
   1222     Page* p = Page::FromAllocationTop(allocation_info_.top);
   1223     PageIterator it(this, PageIterator::ALL_PAGES);
   1224     int counter = 1;
   1225     while (it.has_next()) {
   1226       if (it.next() == p) return counter;
   1227       counter++;
   1228     }
   1229     UNREACHABLE();
   1230     return -1;
   1231   }
   1232 
   1233   // Expands the space by allocating a fixed number of pages. Returns false if
   1234   // it cannot allocate the requested number of pages from the OS. Newly
   1235   // allocated pages are appended after last_page.
   1236   bool Expand(Page* last_page);
   1237 
   1238   // Generic fast case allocation function that tries linear allocation in
   1239   // the top page of 'alloc_info'.  Returns NULL on failure.
   1240   inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
   1241                                       int size_in_bytes);
   1242 
   1243   // During normal allocation or deserialization, roll to the next page in
   1244   // the space (there is assumed to be one) and allocate there.  This
   1245   // function is space-dependent.
   1246   virtual HeapObject* AllocateInNextPage(Page* current_page,
   1247                                          int size_in_bytes) = 0;
   1248 
   1249   // Slow path of AllocateRaw.  This function is space-dependent.
   1250   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
   1251 
   1252   // Slow path of MCAllocateRaw.
   1253   MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
   1254 
   1255 #ifdef DEBUG
   1256   // Returns the number of total pages in this space.
   1257   int CountTotalPages();
   1258 #endif
   1259  private:
   1260 
   1261   // Returns a pointer to the page of the relocation pointer.
   1262   Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
   1263 
   1264   friend class PageIterator;
   1265 };
   1266 
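        // The fast path of AllocateRaw described above (AllocateLinearly) is a
        // bump-pointer allocation in the top page of an AllocationInfo.  A minimal
        // sketch of the technique, with simplified names that are illustrative
        // rather than the exact V8 implementation:
        //
        //   HeapObject* TryAllocateLinearly(AllocationInfo* info, int size_in_bytes) {
        //     Address new_top = info->top + size_in_bytes;
        //     if (new_top > info->limit) return NULL;    // Top page is exhausted.
        //     HeapObject* object = HeapObject::FromAddress(info->top);
        //     info->top = new_top;                        // Bump the pointer.
        //     return object;
        //   }
        //
        // On failure the caller falls back to the space-dependent slow path
        // (AllocateInNextPage or SlowAllocateRaw).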
   1267 
   1268 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   1269 class NumberAndSizeInfo BASE_EMBEDDED {
   1270  public:
   1271   NumberAndSizeInfo() : number_(0), bytes_(0) {}
   1272 
   1273   int number() const { return number_; }
   1274   void increment_number(int num) { number_ += num; }
   1275 
   1276   int bytes() const { return bytes_; }
   1277   void increment_bytes(int size) { bytes_ += size; }
   1278 
   1279   void clear() {
   1280     number_ = 0;
   1281     bytes_ = 0;
   1282   }
   1283 
   1284  private:
   1285   int number_;
   1286   int bytes_;
   1287 };
   1288 
   1289 
   1290 // HistogramInfo class for recording a single "bar" of a histogram.  This
   1291 // class is used for collecting statistics to print to stdout (when compiled
   1292 // with DEBUG) or to the log file (when compiled with
   1293 // ENABLE_LOGGING_AND_PROFILING).
   1294 class HistogramInfo: public NumberAndSizeInfo {
   1295  public:
   1296   HistogramInfo() : NumberAndSizeInfo() {}
   1297 
   1298   const char* name() { return name_; }
   1299   void set_name(const char* name) { name_ = name; }
   1300 
   1301  private:
   1302   const char* name_;
   1303 };
   1304 #endif
   1305 
   1306 
   1307 // -----------------------------------------------------------------------------
   1308 // SemiSpace in young generation
   1309 //
   1310 // A semispace is a contiguous chunk of memory. The mark-compact collector
   1311 // uses the memory in the from space as a marking stack when tracing live
   1312 // objects.
   1313 
   1314 class SemiSpace : public Space {
   1315  public:
   1316   // Constructor.
   1317   explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
   1318     start_ = NULL;
   1319     age_mark_ = NULL;
   1320   }
   1321 
   1322   // Sets up the semispace using the given chunk.
   1323   bool Setup(Address start, int initial_capacity, int maximum_capacity);
   1324 
   1325   // Tear down the space.  Heap memory was not allocated by the space, so it
   1326   // is not deallocated here.
   1327   void TearDown();
   1328 
   1329   // True if the space has been set up but not torn down.
   1330   bool HasBeenSetup() { return start_ != NULL; }
   1331 
   1332   // Grow the size of the semispace by committing extra virtual memory.
   1333   // Assumes that the caller has checked that the semispace has not reached
   1334   // its maximum capacity (and thus there is space available in the reserved
   1335   // address range to grow).
   1336   bool Grow();
   1337 
   1338   // Grow the semispace to the new capacity.  The new capacity
   1339   // requested must be larger than the current capacity.
   1340   bool GrowTo(int new_capacity);
   1341 
   1342   // Shrinks the semispace to the new capacity.  The new capacity
   1343   // requested must be more than the amount of used memory in the
   1344   // semispace and less than the current capacity.
   1345   bool ShrinkTo(int new_capacity);
   1346 
   1347   // Returns the start address of the space.
   1348   Address low() { return start_; }
   1349   // Returns one past the end address of the space.
   1350   Address high() { return low() + capacity_; }
   1351 
   1352   // Age mark accessors.
   1353   Address age_mark() { return age_mark_; }
   1354   void set_age_mark(Address mark) { age_mark_ = mark; }
   1355 
   1356   // True if the address is in the address range of this semispace (not
   1357   // necessarily below the allocation pointer).
   1358   bool Contains(Address a) {
   1359     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
   1360            == reinterpret_cast<uintptr_t>(start_);
   1361   }
   1362 
   1363   // True if the object is a heap object in the address range of this
   1364   // semispace (not necessarily below the allocation pointer).
   1365   bool Contains(Object* o) {
   1366     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
   1367   }
   1368 
   1369   // The offset of an address from the beginning of the space.
   1370   int SpaceOffsetForAddress(Address addr) {
   1371     return static_cast<int>(addr - low());
   1372   }
   1373 
   1374   // If we did not define these here, SemiSpace would be abstract.  However,
   1375   // they should never be called.
   1376   virtual intptr_t Size() {
   1377     UNREACHABLE();
   1378     return 0;
   1379   }
   1380 
   1381   virtual bool ReserveSpace(int bytes) {
   1382     UNREACHABLE();
   1383     return false;
   1384   }
   1385 
   1386   bool is_committed() { return committed_; }
   1387   bool Commit();
   1388   bool Uncommit();
   1389 
   1390 #ifdef ENABLE_HEAP_PROTECTION
   1391   // Protect/unprotect the space by marking it read-only/writable.
   1392   virtual void Protect() {}
   1393   virtual void Unprotect() {}
   1394 #endif
   1395 
   1396 #ifdef DEBUG
   1397   virtual void Print();
   1398   virtual void Verify();
   1399 #endif
   1400 
   1401   // Returns the current capacity of the semi space.
   1402   int Capacity() { return capacity_; }
   1403 
   1404   // Returns the maximum capacity of the semi space.
   1405   int MaximumCapacity() { return maximum_capacity_; }
   1406 
   1407   // Returns the initial capacity of the semi space.
   1408   int InitialCapacity() { return initial_capacity_; }
   1409 
   1410  private:
   1411   // The current and maximum capacity of the space.
   1412   int capacity_;
   1413   int maximum_capacity_;
   1414   int initial_capacity_;
   1415 
   1416   // The start address of the space.
   1417   Address start_;
   1418   // Used to govern object promotion during mark-compact collection.
   1419   Address age_mark_;
   1420 
   1421   // Masks and comparison values to test for containment in this semispace.
   1422   uintptr_t address_mask_;
   1423   uintptr_t object_mask_;
   1424   uintptr_t object_expected_;
   1425 
   1426   bool committed_;
   1427 
   1428  public:
   1429   TRACK_MEMORY("SemiSpace")
   1430 };
   1431 
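        // The Contains() tests above work because a semispace occupies a
        // power-of-two sized address range aligned to that size, so masking any
        // address in the range yields the start of the space.  A worked example
        // with purely illustrative numbers:
        //
        //   capacity        C = 0x100000 (1 MB, a power of two)
        //   start_            = 0x40200000 (aligned to C)
        //   address_mask_     = ~(C - 1) = 0xFFF00000
        //   a                 = 0x4020ABCD
        //   a & address_mask_ == 0x40200000 == start_  =>  Contains(a) is true
        //
        // The Object* variant additionally folds the heap-object tag into
        // object_mask_ / object_expected_ so tagged pointers can be tested directly.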
   1432 
   1433 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
   1434 // semispace of the heap's new space.  It iterates over the objects in the
   1435 // semispace from a given start address (defaulting to the bottom of the
   1436 // semispace) to the top of the semispace.  New objects allocated after the
   1437 // iterator is created are not iterated.
   1438 class SemiSpaceIterator : public ObjectIterator {
   1439  public:
   1440   // Create an iterator over the objects in the given space.  If no start
   1441   // address is given, the iterator starts from the bottom of the space.  If
   1442   // no size function is given, the iterator calls Object::Size().
   1443   explicit SemiSpaceIterator(NewSpace* space);
   1444   SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
   1445   SemiSpaceIterator(NewSpace* space, Address start);
   1446 
   1447   HeapObject* next() {
   1448     if (current_ == limit_) return NULL;
   1449 
   1450     HeapObject* object = HeapObject::FromAddress(current_);
   1451     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
   1452 
   1453     current_ += size;
   1454     return object;
   1455   }
   1456 
   1457   // Implementation of the ObjectIterator functions.
   1458   virtual HeapObject* next_object() { return next(); }
   1459 
   1460  private:
   1461   void Initialize(NewSpace* space, Address start, Address end,
   1462                   HeapObjectCallback size_func);
   1463 
   1464   // The semispace.
   1465   SemiSpace* space_;
   1466   // The current iteration point.
   1467   Address current_;
   1468   // The end of iteration.
   1469   Address limit_;
   1470   // The callback function.
   1471   HeapObjectCallback size_func_;
   1472 };
   1473 
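        // A typical use of the iterator above is a linear walk over everything
        // allocated in the active semispace so far.  A minimal usage sketch, where
        // 'new_space' stands for the heap's NewSpace (illustrative only):
        //
        //   SemiSpaceIterator it(new_space);
        //   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
        //     // Visit 'object'; iteration stops at the allocation top captured
        //     // when the iterator was constructed.
        //   }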
   1474 
   1475 // -----------------------------------------------------------------------------
   1476 // The young generation space.
   1477 //
   1478 // The new space consists of a contiguous pair of semispaces.  It simply
   1479 // forwards most functions to the appropriate semispace.
   1480 
   1481 class NewSpace : public Space {
   1482  public:
   1483   // Constructor.
   1484   explicit NewSpace(Heap* heap)
   1485     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
   1486       to_space_(heap),
   1487       from_space_(heap) {}
   1488 
   1489   // Sets up the new space using the given chunk.
   1490   bool Setup(Address start, int size);
   1491 
   1492   // Tears down the space.  Heap memory was not allocated by the space, so it
   1493   // is not deallocated here.
   1494   void TearDown();
   1495 
   1496   // True if the space has been set up but not torn down.
   1497   bool HasBeenSetup() {
   1498     return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
   1499   }
   1500 
   1501   // Flip the pair of spaces.
   1502   void Flip();
   1503 
   1504   // Grow the capacity of the semispaces.  Assumes that they are not at
   1505   // their maximum capacity.
   1506   void Grow();
   1507 
   1508   // Shrink the capacity of the semispaces.
   1509   void Shrink();
   1510 
   1511   // True if the address or object lies in the address range of either
   1512   // semispace (not necessarily below the allocation pointer).
   1513   bool Contains(Address a) {
   1514     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
   1515         == reinterpret_cast<uintptr_t>(start_);
   1516   }
   1517   bool Contains(Object* o) {
   1518     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
   1519   }
   1520 
   1521   // Return the allocated bytes in the active semispace.
   1522   virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
   1523   // The same, but returning an int.  We must keep the version that returns
   1524   // intptr_t because it is inherited, but when we know we are dealing with the
   1525   // new space, which cannot grow as large as the other spaces, this is useful:
   1526   int SizeAsInt() { return static_cast<int>(Size()); }
   1527 
   1528   // Return the current capacity of a semispace.
   1529   intptr_t Capacity() {
   1530     ASSERT(to_space_.Capacity() == from_space_.Capacity());
   1531     return to_space_.Capacity();
   1532   }
   1533 
   1534   // Return the total amount of memory committed for new space.
   1535   intptr_t CommittedMemory() {
   1536     if (from_space_.is_committed()) return 2 * Capacity();
   1537     return Capacity();
   1538   }
   1539 
   1540   // Return the available bytes without growing in the active semispace.
   1541   intptr_t Available() { return Capacity() - Size(); }
   1542 
   1543   // Return the maximum capacity of a semispace.
   1544   int MaximumCapacity() {
   1545     ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
   1546     return to_space_.MaximumCapacity();
   1547   }
   1548 
   1549   // Returns the initial capacity of a semispace.
   1550   int InitialCapacity() {
   1551     ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
   1552     return to_space_.InitialCapacity();
   1553   }
   1554 
   1555   // Return the address of the allocation pointer in the active semispace.
   1556   Address top() { return allocation_info_.top; }
   1557   // Return the address of the first object in the active semispace.
   1558   Address bottom() { return to_space_.low(); }
   1559 
   1560   // Get the age mark of the inactive semispace.
   1561   Address age_mark() { return from_space_.age_mark(); }
   1562   // Set the age mark in the active semispace.
   1563   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
   1564 
   1565   // The start address of the space and a bit mask. Anding an address in the
   1566   // new space with the mask will result in the start address.
   1567   Address start() { return start_; }
   1568   uintptr_t mask() { return address_mask_; }
   1569 
   1570   // The allocation top and limit addresses.
   1571   Address* allocation_top_address() { return &allocation_info_.top; }
   1572   Address* allocation_limit_address() { return &allocation_info_.limit; }
   1573 
   1574   MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
   1575     return AllocateRawInternal(size_in_bytes, &allocation_info_);
   1576   }
   1577 
   1578   // Allocate the requested number of bytes for relocation during mark-compact
   1579   // collection.
   1580   MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
   1581     return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
   1582   }
   1583 
   1584   // Reset the allocation pointer to the beginning of the active semispace.
   1585   void ResetAllocationInfo();
   1586   // Reset the relocation pointer to the bottom of the inactive semispace in
   1587   // preparation for mark-compact collection.
   1588   void MCResetRelocationInfo();
   1589   // Update the allocation pointer in the active semispace after a
   1590   // mark-compact collection.
   1591   void MCCommitRelocationInfo();
   1592 
   1593   // Get the extent of the inactive semispace (for use as a marking stack).
   1594   Address FromSpaceLow() { return from_space_.low(); }
   1595   Address FromSpaceHigh() { return from_space_.high(); }
   1596 
   1597   // Get the extent of the active semispace (to sweep newly copied objects
   1598   // during a scavenge collection).
   1599   Address ToSpaceLow() { return to_space_.low(); }
   1600   Address ToSpaceHigh() { return to_space_.high(); }
   1601 
   1602   // Offsets from the beginning of the semispaces.
   1603   int ToSpaceOffsetForAddress(Address a) {
   1604     return to_space_.SpaceOffsetForAddress(a);
   1605   }
   1606   int FromSpaceOffsetForAddress(Address a) {
   1607     return from_space_.SpaceOffsetForAddress(a);
   1608   }
   1609 
   1610   // True if the object is a heap object in the address range of the
   1611   // respective semispace (not necessarily below the allocation pointer of the
   1612   // semispace).
   1613   bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
   1614   bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
   1615 
   1616   bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
   1617   bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
   1618 
   1619   virtual bool ReserveSpace(int bytes);
   1620 
   1621   // Resizes a sequential string, which must be the most recently allocated
   1622   // object in new space.
   1623   template <typename StringType>
   1624   inline void ShrinkStringAtAllocationBoundary(String* string, int len);
   1625 
   1626 #ifdef ENABLE_HEAP_PROTECTION
   1627   // Protect/unprotect the space by marking it read-only/writable.
   1628   virtual void Protect();
   1629   virtual void Unprotect();
   1630 #endif
   1631 
   1632 #ifdef DEBUG
   1633   // Verify the active semispace.
   1634   virtual void Verify();
   1635   // Print the active semispace.
   1636   virtual void Print() { to_space_.Print(); }
   1637 #endif
   1638 
   1639 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   1640   // Iterates the active semispace to collect statistics.
   1641   void CollectStatistics();
   1642   // Reports previously collected statistics of the active semispace.
   1643   void ReportStatistics();
   1644   // Clears previously collected statistics.
   1645   void ClearHistograms();
   1646 
   1647   // Record the allocation or promotion of a heap object.  Note that we don't
   1648   // record every single allocation, but only those that happen in the
   1649   // to space during a scavenge GC.
   1650   void RecordAllocation(HeapObject* obj);
   1651   void RecordPromotion(HeapObject* obj);
   1652 #endif
   1653 
   1654   // Return whether the operation succeeded.
   1655   bool CommitFromSpaceIfNeeded() {
   1656     if (from_space_.is_committed()) return true;
   1657     return from_space_.Commit();
   1658   }
   1659 
   1660   bool UncommitFromSpace() {
   1661     if (!from_space_.is_committed()) return true;
   1662     return from_space_.Uncommit();
   1663   }
   1664 
   1665  private:
   1666   // The semispaces.
   1667   SemiSpace to_space_;
   1668   SemiSpace from_space_;
   1669 
   1670   // Start address and bit mask for containment testing.
   1671   Address start_;
   1672   uintptr_t address_mask_;
   1673   uintptr_t object_mask_;
   1674   uintptr_t object_expected_;
   1675 
   1676   // Allocation pointer and limit for normal allocation and allocation during
   1677   // mark-compact collection.
   1678   AllocationInfo allocation_info_;
   1679   AllocationInfo mc_forwarding_info_;
   1680 
   1681 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   1682   HistogramInfo* allocated_histogram_;
   1683   HistogramInfo* promoted_histogram_;
   1684 #endif
   1685 
   1686   // Implementation of AllocateRaw and MCAllocateRaw.
   1687   MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
   1688       int size_in_bytes,
   1689       AllocationInfo* alloc_info);
   1690 
   1691   friend class SemiSpaceIterator;
   1692 
   1693  public:
   1694   TRACK_MEMORY("NewSpace")
   1695 };
   1696 
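        // During a scavenge the collector swaps the roles of the two semispaces:
        // survivors are evacuated out of from-space (the previous to-space) into
        // the fresh to-space or promoted to old space.  A rough sketch of the
        // collector-side pattern, simplified from the real scavenger in heap.cc:
        //
        //   new_space->Flip();                 // to_space_ and from_space_ swap roles
        //   new_space->ResetAllocationInfo();  // start allocating from the bottom
        //   // ... copy reachable objects out of from-space; objects below
        //   // new_space->age_mark() have already survived one scavenge and are
        //   // promoted to old space ...
        //   new_space->set_age_mark(new_space->top());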
   1697 
   1698 // -----------------------------------------------------------------------------
   1699 // Free lists for old object spaces
   1700 //
   1701 // Free-list nodes are free blocks in the heap.  They look like heap objects
   1702 // (free-list node pointers have the heap object tag, and they have a map like
   1703 // a heap object).  They have a size and a next pointer.  The next pointer is
   1704 // the raw address of the next free list node (or NULL).
   1705 class FreeListNode: public HeapObject {
   1706  public:
   1707   // Obtain a free-list node from a raw address.  This is not a cast because
   1708   // it neither checks nor requires that the first word at the address is a map
   1709   // pointer.
   1710   static FreeListNode* FromAddress(Address address) {
   1711     return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
   1712   }
   1713 
   1714   static inline bool IsFreeListNode(HeapObject* object);
   1715 
   1716   // Set the size in bytes, which can be read with HeapObject::Size().  This
   1717   // function also writes a map to the first word of the block so that it
   1718   // looks like a heap object to the garbage collector and heap iteration
   1719   // functions.
   1720   void set_size(Heap* heap, int size_in_bytes);
   1721 
   1722   // Accessors for the next field.
   1723   inline Address next(Heap* heap);
   1724   inline void set_next(Heap* heap, Address next);
   1725 
   1726  private:
   1727   static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
   1728 
   1729   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
   1730 };
   1731 
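        // A freed block is disguised as an ordinary heap object so that the GC and
        // heap iterators can step over it: set_size() writes a map (and a length,
        // for blocks large enough to need one) into the block.  A minimal sketch of
        // recycling a block through a free list, where 'old_head' stands for the
        // list's current head (illustrative only):
        //
        //   FreeListNode* node = FreeListNode::FromAddress(start);
        //   node->set_size(heap, size_in_bytes);  // Now HeapObject::Size() works.
        //   node->set_next(heap, old_head);       // Link it into the owning list.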
   1732 
   1733 // The free list for the old space.
   1734 class OldSpaceFreeList BASE_EMBEDDED {
   1735  public:
   1736   OldSpaceFreeList(Heap* heap, AllocationSpace owner);
   1737 
   1738   // Clear the free list.
   1739   void Reset();
   1740 
   1741   // Return the number of bytes available on the free list.
   1742   intptr_t available() { return available_; }
   1743 
   1744   // Place a node on the free list.  The block of size 'size_in_bytes'
   1745   // starting at 'start' is placed on the free list.  The return value is the
   1746   // number of bytes that have been lost due to internal fragmentation by
   1747   // freeing the block.  Bookkeeping information will be written to the block,
   1748   // ie, its contents will be destroyed.  The start address should be word
   1749   // aligned, and the size should be a non-zero multiple of the word size.
   1750   int Free(Address start, int size_in_bytes);
   1751 
   1752   // Allocate a block of size 'size_in_bytes' from the free list.  The block
   1753   // is uninitialized.  A failure is returned if no block is available.  The
   1754   // number of bytes lost to fragmentation is returned in the output parameter
   1755   // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
   1756   MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
   1757 
   1758   void MarkNodes();
   1759 
   1760  private:
   1761   // The size range of blocks, in bytes. (Smaller allocations are allowed, but
   1762   // will always result in waste.)
   1763   static const int kMinBlockSize = 2 * kPointerSize;
   1764   static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
   1765 
   1766   Heap* heap_;
   1767 
   1768   // The identity of the owning space, for building allocation Failure
   1769   // objects.
   1770   AllocationSpace owner_;
   1771 
   1772   // Total available bytes in all blocks on this free list.
   1773   int available_;
   1774 
   1775   // Blocks are put on exact free lists in an array, indexed by size in words.
   1776   // The available sizes are kept in a list ordered by increasing size. Entries
   1777   // corresponding to sizes < kMinBlockSize always have an empty free list
   1778   // (but index kHead is used for the head of the size list).
   1779   struct SizeNode {
   1780     // Address of the head FreeListNode of the implied block size or NULL.
   1781     Address head_node_;
   1782     // Size (words) of the next larger available size if head_node_ != NULL.
   1783     int next_size_;
   1784   };
   1785   static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
   1786   SizeNode free_[kFreeListsLength];
   1787 
   1788   // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
   1789   static const int kHead = kMinBlockSize / kPointerSize - 1;
   1790   static const int kEnd = kMaxInt;
   1791 
   1792   // We keep a "finger" in the size list to speed up a common pattern:
   1793   // repeated requests for the same or increasing sizes.
   1794   int finger_;
   1795 
   1796   // Starting from *prev, find and return the smallest size >= index (words),
   1797   // or kEnd. Update *prev to be the largest size < index, or kHead.
   1798   int FindSize(int index, int* prev) {
   1799     int cur = free_[*prev].next_size_;
   1800     while (cur < index) {
   1801       *prev = cur;
   1802       cur = free_[cur].next_size_;
   1803     }
   1804     return cur;
   1805   }
   1806 
   1807   // Remove an existing element from the size list.
   1808   void RemoveSize(int index) {
   1809     int prev = kHead;
   1810     int cur = FindSize(index, &prev);
   1811     ASSERT(cur == index);
   1812     free_[prev].next_size_ = free_[cur].next_size_;
   1813     finger_ = prev;
   1814   }
   1815 
   1816   // Insert a new element into the size list.
   1817   void InsertSize(int index) {
   1818     int prev = kHead;
   1819     int cur = FindSize(index, &prev);
   1820     ASSERT(cur != index);
   1821     free_[prev].next_size_ = index;
   1822     free_[index].next_size_ = cur;
   1823   }
   1824 
   1825   // The size list is not updated during a sequence of calls to Free, but is
   1826   // rebuilt before the next allocation.
   1827   void RebuildSizeList();
   1828   bool needs_rebuild_;
   1829 
   1830 #ifdef DEBUG
   1831   // Does this free list contain a free block located at the address of 'node'?
   1832   bool Contains(FreeListNode* node);
   1833 #endif
   1834 
   1835   DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
   1836 };
   1837 
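        // The size list and finger above are easiest to see with a small example,
        // using purely illustrative sizes and a 4-byte word.  After freeing a
        // 24-byte block and a 40-byte block:
        //
        //   free_[6].head_node_   -> the 24-byte node   (24 / 4 == 6 words)
        //   free_[10].head_node_  -> the 40-byte node   (40 / 4 == 10 words)
        //   size list: kHead -> 6 -> 10 -> kEnd
        //
        // A request for 32 bytes (8 words) walks the size list starting at the
        // finger, finds 10 as the smallest size >= 8, and unlinks that node.  The
        // 8 leftover bytes are either re-freed as a smaller block or, when below
        // kMinBlockSize, reported as wasted bytes.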
   1838 
   1839 // The free list for the map space.
   1840 class FixedSizeFreeList BASE_EMBEDDED {
   1841  public:
   1842   FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
   1843 
   1844   // Clear the free list.
   1845   void Reset();
   1846 
   1847   // Return the number of bytes available on the free list.
   1848   intptr_t available() { return available_; }
   1849 
   1850   // Place a node on the free list.  The block starting at 'start' (assumed to
   1851   // have size object_size_) is placed on the free list.  Bookkeeping
   1852   // information will be written to the block, i.e., its contents will be
   1853   // destroyed.  The start address should be word aligned.
   1854   void Free(Address start);
   1855 
   1856   // Allocate a fixed-size block from the free list.  The block is uninitialized.
   1857   // A failure is returned if no block is available.
   1858   MUST_USE_RESULT MaybeObject* Allocate();
   1859 
   1860   void MarkNodes();
   1861 
   1862  private:
   1863 
   1864   Heap* heap_;
   1865 
   1866   // Available bytes on the free list.
   1867   intptr_t available_;
   1868 
   1869   // The head of the free list.
   1870   Address head_;
   1871 
   1872   // The tail of the free list.
   1873   Address tail_;
   1874 
   1875   // The identity of the owning space, for building allocation Failure
   1876   // objects.
   1877   AllocationSpace owner_;
   1878 
   1879   // The size of the objects in this space.
   1880   int object_size_;
   1881 
   1882   DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
   1883 };
   1884 
   1885 
   1886 // -----------------------------------------------------------------------------
   1887 // Old object space (excluding map objects)
   1888 
   1889 class OldSpace : public PagedSpace {
   1890  public:
   1891   // Creates an old space object with a given maximum capacity.
   1892   // The constructor does not allocate pages from the OS.
   1893   OldSpace(Heap* heap,
   1894            intptr_t max_capacity,
   1895            AllocationSpace id,
   1896            Executability executable)
   1897       : PagedSpace(heap, max_capacity, id, executable),
   1898         free_list_(heap, id) {
   1899     page_extra_ = 0;
   1900   }
   1901 
   1902   // The bytes available on the free list (i.e., not above the linear allocation
   1903   // pointer).
   1904   intptr_t AvailableFree() { return free_list_.available(); }
   1905 
   1906   // The limit of allocation for a page in this space.
   1907   virtual Address PageAllocationLimit(Page* page) {
   1908     return page->ObjectAreaEnd();
   1909   }
   1910 
   1911   // Give a block of memory to the space's free list.  It might be added to
   1912   // the free list or accounted as waste.
   1913   // If add_to_freelist is false, only the accounting stats are updated and
   1914   // no attempt is made to add the area to the free list.
   1915   void Free(Address start, int size_in_bytes, bool add_to_freelist) {
   1916     accounting_stats_.DeallocateBytes(size_in_bytes);
   1917 
   1918     if (add_to_freelist) {
   1919       int wasted_bytes = free_list_.Free(start, size_in_bytes);
   1920       accounting_stats_.WasteBytes(wasted_bytes);
   1921     }
   1922   }
   1923 
   1924   virtual void DeallocateBlock(Address start,
   1925                                int size_in_bytes,
   1926                                bool add_to_freelist);
   1927 
   1928   // Prepare for full garbage collection.  Resets the relocation pointer and
   1929   // clears the free list.
   1930   virtual void PrepareForMarkCompact(bool will_compact);
   1931 
   1932   // Updates the allocation pointer to the relocation top after a mark-compact
   1933   // collection.
   1934   virtual void MCCommitRelocationInfo();
   1935 
   1936   virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
   1937 
   1938   void MarkFreeListNodes() { free_list_.MarkNodes(); }
   1939 
   1940 #ifdef DEBUG
   1941   // Reports statistics for the space
   1942   void ReportStatistics();
   1943 #endif
   1944 
   1945  protected:
   1946   // Virtual function in the superclass.  Slow path of AllocateRaw.
   1947   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
   1948 
   1949   // Virtual function in the superclass.  Allocate linearly at the start of
   1950   // the page after current_page (there is assumed to be one).
   1951   HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
   1952 
   1953  private:
   1954   // The space's free list.
   1955   OldSpaceFreeList free_list_;
   1956 
   1957  public:
   1958   TRACK_MEMORY("OldSpace")
   1959 };
   1960 
   1961 
   1962 // -----------------------------------------------------------------------------
   1963 // Old space for objects of a fixed size
   1964 
   1965 class FixedSpace : public PagedSpace {
   1966  public:
   1967   FixedSpace(Heap* heap,
   1968              intptr_t max_capacity,
   1969              AllocationSpace id,
   1970              int object_size_in_bytes,
   1971              const char* name)
   1972       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
   1973         object_size_in_bytes_(object_size_in_bytes),
   1974         name_(name),
   1975         free_list_(heap, id, object_size_in_bytes) {
   1976     page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
   1977   }
   1978 
   1979   // The limit of allocation for a page in this space.
   1980   virtual Address PageAllocationLimit(Page* page) {
   1981     return page->ObjectAreaEnd() - page_extra_;
   1982   }
   1983 
   1984   int object_size_in_bytes() { return object_size_in_bytes_; }
   1985 
   1986   // Give a fixed-size block of memory to the space's free list.
   1987   // If add_to_freelist is false, only the accounting stats are updated and
   1988   // no attempt is made to add the area to the free list.
   1989   void Free(Address start, bool add_to_freelist) {
   1990     if (add_to_freelist) {
   1991       free_list_.Free(start);
   1992     }
   1993     accounting_stats_.DeallocateBytes(object_size_in_bytes_);
   1994   }
   1995 
   1996   // Prepares for a mark-compact GC.
   1997   virtual void PrepareForMarkCompact(bool will_compact);
   1998 
   1999   // Updates the allocation pointer to the relocation top after a mark-compact
   2000   // collection.
   2001   virtual void MCCommitRelocationInfo();
   2002 
   2003   virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
   2004 
   2005   virtual void DeallocateBlock(Address start,
   2006                                int size_in_bytes,
   2007                                bool add_to_freelist);
   2008 
   2009   void MarkFreeListNodes() { free_list_.MarkNodes(); }
   2010 
   2011 #ifdef DEBUG
   2012   // Reports statistics for the space
   2013   void ReportStatistics();
   2014 #endif
   2015 
   2016  protected:
   2017   // Virtual function in the superclass.  Slow path of AllocateRaw.
   2018   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
   2019 
   2020   // Virtual function in the superclass.  Allocate linearly at the start of
   2021   // the page after current_page (there is assumed to be one).
   2022   HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
   2023 
   2024   void ResetFreeList() {
   2025     free_list_.Reset();
   2026   }
   2027 
   2028  private:
   2029   // The size of objects in this space.
   2030   int object_size_in_bytes_;
   2031 
   2032   // The name of this space.
   2033   const char* name_;
   2034 
   2035   // The space's free list.
   2036   FixedSizeFreeList free_list_;
   2037 };
   2038 
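        // page_extra_ above prevents a fixed-size space from handing out an object
        // that would straddle the page boundary: the tail of each page that cannot
        // hold a whole object is never allocated.  With purely illustrative
        // numbers, an 8000-byte object area and 88-byte objects give
        // page_extra_ = 8000 % 88 = 80, so PageAllocationLimit() stops 80 bytes
        // short of ObjectAreaEnd().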
   2039 
   2040 // -----------------------------------------------------------------------------
   2041 // Old space for all map objects
   2042 
   2043 class MapSpace : public FixedSpace {
   2044  public:
   2045   // Creates a map space object with a maximum capacity.
   2046   MapSpace(Heap* heap,
   2047            intptr_t max_capacity,
   2048            int max_map_space_pages,
   2049            AllocationSpace id)
   2050       : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
   2051         max_map_space_pages_(max_map_space_pages) {
   2052     ASSERT(max_map_space_pages < kMaxMapPageIndex);
   2053   }
   2054 
   2055   // Prepares for a mark-compact GC.
   2056   virtual void PrepareForMarkCompact(bool will_compact);
   2057 
   2058   // Given an index, returns the page address.
   2059   Address PageAddress(int page_index) { return page_addresses_[page_index]; }
   2060 
   2061   static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
   2062 
   2063   // Are map pointers encodable into the map word?
   2064   bool MapPointersEncodable() {
   2065     if (!FLAG_use_big_map_space) {
   2066       ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
   2067       return true;
   2068     }
   2069     return CountPagesToTop() <= max_map_space_pages_;
   2070   }
   2071 
   2072   // Should be called after a forced sweep to find out whether the map space
   2073   // needs compaction.
   2074   bool NeedsCompaction(int live_maps) {
   2075     return !MapPointersEncodable() && live_maps <= CompactionThreshold();
   2076   }
   2077 
   2078   Address TopAfterCompaction(int live_maps) {
   2079     ASSERT(NeedsCompaction(live_maps));
   2080 
   2081     int pages_left = live_maps / kMapsPerPage;
   2082     PageIterator it(this, PageIterator::ALL_PAGES);
   2083     while (pages_left-- > 0) {
   2084       ASSERT(it.has_next());
   2085       it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
   2086     }
   2087     ASSERT(it.has_next());
   2088     Page* top_page = it.next();
   2089     top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
   2090     ASSERT(top_page->is_valid());
   2091 
   2092     int offset = live_maps % kMapsPerPage * Map::kSize;
   2093     Address top = top_page->ObjectAreaStart() + offset;
   2094     ASSERT(top < top_page->ObjectAreaEnd());
   2095     ASSERT(Contains(top));
   2096 
   2097     return top;
   2098   }
   2099 
   2100   void FinishCompaction(Address new_top, int live_maps) {
   2101     Page* top_page = Page::FromAddress(new_top);
   2102     ASSERT(top_page->is_valid());
   2103 
   2104     SetAllocationInfo(&allocation_info_, top_page);
   2105     allocation_info_.top = new_top;
   2106 
   2107     int new_size = live_maps * Map::kSize;
   2108     accounting_stats_.DeallocateBytes(accounting_stats_.Size());
   2109     accounting_stats_.AllocateBytes(new_size);
   2110 
   2111     // Flush allocation watermarks.
   2112     for (Page* p = first_page_; p != top_page; p = p->next_page()) {
   2113       p->SetAllocationWatermark(p->AllocationTop());
   2114     }
   2115     top_page->SetAllocationWatermark(new_top);
   2116 
   2117 #ifdef DEBUG
   2118     if (FLAG_enable_slow_asserts) {
   2119       intptr_t actual_size = 0;
   2120       for (Page* p = first_page_; p != top_page; p = p->next_page())
   2121         actual_size += kMapsPerPage * Map::kSize;
   2122       actual_size += (new_top - top_page->ObjectAreaStart());
   2123       ASSERT(accounting_stats_.Size() == actual_size);
   2124     }
   2125 #endif
   2126 
   2127     Shrink();
   2128     ResetFreeList();
   2129   }
   2130 
   2131  protected:
   2132 #ifdef DEBUG
   2133   virtual void VerifyObject(HeapObject* obj);
   2134 #endif
   2135 
   2136  private:
   2137   static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
   2138 
   2139   // Do map space compaction if there is a page gap.
   2140   int CompactionThreshold() {
   2141     return kMapsPerPage * (max_map_space_pages_ - 1);
   2142   }
   2143 
   2144   const int max_map_space_pages_;
   2145 
   2146   // An array of page start addresses in the map space.
   2147   Address page_addresses_[kMaxMapPageIndex];
   2148 
   2149  public:
   2150   TRACK_MEMORY("MapSpace")
   2151 };
   2152 
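        // The arithmetic in TopAfterCompaction() is easiest to follow with
        // hypothetical numbers.  If kMapsPerPage were 80 and live_maps were 250,
        // the compacted maps would occupy
        //
        //   250 / 80 = 3 full pages, plus
        //   250 % 80 = 10 maps on the next (top) page,
        //
        // so the new allocation top is ObjectAreaStart() of that fourth page plus
        // 10 * Map::kSize, and FinishCompaction() resets the accounting to
        // 250 * Map::kSize allocated bytes before shrinking the space.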
   2153 
   2154 // -----------------------------------------------------------------------------
   2155 // Old space for all global object property cell objects
   2156 
   2157 class CellSpace : public FixedSpace {
   2158  public:
   2159   // Creates a property cell space object with a maximum capacity.
   2160   CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
   2161       : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
   2162   {}
   2163 
   2164  protected:
   2165 #ifdef DEBUG
   2166   virtual void VerifyObject(HeapObject* obj);
   2167 #endif
   2168 
   2169  public:
   2170   TRACK_MEMORY("CellSpace")
   2171 };
   2172 
   2173 
   2174 // -----------------------------------------------------------------------------
   2175 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
   2176   // the large object space. A large object is allocated from the OS heap with
   2177   // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
   2178   // A large object always starts at offset Page::kObjectStartOffset within a page.
   2179 // Large objects do not move during garbage collections.
   2180 
   2181 // A LargeObjectChunk holds exactly one large object page with exactly one
   2182 // large object.
   2183 class LargeObjectChunk {
   2184  public:
   2185   // Allocates a new LargeObjectChunk that contains a large object page
   2186   // (Page::kPageSize aligned) with at least size_in_bytes of room for a large
   2187   // object after the object area start of that page.
   2188   static LargeObjectChunk* New(int size_in_bytes, Executability executable);
   2189 
   2190   // Free the memory associated with the chunk.
   2191   inline void Free(Executability executable);
   2192 
   2193   // Interpret a raw address as a large object chunk.
   2194   static LargeObjectChunk* FromAddress(Address address) {
   2195     return reinterpret_cast<LargeObjectChunk*>(address);
   2196   }
   2197 
   2198   // Returns the address of this chunk.
   2199   Address address() { return reinterpret_cast<Address>(this); }
   2200 
   2201   // Accessors for the fields of the chunk.
   2202   LargeObjectChunk* next() { return next_; }
   2203   void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
   2204   size_t size() { return size_ & ~Page::kPageFlagMask; }
   2205 
   2206   // Compute the start address in the chunk.
   2207   inline Address GetStartAddress();
   2208 
   2209   // Returns the object in this chunk.
   2210   HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
   2211 
   2212   // Given a requested size, returns the physical size of a chunk to be
   2213   // allocated.
   2214   static int ChunkSizeFor(int size_in_bytes);
   2215 
   2216   // Given a chunk size, returns the object size it can accommodate.  Used by
   2217   // LargeObjectSpace::Available.
   2218   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
   2219     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
   2220     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
   2221   }
   2222 
   2223  private:
   2224   // A pointer to the next large object chunk in the space or NULL.
   2225   LargeObjectChunk* next_;
   2226 
   2227   // The total size of this chunk.
   2228   size_t size_;
   2229 
   2230  public:
   2231   TRACK_MEMORY("LargeObjectChunk")
   2232 };
   2233 
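        // ObjectSizeFor() above is the inverse view of the chunk layout: each chunk
        // reserves Page::kPageSize + Page::kObjectStartOffset of overhead (alignment
        // slack plus the page header) before object payload can start.  With purely
        // illustrative values of a 64 KB chunk, an 8 KB page size and a 256-byte
        // object start offset, ObjectSizeFor(65536) = 65536 - 8192 - 256 = 57088,
        // and any chunk no larger than the overhead can hold no object at all
        // (the function returns 0).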
   2234 
   2235 class LargeObjectSpace : public Space {
   2236  public:
   2237   LargeObjectSpace(Heap* heap, AllocationSpace id);
   2238   virtual ~LargeObjectSpace() {}
   2239 
   2240   // Initializes internal data structures.
   2241   bool Setup();
   2242 
   2243   // Releases internal resources, frees objects in this space.
   2244   void TearDown();
   2245 
   2246   // Allocates a (non-FixedArray, non-Code) large object.
   2247   MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
   2248   // Allocates a large Code object.
   2249   MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
   2250   // Allocates a large FixedArray.
   2251   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
   2252 
   2253   // Available bytes for objects in this space.
   2254   inline intptr_t Available();
   2255 
   2256   virtual intptr_t Size() {
   2257     return size_;
   2258   }
   2259 
   2260   virtual intptr_t SizeOfObjects() {
   2261     return objects_size_;
   2262   }
   2263 
   2264   int PageCount() {
   2265     return page_count_;
   2266   }
   2267 
   2268   // Finds an object for a given address; returns Failure::Exception()
   2269   // if it is not found. The function iterates through all objects in this
   2270   // space and may be slow.
   2271   MaybeObject* FindObject(Address a);
   2272 
   2273   // Finds a large object page containing the given pc; returns NULL
   2274   // if no such page exists.
   2275   LargeObjectChunk* FindChunkContainingPc(Address pc);
   2276 
   2277   // Iterates objects covered by dirty regions.
   2278   void IterateDirtyRegions(ObjectSlotCallback func);
   2279 
   2280   // Frees unmarked objects.
   2281   void FreeUnmarkedObjects();
   2282 
   2283   // Checks whether a heap object is in this space; O(1).
   2284   bool Contains(HeapObject* obj);
   2285 
   2286   // Checks whether the space is empty.
   2287   bool IsEmpty() { return first_chunk_ == NULL; }
   2288 
   2289   // See the comments for ReserveSpace in the Space class.  This has to be
   2290   // called after ReserveSpace has been called on the paged spaces, since they
   2291   // may use some memory, leaving less for large objects.
   2292   virtual bool ReserveSpace(int bytes);
   2293 
   2294 #ifdef ENABLE_HEAP_PROTECTION
   2295   // Protect/unprotect the space by marking it read-only/writable.
   2296   void Protect();
   2297   void Unprotect();
   2298 #endif
   2299 
   2300 #ifdef DEBUG
   2301   virtual void Verify();
   2302   virtual void Print();
   2303   void ReportStatistics();
   2304   void CollectCodeStatistics();
   2305 #endif
   2306   // Checks whether an address is in the object area of this space.  It
   2307   // iterates over all objects in the space and may be slow.
   2308   bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
   2309 
   2310  private:
   2311   // The head of the linked list of large object chunks.
   2312   LargeObjectChunk* first_chunk_;
   2313   intptr_t size_;  // allocated bytes
   2314   int page_count_;  // number of chunks
   2315   intptr_t objects_size_;  // size of objects
   2316 
   2317   // Shared implementation of AllocateRaw, AllocateRawCode and
   2318   // AllocateRawFixedArray.
   2319   MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
   2320                                                    int object_size,
   2321                                                    Executability executable);
   2322 
   2323   friend class LargeObjectIterator;
   2324 
   2325  public:
   2326   TRACK_MEMORY("LargeObjectSpace")
   2327 };
   2328 
   2329 
   2330 class LargeObjectIterator: public ObjectIterator {
   2331  public:
   2332   explicit LargeObjectIterator(LargeObjectSpace* space);
   2333   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
   2334 
   2335   HeapObject* next();
   2336 
   2337   // Implementation of ObjectIterator.
   2338   virtual HeapObject* next_object() { return next(); }
   2339 
   2340  private:
   2341   LargeObjectChunk* current_;
   2342   HeapObjectCallback size_func_;
   2343 };
   2344 
   2345 
   2346 #ifdef DEBUG
   2347 struct CommentStatistic {
   2348   const char* comment;
   2349   int size;
   2350   int count;
   2351   void Clear() {
   2352     comment = NULL;
   2353     size = 0;
   2354     count = 0;
   2355   }
   2356   // Must be small, since an iteration is used for lookup.
   2357   static const int kMaxComments = 64;
   2358 };
   2359 #endif
   2360 
   2361 
   2362 } }  // namespace v8::internal
   2363 
   2364 #endif  // V8_SPACES_H_
   2365