      1 // Copyright 2011 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 #include "v8.h"
     29 
     30 #include "macro-assembler.h"
     31 #include "mark-compact.h"
     32 #include "platform.h"
     33 
     34 namespace v8 {
     35 namespace internal {
     36 
     37 
     38 // ----------------------------------------------------------------------------
     39 // HeapObjectIterator
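        //
        // A minimal usage sketch (assuming a precisely swept PagedSpace* space):
        //
        //   HeapObjectIterator it(space);
        //   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        //     // ... visit obj ...
        //   }
        //
        // The same pattern is used by PagedSpace::FindObject() further down.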
     40 
     41 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
     42   // You can't actually iterate over the anchor page.  It is not a real page,
      43 // just an anchor for the doubly linked page list.  Initialize as if we have
      44 // reached the end of the anchor page, so that the first iteration will move
      45 // on to the first page.
     46   Initialize(space,
     47              NULL,
     48              NULL,
     49              kAllPagesInSpace,
     50              NULL);
     51 }
     52 
     53 
     54 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
     55                                        HeapObjectCallback size_func) {
     56   // You can't actually iterate over the anchor page.  It is not a real page,
      57 // just an anchor for the doubly linked page list.  Initialize the current
      58 // address and end as NULL, so that the first iteration will move on
      59 // to the first page.
     60   Initialize(space,
     61              NULL,
     62              NULL,
     63              kAllPagesInSpace,
     64              size_func);
     65 }
     66 
     67 
     68 HeapObjectIterator::HeapObjectIterator(Page* page,
     69                                        HeapObjectCallback size_func) {
     70   Space* owner = page->owner();
     71   ASSERT(owner == page->heap()->old_pointer_space() ||
     72          owner == page->heap()->old_data_space() ||
     73          owner == page->heap()->map_space() ||
     74          owner == page->heap()->cell_space() ||
     75          owner == page->heap()->property_cell_space() ||
     76          owner == page->heap()->code_space());
     77   Initialize(reinterpret_cast<PagedSpace*>(owner),
     78              page->area_start(),
     79              page->area_end(),
     80              kOnePageOnly,
     81              size_func);
     82   ASSERT(page->WasSweptPrecisely());
     83 }
     84 
     85 
     86 void HeapObjectIterator::Initialize(PagedSpace* space,
     87                                     Address cur, Address end,
     88                                     HeapObjectIterator::PageMode mode,
     89                                     HeapObjectCallback size_f) {
     90   // Check that we actually can iterate this space.
     91   ASSERT(!space->was_swept_conservatively());
     92 
     93   space_ = space;
     94   cur_addr_ = cur;
     95   cur_end_ = end;
     96   page_mode_ = mode;
     97   size_func_ = size_f;
     98 }
     99 
    100 
     101 // We have hit the end of the current page's object area and should advance
     102 // to the next page of objects.
    103 bool HeapObjectIterator::AdvanceToNextPage() {
    104   ASSERT(cur_addr_ == cur_end_);
    105   if (page_mode_ == kOnePageOnly) return false;
    106   Page* cur_page;
    107   if (cur_addr_ == NULL) {
    108     cur_page = space_->anchor();
    109   } else {
    110     cur_page = Page::FromAddress(cur_addr_ - 1);
    111     ASSERT(cur_addr_ == cur_page->area_end());
    112   }
    113   cur_page = cur_page->next_page();
    114   if (cur_page == space_->anchor()) return false;
    115   cur_addr_ = cur_page->area_start();
    116   cur_end_ = cur_page->area_end();
    117   ASSERT(cur_page->WasSweptPrecisely());
    118   return true;
    119 }
    120 
    121 
    122 // -----------------------------------------------------------------------------
    123 // CodeRange
    124 
    125 
    126 CodeRange::CodeRange(Isolate* isolate)
    127     : isolate_(isolate),
    128       code_range_(NULL),
    129       free_list_(0),
    130       allocation_list_(0),
    131       current_allocation_block_index_(0) {
    132 }
    133 
    134 
    135 bool CodeRange::SetUp(const size_t requested) {
    136   ASSERT(code_range_ == NULL);
    137 
    138   code_range_ = new VirtualMemory(requested);
    139   CHECK(code_range_ != NULL);
    140   if (!code_range_->IsReserved()) {
    141     delete code_range_;
    142     code_range_ = NULL;
    143     return false;
    144   }
    145 
     146   // We are sure that we have mapped a block of the requested size.
    147   ASSERT(code_range_->size() == requested);
    148   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
    149   Address base = reinterpret_cast<Address>(code_range_->address());
    150   Address aligned_base =
    151       RoundUp(reinterpret_cast<Address>(code_range_->address()),
    152               MemoryChunk::kAlignment);
    153   size_t size = code_range_->size() - (aligned_base - base);
    154   allocation_list_.Add(FreeBlock(aligned_base, size));
    155   current_allocation_block_index_ = 0;
    156   return true;
    157 }
    158 
    159 
    160 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
    161                                        const FreeBlock* right) {
    162   // The entire point of CodeRange is that the difference between two
    163   // addresses in the range can be represented as a signed 32-bit int,
    164   // so the cast is semantically correct.
    165   return static_cast<int>(left->start - right->start);
    166 }
    167 
    168 
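        // Advances current_allocation_block_index_ to a free block that can hold
        // |requested| bytes.  If none of the remaining blocks is large enough, the
        // free list and the allocation list are merged and adjacent blocks are
        // coalesced; e.g. (illustrative addresses only) blocks (0x100000, 0x100000
        // bytes) and (0x200000, 0x200000 bytes) become one block (0x100000,
        // 0x300000 bytes).  If even then no block fits, this is treated as a fatal
        // out-of-memory condition.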
    169 void CodeRange::GetNextAllocationBlock(size_t requested) {
    170   for (current_allocation_block_index_++;
    171        current_allocation_block_index_ < allocation_list_.length();
    172        current_allocation_block_index_++) {
    173     if (requested <= allocation_list_[current_allocation_block_index_].size) {
    174       return;  // Found a large enough allocation block.
    175     }
    176   }
    177 
    178   // Sort and merge the free blocks on the free list and the allocation list.
    179   free_list_.AddAll(allocation_list_);
    180   allocation_list_.Clear();
    181   free_list_.Sort(&CompareFreeBlockAddress);
    182   for (int i = 0; i < free_list_.length();) {
    183     FreeBlock merged = free_list_[i];
    184     i++;
    185     // Add adjacent free blocks to the current merged block.
    186     while (i < free_list_.length() &&
    187            free_list_[i].start == merged.start + merged.size) {
    188       merged.size += free_list_[i].size;
    189       i++;
    190     }
    191     if (merged.size > 0) {
    192       allocation_list_.Add(merged);
    193     }
    194   }
    195   free_list_.Clear();
    196 
    197   for (current_allocation_block_index_ = 0;
    198        current_allocation_block_index_ < allocation_list_.length();
    199        current_allocation_block_index_++) {
    200     if (requested <= allocation_list_[current_allocation_block_index_].size) {
    201       return;  // Found a large enough allocation block.
    202     }
    203   }
    204 
    205   // Code range is full or too fragmented.
    206   V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
    207 }
    208 
    209 
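        // Carves the (alignment-rounded) request off the front of the current
        // allocation block and commits the first |commit_size| bytes of it.  If the
        // carve would leave behind less than one page, the whole block is handed
        // out; e.g. (sizes assumed for illustration, with a 1 MB page) an aligned
        // request of 2.5 MB against a 3 MB block consumes the full 3 MB, because
        // 2.5 MB >= 3 MB - 1 MB.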
    210 Address CodeRange::AllocateRawMemory(const size_t requested_size,
    211                                      const size_t commit_size,
    212                                      size_t* allocated) {
    213   ASSERT(commit_size <= requested_size);
    214   ASSERT(current_allocation_block_index_ < allocation_list_.length());
    215   if (requested_size > allocation_list_[current_allocation_block_index_].size) {
     216     // Find an allocation block large enough.  GetNextAllocationBlock() may
    217     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    218     GetNextAllocationBlock(requested_size);
    219   }
    220   // Commit the requested memory at the start of the current allocation block.
    221   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
    222   FreeBlock current = allocation_list_[current_allocation_block_index_];
    223   if (aligned_requested >= (current.size - Page::kPageSize)) {
    224     // Don't leave a small free block, useless for a large object or chunk.
    225     *allocated = current.size;
    226   } else {
    227     *allocated = aligned_requested;
    228   }
    229   ASSERT(*allocated <= current.size);
    230   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
    231   if (!MemoryAllocator::CommitExecutableMemory(code_range_,
    232                                                current.start,
    233                                                commit_size,
    234                                                *allocated)) {
    235     *allocated = 0;
    236     return NULL;
    237   }
    238   allocation_list_[current_allocation_block_index_].start += *allocated;
    239   allocation_list_[current_allocation_block_index_].size -= *allocated;
    240   if (*allocated == current.size) {
    241     GetNextAllocationBlock(0);  // This block is used up, get the next one.
    242   }
    243   return current.start;
    244 }
    245 
    246 
    247 bool CodeRange::CommitRawMemory(Address start, size_t length) {
    248   return code_range_->Commit(start, length, true);
    249 }
    250 
    251 
    252 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
    253   return code_range_->Uncommit(start, length);
    254 }
    255 
    256 
    257 void CodeRange::FreeRawMemory(Address address, size_t length) {
    258   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
    259   free_list_.Add(FreeBlock(address, length));
    260   code_range_->Uncommit(address, length);
    261 }
    262 
    263 
    264 void CodeRange::TearDown() {
    265     delete code_range_;  // Frees all memory in the virtual memory range.
    266     code_range_ = NULL;
    267     free_list_.Free();
    268     allocation_list_.Free();
    269 }
    270 
    271 
    272 // -----------------------------------------------------------------------------
    273 // MemoryAllocator
    274 //
    275 
    276 MemoryAllocator::MemoryAllocator(Isolate* isolate)
    277     : isolate_(isolate),
    278       capacity_(0),
    279       capacity_executable_(0),
    280       size_(0),
    281       size_executable_(0) {
    282 }
    283 
    284 
    285 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
    286   capacity_ = RoundUp(capacity, Page::kPageSize);
    287   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
    288   ASSERT_GE(capacity_, capacity_executable_);
    289 
    290   size_ = 0;
    291   size_executable_ = 0;
    292 
    293   return true;
    294 }
    295 
    296 
    297 void MemoryAllocator::TearDown() {
    298   // Check that spaces were torn down before MemoryAllocator.
    299   ASSERT(size_ == 0);
    300   // TODO(gc) this will be true again when we fix FreeMemory.
    301   // ASSERT(size_executable_ == 0);
    302   capacity_ = 0;
    303   capacity_executable_ = 0;
    304 }
    305 
    306 
    307 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
    308                                  Executability executable) {
    309   // TODO(gc) make code_range part of memory allocator?
    310   ASSERT(reservation->IsReserved());
    311   size_t size = reservation->size();
    312   ASSERT(size_ >= size);
    313   size_ -= size;
    314 
    315   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
    316 
    317   if (executable == EXECUTABLE) {
    318     ASSERT(size_executable_ >= size);
    319     size_executable_ -= size;
    320   }
    321   // Code which is part of the code-range does not have its own VirtualMemory.
    322   ASSERT(!isolate_->code_range()->contains(
    323       static_cast<Address>(reservation->address())));
    324   ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
    325   reservation->Release();
    326 }
    327 
    328 
    329 void MemoryAllocator::FreeMemory(Address base,
    330                                  size_t size,
    331                                  Executability executable) {
    332   // TODO(gc) make code_range part of memory allocator?
    333   ASSERT(size_ >= size);
    334   size_ -= size;
    335 
    336   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
    337 
    338   if (executable == EXECUTABLE) {
    339     ASSERT(size_executable_ >= size);
    340     size_executable_ -= size;
    341   }
    342   if (isolate_->code_range()->contains(static_cast<Address>(base))) {
    343     ASSERT(executable == EXECUTABLE);
    344     isolate_->code_range()->FreeRawMemory(base, size);
    345   } else {
    346     ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
    347     bool result = VirtualMemory::ReleaseRegion(base, size);
    348     USE(result);
    349     ASSERT(result);
    350   }
    351 }
    352 
    353 
    354 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
    355                                               size_t alignment,
    356                                               VirtualMemory* controller) {
    357   VirtualMemory reservation(size, alignment);
    358 
    359   if (!reservation.IsReserved()) return NULL;
    360   size_ += reservation.size();
    361   Address base = RoundUp(static_cast<Address>(reservation.address()),
    362                          alignment);
    363   controller->TakeControl(&reservation);
    364   return base;
    365 }
    366 
    367 
    368 Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
    369                                                size_t commit_size,
    370                                                size_t alignment,
    371                                                Executability executable,
    372                                                VirtualMemory* controller) {
    373   ASSERT(commit_size <= reserve_size);
    374   VirtualMemory reservation;
    375   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
    376   if (base == NULL) return NULL;
    377 
    378   if (executable == EXECUTABLE) {
    379     if (!CommitExecutableMemory(&reservation,
    380                                 base,
    381                                 commit_size,
    382                                 reserve_size)) {
    383       base = NULL;
    384     }
    385   } else {
    386     if (!reservation.Commit(base, commit_size, false)) {
    387       base = NULL;
    388     }
    389   }
    390 
    391   if (base == NULL) {
    392     // Failed to commit the body. Release the mapping and any partially
     393     // committed regions inside it.
    394     reservation.Release();
    395     return NULL;
    396   }
    397 
    398   controller->TakeControl(&reservation);
    399   return base;
    400 }
    401 
    402 
    403 void Page::InitializeAsAnchor(PagedSpace* owner) {
    404   set_owner(owner);
    405   set_prev_page(this);
    406   set_next_page(this);
    407 }
    408 
    409 
    410 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
    411                                        Address start,
    412                                        SemiSpace* semi_space) {
    413   Address area_start = start + NewSpacePage::kObjectStartOffset;
    414   Address area_end = start + Page::kPageSize;
    415 
    416   MemoryChunk* chunk = MemoryChunk::Initialize(heap,
    417                                                start,
    418                                                Page::kPageSize,
    419                                                area_start,
    420                                                area_end,
    421                                                NOT_EXECUTABLE,
    422                                                semi_space);
    423   chunk->set_next_chunk(NULL);
    424   chunk->set_prev_chunk(NULL);
    425   chunk->initialize_scan_on_scavenge(true);
    426   bool in_to_space = (semi_space->id() != kFromSpace);
    427   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
    428                              : MemoryChunk::IN_FROM_SPACE);
    429   ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
    430                                        : MemoryChunk::IN_TO_SPACE));
    431   NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
    432   heap->incremental_marking()->SetNewSpacePageFlags(page);
    433   return page;
    434 }
    435 
    436 
    437 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
    438   set_owner(semi_space);
    439   set_next_chunk(this);
    440   set_prev_chunk(this);
     441   // Clearing all flags marks this invalid page as not being in new-space;
     442   // all real new-space pages will be in new-space.
    443   SetFlags(0, ~0);
    444 }
    445 
    446 
    447 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
    448                                      Address base,
    449                                      size_t size,
    450                                      Address area_start,
    451                                      Address area_end,
    452                                      Executability executable,
    453                                      Space* owner) {
    454   MemoryChunk* chunk = FromAddress(base);
    455 
    456   ASSERT(base == chunk->address());
    457 
    458   chunk->heap_ = heap;
    459   chunk->size_ = size;
    460   chunk->area_start_ = area_start;
    461   chunk->area_end_ = area_end;
    462   chunk->flags_ = 0;
    463   chunk->set_owner(owner);
    464   chunk->InitializeReservedMemory();
    465   chunk->slots_buffer_ = NULL;
    466   chunk->skip_list_ = NULL;
    467   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
    468   chunk->progress_bar_ = 0;
    469   chunk->high_water_mark_ = static_cast<int>(area_start - base);
    470   chunk->parallel_sweeping_ = 0;
    471   chunk->available_in_small_free_list_ = 0;
    472   chunk->available_in_medium_free_list_ = 0;
    473   chunk->available_in_large_free_list_ = 0;
    474   chunk->available_in_huge_free_list_ = 0;
    475   chunk->non_available_small_blocks_ = 0;
    476   chunk->ResetLiveBytes();
    477   Bitmap::Clear(chunk);
    478   chunk->initialize_scan_on_scavenge(false);
    479   chunk->SetFlag(WAS_SWEPT_PRECISELY);
    480 
    481   ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
    482   ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
    483 
    484   if (executable == EXECUTABLE) {
    485     chunk->SetFlag(IS_EXECUTABLE);
    486   }
    487 
    488   if (owner == heap->old_data_space()) {
    489     chunk->SetFlag(CONTAINS_ONLY_DATA);
    490   }
    491 
    492   return chunk;
    493 }
    494 
    495 
    496 // Commit MemoryChunk area to the requested size.
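        // The committed area can both grow and shrink: growing commits the OS pages
        // between the old and the new commit boundary, shrinking uncommits them; in
        // both cases area_end_ ends up at area_start_ + requested.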
    497 bool MemoryChunk::CommitArea(size_t requested) {
    498   size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
    499                       MemoryAllocator::CodePageGuardSize() : 0;
    500   size_t header_size = area_start() - address() - guard_size;
    501   size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
    502   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
    503                                   OS::CommitPageSize());
    504 
    505   if (commit_size > committed_size) {
     506     // Commit size should be less than or equal to the reserved size.
    507     ASSERT(commit_size <= size() - 2 * guard_size);
    508     // Append the committed area.
    509     Address start = address() + committed_size + guard_size;
    510     size_t length = commit_size - committed_size;
    511     if (reservation_.IsReserved()) {
    512       if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
    513         return false;
    514       }
    515     } else {
    516       CodeRange* code_range = heap_->isolate()->code_range();
    517       ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
    518       if (!code_range->CommitRawMemory(start, length)) return false;
    519     }
    520 
    521     if (Heap::ShouldZapGarbage()) {
    522       heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    523     }
    524   } else if (commit_size < committed_size) {
    525     ASSERT(commit_size > 0);
    526     // Shrink the committed area.
    527     size_t length = committed_size - commit_size;
    528     Address start = address() + committed_size + guard_size - length;
    529     if (reservation_.IsReserved()) {
    530       if (!reservation_.Uncommit(start, length)) return false;
    531     } else {
    532       CodeRange* code_range = heap_->isolate()->code_range();
    533       ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
    534       if (!code_range->UncommitRawMemory(start, length)) return false;
    535     }
    536   }
    537 
    538   area_end_ = area_start_ + requested;
    539   return true;
    540 }
    541 
    542 
    543 void MemoryChunk::InsertAfter(MemoryChunk* other) {
    544   next_chunk_ = other->next_chunk_;
    545   prev_chunk_ = other;
    546 
    547   // This memory barrier is needed since concurrent sweeper threads may iterate
    548   // over the list of pages while a new page is inserted.
    549   // TODO(hpayer): find a cleaner way to guarantee that the page list can be
     550   // expanded concurrently.
    551   MemoryBarrier();
    552 
    553   // The following two write operations can take effect in arbitrary order
     554   // since pages are always iterated by the sweeper threads in LIFO order, i.e.,
     555   // the inserted page becomes visible to the sweeper threads only after
    556   // other->next_chunk_ = this;
    557   other->next_chunk_->prev_chunk_ = this;
    558   other->next_chunk_ = this;
    559 }
    560 
    561 
    562 void MemoryChunk::Unlink() {
    563   if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
    564     heap_->decrement_scan_on_scavenge_pages();
    565     ClearFlag(SCAN_ON_SCAVENGE);
    566   }
    567   next_chunk_->prev_chunk_ = prev_chunk_;
    568   prev_chunk_->next_chunk_ = next_chunk_;
    569   prev_chunk_ = NULL;
    570   next_chunk_ = NULL;
    571 }
    572 
    573 
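        // Reserves a chunk big enough for |reserve_area_size| usable bytes (plus the
        // chunk header and, for executable chunks, guard pages), but commits only
        // enough of it for |commit_area_size| up front; more of the reservation can
        // be committed later (see MemoryChunk::CommitArea() above).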
    574 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
    575                                             intptr_t commit_area_size,
    576                                             Executability executable,
    577                                             Space* owner) {
    578   ASSERT(commit_area_size <= reserve_area_size);
    579 
    580   size_t chunk_size;
    581   Heap* heap = isolate_->heap();
    582   Address base = NULL;
    583   VirtualMemory reservation;
    584   Address area_start = NULL;
    585   Address area_end = NULL;
    586 
    587   //
    588   // MemoryChunk layout:
    589   //
    590   //             Executable
    591   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
    592   // |           Header           |
    593   // +----------------------------+<- base + CodePageGuardStartOffset
    594   // |           Guard            |
    595   // +----------------------------+<- area_start_
    596   // |           Area             |
    597   // +----------------------------+<- area_end_ (area_start + commit_area_size)
    598   // |   Committed but not used   |
    599   // +----------------------------+<- aligned at OS page boundary
    600   // | Reserved but not committed |
    601   // +----------------------------+<- aligned at OS page boundary
    602   // |           Guard            |
    603   // +----------------------------+<- base + chunk_size
    604   //
    605   //           Non-executable
    606   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
    607   // |          Header            |
    608   // +----------------------------+<- area_start_ (base + kObjectStartOffset)
    609   // |           Area             |
    610   // +----------------------------+<- area_end_ (area_start + commit_area_size)
    611   // |  Committed but not used    |
    612   // +----------------------------+<- aligned at OS page boundary
    613   // | Reserved but not committed |
    614   // +----------------------------+<- base + chunk_size
    615   //
    616 
    617   if (executable == EXECUTABLE) {
    618     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
    619                          OS::CommitPageSize()) + CodePageGuardSize();
    620 
    621     // Check executable memory limit.
    622     if (size_executable_ + chunk_size > capacity_executable_) {
    623       LOG(isolate_,
    624           StringEvent("MemoryAllocator::AllocateRawMemory",
    625                       "V8 Executable Allocation capacity exceeded"));
    626       return NULL;
    627     }
    628 
    629     // Size of header (not executable) plus area (executable).
    630     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
    631                                  OS::CommitPageSize());
    632     // Allocate executable memory either from code range or from the
    633     // OS.
    634     if (isolate_->code_range()->exists()) {
    635       base = isolate_->code_range()->AllocateRawMemory(chunk_size,
    636                                                        commit_size,
    637                                                        &chunk_size);
    638       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
    639                        MemoryChunk::kAlignment));
    640       if (base == NULL) return NULL;
    641       size_ += chunk_size;
    642       // Update executable memory size.
    643       size_executable_ += chunk_size;
    644     } else {
    645       base = AllocateAlignedMemory(chunk_size,
    646                                    commit_size,
    647                                    MemoryChunk::kAlignment,
    648                                    executable,
    649                                    &reservation);
    650       if (base == NULL) return NULL;
    651       // Update executable memory size.
    652       size_executable_ += reservation.size();
    653     }
    654 
    655     if (Heap::ShouldZapGarbage()) {
    656       ZapBlock(base, CodePageGuardStartOffset());
    657       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    658     }
    659 
    660     area_start = base + CodePageAreaStartOffset();
    661     area_end = area_start + commit_area_size;
    662   } else {
    663     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
    664                          OS::CommitPageSize());
    665     size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
    666                                  commit_area_size, OS::CommitPageSize());
    667     base = AllocateAlignedMemory(chunk_size,
    668                                  commit_size,
    669                                  MemoryChunk::kAlignment,
    670                                  executable,
    671                                  &reservation);
    672 
    673     if (base == NULL) return NULL;
    674 
    675     if (Heap::ShouldZapGarbage()) {
    676       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    677     }
    678 
    679     area_start = base + Page::kObjectStartOffset;
    680     area_end = area_start + commit_area_size;
    681   }
    682 
    683   // Use chunk_size for statistics and callbacks because we assume that they
    684   // treat reserved but not-yet committed memory regions of chunks as allocated.
    685   isolate_->counters()->memory_allocated()->
    686       Increment(static_cast<int>(chunk_size));
    687 
    688   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
    689   if (owner != NULL) {
    690     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    691     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
    692   }
    693 
    694   MemoryChunk* result = MemoryChunk::Initialize(heap,
    695                                                 base,
    696                                                 chunk_size,
    697                                                 area_start,
    698                                                 area_end,
    699                                                 executable,
    700                                                 owner);
    701   result->set_reserved_memory(&reservation);
    702   return result;
    703 }
    704 
    705 
    706 void Page::ResetFreeListStatistics() {
    707   non_available_small_blocks_ = 0;
    708   available_in_small_free_list_ = 0;
    709   available_in_medium_free_list_ = 0;
    710   available_in_large_free_list_ = 0;
    711   available_in_huge_free_list_ = 0;
    712 }
    713 
    714 
    715 Page* MemoryAllocator::AllocatePage(intptr_t size,
    716                                     PagedSpace* owner,
    717                                     Executability executable) {
    718   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
    719 
    720   if (chunk == NULL) return NULL;
    721 
    722   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
    723 }
    724 
    725 
    726 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
    727                                               Space* owner,
    728                                               Executability executable) {
    729   MemoryChunk* chunk = AllocateChunk(object_size,
    730                                      object_size,
    731                                      executable,
    732                                      owner);
    733   if (chunk == NULL) return NULL;
    734   return LargePage::Initialize(isolate_->heap(), chunk);
    735 }
    736 
    737 
    738 void MemoryAllocator::Free(MemoryChunk* chunk) {
    739   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
    740   if (chunk->owner() != NULL) {
    741     ObjectSpace space =
    742         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    743     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
    744   }
    745 
    746   isolate_->heap()->RememberUnmappedPage(
    747       reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
    748 
    749   delete chunk->slots_buffer();
    750   delete chunk->skip_list();
    751 
    752   VirtualMemory* reservation = chunk->reserved_memory();
    753   if (reservation->IsReserved()) {
    754     FreeMemory(reservation, chunk->executable());
    755   } else {
    756     FreeMemory(chunk->address(),
    757                chunk->size(),
    758                chunk->executable());
    759   }
    760 }
    761 
    762 
    763 bool MemoryAllocator::CommitBlock(Address start,
    764                                   size_t size,
    765                                   Executability executable) {
    766   if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
    767 
    768   if (Heap::ShouldZapGarbage()) {
    769     ZapBlock(start, size);
    770   }
    771 
    772   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
    773   return true;
    774 }
    775 
    776 
    777 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
    778   if (!VirtualMemory::UncommitRegion(start, size)) return false;
    779   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
    780   return true;
    781 }
    782 
    783 
    784 void MemoryAllocator::ZapBlock(Address start, size_t size) {
    785   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    786     Memory::Address_at(start + s) = kZapValue;
    787   }
    788 }
    789 
    790 
    791 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
    792                                                 AllocationAction action,
    793                                                 size_t size) {
    794   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    795     MemoryAllocationCallbackRegistration registration =
    796       memory_allocation_callbacks_[i];
    797     if ((registration.space & space) == space &&
    798         (registration.action & action) == action)
    799       registration.callback(space, action, static_cast<int>(size));
    800   }
    801 }
    802 
    803 
    804 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    805     MemoryAllocationCallback callback) {
    806   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    807     if (memory_allocation_callbacks_[i].callback == callback) return true;
    808   }
    809   return false;
    810 }
    811 
    812 
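        // Registration sketch (MyCallback is a hypothetical client function;
        // kObjectSpaceAll and kAllocationActionAll are assumed to be the catch-all
        // masks from the public API):
        //
        //   void MyCallback(ObjectSpace space, AllocationAction action, int size);
        //   ...
        //   memory_allocator->AddMemoryAllocationCallback(
        //       &MyCallback, kObjectSpaceAll, kAllocationActionAll);
        //
        // PerformAllocationCallback() above then invokes the callback for every
        // chunk allocation or free whose space and action match the registered masks.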
    813 void MemoryAllocator::AddMemoryAllocationCallback(
    814     MemoryAllocationCallback callback,
    815     ObjectSpace space,
    816     AllocationAction action) {
    817   ASSERT(callback != NULL);
    818   MemoryAllocationCallbackRegistration registration(callback, space, action);
    819   ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
    820   return memory_allocation_callbacks_.Add(registration);
    821 }
    822 
    823 
    824 void MemoryAllocator::RemoveMemoryAllocationCallback(
    825      MemoryAllocationCallback callback) {
    826   ASSERT(callback != NULL);
    827   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    828     if (memory_allocation_callbacks_[i].callback == callback) {
    829       memory_allocation_callbacks_.Remove(i);
    830       return;
    831     }
    832   }
    833   UNREACHABLE();
    834 }
    835 
    836 
    837 #ifdef DEBUG
    838 void MemoryAllocator::ReportStatistics() {
    839   float pct = static_cast<float>(capacity_ - size_) / capacity_;
    840   PrintF("  capacity: %" V8_PTR_PREFIX "d"
    841              ", used: %" V8_PTR_PREFIX "d"
    842              ", available: %%%d\n\n",
    843          capacity_, size_, static_cast<int>(pct*100));
    844 }
    845 #endif
    846 
    847 
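        // Code page layout implied by the four helpers below (each guard is one OS
        // commit page):
        //
        //   [0, CodePageGuardStartOffset())                           header
        //   [CodePageGuardStartOffset(), CodePageAreaStartOffset())   guard page
        //   [CodePageAreaStartOffset(), CodePageAreaEndOffset())      executable area
        //   [CodePageAreaEndOffset(), Page::kPageSize)                trailing guard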
    848 int MemoryAllocator::CodePageGuardStartOffset() {
    849   // We are guarding code pages: the first OS page after the header
    850   // will be protected as non-writable.
    851   return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
    852 }
    853 
    854 
    855 int MemoryAllocator::CodePageGuardSize() {
    856   return static_cast<int>(OS::CommitPageSize());
    857 }
    858 
    859 
    860 int MemoryAllocator::CodePageAreaStartOffset() {
    861   // We are guarding code pages: the first OS page after the header
    862   // will be protected as non-writable.
    863   return CodePageGuardStartOffset() + CodePageGuardSize();
    864 }
    865 
    866 
    867 int MemoryAllocator::CodePageAreaEndOffset() {
    868   // We are guarding code pages: the last OS page will be protected as
    869   // non-writable.
    870   return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
    871 }
    872 
    873 
    874 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
    875                                              Address start,
    876                                              size_t commit_size,
    877                                              size_t reserved_size) {
    878   // Commit page header (not executable).
    879   if (!vm->Commit(start,
    880                   CodePageGuardStartOffset(),
    881                   false)) {
    882     return false;
    883   }
    884 
    885   // Create guard page after the header.
    886   if (!vm->Guard(start + CodePageGuardStartOffset())) {
    887     return false;
    888   }
    889 
    890   // Commit page body (executable).
    891   if (!vm->Commit(start + CodePageAreaStartOffset(),
    892                   commit_size - CodePageGuardStartOffset(),
    893                   true)) {
    894     return false;
    895   }
    896 
    897   // Create guard page before the end.
    898   if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
    899     return false;
    900   }
    901 
    902   return true;
    903 }
    904 
    905 
    906 // -----------------------------------------------------------------------------
    907 // MemoryChunk implementation
    908 
    909 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
    910   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
    911   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    912     static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
    913   }
    914   chunk->IncrementLiveBytes(by);
    915 }
    916 
    917 
    918 // -----------------------------------------------------------------------------
    919 // PagedSpace implementation
    920 
    921 PagedSpace::PagedSpace(Heap* heap,
    922                        intptr_t max_capacity,
    923                        AllocationSpace id,
    924                        Executability executable)
    925     : Space(heap, id, executable),
    926       free_list_(this),
    927       was_swept_conservatively_(false),
    928       first_unswept_page_(Page::FromAddress(NULL)),
    929       unswept_free_bytes_(0) {
    930   if (id == CODE_SPACE) {
    931     area_size_ = heap->isolate()->memory_allocator()->
    932         CodePageAreaSize();
    933   } else {
    934     area_size_ = Page::kPageSize - Page::kObjectStartOffset;
    935   }
    936   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
    937       * AreaSize();
    938   accounting_stats_.Clear();
    939 
    940   allocation_info_.top = NULL;
    941   allocation_info_.limit = NULL;
    942 
    943   anchor_.InitializeAsAnchor(this);
    944 }
    945 
    946 
    947 bool PagedSpace::SetUp() {
    948   return true;
    949 }
    950 
    951 
    952 bool PagedSpace::HasBeenSetUp() {
    953   return true;
    954 }
    955 
    956 
    957 void PagedSpace::TearDown() {
    958   PageIterator iterator(this);
    959   while (iterator.has_next()) {
    960     heap()->isolate()->memory_allocator()->Free(iterator.next());
    961   }
    962   anchor_.set_next_page(&anchor_);
    963   anchor_.set_prev_page(&anchor_);
    964   accounting_stats_.Clear();
    965 }
    966 
    967 
    968 size_t PagedSpace::CommittedPhysicalMemory() {
    969   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
    970   MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
    971   size_t size = 0;
    972   PageIterator it(this);
    973   while (it.has_next()) {
    974     size += it.next()->CommittedPhysicalMemory();
    975   }
    976   return size;
    977 }
    978 
    979 
    980 MaybeObject* PagedSpace::FindObject(Address addr) {
    981   // Note: this function can only be called on precisely swept spaces.
    982   ASSERT(!heap()->mark_compact_collector()->in_use());
    983 
    984   if (!Contains(addr)) return Failure::Exception();
    985 
    986   Page* p = Page::FromAddress(addr);
    987   HeapObjectIterator it(p, NULL);
    988   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    989     Address cur = obj->address();
    990     Address next = cur + obj->Size();
    991     if ((cur <= addr) && (addr < next)) return obj;
    992   }
    993 
    994   UNREACHABLE();
    995   return Failure::Exception();
    996 }
    997 
    998 
    999 bool PagedSpace::CanExpand() {
   1000   ASSERT(max_capacity_ % AreaSize() == 0);
   1001 
   1002   if (Capacity() == max_capacity_) return false;
   1003 
   1004   ASSERT(Capacity() < max_capacity_);
   1005 
   1006   // Are we going to exceed capacity for this space?
   1007   if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
   1008 
   1009   return true;
   1010 }
   1011 
   1012 
   1013 bool PagedSpace::Expand() {
   1014   if (!CanExpand()) return false;
   1015 
   1016   intptr_t size = AreaSize();
   1017 
   1018   if (anchor_.next_page() == &anchor_) {
   1019     size = SizeOfFirstPage();
   1020   }
   1021 
   1022   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
   1023       size, this, executable());
   1024   if (p == NULL) return false;
   1025 
   1026   ASSERT(Capacity() <= max_capacity_);
   1027 
   1028   p->InsertAfter(anchor_.prev_page());
   1029 
   1030   return true;
   1031 }
   1032 
   1033 
   1034 intptr_t PagedSpace::SizeOfFirstPage() {
   1035   int size = 0;
   1036   switch (identity()) {
   1037     case OLD_POINTER_SPACE:
   1038       size = 64 * kPointerSize * KB;
   1039       break;
   1040     case OLD_DATA_SPACE:
   1041       size = 192 * KB;
   1042       break;
   1043     case MAP_SPACE:
   1044       size = 16 * kPointerSize * KB;
   1045       break;
   1046     case CELL_SPACE:
   1047       size = 16 * kPointerSize * KB;
   1048       break;
   1049     case PROPERTY_CELL_SPACE:
   1050       size = 8 * kPointerSize * KB;
   1051       break;
   1052     case CODE_SPACE:
   1053       if (heap()->isolate()->code_range()->exists()) {
    1054         // When a code range exists, code pages are allocated in a special way
   1055         // (from the reserved code range). That part of the code is not yet
   1056         // upgraded to handle small pages.
   1057         size = AreaSize();
   1058       } else {
   1059         size = 384 * KB;
   1060       }
   1061       break;
   1062     default:
   1063       UNREACHABLE();
   1064   }
   1065   return Min(size, AreaSize());
   1066 }
   1067 
   1068 
   1069 int PagedSpace::CountTotalPages() {
   1070   PageIterator it(this);
   1071   int count = 0;
   1072   while (it.has_next()) {
   1073     it.next();
   1074     count++;
   1075   }
   1076   return count;
   1077 }
   1078 
   1079 
   1080 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
   1081   sizes->huge_size_ = page->available_in_huge_free_list();
   1082   sizes->small_size_ = page->available_in_small_free_list();
   1083   sizes->medium_size_ = page->available_in_medium_free_list();
   1084   sizes->large_size_ = page->available_in_large_free_list();
   1085 }
   1086 
   1087 
   1088 void PagedSpace::ResetFreeListStatistics() {
   1089   PageIterator page_iterator(this);
   1090   while (page_iterator.has_next()) {
   1091     Page* page = page_iterator.next();
   1092     page->ResetFreeListStatistics();
   1093   }
   1094 }
   1095 
   1096 
   1097 void PagedSpace::ReleasePage(Page* page, bool unlink) {
   1098   ASSERT(page->LiveBytes() == 0);
   1099   ASSERT(AreaSize() == page->area_size());
   1100 
   1101   // Adjust list of unswept pages if the page is the head of the list.
   1102   if (first_unswept_page_ == page) {
   1103     first_unswept_page_ = page->next_page();
   1104     if (first_unswept_page_ == anchor()) {
   1105       first_unswept_page_ = Page::FromAddress(NULL);
   1106     }
   1107   }
   1108 
   1109   if (page->WasSwept()) {
   1110     intptr_t size = free_list_.EvictFreeListItems(page);
   1111     accounting_stats_.AllocateBytes(size);
   1112     ASSERT_EQ(AreaSize(), static_cast<int>(size));
   1113   } else {
   1114     DecreaseUnsweptFreeBytes(page);
   1115   }
   1116 
   1117   if (Page::FromAllocationTop(allocation_info_.top) == page) {
   1118     allocation_info_.top = allocation_info_.limit = NULL;
   1119   }
   1120 
   1121   if (unlink) {
   1122     page->Unlink();
   1123   }
   1124   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
   1125     heap()->isolate()->memory_allocator()->Free(page);
   1126   } else {
   1127     heap()->QueueMemoryChunkForFree(page);
   1128   }
   1129 
   1130   ASSERT(Capacity() > 0);
   1131   accounting_stats_.ShrinkSpace(AreaSize());
   1132 }
   1133 
   1134 
   1135 #ifdef DEBUG
   1136 void PagedSpace::Print() { }
   1137 #endif
   1138 
   1139 #ifdef VERIFY_HEAP
   1140 void PagedSpace::Verify(ObjectVisitor* visitor) {
   1141   // We can only iterate over the pages if they were swept precisely.
   1142   if (was_swept_conservatively_) return;
   1143 
   1144   bool allocation_pointer_found_in_space =
   1145       (allocation_info_.top == allocation_info_.limit);
   1146   PageIterator page_iterator(this);
   1147   while (page_iterator.has_next()) {
   1148     Page* page = page_iterator.next();
   1149     CHECK(page->owner() == this);
   1150     if (page == Page::FromAllocationTop(allocation_info_.top)) {
   1151       allocation_pointer_found_in_space = true;
   1152     }
   1153     CHECK(page->WasSweptPrecisely());
   1154     HeapObjectIterator it(page, NULL);
   1155     Address end_of_previous_object = page->area_start();
   1156     Address top = page->area_end();
   1157     int black_size = 0;
   1158     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
   1159       CHECK(end_of_previous_object <= object->address());
   1160 
   1161       // The first word should be a map, and we expect all map pointers to
   1162       // be in map space.
   1163       Map* map = object->map();
   1164       CHECK(map->IsMap());
   1165       CHECK(heap()->map_space()->Contains(map));
   1166 
   1167       // Perform space-specific object verification.
   1168       VerifyObject(object);
   1169 
   1170       // The object itself should look OK.
   1171       object->Verify();
   1172 
   1173       // All the interior pointers should be contained in the heap.
   1174       int size = object->Size();
   1175       object->IterateBody(map->instance_type(), size, visitor);
   1176       if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
   1177         black_size += size;
   1178       }
   1179 
   1180       CHECK(object->address() + size <= top);
   1181       end_of_previous_object = object->address() + size;
   1182     }
   1183     CHECK_LE(black_size, page->LiveBytes());
   1184   }
   1185   CHECK(allocation_pointer_found_in_space);
   1186 }
   1187 #endif  // VERIFY_HEAP
   1188 
   1189 // -----------------------------------------------------------------------------
   1190 // NewSpace implementation
   1191 
   1192 
   1193 bool NewSpace::SetUp(int reserved_semispace_capacity,
   1194                      int maximum_semispace_capacity) {
   1195   // Set up new space from a freshly reserved memory block of size
   1196   // 2 * reserved_semispace_capacity, divided into two semi-spaces.
   1197   // To support fast containment testing in the new space, the size of
   1198   // this chunk must be a power of two and it must be aligned to its size.
   1199   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
   1200 
   1201   size_t size = 2 * reserved_semispace_capacity;
   1202   Address base =
   1203       heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
   1204           size, size, &reservation_);
   1205   if (base == NULL) return false;
   1206 
   1207   chunk_base_ = base;
   1208   chunk_size_ = static_cast<uintptr_t>(size);
   1209   LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
   1210 
   1211   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   1212   ASSERT(IsPowerOf2(maximum_semispace_capacity));
   1213 
   1214   // Allocate and set up the histogram arrays if necessary.
   1215   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
   1216   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
   1217 
   1218 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
   1219                        promoted_histogram_[name].set_name(#name);
   1220   INSTANCE_TYPE_LIST(SET_NAME)
   1221 #undef SET_NAME
   1222 
   1223   ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
   1224   ASSERT(static_cast<intptr_t>(chunk_size_) >=
   1225          2 * heap()->ReservedSemiSpaceSize());
   1226   ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
   1227 
   1228   to_space_.SetUp(chunk_base_,
   1229                   initial_semispace_capacity,
   1230                   maximum_semispace_capacity);
   1231   from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
   1232                     initial_semispace_capacity,
   1233                     maximum_semispace_capacity);
   1234   if (!to_space_.Commit()) {
   1235     return false;
   1236   }
   1237   ASSERT(!from_space_.is_committed());  // No need to use memory yet.
   1238 
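          // The masks set up below are what make containment tests cheap: the chunk
          // is 2 * reserved_semispace_capacity bytes, a power of two and aligned to
          // its own size, so (up to casts) an address lies in new space iff
          //   (addr & address_mask_) == start_
          // For illustration, a 16 MB chunk gives a mask that clears the low 24 bits.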
   1239   start_ = chunk_base_;
   1240   address_mask_ = ~(2 * reserved_semispace_capacity - 1);
   1241   object_mask_ = address_mask_ | kHeapObjectTagMask;
   1242   object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
   1243 
   1244   ResetAllocationInfo();
   1245 
   1246   return true;
   1247 }
   1248 
   1249 
   1250 void NewSpace::TearDown() {
   1251   if (allocated_histogram_) {
   1252     DeleteArray(allocated_histogram_);
   1253     allocated_histogram_ = NULL;
   1254   }
   1255   if (promoted_histogram_) {
   1256     DeleteArray(promoted_histogram_);
   1257     promoted_histogram_ = NULL;
   1258   }
   1259 
   1260   start_ = NULL;
   1261   allocation_info_.top = NULL;
   1262   allocation_info_.limit = NULL;
   1263 
   1264   to_space_.TearDown();
   1265   from_space_.TearDown();
   1266 
   1267   LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
   1268 
   1269   ASSERT(reservation_.IsReserved());
   1270   heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
   1271                                                     NOT_EXECUTABLE);
   1272   chunk_base_ = NULL;
   1273   chunk_size_ = 0;
   1274 }
   1275 
   1276 
   1277 void NewSpace::Flip() {
   1278   SemiSpace::Swap(&from_space_, &to_space_);
   1279 }
   1280 
   1281 
   1282 void NewSpace::Grow() {
   1283   // Double the semispace size but only up to maximum capacity.
   1284   ASSERT(Capacity() < MaximumCapacity());
   1285   int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
   1286   if (to_space_.GrowTo(new_capacity)) {
   1287     // Only grow from-space if we managed to grow to-space.
   1288     if (!from_space_.GrowTo(new_capacity)) {
   1289       // If we managed to grow to-space but couldn't grow from-space,
   1290       // attempt to shrink to-space.
   1291       if (!to_space_.ShrinkTo(from_space_.Capacity())) {
   1292         // We are in an inconsistent state because we could not
   1293         // commit/uncommit memory from new space.
   1294         V8::FatalProcessOutOfMemory("Failed to grow new space.");
   1295       }
   1296     }
   1297   }
   1298   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   1299 }
   1300 
   1301 
   1302 void NewSpace::Shrink() {
   1303   int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
   1304   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
   1305   if (rounded_new_capacity < Capacity() &&
   1306       to_space_.ShrinkTo(rounded_new_capacity))  {
   1307     // Only shrink from-space if we managed to shrink to-space.
   1308     from_space_.Reset();
   1309     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
   1310       // If we managed to shrink to-space but couldn't shrink from-space,
   1311       // attempt to grow to-space again.
   1312       if (!to_space_.GrowTo(from_space_.Capacity())) {
   1313         // We are in an inconsistent state because we could not
   1314         // commit/uncommit memory from new space.
   1315         V8::FatalProcessOutOfMemory("Failed to shrink new space.");
   1316       }
   1317     }
   1318   }
   1319   allocation_info_.limit = to_space_.page_high();
   1320   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   1321 }
   1322 
   1323 
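        // Besides resetting top and limit to the bounds of the current to-space
        // page, this pulls the limit down by inline_allocation_limit_step() while
        // incremental marking is running, so that (for example, with a 1 KB step)
        // SlowAllocateRaw() below gets a chance to run a marking step roughly every
        // 1 KB of new-space allocation.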
   1324 void NewSpace::UpdateAllocationInfo() {
   1325   MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
   1326   allocation_info_.top = to_space_.page_low();
   1327   allocation_info_.limit = to_space_.page_high();
   1328 
   1329   // Lower limit during incremental marking.
   1330   if (heap()->incremental_marking()->IsMarking() &&
   1331       inline_allocation_limit_step() != 0) {
   1332     Address new_limit =
   1333         allocation_info_.top + inline_allocation_limit_step();
   1334     allocation_info_.limit = Min(new_limit, allocation_info_.limit);
   1335   }
   1336   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   1337 }
   1338 
   1339 
   1340 void NewSpace::ResetAllocationInfo() {
   1341   to_space_.Reset();
   1342   UpdateAllocationInfo();
   1343   pages_used_ = 0;
   1344   // Clear all mark-bits in the to-space.
   1345   NewSpacePageIterator it(&to_space_);
   1346   while (it.has_next()) {
   1347     Bitmap::Clear(it.next());
   1348   }
   1349 }
   1350 
   1351 
   1352 bool NewSpace::AddFreshPage() {
   1353   Address top = allocation_info_.top;
   1354   if (NewSpacePage::IsAtStart(top)) {
   1355     // The current page is already empty. Don't try to make another.
   1356 
   1357     // We should only get here if someone asks to allocate more
   1358     // than what can be stored in a single page.
   1359     // TODO(gc): Change the limit on new-space allocation to prevent this
   1360     // from happening (all such allocations should go directly to LOSpace).
   1361     return false;
   1362   }
   1363   if (!to_space_.AdvancePage()) {
   1364     // Failed to get a new page in to-space.
   1365     return false;
   1366   }
   1367 
   1368   // Clear remainder of current page.
   1369   Address limit = NewSpacePage::FromLimit(top)->area_end();
   1370   if (heap()->gc_state() == Heap::SCAVENGE) {
   1371     heap()->promotion_queue()->SetNewLimit(limit);
   1372     heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
   1373   }
   1374 
   1375   int remaining_in_page = static_cast<int>(limit - top);
   1376   heap()->CreateFillerObjectAt(top, remaining_in_page);
   1377   pages_used_++;
   1378   UpdateAllocationInfo();
   1379 
   1380   return true;
   1381 }
   1382 
   1383 
   1384 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
   1385   Address old_top = allocation_info_.top;
   1386   Address new_top = old_top + size_in_bytes;
   1387   Address high = to_space_.page_high();
   1388   if (allocation_info_.limit < high) {
   1389     // Incremental marking has lowered the limit to get a
   1390     // chance to do a step.
   1391     allocation_info_.limit = Min(
   1392         allocation_info_.limit + inline_allocation_limit_step_,
   1393         high);
   1394     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
   1395     heap()->incremental_marking()->Step(
   1396         bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
   1397     top_on_previous_step_ = new_top;
   1398     return AllocateRaw(size_in_bytes);
   1399   } else if (AddFreshPage()) {
   1400     // Switched to new page. Try allocating again.
   1401     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
   1402     heap()->incremental_marking()->Step(
   1403         bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
   1404     top_on_previous_step_ = to_space_.page_low();
   1405     return AllocateRaw(size_in_bytes);
   1406   } else {
   1407     return Failure::RetryAfterGC();
   1408   }
   1409 }
   1410 
   1411 
   1412 #ifdef VERIFY_HEAP
   1413 // We do not use the SemiSpaceIterator because verification doesn't assume
   1414 // that it works (it depends on the invariants we are checking).
   1415 void NewSpace::Verify() {
   1416   // The allocation pointer should be in the space or at the very end.
   1417   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   1418 
   1419   // There should be objects packed in from the low address up to the
   1420   // allocation pointer.
   1421   Address current = to_space_.first_page()->area_start();
   1422   CHECK_EQ(current, to_space_.space_start());
   1423 
   1424   while (current != top()) {
   1425     if (!NewSpacePage::IsAtEnd(current)) {
   1426       // The allocation pointer should not be in the middle of an object.
   1427       CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
   1428             current < top());
   1429 
   1430       HeapObject* object = HeapObject::FromAddress(current);
   1431 
   1432       // The first word should be a map, and we expect all map pointers to
   1433       // be in map space.
   1434       Map* map = object->map();
   1435       CHECK(map->IsMap());
   1436       CHECK(heap()->map_space()->Contains(map));
   1437 
   1438       // The object should not be code or a map.
   1439       CHECK(!object->IsMap());
   1440       CHECK(!object->IsCode());
   1441 
   1442       // The object itself should look OK.
   1443       object->Verify();
   1444 
   1445       // All the interior pointers should be contained in the heap.
   1446       VerifyPointersVisitor visitor;
   1447       int size = object->Size();
   1448       object->IterateBody(map->instance_type(), size, &visitor);
   1449 
   1450       current += size;
   1451     } else {
   1452       // At end of page, switch to next page.
   1453       NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
   1454       // Next page should be valid.
   1455       CHECK(!page->is_anchor());
   1456       current = page->area_start();
   1457     }
   1458   }
   1459 
   1460   // Check semi-spaces.
   1461   CHECK_EQ(from_space_.id(), kFromSpace);
   1462   CHECK_EQ(to_space_.id(), kToSpace);
   1463   from_space_.Verify();
   1464   to_space_.Verify();
   1465 }
   1466 #endif
   1467 
   1468 // -----------------------------------------------------------------------------
   1469 // SemiSpace implementation
   1470 
   1471 void SemiSpace::SetUp(Address start,
   1472                       int initial_capacity,
   1473                       int maximum_capacity) {
   1474   // Creates a space in the young generation. The constructor does not
   1475   // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
   1476   // memory of size 'capacity' when set up, and does not grow or shrink
   1477   // otherwise.  In the mark-compact collector, the memory region of the from
   1478   // space is used as the marking stack. It requires contiguous memory
   1479   // addresses.
   1480   ASSERT(maximum_capacity >= Page::kPageSize);
   1481   initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
   1482   capacity_ = initial_capacity;
   1483   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
   1484   committed_ = false;
   1485   start_ = start;
   1486   address_mask_ = ~(maximum_capacity - 1);
   1487   object_mask_ = address_mask_ | kHeapObjectTagMask;
   1488   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
   1489   age_mark_ = start_;
   1490 }
   1491 
   1492 
   1493 void SemiSpace::TearDown() {
   1494   start_ = NULL;
   1495   capacity_ = 0;
   1496 }
   1497 
   1498 
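        // Commits capacity_ bytes at the high end of the reserved region (the
        // committed part of a semispace always ends at start_ + maximum_capacity_)
        // and links a NewSpacePage for each committed page into the anchor's
        // circular page list.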
   1499 bool SemiSpace::Commit() {
   1500   ASSERT(!is_committed());
   1501   int pages = capacity_ / Page::kPageSize;
   1502   Address end = start_ + maximum_capacity_;
   1503   Address start = end - pages * Page::kPageSize;
   1504   if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
   1505                                                           capacity_,
   1506                                                           executable())) {
   1507     return false;
   1508   }
   1509 
   1510   NewSpacePage* page = anchor();
   1511   for (int i = 1; i <= pages; i++) {
   1512     NewSpacePage* new_page =
   1513       NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
   1514     new_page->InsertAfter(page);
   1515     page = new_page;
   1516   }
   1517 
   1518   committed_ = true;
   1519   Reset();
   1520   return true;
   1521 }
   1522 
   1523 
   1524 bool SemiSpace::Uncommit() {
   1525   ASSERT(is_committed());
   1526   Address start = start_ + maximum_capacity_ - capacity_;
   1527   if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
   1528     return false;
   1529   }
   1530   anchor()->set_next_page(anchor());
   1531   anchor()->set_prev_page(anchor());
   1532 
   1533   committed_ = false;
   1534   return true;
   1535 }
   1536 
   1537 
   1538 size_t SemiSpace::CommittedPhysicalMemory() {
   1539   if (!is_committed()) return 0;
   1540   size_t size = 0;
   1541   NewSpacePageIterator it(this);
   1542   while (it.has_next()) {
   1543     size += it.next()->CommittedPhysicalMemory();
   1544   }
   1545   return size;
   1546 }
   1547 
   1548 
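        // Grows the committed region downwards by committing the delta between the
        // old and the new capacity, then creates and links the additional pages.
        // Each new page copies the flip-relevant flags from the page added before
        // it.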
   1549 bool SemiSpace::GrowTo(int new_capacity) {
   1550   if (!is_committed()) {
   1551     if (!Commit()) return false;
   1552   }
   1553   ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   1554   ASSERT(new_capacity <= maximum_capacity_);
   1555   ASSERT(new_capacity > capacity_);
   1556   int pages_before = capacity_ / Page::kPageSize;
   1557   int pages_after = new_capacity / Page::kPageSize;
   1558 
   1559   Address end = start_ + maximum_capacity_;
   1560   Address start = end - new_capacity;
   1561   size_t delta = new_capacity - capacity_;
   1562 
   1563   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
   1564   if (!heap()->isolate()->memory_allocator()->CommitBlock(
   1565       start, delta, executable())) {
   1566     return false;
   1567   }
   1568   capacity_ = new_capacity;
   1569   NewSpacePage* last_page = anchor()->prev_page();
   1570   ASSERT(last_page != anchor());
   1571   for (int i = pages_before + 1; i <= pages_after; i++) {
   1572     Address page_address = end - i * Page::kPageSize;
   1573     NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
   1574                                                       page_address,
   1575                                                       this);
   1576     new_page->InsertAfter(last_page);
   1577     Bitmap::Clear(new_page);
   1578     // Duplicate the flags that were set on the old page.
   1579     new_page->SetFlags(last_page->GetFlags(),
   1580                        NewSpacePage::kCopyOnFlipFlagsMask);
   1581     last_page = new_page;
   1582   }
   1583   return true;
   1584 }
   1585 
   1586 
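        // The inverse of GrowTo: uncommits the lowest-addressed pages (the tail of
        // the page list) and re-terminates the list at the new last page.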
   1587 bool SemiSpace::ShrinkTo(int new_capacity) {
   1588   ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   1589   ASSERT(new_capacity >= initial_capacity_);
   1590   ASSERT(new_capacity < capacity_);
   1591   if (is_committed()) {
   1592     // Semispaces grow backwards from the end of their allocated capacity,
   1593     // so we find the before and after start addresses relative to the
   1594     // end of the space.
   1595     Address space_end = start_ + maximum_capacity_;
   1596     Address old_start = space_end - capacity_;
   1597     size_t delta = capacity_ - new_capacity;
   1598     ASSERT(IsAligned(delta, OS::AllocateAlignment()));
   1599 
   1600     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
   1601     if (!allocator->UncommitBlock(old_start, delta)) {
   1602       return false;
   1603     }
   1604 
   1605     int pages_after = new_capacity / Page::kPageSize;
   1606     NewSpacePage* new_last_page =
   1607         NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
   1608     new_last_page->set_next_page(anchor());
   1609     anchor()->set_prev_page(new_last_page);
   1610     ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
   1611   }
   1612 
   1613   capacity_ = new_capacity;
   1614 
   1615   return true;
   1616 }
   1617 
   1618 
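        // Used by Swap after the two SemiSpace objects have been exchanged by
        // value: re-points every page (and the anchor) at its new owner and updates
        // the IN_FROM_SPACE / IN_TO_SPACE flags to match the space's new role.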
   1619 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
   1620   anchor_.set_owner(this);
   1621   // Fix up back-pointers to the anchor.  The anchor's address changes
   1622   // when we swap.
   1623   anchor_.prev_page()->set_next_page(&anchor_);
   1624   anchor_.next_page()->set_prev_page(&anchor_);
   1625 
   1626   bool becomes_to_space = (id_ == kFromSpace);
   1627   id_ = becomes_to_space ? kToSpace : kFromSpace;
   1628   NewSpacePage* page = anchor_.next_page();
   1629   while (page != &anchor_) {
   1630     page->set_owner(this);
   1631     page->SetFlags(flags, mask);
   1632     if (becomes_to_space) {
   1633       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
   1634       page->SetFlag(MemoryChunk::IN_TO_SPACE);
   1635       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
   1636       page->ResetLiveBytes();
   1637     } else {
   1638       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
   1639       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
   1640     }
   1641     ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
   1642     ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
   1643            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
   1644     page = page->next_page();
   1645   }
   1646 }
   1647 
   1648 
   1649 void SemiSpace::Reset() {
   1650   ASSERT(anchor_.next_page() != &anchor_);
   1651   current_page_ = anchor_.next_page();
   1652 }
   1653 
   1654 
   1655 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   1656   // We won't be swapping semispaces without data in them.
   1657   ASSERT(from->anchor_.next_page() != &from->anchor_);
   1658   ASSERT(to->anchor_.next_page() != &to->anchor_);
   1659 
   1660   // Swap bits.
   1661   SemiSpace tmp = *from;
   1662   *from = *to;
   1663   *to = tmp;
   1664 
   1665   // Fix up back-pointers to the page list anchor now that its address
   1666   // has changed.
   1667   // Swap to/from-space bits on pages.
   1668   // Copy GC flags from old active space (from-space) to new (to-space).
   1669   intptr_t flags = from->current_page()->GetFlags();
   1670   to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
   1671 
   1672   from->FlipPages(0, 0);
   1673 }
   1674 
   1675 
   1676 void SemiSpace::set_age_mark(Address mark) {
   1677   ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
   1678   age_mark_ = mark;
   1679   // Mark all pages up to the one containing mark.
   1680   NewSpacePageIterator it(space_start(), mark);
   1681   while (it.has_next()) {
   1682     it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
   1683   }
   1684 }
   1685 
   1686 
   1687 #ifdef DEBUG
   1688 void SemiSpace::Print() { }
   1689 #endif
   1690 
   1691 #ifdef VERIFY_HEAP
   1692 void SemiSpace::Verify() {
   1693   bool is_from_space = (id_ == kFromSpace);
   1694   NewSpacePage* page = anchor_.next_page();
   1695   CHECK(anchor_.semi_space() == this);
   1696   while (page != &anchor_) {
   1697     CHECK(page->semi_space() == this);
   1698     CHECK(page->InNewSpace());
   1699     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
   1700                                         : MemoryChunk::IN_TO_SPACE));
   1701     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
   1702                                          : MemoryChunk::IN_FROM_SPACE));
   1703     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
   1704     if (!is_from_space) {
   1705       // The pointers-from-here-are-interesting flag isn't updated dynamically
   1706       // on from-space pages, so it might be out of sync with the marking state.
   1707       if (page->heap()->incremental_marking()->IsMarking()) {
   1708         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
   1709       } else {
   1710         CHECK(!page->IsFlagSet(
   1711             MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
   1712       }
   1713       // TODO(gc): Check that the live_bytes_count_ field matches the
   1714       // black marking on the page (if we make it match in new-space).
   1715     }
   1716     CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
   1717     CHECK(page->prev_page()->next_page() == page);
   1718     page = page->next_page();
   1719   }
   1720 }
   1721 #endif
   1722 
   1723 #ifdef DEBUG
   1724 void SemiSpace::AssertValidRange(Address start, Address end) {
   1725   // The addresses must belong to the same semi-space.
   1726   NewSpacePage* page = NewSpacePage::FromLimit(start);
   1727   NewSpacePage* end_page = NewSpacePage::FromLimit(end);
   1728   SemiSpace* space = page->semi_space();
   1729   CHECK_EQ(space, end_page->semi_space());
   1730   // Start address is before end address, either on same page,
   1731   // or end address is on a later page in the linked list of
   1732   // semi-space pages.
   1733   if (page == end_page) {
   1734     CHECK(start <= end);
   1735   } else {
   1736     while (page != end_page) {
   1737       page = page->next_page();
   1738       CHECK_NE(page, space->anchor());
   1739     }
   1740   }
   1741 }
   1742 #endif
   1743 
   1744 
   1745 // -----------------------------------------------------------------------------
   1746 // SemiSpaceIterator implementation.
   1747 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
   1748   Initialize(space->bottom(), space->top(), NULL);
   1749 }
   1750 
   1751 
   1752 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
   1753                                      HeapObjectCallback size_func) {
   1754   Initialize(space->bottom(), space->top(), size_func);
   1755 }
   1756 
   1757 
   1758 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
   1759   Initialize(start, space->top(), NULL);
   1760 }
   1761 
   1762 
   1763 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
   1764   Initialize(from, to, NULL);
   1765 }
   1766 
   1767 
   1768 void SemiSpaceIterator::Initialize(Address start,
   1769                                    Address end,
   1770                                    HeapObjectCallback size_func) {
   1771   SemiSpace::AssertValidRange(start, end);
   1772   current_ = start;
   1773   limit_ = end;
   1774   size_func_ = size_func;
   1775 }
   1776 
   1777 
   1778 #ifdef DEBUG
   1779 // heap_histograms is shared; always clear it before using it.
   1780 static void ClearHistograms() {
   1781   Isolate* isolate = Isolate::Current();
   1782   // We reset the name each time, though it hasn't changed.
   1783 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
   1784   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
   1785 #undef DEF_TYPE_NAME
   1786 
   1787 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
   1788   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
   1789 #undef CLEAR_HISTOGRAM
   1790 
   1791   isolate->js_spill_information()->Clear();
   1792 }
   1793 
   1794 
   1795 static void ClearCodeKindStatistics(int* code_kind_statistics) {
   1796   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
   1797     code_kind_statistics[i] = 0;
   1798   }
   1799 }
   1800 
   1801 
   1802 static void ReportCodeKindStatistics(int* code_kind_statistics) {
   1803   PrintF("\n   Code kind histograms: \n");
   1804   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
   1805     if (code_kind_statistics[i] > 0) {
   1806       PrintF("     %-20s: %10d bytes\n",
   1807              Code::Kind2String(static_cast<Code::Kind>(i)),
   1808              code_kind_statistics[i]);
   1809     }
   1810   }
   1811   PrintF("\n");
   1812 }
   1813 
   1814 
   1815 static int CollectHistogramInfo(HeapObject* obj) {
   1816   Isolate* isolate = obj->GetIsolate();
   1817   InstanceType type = obj->map()->instance_type();
   1818   ASSERT(0 <= type && type <= LAST_TYPE);
   1819   ASSERT(isolate->heap_histograms()[type].name() != NULL);
   1820   isolate->heap_histograms()[type].increment_number(1);
   1821   isolate->heap_histograms()[type].increment_bytes(obj->Size());
   1822 
   1823   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
   1824     JSObject::cast(obj)->IncrementSpillStatistics(
   1825         isolate->js_spill_information());
   1826   }
   1827 
   1828   return obj->Size();
   1829 }
   1830 
   1831 
   1832 static void ReportHistogram(bool print_spill) {
   1833   Isolate* isolate = Isolate::Current();
   1834   PrintF("\n  Object Histogram:\n");
   1835   for (int i = 0; i <= LAST_TYPE; i++) {
   1836     if (isolate->heap_histograms()[i].number() > 0) {
   1837       PrintF("    %-34s%10d (%10d bytes)\n",
   1838              isolate->heap_histograms()[i].name(),
   1839              isolate->heap_histograms()[i].number(),
   1840              isolate->heap_histograms()[i].bytes());
   1841     }
   1842   }
   1843   PrintF("\n");
   1844 
   1845   // Summarize string types.
   1846   int string_number = 0;
   1847   int string_bytes = 0;
   1848 #define INCREMENT(type, size, name, camel_name)      \
   1849     string_number += isolate->heap_histograms()[type].number(); \
   1850     string_bytes += isolate->heap_histograms()[type].bytes();
   1851   STRING_TYPE_LIST(INCREMENT)
   1852 #undef INCREMENT
   1853   if (string_number > 0) {
   1854     PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
   1855            string_bytes);
   1856   }
   1857 
   1858   if (FLAG_collect_heap_spill_statistics && print_spill) {
   1859     isolate->js_spill_information()->Print();
   1860   }
   1861 }
   1862 #endif  // DEBUG
   1863 
   1864 
   1865 // Support for statistics gathering for --heap-stats and --log-gc.
   1866 void NewSpace::ClearHistograms() {
   1867   for (int i = 0; i <= LAST_TYPE; i++) {
   1868     allocated_histogram_[i].clear();
   1869     promoted_histogram_[i].clear();
   1870   }
   1871 }
   1872 
   1873 
   1874 // Because the copying collector does not touch garbage objects, we iterate
   1875 // the new space before a collection to get a histogram of allocated objects.
   1876 // This only happens when the --log-gc flag is set.
   1877 void NewSpace::CollectStatistics() {
   1878   ClearHistograms();
   1879   SemiSpaceIterator it(this);
   1880   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
   1881     RecordAllocation(obj);
   1882 }
   1883 
   1884 
   1885 static void DoReportStatistics(Isolate* isolate,
   1886                                HistogramInfo* info, const char* description) {
   1887   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
   1888   // Lump all the string types together.
   1889   int string_number = 0;
   1890   int string_bytes = 0;
   1891 #define INCREMENT(type, size, name, camel_name)       \
   1892     string_number += info[type].number();             \
   1893     string_bytes += info[type].bytes();
   1894   STRING_TYPE_LIST(INCREMENT)
   1895 #undef INCREMENT
   1896   if (string_number > 0) {
   1897     LOG(isolate,
   1898         HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
   1899   }
   1900 
   1901   // Then do the other types.
   1902   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
   1903     if (info[i].number() > 0) {
   1904       LOG(isolate,
   1905           HeapSampleItemEvent(info[i].name(), info[i].number(),
   1906                               info[i].bytes()));
   1907     }
   1908   }
   1909   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
   1910 }
   1911 
   1912 
   1913 void NewSpace::ReportStatistics() {
   1914 #ifdef DEBUG
   1915   if (FLAG_heap_stats) {
   1916     float pct = static_cast<float>(Available()) / Capacity();
   1917     PrintF("  capacity: %" V8_PTR_PREFIX "d"
   1918                ", available: %" V8_PTR_PREFIX "d, %%%d\n",
   1919            Capacity(), Available(), static_cast<int>(pct*100));
   1920     PrintF("\n  Object Histogram:\n");
   1921     for (int i = 0; i <= LAST_TYPE; i++) {
   1922       if (allocated_histogram_[i].number() > 0) {
   1923         PrintF("    %-34s%10d (%10d bytes)\n",
   1924                allocated_histogram_[i].name(),
   1925                allocated_histogram_[i].number(),
   1926                allocated_histogram_[i].bytes());
   1927       }
   1928     }
   1929     PrintF("\n");
   1930   }
   1931 #endif  // DEBUG
   1932 
   1933   if (FLAG_log_gc) {
   1934     Isolate* isolate = ISOLATE;
   1935     DoReportStatistics(isolate, allocated_histogram_, "allocated");
   1936     DoReportStatistics(isolate, promoted_histogram_, "promoted");
   1937   }
   1938 }
   1939 
   1940 
   1941 void NewSpace::RecordAllocation(HeapObject* obj) {
   1942   InstanceType type = obj->map()->instance_type();
   1943   ASSERT(0 <= type && type <= LAST_TYPE);
   1944   allocated_histogram_[type].increment_number(1);
   1945   allocated_histogram_[type].increment_bytes(obj->Size());
   1946 }
   1947 
   1948 
   1949 void NewSpace::RecordPromotion(HeapObject* obj) {
   1950   InstanceType type = obj->map()->instance_type();
   1951   ASSERT(0 <= type && type <= LAST_TYPE);
   1952   promoted_histogram_[type].increment_number(1);
   1953   promoted_histogram_[type].increment_bytes(obj->Size());
   1954 }
   1955 
   1956 
   1957 size_t NewSpace::CommittedPhysicalMemory() {
   1958   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
   1959   MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
   1960   size_t size = to_space_.CommittedPhysicalMemory();
   1961   if (from_space_.is_committed()) {
   1962     size += from_space_.CommittedPhysicalMemory();
   1963   }
   1964   return size;
   1965 }
   1966 
   1967 
   1968 // -----------------------------------------------------------------------------
   1969 // Free lists for old object spaces implementation
   1970 
   1971 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
   1972   ASSERT(size_in_bytes > 0);
   1973   ASSERT(IsAligned(size_in_bytes, kPointerSize));
   1974 
   1975   // We write a map and possibly size information to the block.  If the block
   1976   // is big enough to be a FreeSpace with at least one extra word (the next
   1977   // pointer), we set its map to be the free space map and its size to an
   1978   // appropriate array length for the desired size from HeapObject::Size().
   1979   // If the block is too small (e.g., one or two words) to hold both a size
   1980   // field and a next pointer, we give it a filler map that gives it the
   1981   // correct size.
   1982   if (size_in_bytes > FreeSpace::kHeaderSize) {
   1983     set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
   1984     // Can't use FreeSpace::cast because it fails during deserialization.
   1985     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
   1986     this_as_free_space->set_size(size_in_bytes);
   1987   } else if (size_in_bytes == kPointerSize) {
   1988     set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
   1989   } else if (size_in_bytes == 2 * kPointerSize) {
   1990     set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
   1991   } else {
   1992     UNREACHABLE();
   1993   }
   1994   // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
   1995   // deserialization because the free space map is not done yet.
   1996 }
   1997 
   1998 
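        // A free-list node stores its next pointer at kNextOffset when it carries
        // the free space map, and in the word right after the map when it is a
        // small filler; the accessors below check the map to find it.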
   1999 FreeListNode* FreeListNode::next() {
   2000   ASSERT(IsFreeListNode(this));
   2001   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
   2002     ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
   2003     return reinterpret_cast<FreeListNode*>(
   2004         Memory::Address_at(address() + kNextOffset));
   2005   } else {
   2006     return reinterpret_cast<FreeListNode*>(
   2007         Memory::Address_at(address() + kPointerSize));
   2008   }
   2009 }
   2010 
   2011 
   2012 FreeListNode** FreeListNode::next_address() {
   2013   ASSERT(IsFreeListNode(this));
   2014   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
   2015     ASSERT(Size() >= kNextOffset + kPointerSize);
   2016     return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
   2017   } else {
   2018     return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
   2019   }
   2020 }
   2021 
   2022 
   2023 void FreeListNode::set_next(FreeListNode* next) {
   2024   ASSERT(IsFreeListNode(this));
   2025   // While we are booting the VM, the free space map will actually be null, so
   2026   // we have to make sure that we don't try to use it for anything at that
   2027   // stage.
   2028   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
   2029     ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
   2030     Memory::Address_at(address() + kNextOffset) =
   2031         reinterpret_cast<Address>(next);
   2032   } else {
   2033     Memory::Address_at(address() + kPointerSize) =
   2034         reinterpret_cast<Address>(next);
   2035   }
   2036 }
   2037 
   2038 
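        // Splices all nodes of 'category' onto the front of this free list and
        // returns the number of bytes moved.  Both category mutexes are held while
        // the lists are linked together.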
   2039 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
   2040   intptr_t free_bytes = 0;
   2041   if (category->top_ != NULL) {
   2042     ASSERT(category->end_ != NULL);
   2043     // This is safe (it cannot deadlock), since Concatenate operations are
   2044     // never performed on the same two free lists at the same time in the
   2045     // reverse order.
   2046     ScopedLock lock_target(mutex_);
   2047     ScopedLock lock_source(category->mutex());
   2048     free_bytes = category->available();
   2049     if (end_ == NULL) {
   2050       end_ = category->end();
   2051     } else {
   2052       category->end()->set_next(top_);
   2053     }
   2054     top_ = category->top();
   2055     available_ += category->available();
   2056     category->Reset();
   2057   }
   2058   return free_bytes;
   2059 }
   2060 
   2061 
   2062 void FreeListCategory::Reset() {
   2063   top_ = NULL;
   2064   end_ = NULL;
   2065   available_ = 0;
   2066 }
   2067 
   2068 
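        // Unlinks every node that lives on page 'p' and returns the number of bytes
        // removed from this category.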
   2069 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
   2070   int sum = 0;
   2071   FreeListNode** n = &top_;
   2072   while (*n != NULL) {
   2073     if (Page::FromAddress((*n)->address()) == p) {
   2074       FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
   2075       sum += free_space->Size();
   2076       *n = (*n)->next();
   2077     } else {
   2078       n = (*n)->next_address();
   2079     }
   2080   }
   2081   if (top_ == NULL) {
   2082     end_ = NULL;
   2083   }
   2084   available_ -= sum;
   2085   return sum;
   2086 }
   2087 
   2088 
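        // Pops the first node that is not on an evacuation candidate page; nodes on
        // evacuation candidates are skipped and dropped from the list.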
   2089 FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
   2090   FreeListNode* node = top_;
   2091 
   2092   if (node == NULL) return NULL;
   2093 
   2094   while (node != NULL &&
   2095          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
   2096     available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
   2097     node = node->next();
   2098   }
   2099 
   2100   if (node != NULL) {
   2101     set_top(node->next());
   2102     *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
   2103     available_ -= *node_size;
   2104   } else {
   2105     set_top(NULL);
   2106   }
   2107 
   2108   if (top() == NULL) {
   2109     set_end(NULL);
   2110   }
   2111 
   2112   return node;
   2113 }
   2114 
   2115 
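        // Same as above, but gives the node back to the list if it turns out to be
        // smaller than the requested size.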
   2116 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
   2117                                                  int *node_size) {
   2118   FreeListNode* node = PickNodeFromList(node_size);
   2119   if (node != NULL && *node_size < size_in_bytes) {
   2120     Free(node, *node_size);
   2121     *node_size = 0;
   2122     return NULL;
   2123   }
   2124   return node;
   2125 }
   2126 
   2127 
   2128 void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
   2129   node->set_next(top_);
   2130   top_ = node;
   2131   if (end_ == NULL) {
   2132     end_ = node;
   2133   }
   2134   available_ += size_in_bytes;
   2135 }
   2136 
   2137 
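        // Walks the list and patches nodes whose map slot is still NULL with the
        // real free space map; see PagedSpace::RepairFreeListsAfterBoot below.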
   2138 void FreeListCategory::RepairFreeList(Heap* heap) {
   2139   FreeListNode* n = top_;
   2140   while (n != NULL) {
   2141     Map** map_location = reinterpret_cast<Map**>(n->address());
   2142     if (*map_location == NULL) {
   2143       *map_location = heap->free_space_map();
   2144     } else {
   2145       ASSERT(*map_location == heap->free_space_map());
   2146     }
   2147     n = n->next();
   2148   }
   2149 }
   2150 
   2151 
   2152 FreeList::FreeList(PagedSpace* owner)
   2153     : owner_(owner), heap_(owner->heap()) {
   2154   Reset();
   2155 }
   2156 
   2157 
   2158 intptr_t FreeList::Concatenate(FreeList* free_list) {
   2159   intptr_t free_bytes = 0;
   2160   free_bytes += small_list_.Concatenate(free_list->small_list());
   2161   free_bytes += medium_list_.Concatenate(free_list->medium_list());
   2162   free_bytes += large_list_.Concatenate(free_list->large_list());
   2163   free_bytes += huge_list_.Concatenate(free_list->huge_list());
   2164   return free_bytes;
   2165 }
   2166 
   2167 
   2168 void FreeList::Reset() {
   2169   small_list_.Reset();
   2170   medium_list_.Reset();
   2171   large_list_.Reset();
   2172   huge_list_.Reset();
   2173 }
   2174 
   2175 
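        // Turns the block [start, start + size_in_bytes) into a free-list node and,
        // if it is large enough, adds it to the size class matching its size.
        // Returns the number of bytes dropped on the floor because the block was
        // too small to track.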
   2176 int FreeList::Free(Address start, int size_in_bytes) {
   2177   if (size_in_bytes == 0) return 0;
   2178 
   2179   FreeListNode* node = FreeListNode::FromAddress(start);
   2180   node->set_size(heap_, size_in_bytes);
   2181   Page* page = Page::FromAddress(start);
   2182 
   2183   // Early return to drop too-small blocks on the floor.
   2184   if (size_in_bytes < kSmallListMin) {
   2185     page->add_non_available_small_blocks(size_in_bytes);
   2186     return size_in_bytes;
   2187   }
   2188 
   2189   // Insert other blocks at the head of a free list of the appropriate
   2190   // magnitude.
   2191   if (size_in_bytes <= kSmallListMax) {
   2192     small_list_.Free(node, size_in_bytes);
   2193     page->add_available_in_small_free_list(size_in_bytes);
   2194   } else if (size_in_bytes <= kMediumListMax) {
   2195     medium_list_.Free(node, size_in_bytes);
   2196     page->add_available_in_medium_free_list(size_in_bytes);
   2197   } else if (size_in_bytes <= kLargeListMax) {
   2198     large_list_.Free(node, size_in_bytes);
   2199     page->add_available_in_large_free_list(size_in_bytes);
   2200   } else {
   2201     huge_list_.Free(node, size_in_bytes);
   2202     page->add_available_in_huge_free_list(size_in_bytes);
   2203   }
   2204 
   2205   ASSERT(IsVeryLong() || available() == SumFreeLists());
   2206   return 0;
   2207 }
   2208 
   2209 
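        // Finds a node of at least size_in_bytes and updates the per-page free-list
        // accounting.  The first three attempts only pick from lists whose nodes
        // are all large enough for the request (hence the ASSERTs), then the huge
        // list is scanned linearly, and finally we fall back to the list matching
        // the request's own size class, re-checking the node's actual size.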
   2210 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   2211   FreeListNode* node = NULL;
   2212   Page* page = NULL;
   2213 
   2214   if (size_in_bytes <= kSmallAllocationMax) {
   2215     node = small_list_.PickNodeFromList(node_size);
   2216     if (node != NULL) {
   2217       ASSERT(size_in_bytes <= *node_size);
   2218       page = Page::FromAddress(node->address());
   2219       page->add_available_in_small_free_list(-(*node_size));
   2220       ASSERT(IsVeryLong() || available() == SumFreeLists());
   2221       return node;
   2222     }
   2223   }
   2224 
   2225   if (size_in_bytes <= kMediumAllocationMax) {
   2226     node = medium_list_.PickNodeFromList(node_size);
   2227     if (node != NULL) {
   2228       ASSERT(size_in_bytes <= *node_size);
   2229       page = Page::FromAddress(node->address());
   2230       page->add_available_in_medium_free_list(-(*node_size));
   2231       ASSERT(IsVeryLong() || available() == SumFreeLists());
   2232       return node;
   2233     }
   2234   }
   2235 
   2236   if (size_in_bytes <= kLargeAllocationMax) {
   2237     node = large_list_.PickNodeFromList(node_size);
   2238     if (node != NULL) {
   2239       ASSERT(size_in_bytes <= *node_size);
   2240       page = Page::FromAddress(node->address());
   2241       page->add_available_in_large_free_list(-(*node_size));
   2242       ASSERT(IsVeryLong() || available() == SumFreeLists());
   2243       return node;
   2244     }
   2245   }
   2246 
   2247   int huge_list_available = huge_list_.available();
   2248   for (FreeListNode** cur = huge_list_.GetTopAddress();
   2249        *cur != NULL;
   2250        cur = (*cur)->next_address()) {
   2251     FreeListNode* cur_node = *cur;
   2252     while (cur_node != NULL &&
   2253            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
   2254       int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
   2255       huge_list_available -= size;
   2256       page = Page::FromAddress(cur_node->address());
   2257       page->add_available_in_huge_free_list(-size);
   2258       cur_node = cur_node->next();
   2259     }
   2260 
   2261     *cur = cur_node;
   2262     if (cur_node == NULL) {
   2263       huge_list_.set_end(NULL);
   2264       break;
   2265     }
   2266 
   2267     ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
   2268     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
   2269     int size = cur_as_free_space->Size();
   2270     if (size >= size_in_bytes) {
   2271       // Large enough node found.  Unlink it from the list.
   2272       node = *cur;
   2273       *cur = node->next();
   2274       *node_size = size;
   2275       huge_list_available -= size;
   2276       page = Page::FromAddress(node->address());
   2277       page->add_available_in_huge_free_list(-size);
   2278       break;
   2279     }
   2280   }
   2281 
   2282   if (huge_list_.top() == NULL) {
   2283     huge_list_.set_end(NULL);
   2284   }
   2285   huge_list_.set_available(huge_list_available);
   2286 
   2287   if (node != NULL) {
   2288     ASSERT(IsVeryLong() || available() == SumFreeLists());
   2289     return node;
   2290   }
   2291 
   2292   if (size_in_bytes <= kSmallListMax) {
   2293     node = small_list_.PickNodeFromList(size_in_bytes, node_size);
   2294     if (node != NULL) {
   2295       ASSERT(size_in_bytes <= *node_size);
   2296       page = Page::FromAddress(node->address());
   2297       page->add_available_in_small_free_list(-(*node_size));
   2298     }
   2299   } else if (size_in_bytes <= kMediumListMax) {
   2300     node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
   2301     if (node != NULL) {
   2302       ASSERT(size_in_bytes <= *node_size);
   2303       page = Page::FromAddress(node->address());
   2304       page->add_available_in_medium_free_list(-(*node_size));
   2305     }
   2306   } else if (size_in_bytes <= kLargeListMax) {
   2307     node = large_list_.PickNodeFromList(size_in_bytes, node_size);
   2308     if (node != NULL) {
   2309       ASSERT(size_in_bytes <= *node_size);
   2310       page = Page::FromAddress(node->address());
   2311       page->add_available_in_large_free_list(-(*node_size));
   2312     }
   2313   }
   2314 
   2315   ASSERT(IsVeryLong() || available() == SumFreeLists());
   2316   return node;
   2317 }
   2318 
   2319 
   2320 // Allocation on the old space free list.  If it succeeds then a new linear
   2321 // allocation space has been set up with the top and limit of the space.  If
   2322 // the allocation fails then NULL is returned, and the caller can perform a GC
   2323 // or allocate a new page before retrying.
   2324 HeapObject* FreeList::Allocate(int size_in_bytes) {
   2325   ASSERT(0 < size_in_bytes);
   2326   ASSERT(size_in_bytes <= kMaxBlockSize);
   2327   ASSERT(IsAligned(size_in_bytes, kPointerSize));
   2328   // Don't free list allocate if there is linear space available.
   2329   ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
   2330 
   2331   int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
   2332   // Mark the old linear allocation area with a free space map so it can be
   2333   // skipped when scanning the heap.  This also puts it back in the free list
   2334   // if it is big enough.
   2335   owner_->Free(owner_->top(), old_linear_size);
   2336 
   2337   owner_->heap()->incremental_marking()->OldSpaceStep(
   2338       size_in_bytes - old_linear_size);
   2339 
   2340   int new_node_size = 0;
   2341   FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   2342   if (new_node == NULL) {
   2343     owner_->SetTop(NULL, NULL);
   2344     return NULL;
   2345   }
   2346 
   2347   int bytes_left = new_node_size - size_in_bytes;
   2348   ASSERT(bytes_left >= 0);
   2349 
   2350 #ifdef DEBUG
   2351   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
   2352     reinterpret_cast<Object**>(new_node->address())[i] =
   2353         Smi::FromInt(kCodeZapValue);
   2354   }
   2355 #endif
   2356 
   2357   // The old-space-step might have finished sweeping and restarted marking.
   2358   // Verify that it did not turn the page of the new node into an evacuation
   2359   // candidate.
   2360   ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
   2361 
   2362   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
   2363 
   2364   // Memory in the linear allocation area is counted as allocated.  We may free
   2365   // a little of this again immediately; see below.
   2366   owner_->Allocate(new_node_size);
   2367 
   2368   if (bytes_left > kThreshold &&
   2369       owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
   2370       FLAG_incremental_marking_steps) {
   2371     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
   2372     // We don't want to give too large linear areas to the allocator while
   2373     // incremental marking is going on, because we won't check again whether
   2374     // we want to do another increment until the linear area is used up.
   2375     owner_->Free(new_node->address() + size_in_bytes + linear_size,
   2376                  new_node_size - size_in_bytes - linear_size);
   2377     owner_->SetTop(new_node->address() + size_in_bytes,
   2378                    new_node->address() + size_in_bytes + linear_size);
   2379   } else if (bytes_left > 0) {
   2380     // Normally we give the rest of the node to the allocator as its new
   2381     // linear allocation area.
   2382     owner_->SetTop(new_node->address() + size_in_bytes,
   2383                    new_node->address() + new_node_size);
   2384   } else {
   2385     // TODO(gc) Try not freeing linear allocation region when bytes_left
   2386     // are zero.
   2387     owner_->SetTop(NULL, NULL);
   2388   }
   2389 
   2390   return new_node;
   2391 }
   2392 
   2393 
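        // Removes every free-list entry that lives on page 'p'.  The small, medium
        // and large lists are only scanned if the huge list alone did not already
        // account for the page's whole area.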
   2394 intptr_t FreeList::EvictFreeListItems(Page* p) {
   2395   intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
   2396   p->set_available_in_huge_free_list(0);
   2397 
   2398   if (sum < p->area_size()) {
   2399     sum += small_list_.EvictFreeListItemsInList(p) +
   2400         medium_list_.EvictFreeListItemsInList(p) +
   2401         large_list_.EvictFreeListItemsInList(p);
   2402     p->set_available_in_small_free_list(0);
   2403     p->set_available_in_medium_free_list(0);
   2404     p->set_available_in_large_free_list(0);
   2405   }
   2406 
   2407   return sum;
   2408 }
   2409 
   2410 
   2411 void FreeList::RepairLists(Heap* heap) {
   2412   small_list_.RepairFreeList(heap);
   2413   medium_list_.RepairFreeList(heap);
   2414   large_list_.RepairFreeList(heap);
   2415   huge_list_.RepairFreeList(heap);
   2416 }
   2417 
   2418 
   2419 #ifdef DEBUG
   2420 intptr_t FreeListCategory::SumFreeList() {
   2421   intptr_t sum = 0;
   2422   FreeListNode* cur = top_;
   2423   while (cur != NULL) {
   2424     ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
   2425     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
   2426     sum += cur_as_free_space->Size();
   2427     cur = cur->next();
   2428   }
   2429   return sum;
   2430 }
   2431 
   2432 
   2433 static const int kVeryLongFreeList = 500;
   2434 
   2435 
   2436 int FreeListCategory::FreeListLength() {
   2437   int length = 0;
   2438   FreeListNode* cur = top_;
   2439   while (cur != NULL) {
   2440     length++;
   2441     cur = cur->next();
   2442     if (length == kVeryLongFreeList) return length;
   2443   }
   2444   return length;
   2445 }
   2446 
   2447 
   2448 bool FreeList::IsVeryLong() {
   2449   if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
   2450   if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
   2451   if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
   2452   if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
   2453   return false;
   2454 }
   2455 
   2456 
   2457 // This can take a very long time because it is linear in the number of entries
   2458 // on the free list, so it should not be called if FreeListLength returns
   2459 // kVeryLongFreeList.
   2460 intptr_t FreeList::SumFreeLists() {
   2461   intptr_t sum = small_list_.SumFreeList();
   2462   sum += medium_list_.SumFreeList();
   2463   sum += large_list_.SumFreeList();
   2464   sum += huge_list_.SumFreeList();
   2465   return sum;
   2466 }
   2467 #endif
   2468 
   2469 
   2470 // -----------------------------------------------------------------------------
   2471 // OldSpace implementation
   2472 
   2473 bool NewSpace::ReserveSpace(int bytes) {
   2474   // We can't reliably unpack a partial snapshot that needs more new space
   2475   // than the minimum NewSpace size.  The limit can be set lower than
   2476   // the end of new space either because there is more space on the next page
   2477   // or because we have lowered the limit in order to get periodic incremental
   2478   // marking.  The most reliable way to ensure that there is linear space is
   2479   // to do the allocation, then rewind the limit.
   2480   ASSERT(bytes <= InitialCapacity());
   2481   MaybeObject* maybe = AllocateRaw(bytes);
   2482   Object* object = NULL;
   2483   if (!maybe->ToObject(&object)) return false;
   2484   HeapObject* allocation = HeapObject::cast(object);
   2485   Address top = allocation_info_.top;
   2486   if ((top - bytes) == allocation->address()) {
   2487     allocation_info_.top = allocation->address();
   2488     return true;
   2489   }
   2490   // There may be a borderline case here where the allocation succeeded, but
   2491   // the limit and top have moved on to a new page.  In that case we try again.
   2492   return ReserveSpace(bytes);
   2493 }
   2494 
   2495 
   2496 void PagedSpace::PrepareForMarkCompact() {
   2497   // We don't have a linear allocation area while sweeping.  It will be restored
   2498   // on the first allocation after the sweep.
   2499   // Mark the old linear allocation area with a free space map so it can be
   2500   // skipped when scanning the heap.
   2501   int old_linear_size = static_cast<int>(limit() - top());
   2502   Free(top(), old_linear_size);
   2503   SetTop(NULL, NULL);
   2504 
   2505   // Stop lazy sweeping and clear marking bits for unswept pages.
   2506   if (first_unswept_page_ != NULL) {
   2507     Page* p = first_unswept_page_;
   2508     do {
   2509       // Do not use ShouldBeSweptLazily predicate here.
   2510       // New evacuation candidates were selected but they still have
   2511       // to be swept before collection starts.
   2512       if (!p->WasSwept()) {
   2513         Bitmap::Clear(p);
   2514         if (FLAG_gc_verbose) {
   2515           PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
   2516                  reinterpret_cast<intptr_t>(p));
   2517         }
   2518       }
   2519       p = p->next_page();
   2520     } while (p != anchor());
   2521   }
   2522   first_unswept_page_ = Page::FromAddress(NULL);
   2523   unswept_free_bytes_ = 0;
   2524 
   2525   // Clear the free list before a full GC---it will be rebuilt afterward.
   2526   free_list_.Reset();
   2527 }
   2528 
   2529 
   2530 bool PagedSpace::ReserveSpace(int size_in_bytes) {
   2531   ASSERT(size_in_bytes <= AreaSize());
   2532   ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
   2533   Address current_top = allocation_info_.top;
   2534   Address new_top = current_top + size_in_bytes;
   2535   if (new_top <= allocation_info_.limit) return true;
   2536 
   2537   HeapObject* new_area = free_list_.Allocate(size_in_bytes);
   2538   if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
   2539   if (new_area == NULL) return false;
   2540 
   2541   int old_linear_size = static_cast<int>(limit() - top());
   2542   // Mark the old linear allocation area with a free space so it can be
   2543   // skipped when scanning the heap.  This also puts it back in the free list
   2544   // if it is big enough.
   2545   Free(top(), old_linear_size);
   2546 
   2547   SetTop(new_area->address(), new_area->address() + size_in_bytes);
   2548   return true;
   2549 }
   2550 
   2551 
   2552 intptr_t PagedSpace::SizeOfObjects() {
   2553   ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
   2554   return Size() - unswept_free_bytes_ - (limit() - top());
   2555 }
   2556 
   2557 
   2558 // After we have booted, we have created a map which represents free space
   2559 // on the heap.  If there was already a free list then the elements on it
   2560 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
   2561 // fix them.
   2562 void PagedSpace::RepairFreeListsAfterBoot() {
   2563   free_list_.RepairLists(heap());
   2564 }
   2565 
   2566 
   2567 // You have to call this last, since the implementation from PagedSpace
   2568 // doesn't know that memory was 'promised' to large object space.
   2569 bool LargeObjectSpace::ReserveSpace(int bytes) {
   2570   return heap()->OldGenerationCapacityAvailable() >= bytes &&
   2571          (!heap()->incremental_marking()->IsStopped() ||
   2572            heap()->OldGenerationSpaceAvailable() >= bytes);
   2573 }
   2574 
   2575 
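        // Lazily sweeps unswept pages until at least bytes_to_sweep bytes have been
        // freed or every unswept page has been visited.  Returns true once lazy
        // sweeping is complete.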
   2576 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
   2577   if (IsLazySweepingComplete()) return true;
   2578 
   2579   intptr_t freed_bytes = 0;
   2580   Page* p = first_unswept_page_;
   2581   do {
   2582     Page* next_page = p->next_page();
   2583     if (ShouldBeSweptLazily(p)) {
   2584       if (FLAG_gc_verbose) {
   2585         PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
   2586                reinterpret_cast<intptr_t>(p));
   2587       }
   2588       DecreaseUnsweptFreeBytes(p);
   2589       freed_bytes +=
   2590           MarkCompactCollector::
   2591               SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
   2592                   this, NULL, p);
   2593     }
   2594     p = next_page;
   2595   } while (p != anchor() && freed_bytes < bytes_to_sweep);
   2596 
   2597   if (p == anchor()) {
   2598     first_unswept_page_ = Page::FromAddress(NULL);
   2599   } else {
   2600     first_unswept_page_ = p;
   2601   }
   2602 
   2603   heap()->FreeQueuedChunks();
   2604 
   2605   return IsLazySweepingComplete();
   2606 }
   2607 
   2608 
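        // If the current linear allocation area sits on an evacuation candidate,
        // give it up: cover it with a filler object and clear top and limit so that
        // nothing else is allocated on that page.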
   2609 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
   2610   if (allocation_info_.top >= allocation_info_.limit) return;
   2611 
   2612   if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
   2613     // Create filler object to keep page iterable if it was iterable.
   2614     int remaining =
   2615         static_cast<int>(allocation_info_.limit - allocation_info_.top);
   2616     heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
   2617 
   2618     allocation_info_.top = NULL;
   2619     allocation_info_.limit = NULL;
   2620   }
   2621 }
   2622 
   2623 
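        // Makes sure enough memory has been swept to satisfy an allocation of
        // size_in_bytes: with active sweeper threads this may steal already swept
        // memory from them or wait for them to finish, otherwise it advances the
        // lazy sweeper on the main thread.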
   2624 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
   2625   MarkCompactCollector* collector = heap()->mark_compact_collector();
   2626   if (collector->AreSweeperThreadsActivated()) {
   2627     if (collector->IsConcurrentSweepingInProgress()) {
   2628       if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
   2629         if (!collector->sequential_sweeping()) {
   2630           collector->WaitUntilSweepingCompleted();
   2631           return true;
   2632         }
   2633       }
   2634       return false;
   2635     }
   2636     return true;
   2637   } else {
   2638     return AdvanceSweeper(size_in_bytes);
   2639   }
   2640 }
   2641 
   2642 
   2643 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   2644   // Allocation in this space has failed.
   2645 
   2646   // If there are unswept pages, advance the lazy sweeper a bounded number of
   2647   // times until we find a contiguous piece of memory of size_in_bytes.
   2648   const int kMaxSweepingTries = 5;
   2649   bool sweeping_complete = false;
   2650 
   2651   for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
   2652     sweeping_complete = EnsureSweeperProgress(size_in_bytes);
   2653 
   2654     // Retry the free list allocation.
   2655     HeapObject* object = free_list_.Allocate(size_in_bytes);
   2656     if (object != NULL) return object;
   2657   }
   2658 
   2659   // Free list allocation failed and there is no next page.  Fail if we have
   2660   // hit the old generation size limit that should cause a garbage
   2661   // collection.
   2662   if (!heap()->always_allocate() &&
   2663       heap()->OldGenerationAllocationLimitReached()) {
   2664     return NULL;
   2665   }
   2666 
   2667   // Try to expand the space and allocate in the new next page.
   2668   if (Expand()) {
   2669     return free_list_.Allocate(size_in_bytes);
   2670   }
   2671 
   2672   // As a last resort, sweep all the remaining pages to try to find space.
   2673   // This may cause a pause.
   2674   if (!IsLazySweepingComplete()) {
   2675     EnsureSweeperProgress(kMaxInt);
   2676 
   2677     // Retry the free list allocation.
   2678     HeapObject* object = free_list_.Allocate(size_in_bytes);
   2679     if (object != NULL) return object;
   2680   }
   2681 
   2682   // Finally, fail.
   2683   return NULL;
   2684 }
   2685 
   2686 
   2687 #ifdef DEBUG
   2688 void PagedSpace::ReportCodeStatistics() {
   2689   Isolate* isolate = Isolate::Current();
   2690   CommentStatistic* comments_statistics =
   2691       isolate->paged_space_comments_statistics();
   2692   ReportCodeKindStatistics(isolate->code_kind_statistics());
   2693   PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
   2694          "count  (average)\"):\n");
   2695   for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
   2696     const CommentStatistic& cs = comments_statistics[i];
   2697     if (cs.size > 0) {
   2698       PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
   2699              cs.size/cs.count);
   2700     }
   2701   }
   2702   PrintF("\n");
   2703 }
   2704 
   2705 
   2706 void PagedSpace::ResetCodeStatistics() {
   2707   Isolate* isolate = Isolate::Current();
   2708   CommentStatistic* comments_statistics =
   2709       isolate->paged_space_comments_statistics();
   2710   ClearCodeKindStatistics(isolate->code_kind_statistics());
   2711   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
   2712     comments_statistics[i].Clear();
   2713   }
   2714   comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
   2715   comments_statistics[CommentStatistic::kMaxComments].size = 0;
   2716   comments_statistics[CommentStatistic::kMaxComments].count = 0;
   2717 }
   2718 
   2719 
   2720 // Adds a comment to the 'comments_statistics' table.  Performance is OK as
   2721 // long as 'kMaxComments' is small.
   2722 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
   2723   CommentStatistic* comments_statistics =
   2724       isolate->paged_space_comments_statistics();
   2725   // Do not count empty comments
   2726   if (delta <= 0) return;
   2727   CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
   2728   // Search for a free or matching entry in 'comments_statistics': 'cs'
   2729   // points to result.
   2730   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
   2731     if (comments_statistics[i].comment == NULL) {
   2732       cs = &comments_statistics[i];
   2733       cs->comment = comment;
   2734       break;
   2735     } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
   2736       cs = &comments_statistics[i];
   2737       break;
   2738     }
   2739   }
   2740   // Update entry for 'comment'
   2741   cs->size += delta;
   2742   cs->count += 1;
   2743 }
   2744 
   2745 
   2746 // Call for each nested comment start (start marked with '[ xxx', end marked
   2747 // with ']').  RelocIterator 'it' must point to a comment reloc info.
   2748 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
   2749   ASSERT(!it->done());
   2750   ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
   2751   const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
   2752   if (tmp[0] != '[') {
   2753     // Not a nested comment; skip
   2754     return;
   2755   }
   2756 
   2757   // Search for end of nested comment or a new nested comment
   2758   const char* const comment_txt =
   2759       reinterpret_cast<const char*>(it->rinfo()->data());
   2760   const byte* prev_pc = it->rinfo()->pc();
   2761   int flat_delta = 0;
   2762   it->next();
   2763   while (true) {
   2764     // All nested comments must be terminated properly, so this loop is
   2765     // guaranteed to exit.
   2766     ASSERT(!it->done());
   2767     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
   2768       const char* const txt =
   2769           reinterpret_cast<const char*>(it->rinfo()->data());
   2770       flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
   2771       if (txt[0] == ']') break;  // End of nested comment
   2772       // A new comment
   2773       CollectCommentStatistics(isolate, it);
   2774       // Skip code that was covered with previous comment
   2775       prev_pc = it->rinfo()->pc();
   2776     }
   2777     it->next();
   2778   }
   2779   EnterComment(isolate, comment_txt, flat_delta);
   2780 }
   2781 
   2782 
   2783 // Collects code size statistics:
   2784 // - by code kind
   2785 // - by code comment
   2786 void PagedSpace::CollectCodeStatistics() {
   2787   Isolate* isolate = heap()->isolate();
   2788   HeapObjectIterator obj_it(this);
   2789   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
   2790     if (obj->IsCode()) {
   2791       Code* code = Code::cast(obj);
   2792       isolate->code_kind_statistics()[code->kind()] += code->Size();
   2793       RelocIterator it(code);
   2794       int delta = 0;
   2795       const byte* prev_pc = code->instruction_start();
   2796       while (!it.done()) {
   2797         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
   2798           delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
   2799           CollectCommentStatistics(isolate, &it);
   2800           prev_pc = it.rinfo()->pc();
   2801         }
   2802         it.next();
   2803       }
   2804 
   2805       ASSERT(code->instruction_start() <= prev_pc &&
   2806              prev_pc <= code->instruction_end());
   2807       delta += static_cast<int>(code->instruction_end() - prev_pc);
   2808       EnterComment(isolate, "NoComment", delta);
   2809     }
   2810   }
   2811 }
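
        // Worked example of the "NoComment" bucket above (sizes invented): for a
        // 100-byte instruction stream with a single top-level comment region
        // covering bytes [30, 70):
        //
        //   delta  = 30 - 0;     // gap before the region's '[ xxx' reloc entry
        //   delta += 100 - 70;   // gap from the region's closing ']' to the end
        //   EnterComment(isolate, "NoComment", 60);
        //
        // i.e. every byte not attributed to a named comment region is counted under
        // "NoComment".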
   2812 
   2813 
   2814 void PagedSpace::ReportStatistics() {
   2815   int pct = static_cast<int>(Available() * 100 / Capacity());
   2816   PrintF("  capacity: %" V8_PTR_PREFIX "d"
   2817              ", waste: %" V8_PTR_PREFIX "d"
   2818              ", available: %" V8_PTR_PREFIX "d, %%%d\n",
   2819          Capacity(), Waste(), Available(), pct);
   2820 
   2821   if (was_swept_conservatively_) return;
   2822   ClearHistograms();
   2823   HeapObjectIterator obj_it(this);
   2824   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
   2825     CollectHistogramInfo(obj);
   2826   ReportHistogram(true);
   2827 }
   2828 #endif
   2829 
   2830 // -----------------------------------------------------------------------------
   2831 // FixedSpace implementation
   2832 
   2833 void FixedSpace::PrepareForMarkCompact() {
   2834   // Call the prepare method of the superclass.
   2835   PagedSpace::PrepareForMarkCompact();
   2836 
   2837   // During a non-compacting collection, everything below the linear
   2838   // allocation pointer except wasted top-of-page blocks is considered
   2839   // allocated and we will rediscover available bytes during the
   2840   // collection.
   2841   accounting_stats_.AllocateBytes(free_list_.available());
   2842 
   2843   // Clear the free list before a full GC; it will be rebuilt afterward.
   2844   free_list_.Reset();
   2845 }
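
        // Illustrative numbers (invented) for the accounting above:
        //
        //   Capacity() = 8 MB, Size() = 6 MB, free_list_.available() = 2 MB
        //   AllocateBytes(2 MB)  ->  Size() = 8 MB, Available() = 0
        //
        // The sweeper rediscovers the 2 MB while rebuilding the free list.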
   2846 
   2847 
   2848 // -----------------------------------------------------------------------------
   2849 // MapSpace implementation
   2850 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
   2851 // TODO(mvstanton): this is weird... the compiler can't make a vtable unless
   2852 // the VerifyObject definition behind VERIFY_HEAP.
   2853 
   2854 void MapSpace::VerifyObject(HeapObject* object) {
   2855   // The object should be a map or a free-list node.
   2856   CHECK(object->IsMap() || object->IsFreeSpace());
   2857 }
   2858 
   2859 
   2860 // -----------------------------------------------------------------------------
   2861 // CellSpace and PropertyCellSpace implementation
   2862 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
   2863 // TODO(mvstanton): this is weird... the compiler can't make a vtable unless
   2864 // the VerifyObject definition behind VERIFY_HEAP.
   2865 
   2866 void CellSpace::VerifyObject(HeapObject* object) {
   2867   // The object should be a cell or a free-list node.
   2868   CHECK(object->IsCell() ||
   2869         object->map() == heap()->two_pointer_filler_map());
   2870 }
   2871 
   2872 
   2873 void PropertyCellSpace::VerifyObject(HeapObject* object) {
   2874   // The object should be a global property cell or a free-list node.
   2875   CHECK(object->IsPropertyCell() ||
   2876         object->map() == heap()->two_pointer_filler_map());
   2877 }
   2878 
   2879 
   2880 // -----------------------------------------------------------------------------
   2881 // LargeObjectIterator
   2882 
   2883 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
   2884   current_ = space->first_page_;
   2885   size_func_ = NULL;
   2886 }
   2887 
   2888 
   2889 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
   2890                                          HeapObjectCallback size_func) {
   2891   current_ = space->first_page_;
   2892   size_func_ = size_func;
   2893 }
   2894 
   2895 
   2896 HeapObject* LargeObjectIterator::Next() {
   2897   if (current_ == NULL) return NULL;
   2898 
   2899   HeapObject* object = current_->GetObject();
   2900   current_ = current_->next_page();
   2901   return object;
   2902 }
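
        // Minimal usage sketch (mirroring the loops later in this file); each
        // LargePage holds exactly one object, so Next() yields one object per page:
        //
        //   LargeObjectIterator it(heap->lo_space());   // given some Heap* heap
        //   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        //     // ... process 'obj' ...
        //   }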
   2903 
   2904 
   2905 // -----------------------------------------------------------------------------
   2906 // LargeObjectSpace
   2907 static bool ComparePointers(void* key1, void* key2) {
   2908   return key1 == key2;
   2909 }
   2910 
   2911 
   2912 LargeObjectSpace::LargeObjectSpace(Heap* heap,
   2913                                    intptr_t max_capacity,
   2914                                    AllocationSpace id)
   2915     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
   2916       max_capacity_(max_capacity),
   2917       first_page_(NULL),
   2918       size_(0),
   2919       page_count_(0),
   2920       objects_size_(0),
   2921       chunk_map_(ComparePointers, 1024) {}
   2922 
   2923 
   2924 bool LargeObjectSpace::SetUp() {
   2925   first_page_ = NULL;
   2926   size_ = 0;
   2927   page_count_ = 0;
   2928   objects_size_ = 0;
   2929   chunk_map_.Clear();
   2930   return true;
   2931 }
   2932 
   2933 
   2934 void LargeObjectSpace::TearDown() {
   2935   while (first_page_ != NULL) {
   2936     LargePage* page = first_page_;
   2937     first_page_ = first_page_->next_page();
   2938     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
   2939 
   2940     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
   2941     heap()->isolate()->memory_allocator()->PerformAllocationCallback(
   2942         space, kAllocationActionFree, page->size());
   2943     heap()->isolate()->memory_allocator()->Free(page);
   2944   }
   2945   SetUp();
   2946 }
   2947 
   2948 
   2949 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
   2950                                            Executability executable) {
   2951   // Check if we want to force a GC before growing the old space further.
   2952   // If so, fail the allocation.
   2953   if (!heap()->always_allocate() &&
   2954       heap()->OldGenerationAllocationLimitReached()) {
   2955     return Failure::RetryAfterGC(identity());
   2956   }
   2957 
   2958   if (Size() + object_size > max_capacity_) {
   2959     return Failure::RetryAfterGC(identity());
   2960   }
   2961 
   2962   LargePage* page = heap()->isolate()->memory_allocator()->
   2963       AllocateLargePage(object_size, this, executable);
   2964   if (page == NULL) return Failure::RetryAfterGC(identity());
   2965   ASSERT(page->area_size() >= object_size);
   2966 
   2967   size_ += static_cast<int>(page->size());
   2968   objects_size_ += object_size;
   2969   page_count_++;
   2970   page->set_next_page(first_page_);
   2971   first_page_ = page;
   2972 
   2973   // Register all MemoryChunk::kAlignment-aligned chunks covered by
   2974   // this large page in the chunk map.
   2975   uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
   2976   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
   2977   for (uintptr_t key = base; key <= limit; key++) {
   2978     HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
   2979                                               static_cast<uint32_t>(key),
   2980                                               true);
   2981     ASSERT(entry != NULL);
   2982     entry->value = page;
   2983   }
   2984 
   2985   HeapObject* object = page->GetObject();
   2986 
   2987   if (Heap::ShouldZapGarbage()) {
   2988     // Make the object consistent so the heap can be verified in OldSpaceStep.
   2989     // We only need to do this in debug builds or if verify_heap is on.
   2990     reinterpret_cast<Object**>(object->address())[0] =
   2991         heap()->fixed_array_map();
   2992     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
   2993   }
   2994 
   2995   heap()->incremental_marking()->OldSpaceStep(object_size);
   2996   return object;
   2997 }
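
        // Worked example of the chunk map registration above, assuming
        // MemoryChunk::kAlignment is 1 MB (0x100000); the page address and size are
        // invented:
        //
        //   page = 0x40300000, page->size() = 0x250000
        //   base  = 0x40300000 / 0x100000              == 0x403
        //   limit = 0x403 + (0x250000 - 1) / 0x100000  == 0x405
        //
        // so keys 0x403, 0x404 and 0x405 all map to this page, and any address
        // inside the page divides down to one of these keys.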
   2998 
   2999 
   3000 size_t LargeObjectSpace::CommittedPhysicalMemory() {
   3001   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
   3002   size_t size = 0;
   3003   LargePage* current = first_page_;
   3004   while (current != NULL) {
   3005     size += current->CommittedPhysicalMemory();
   3006     current = current->next_page();
   3007   }
   3008   return size;
   3009 }
   3010 
   3011 
   3012 // GC support
   3013 MaybeObject* LargeObjectSpace::FindObject(Address a) {
   3014   LargePage* page = FindPage(a);
   3015   if (page != NULL) {
   3016     return page->GetObject();
   3017   }
   3018   return Failure::Exception();
   3019 }
   3020 
   3021 
   3022 LargePage* LargeObjectSpace::FindPage(Address a) {
   3023   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
   3024   HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
   3025                                         static_cast<uint32_t>(key),
   3026                                         false);
   3027   if (e != NULL) {
   3028     ASSERT(e->value != NULL);
   3029     LargePage* page = reinterpret_cast<LargePage*>(e->value);
   3030     ASSERT(page->is_valid());
   3031     if (page->Contains(a)) {
   3032       return page;
   3033     }
   3034   }
   3035   return NULL;
   3036 }
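
        // Continuing the example from AllocateRaw above: an interior address such
        // as 0x404ABCD0 yields key 0x404ABCD0 / 0x100000 == 0x404, which was
        // registered for the page based at 0x40300000, so the lookup returns that
        // page and page->Contains(a) confirms the hit.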
   3037 
   3038 
   3039 void LargeObjectSpace::FreeUnmarkedObjects() {
   3040   LargePage* previous = NULL;
   3041   LargePage* current = first_page_;
   3042   while (current != NULL) {
   3043     HeapObject* object = current->GetObject();
   3044     // Can this large page contain pointers to non-trivial objects?  No other
   3045     // pointer-containing object is this big.
   3046     bool is_pointer_object = object->IsFixedArray();
   3047     MarkBit mark_bit = Marking::MarkBitFrom(object);
   3048     if (mark_bit.Get()) {
   3049       mark_bit.Clear();
   3050       Page::FromAddress(object->address())->ResetProgressBar();
   3051       Page::FromAddress(object->address())->ResetLiveBytes();
   3052       previous = current;
   3053       current = current->next_page();
   3054     } else {
   3055       LargePage* page = current;
   3056       // Cut the chunk out of the chunk list.
   3057       current = current->next_page();
   3058       if (previous == NULL) {
   3059         first_page_ = current;
   3060       } else {
   3061         previous->set_next_page(current);
   3062       }
   3063 
   3064       // Free the chunk.
   3065       heap()->mark_compact_collector()->ReportDeleteIfNeeded(
   3066           object, heap()->isolate());
   3067       size_ -= static_cast<int>(page->size());
   3068       objects_size_ -= object->Size();
   3069       page_count_--;
   3070 
   3071       // Remove the chunk map entries belonging to this page.
   3072       // The local 'alignment' variable keeps the lines below within the
   3073       // 80-character limit checked by tools/presubmit.py.
   3074       const intptr_t alignment = MemoryChunk::kAlignment;
   3075       uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
   3076       uintptr_t limit = base + (page->size() - 1) / alignment;
   3077       for (uintptr_t key = base; key <= limit; key++) {
   3078         chunk_map_.Remove(reinterpret_cast<void*>(key),
   3079                           static_cast<uint32_t>(key));
   3080       }
   3081 
   3082       if (is_pointer_object) {
   3083         heap()->QueueMemoryChunkForFree(page);
   3084       } else {
   3085         heap()->isolate()->memory_allocator()->Free(page);
   3086       }
   3087     }
   3088   }
   3089   heap()->FreeQueuedChunks();
   3090 }
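
        // Sketch of the unlink step above for a page list A -> B -> C where B is
        // unmarked:
        //
        //   // previous == A, current == B on entry to the else-branch
        //   current = current->next_page();    // current == C
        //   previous->set_next_page(current);  // A -> C; B is freed below
        //
        // If the dead page is the list head instead, 'first_page_' is redirected.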
   3091 
   3092 
   3093 bool LargeObjectSpace::Contains(HeapObject* object) {
   3094   Address address = object->address();
   3095   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
   3096 
   3097   bool owned = (chunk->owner() == this);
   3098 
   3099   SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
   3100 
   3101   return owned;
   3102 }
   3103 
   3104 
   3105 #ifdef VERIFY_HEAP
   3106 // We do not assume that the large object iterator works, because it depends
   3107 // on the invariants we are checking during verification.
   3108 void LargeObjectSpace::Verify() {
   3109   for (LargePage* chunk = first_page_;
   3110        chunk != NULL;
   3111        chunk = chunk->next_page()) {
   3112     // Each chunk contains an object that starts at the large object page's
   3113     // object area start.
   3114     HeapObject* object = chunk->GetObject();
   3115     Page* page = Page::FromAddress(object->address());
   3116     CHECK(object->address() == page->area_start());
   3117 
   3118     // The first word should be a map, and we expect all map pointers to be
   3119     // in map space.
   3120     Map* map = object->map();
   3121     CHECK(map->IsMap());
   3122     CHECK(heap()->map_space()->Contains(map));
   3123 
   3124     // We have only code, sequential strings, external strings (sequential
   3125     // strings that have been morphed into external strings), fixed arrays,
   3126     // fixed double arrays, and byte arrays in large object space.
   3127     CHECK(object->IsCode() || object->IsSeqString() ||
   3128           object->IsExternalString() || object->IsFixedArray() ||
   3129           object->IsFixedDoubleArray() || object->IsByteArray());
   3130 
   3131     // The object itself should look OK.
   3132     object->Verify();
   3133 
   3134     // Byte arrays and strings don't have interior pointers.
   3135     if (object->IsCode()) {
   3136       VerifyPointersVisitor code_visitor;
   3137       object->IterateBody(map->instance_type(),
   3138                           object->Size(),
   3139                           &code_visitor);
   3140     } else if (object->IsFixedArray()) {
   3141       FixedArray* array = FixedArray::cast(object);
   3142       for (int j = 0; j < array->length(); j++) {
   3143         Object* element = array->get(j);
   3144         if (element->IsHeapObject()) {
   3145           HeapObject* element_object = HeapObject::cast(element);
   3146           CHECK(heap()->Contains(element_object));
   3147           CHECK(element_object->map()->IsMap());
   3148         }
   3149       }
   3150     }
   3151   }
   3152 }
   3153 #endif
   3154 
   3155 
   3156 #ifdef DEBUG
   3157 void LargeObjectSpace::Print() {
   3158   LargeObjectIterator it(this);
   3159   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
   3160     obj->Print();
   3161   }
   3162 }
   3163 
   3164 
   3165 void LargeObjectSpace::ReportStatistics() {
   3166   PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
   3167   int num_objects = 0;
   3168   ClearHistograms();
   3169   LargeObjectIterator it(this);
   3170   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
   3171     num_objects++;
   3172     CollectHistogramInfo(obj);
   3173   }
   3174 
   3175   PrintF("  number of objects %d, "
   3176          "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
   3177   if (num_objects > 0) ReportHistogram(false);
   3178 }
   3179 
   3180 
   3181 void LargeObjectSpace::CollectCodeStatistics() {
   3182   Isolate* isolate = heap()->isolate();
   3183   LargeObjectIterator obj_it(this);
   3184   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
   3185     if (obj->IsCode()) {
   3186       Code* code = Code::cast(obj);
   3187       isolate->code_kind_statistics()[code->kind()] += code->Size();
   3188     }
   3189   }
   3190 }
   3191 
   3192 
   3193 void Page::Print() {
   3194   // Make a best effort to print the objects in the page.
   3195   PrintF("Page@%p in %s\n",
   3196          this->address(),
   3197          AllocationSpaceName(this->owner()->identity()));
   3198   printf(" --------------------------------------\n");
   3199   HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
   3200   unsigned mark_size = 0;
   3201   for (HeapObject* object = objects.Next();
   3202        object != NULL;
   3203        object = objects.Next()) {
   3204     bool is_marked = Marking::MarkBitFrom(object).Get();
   3205     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
   3206     if (is_marked) {
   3207       mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
   3208     }
   3209     object->ShortPrint();
   3210     PrintF("\n");
   3211   }
   3212   printf(" --------------------------------------\n");
   3213   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
   3214 }
   3215 
   3216 #endif  // DEBUG
   3217 
   3218 } }  // namespace v8::internal
   3219