// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "heap-profiler.h"
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

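// Clears every mark bit on the chunk's marking bitmap and resets the chunk's
// live byte count.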
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator


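// Iterates over the pages of a paged space in their linked-list order.
// A minimal usage sketch (illustrative only; `space` stands for any
// PagedSpace, e.g. heap->old_pointer_space()):
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... visit page ...
//   }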
PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }


bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// NewSpacePageIterator


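// NewSpacePageIterator can iterate over the pages of new space's to-space,
// of a single SemiSpace, or of an explicit [start, limit) address range;
// in each case the page holding the limit is the last page returned.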
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}


NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator
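// Returns the next live object on the current page, or NULL once the page is
// exhausted.  Filler objects are skipped, and the unused gap between the
// space's allocation top and limit is stepped over rather than iterated.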
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// -----------------------------------------------------------------------------
// PagedSpace
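// Turns a committed MemoryChunk into a Page owned by a paged space: the
// page's usable area is added to the owner's capacity and handed straight to
// the owner's free list, and the incremental-marking page flags are set.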
Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  ASSERT(page->area_size() <= kNonCodeObjectAreaSize);
  ASSERT(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


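// Sets or clears the SCAN_ON_SCAVENGE flag, keeps the heap's global count of
// scan-on-scavenge pages in sync, and refreshes the page's incremental
// marking flags.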
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


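// Maps an arbitrary heap address back to its MemoryChunk.  For addresses in
// paged and new spaces masking to the page boundary is enough; an interior
// address of a large object, masked this way, need not land on the chunk
// header, so the large object space is searched for the containing chunk.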
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}


void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }


Page* Page::next_page() {
  ASSERT(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  ASSERT(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
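// Allocation is attempted in three tiers: linear bump-pointer allocation in
// the current page, then the free list, then SlowAllocateRaw.  If all three
// fail, a retry-after-GC failure is returned.  For code space the skip list
// is updated after every successful allocation.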
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  ASSERT(!heap()->linear_allocation() ||
         (anchor_.next_chunk() == &anchor_ &&
          anchor_.prev_chunk() == &anchor_));

  object = free_list_.Allocate(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  return Failure::RetryAfterGC(identity());
}


// -----------------------------------------------------------------------------
// NewSpace


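// Bump-pointer allocation in to-space.  Falls back to SlowAllocateRaw when
// the current allocation area cannot hold size_in_bytes.  In debug builds
// with --stress-compaction, extra one-pointer fillers are written first to
// waste new-space memory and provoke more frequent GCs.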
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();
#ifdef DEBUG
  // If we are stressing compaction we waste some memory in new space
  // in order to get more frequent GCs.
  if (FLAG_stress_compaction && !heap()->linear_allocation()) {
    if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
      int filler_size = size_in_bytes * 4;
      for (int i = 0; i < filler_size; i += kPointerSize) {
        *(reinterpret_cast<Object**>(old_top + i)) =
            heap()->one_pointer_filler_map();
      }
      old_top += filler_size;
      allocation_info_.set_top(allocation_info_.top() + filler_size);
    }
  }
#endif

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


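// A heap object is a free-list node iff its map is the free-space map or one
// of the one- or two-pointer filler maps.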
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_