// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

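// Clear every cell of the chunk's marking bitmap and reset its live byte
// count.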
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator

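// Walks the pages of a paged space, starting after the space's anchor page
// and stopping once the iteration wraps back around to the anchor.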
PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }


bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// NewSpacePageIterator

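// The three constructors iterate, respectively, over the to-space of a new
// space, over all pages of a semispace, and over the pages covering an
// explicit [start, limit) address range.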
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}


NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator
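// Return the next non-filler object on the current page, or NULL once the end
// of the page is reached.  The unused gap between the allocation top and the
// allocation limit is skipped.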
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}

// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// -----------------------------------------------------------------------------
// PagedSpace
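// Turn a freshly committed memory chunk into a page of the given paged space:
// account for its capacity and hand its object area to the space's free list.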
Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  ASSERT(page->area_size() <= kNonCodeObjectAreaSize);
  ASSERT(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}

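// Set or clear the SCAN_ON_SCAVENGE flag, keeping the heap's count of
// scan-on-scavenge pages and the incremental marking page flags in sync.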
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}

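// Map an arbitrary interior address to its MemoryChunk.  If masking the
// address to page alignment does not yield a valid chunk header (owner is
// NULL), the address must lie inside a large object, so large object space is
// searched for the chunk that contains it.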
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(HEAP->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}

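// Record the highest allocation top ever observed on the chunk that contains
// the given mark.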
void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}

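// Visits the chunks of the pointer-containing spaces in turn: old pointer
// space, map space, and large object space.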
PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }


Page* Page::next_page() {
  ASSERT(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  ASSERT(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_prev_chunk(page);
}

// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit) return NULL;

  allocation_info_.top = new_top;
  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
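// Tries linear allocation first, then the free list, and finally the slow
// path; if all three fail, a retry-after-GC failure is returned.  Code space
// allocations also keep the skip list up to date.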
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  ASSERT(!heap()->linear_allocation() ||
         (anchor_.next_chunk() == &anchor_ &&
          anchor_.prev_chunk() == &anchor_));

  object = free_list_.Allocate(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  return Failure::RetryAfterGC(identity());
}


// -----------------------------------------------------------------------------
// NewSpace

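// Bump-pointer allocation in to-space.  Falls back to SlowAllocateRaw when the
// current allocation area cannot hold another size_in_bytes bytes.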
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top;
#ifdef DEBUG
  // If we are stressing compaction we waste some memory in new space
  // in order to get more frequent GCs.
  if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
    if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
      int filler_size = size_in_bytes * 4;
      for (int i = 0; i < filler_size; i += kPointerSize) {
        *(reinterpret_cast<Object**>(old_top + i)) =
            HEAP->one_pointer_filler_map();
      }
      old_top += filler_size;
      allocation_info_.top += filler_size;
    }
  }
#endif

  if (allocation_info_.limit - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  Object* obj = HeapObject::FromAddress(old_top);
  allocation_info_.top += size_in_bytes;
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}

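// A heap object is a free list node if its map is the free space map or one
// of the filler maps.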
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_