// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

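// Clears every mark bit on the chunk and resets its live-byte counter.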
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator

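// Iterates over the pages of a paged space, starting just after the space's
// anchor page.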
PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}


bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// SemiSpaceIterator

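// Returns the next non-filler object in the semispace, advancing to the next
// page when the current one is exhausted, or nullptr once the limit has been
// reached.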
HeapObject* SemiSpaceIterator::Next() {
  while (current_ != limit_) {
    if (NewSpacePage::IsAtEnd(current_)) {
      NewSpacePage* page = NewSpacePage::FromLimit(current_);
      page = page->next_page();
      DCHECK(!page->is_anchor());
      current_ = page->area_start();
      if (current_ == limit_) return nullptr;
    }
    HeapObject* object = HeapObject::FromAddress(current_);
    current_ += object->Size();
    if (!object->IsFiller()) {
      return object;
    }
  }
  return nullptr;
}


HeapObject* SemiSpaceIterator::next_object() { return Next(); }


// -----------------------------------------------------------------------------
// NewSpacePageIterator

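// The three constructors below iterate over the pages of a new space's
// to-space, the pages of a semispace, and the pages covering an explicit
// address range, respectively.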
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }


NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator

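// Returns the next object in the space, advancing page by page, or NULL when
// the iteration is finished.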
HeapObject* HeapObjectIterator::Next() {
  do {
    HeapObject* next_obj = FromCurrentPage();
    if (next_obj != NULL) return next_obj;
  } while (AdvanceToNextPage());
  return NULL;
}


HeapObject* HeapObjectIterator::next_object() { return Next(); }


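// Returns the next non-filler object on the current page, skipping the unused
// linear allocation area between the space's top and limit, or NULL when the
// page has been fully scanned.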
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    // TODO(hpayer): Remove the debugging code.
    if (cur_addr_ > cur_end_) {
      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
                                                      obj_size);
    }

    if (!obj->IsFiller()) {
      if (obj->IsCode()) {
        DCHECK_EQ(space_, space_->heap()->code_space());
        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
      } else {
        DCHECK_OBJECT_SIZE(obj_size);
      }
      return obj;
    }
  }
  return NULL;
}


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// AllocationResult

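// For a failed allocation the space to retry in is encoded as a Smi in the
// result object.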
AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}


// --------------------------------------------------------------------------
// PagedSpace

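// Turns a freshly committed memory chunk into a page of |owner|: the page's
// usable area is added to the owner's capacity and free list, and the
// incremental marker's page flags are set up.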
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  page->mutex_ = new base::Mutex();
  DCHECK(page->area_size() <= kAllocatableMemory);
  DCHECK(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }


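// Sets or clears the SCAN_ON_SCAVENGE flag and keeps the heap's counter of
// scan-on-scavenge pages in sync.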
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


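// Maps an arbitrary interior pointer back to its memory chunk. For regular
// pages masking the address is enough; for addresses inside large-object
// space the owning chunk has to be found by iterating over the space's fixed
// arrays.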
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}


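// Iterates over the pointer-containing chunks of the heap: old space pages,
// map space pages, and the chunks holding fixed arrays in large-object space.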
PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldSpaceState),
      old_iterator_(heap->old_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}


MemoryChunk* PointerChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_.has_next()) {
        return old_iterator_.next();
      }
      state_ = kMapState;
      // Fall through.
    }
    case kMapState: {
      if (map_iterator_.has_next()) {
        return map_iterator_.next();
      }
      state_ = kLargeObjectState;
      // Fall through.
    }
    case kLargeObjectState: {
      HeapObject* heap_object;
      do {
        heap_object = lo_iterator_.Next();
        if (heap_object == NULL) {
          state_ = kFinishedState;
          return NULL;
        }
        // Fixed arrays are the only pointer-containing objects in large
        // object space.
      } while (!heap_object->IsFixedArray());
      MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
      return answer;
    }
    case kFinishedState:
      return NULL;
    default:
      break;
  }
  UNREACHABLE();
  return NULL;
}


void Page::set_next_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


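// Bump-pointer allocation within the buffer. If alignment requires it, the
// gap in front of the object is filled with a filler object; when the buffer
// is too small the retry is signalled to the caller instead of being handled
// here.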
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                    filler_size);
  }

  return AllocationResult(HeapObject::FromAddress(current_top));
}


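// Like AllocateLinearly, but pads the allocation so that the returned object
// is aligned as requested. On success *size_in_bytes is updated to include
// the filler that was inserted.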
HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}


// Raw allocation without an alignment constraint: try the linear allocation
// area first, then the free list, and finally the slow path.
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
    int size_in_bytes) {
  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
  return AllocateRawUnaligned(size_in_bytes);
}


// Raw allocation with an alignment constraint; only used for old space. When
// falling back to the free list or the slow path, the worst-case filler size
// is requested up front.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}


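// On 32-bit hosts only kDoubleAligned requests take the aligned path; on
// 64-bit hosts word alignment already satisfies double alignment.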
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


// -----------------------------------------------------------------------------
// NewSpace


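// Bump-pointer allocation in new space with an alignment constraint. If the
// linear area is too small, EnsureAllocation() is given a chance to make
// room; top and filler size are recomputed afterwards since the top may have
// moved.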
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


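// Word-aligned bump-pointer allocation in new space; falls back to
// EnsureAllocation() when the linear area is exhausted.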
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() < top + size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
    int size_in_bytes, AllocationAlignment alignment) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  return AllocateRaw(size_in_bytes, alignment);
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
  return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
}


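// Wraps a successful allocation of |size| bytes into a local allocation
// buffer; a retry result yields an invalid buffer.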
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                        AllocationResult result,
                                                        intptr_t size) {
  if (result.IsRetry()) return InvalidBuffer();
  HeapObject* obj = nullptr;
  bool ok = result.To(&obj);
  USE(ok);
  DCHECK(ok);
  Address top = HeapObject::cast(obj)->address();
  return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
}


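// Merges the unused part of |other| into this buffer when the two are
// adjacent, i.e. the other buffer ends exactly at this buffer's current top;
// |other| is reset on success.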
bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
  if (allocation_info_.top() == other->allocation_info_.limit()) {
    allocation_info_.set_top(other->allocation_info_.top());
    other->allocation_info_.Reset(nullptr, nullptr);
    return true;
  }
  return false;
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_INL_H_