// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
  p_ = p_->next_page();
  return *this;
}

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
  PageIteratorImpl<PAGE_TYPE> tmp(*this);
  operator++();
  return tmp;
}

PageRange::PageRange(Address start, Address limit)
    : begin_(Page::FromAddress(start)),
      end_(Page::FromAllocationAreaAddress(limit)->next_page()) {
#ifdef DEBUG
  if (begin_->InNewSpace()) {
    SemiSpace::AssertValidRange(start, limit);
  }
#endif  // DEBUG
}

// -----------------------------------------------------------------------------
// SemiSpaceIterator

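// Returns the next live object in the semispace, advancing across page
// boundaries and skipping filler objects; returns nullptr once the iterator
// reaches the allocation limit.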
HeapObject* SemiSpaceIterator::Next() {
  while (current_ != limit_) {
    if (Page::IsAlignedToPageSize(current_)) {
      Page* page = Page::FromAllocationAreaAddress(current_);
      page = page->next_page();
      DCHECK(!page->is_anchor());
      current_ = page->area_start();
      if (current_ == limit_) return nullptr;
    }
    HeapObject* object = HeapObject::FromAddress(current_);
    current_ += object->Size();
    if (!object->IsFiller()) {
      return object;
    }
  }
  return nullptr;
}

// -----------------------------------------------------------------------------
// HeapObjectIterator

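// Returns the next live object on the current page, or advances page by page
// until one is found; returns nullptr when the space is exhausted.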
HeapObject* HeapObjectIterator::Next() {
  do {
    HeapObject* next_obj = FromCurrentPage();
    if (next_obj != nullptr) return next_obj;
  } while (AdvanceToNextPage());
  return nullptr;
}

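// Scans the current page for the next non-filler object, skipping the unused
// part of the linear allocation area (between top and limit).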
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    const int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK_LE(cur_addr_, cur_end_);
    if (!obj->IsFiller()) {
      if (obj->IsCode()) {
        DCHECK_EQ(space_, space_->heap()->code_space());
        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
      } else {
        DCHECK_OBJECT_SIZE(obj_size);
      }
      return obj;
    }
  }
  return nullptr;
}

// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif

// -----------------------------------------------------------------------------
// SemiSpace

bool SemiSpace::Contains(HeapObject* o) {
  return id_ == kToSpace
             ? MemoryChunk::FromAddress(o->address())->InToSpace()
             : MemoryChunk::FromAddress(o->address())->InFromSpace();
}

bool SemiSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool SemiSpace::ContainsSlow(Address a) {
  for (Page* p : *this) {
    if (p == MemoryChunk::FromAddress(a)) return true;
  }
  return false;
}

// --------------------------------------------------------------------------
// NewSpace

bool NewSpace::Contains(HeapObject* o) {
  return MemoryChunk::FromAddress(o->address())->InNewSpace();
}

bool NewSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool NewSpace::ContainsSlow(Address a) {
  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContainsSlow(Address a) {
  return to_space_.ContainsSlow(a);
}

bool NewSpace::FromSpaceContainsSlow(Address a) {
  return from_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }

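// Initializes a new-space page: sets the to-space/from-space flag according to
// the owning semispace, applies the incremental-marking page flags, and
// allocates the page's local tracker.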
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       SemiSpace* owner) {
  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
  bool in_to_space = (owner->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  Page* page = static_cast<Page*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  page->AllocateLocalTracker();
  return page;
}

// --------------------------------------------------------------------------
// PagedSpace

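// Initializes a paged-space page. The InitializationMode template parameter
// controls whether the page's area is immediately handed to the free list
// (kFreeMemory) or kept accounted as allocated (kDoNotFreeMemory).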
template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  DCHECK(page->area_size() <= kAllocatableMemory);
  DCHECK(chunk->owner() == owner);

  owner->IncreaseCapacity(page->area_size());
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  // Make sure that categories are initialized before freeing the area.
  page->InitializeFreeListCategories();
  // If we do not free the memory, the whole page is effectively accounted for
  // as allocated memory that cannot be used for further allocations.
  if (mode == kFreeMemory) {
    owner->Free(page->area_start(), page->area_size());
  }

  return page;
}

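// Converts a new-space page in place into an old-space page: the page is
// re-owned by old space, its flags are cleared, and its memory is accounted
// for as committed old-space memory without freeing the area.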
Page* Page::ConvertNewToOld(Page* old_page) {
  DCHECK(!old_page->is_anchor());
  DCHECK(old_page->InNewSpace());
  OldSpace* old_space = old_page->heap()->old_space();
  old_page->set_owner(old_space);
  old_page->SetFlags(0, ~0);
  old_space->AccountCommitted(old_page->size());
  Page* new_page = Page::Initialize<kDoNotFreeMemory>(
      old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
  new_page->InsertAfter(old_space->anchor()->prev_page());
  return new_page;
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
  }
}

void MemoryChunk::IncrementLiveBytes(HeapObject* object, int by) {
  MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}

void MemoryChunk::ResetLiveBytes() {
  if (FLAG_trace_live_bytes) {
    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n",
                 static_cast<void*>(this), live_byte_count_);
  }
  live_byte_count_ = 0;
}

void MemoryChunk::IncrementLiveBytes(int by) {
  if (FLAG_trace_live_bytes) {
    PrintIsolate(
        heap()->isolate(), "live-bytes: update page=%p delta=%d %d->%d\n",
        static_cast<void*>(this), by, live_byte_count_, live_byte_count_ + by);
  }
  live_byte_count_ += by;
  DCHECK_GE(live_byte_count_, 0);
  DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
}

bool PagedSpace::Contains(Address addr) {
  return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
}

bool PagedSpace::Contains(Object* o) {
  if (!o->IsHeapObject()) return false;
  Page* p = Page::FromAddress(HeapObject::cast(o)->address());
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}

void PagedSpace::UnlinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  page->ForAllFreeListCategories([this](FreeListCategory* category) {
    DCHECK_EQ(free_list(), category->owner());
    free_list()->RemoveCategory(category);
  });
}

intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  intptr_t added = 0;
  page->ForAllFreeListCategories([&added](FreeListCategory* category) {
    added += category->available();
    category->Relink();
  });
  DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
  return added;
}

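// Maps an arbitrary interior pointer to its MemoryChunk. Addresses that fall
// inside a chunk header, or on a chunk without a page header, belong to a
// large object, so the owning large page is looked up in the large object
// space instead.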
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  uintptr_t offset = addr - chunk->address();
  if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
    chunk = heap->lo_space()->FindPageThreadSafe(addr);
  }
  return chunk;
}

Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
  return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}

void Page::MarkNeverAllocateForTesting() {
  DCHECK(this->owner()->identity() != NEW_SPACE);
  DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
  SetFlag(NEVER_ALLOCATE_ON_PAGE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::MarkEvacuationCandidate() {
  DCHECK(!IsFlagSet(NEVER_EVACUATE));
  DCHECK_NULL(old_to_old_slots_);
  DCHECK_NULL(typed_old_to_old_slots_);
  SetFlag(EVACUATION_CANDIDATE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::ClearEvacuationCandidate() {
  if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
    DCHECK_NULL(old_to_old_slots_);
    DCHECK_NULL(typed_old_to_old_slots_);
  }
  ClearFlag(EVACUATION_CANDIDATE);
  InitializeFreeListCategories();
}

MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
    : heap_(heap),
      state_(kOldSpaceState),
      old_iterator_(heap->old_space()->begin()),
      code_iterator_(heap->code_space()->begin()),
      map_iterator_(heap->map_space()->begin()),
      lo_iterator_(heap->lo_space()->begin()) {}

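// Iterates over all chunks of the heap as a simple state machine, visiting old
// space, map space, code space, and finally large object space; returns
// nullptr once every space has been exhausted.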
MemoryChunk* MemoryChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
      state_ = kMapState;
      // Fall through.
    }
    case kMapState: {
      if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
      state_ = kCodeState;
      // Fall through.
    }
    case kCodeState: {
      if (code_iterator_ != heap_->code_space()->end())
        return *(code_iterator_++);
      state_ = kLargeObjectState;
      // Fall through.
    }
    case kLargeObjectState: {
      if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
      state_ = kFinishedState;
      // Fall through.
    }
    case kFinishedState:
      return nullptr;
    default:
      break;
  }
  UNREACHABLE();
  return nullptr;
}

Page* FreeListCategory::page() {
  return Page::FromAddress(reinterpret_cast<Address>(this));
}

FreeList* FreeListCategory::owner() {
  return reinterpret_cast<PagedSpace*>(
             Page::FromAddress(reinterpret_cast<Address>(this))->owner())
      ->free_list();
}

bool FreeListCategory::is_linked() {
  return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}

// Tries linear allocation in the page of allocation_info_'s allocation top.
// Does not contain slow-case logic (e.g. moving to the next page or trying the
// free list), so it can be used by all allocation functions and all paged
// spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


AllocationResult LocalAllocationBuffer::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                    filler_size);
  }

  return AllocationResult(HeapObject::FromAddress(current_top));
}


HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}


// Raw unaligned allocation: tries the linear allocation area first, then the
// free list, then the slow path.
AllocationResult PagedSpace::AllocateRawUnaligned(
    int size_in_bytes, UpdateSkipList update_skip_list) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
    if (object != NULL && heap()->incremental_marking()->black_allocation()) {
      Address start = object->address();
      Address end = object->address() + size_in_bytes;
      Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
    }
  }

  if (object != NULL) {
    if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
    int size_in_bytes) {
  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
  return AllocateRawUnaligned(size_in_bytes);
}


// Raw aligned allocation: like AllocateRawUnaligned, but pads with a filler to
// satisfy the requested alignment. Only used for OLD_SPACE.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL) {
      if (heap()->incremental_marking()->black_allocation()) {
        Address start = object->address();
        Address end = object->address() + allocation_size;
        Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
      }
      if (filler_size != 0) {
        object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                         alignment);
        // Filler objects are initialized, so mark only the aligned object
        // memory as uninitialized.
        allocation_size = size_in_bytes;
      }
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}


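// Dispatches to aligned or unaligned allocation (alignment is only honored on
// 32-bit hosts) and, on success, runs AllocationStep so that allocation
// observers are notified.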
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result =
      alignment == kDoubleAligned
          ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
          : AllocateRawUnaligned(size_in_bytes);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
  HeapObject* heap_obj = nullptr;
  if (!result.IsRetry() && result.To(&heap_obj)) {
    AllocationStep(heap_obj->address(), size_in_bytes);
  }
  return result;
}


// -----------------------------------------------------------------------------
// NewSpace


AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() < top + size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


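// As in PagedSpace::AllocateRaw, double alignment is only requested on 32-bit
// hosts; on 64-bit hosts every allocation is word aligned, which already
// satisfies double alignment.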
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
    int size_in_bytes, AllocationAlignment alignment) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  return AllocateRaw(size_in_bytes, alignment);
}

LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, Space* owner) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());

  // Initialize the owner field for each contained page (except the first, which
  // is initialized by MemoryChunk::Initialize).
  for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
       addr < chunk->area_end(); addr += Page::kPageSize) {
    // Clear out kPageHeaderTag.
    Memory::Address_at(addr) = 0;
  }

  return static_cast<LargePage*>(chunk);
}

size_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->memory_allocator()->Available());
}


LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
  return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
}


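// Wraps a successful allocation of `size` bytes in a LocalAllocationBuffer
// whose linear allocation area covers exactly the allocated block; a retry
// result yields an invalid buffer.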
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                        AllocationResult result,
                                                        intptr_t size) {
  if (result.IsRetry()) return InvalidBuffer();
  HeapObject* obj = nullptr;
  bool ok = result.To(&obj);
  USE(ok);
  DCHECK(ok);
  Address top = HeapObject::cast(obj)->address();
  return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
}


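// Merges `other` into this buffer when the two allocation areas are adjacent,
// i.e. this buffer's top equals the other buffer's limit; on success the other
// buffer is reset and becomes invalid.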
bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
  if (allocation_info_.top() == other->allocation_info_.limit()) {
    allocation_info_.set_top(other->allocation_info_.top());
    other->allocation_info_.Reset(nullptr, nullptr);
    return true;
  }
  return false;
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_INL_H_