Lines Matching refs:chunk
224 // Don't leave a small free block, useless for a large object or chunk.
416 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
423 chunk->set_next_chunk(NULL);
424 chunk->set_prev_chunk(NULL);
425 chunk->initialize_scan_on_scavenge(true);
427 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
429 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
431 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
454 MemoryChunk* chunk = FromAddress(base);
456 ASSERT(base == chunk->address());
458 chunk->heap_ = heap;
459 chunk->size_ = size;
460 chunk->area_start_ = area_start;
461 chunk->area_end_ = area_end;
462 chunk->flags_ = 0;
463 chunk->set_owner(owner);
464 chunk->InitializeReservedMemory();
465 chunk->slots_buffer_ = NULL;
466 chunk->skip_list_ = NULL;
467 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
468 chunk->progress_bar_ = 0;
469 chunk->high_water_mark_ = static_cast<int>(area_start - base);
470 chunk->parallel_sweeping_ = 0;
471 chunk->available_in_small_free_list_ = 0;
472 chunk->available_in_medium_free_list_ = 0;
473 chunk->available_in_large_free_list_ = 0;
474 chunk->available_in_huge_free_list_ = 0;
475 chunk->non_available_small_blocks_ = 0;
476 chunk->ResetLiveBytes();
477 Bitmap::Clear(chunk);
478 chunk->initialize_scan_on_scavenge(false);
479 chunk->SetFlag(WAS_SWEPT_PRECISELY);
485 chunk->SetFlag(IS_EXECUTABLE);
489 chunk->SetFlag(CONTAINS_ONLY_DATA);
492 return chunk;
718 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
720 if (chunk == NULL) return NULL;
722 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
729 MemoryChunk* chunk = AllocateChunk(object_size,
733 if (chunk == NULL) return NULL;
734 return LargePage::Initialize(isolate_->heap(), chunk);
738 void MemoryAllocator::Free(MemoryChunk* chunk) {
739 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
740 if (chunk->owner() != NULL) {
742 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
743 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
747 reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
749 delete chunk->slots_buffer();
750 delete chunk->skip_list();
752 VirtualMemory* reservation = chunk->reserved_memory();
754 FreeMemory(reservation, chunk->executable());
756 FreeMemory(chunk->address(),
757 chunk->size(),
758 chunk->executable());
910 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
911 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
912 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
914 chunk->IncrementLiveBytes(by);
1198 // this chunk must be a power of two and it must be aligned to its size.
1475 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
2974 // this large page in the chunk map.
3056 // Cut the chunk out from the chunk list.
3064 // Free the chunk.
3095 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3097 bool owned = (chunk->owner() == this);
3109 for (LargePage* chunk = first_page_;
3110 chunk != NULL;
3111 chunk = chunk->next_page()) {
3112 // Each chunk contains an object that starts at the large object page's
3114 HeapObject* object = chunk->GetObject();