Lines Matching refs:Page

74   end_page_ = Page::FromAllocationTop(end);
76 Page* p = Page::FromAllocationTop(cur_addr_);
88 Page* cur_page = Page::FromAllocationTop(cur_addr_);
106 Page* p = Page::FromAllocationTop(cur_addr_);
107 ASSERT(p == Page::FromAllocationTop(cur_limit_));
127 // Verify that the cached last page in the space is actually the
128 // last page.
129 for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
142 // Page
145 Page::RSetState Page::rset_state_ = Page::IN_USE;
236 *allocated = RoundUp(requested, Page::kPageSize);
238 if (*allocated >= current.size - Page::kPageSize) {
302 capacity_ = RoundUp(capacity, Page::kPageSize);
308 // Due to alignment, allocated space might be one page less than required
313 max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
403 // The first page starts on the first page-aligned address from start onward
404 // and the last page ends on the last page-aligned address before
405 // start+size. Page::kPageSize is a power of two so we can divide by
407 return static_cast<int>((RoundDown(start + size, Page::kPageSize)
408 - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
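
The arithmetic matched above counts how many whole pages fit in an arbitrarily aligned chunk: round the chunk end down and the chunk start up to page boundaries, then shift the difference by the page-size bits. A minimal standalone sketch of that calculation (the constants and the function name are illustrative, not the allocator's own):

    #include <cstdint>
    #include <cstdio>

    // Illustrative: an 8 KB page, with kPageSize == 1 << kPageSizeBits.
    constexpr int kPageSizeBits = 13;
    constexpr uintptr_t kPageSize = uintptr_t{1} << kPageSizeBits;

    uintptr_t RoundDown(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }
    uintptr_t RoundUp(uintptr_t x, uintptr_t a) { return RoundDown(x + a - 1, a); }

    // Whole pages that fit between start and start + size.
    int PagesInChunk(uintptr_t start, uintptr_t size) {
      uintptr_t first = RoundUp(start, kPageSize);          // first page-aligned address
      uintptr_t last = RoundDown(start + size, kPageSize);  // last page-aligned address
      if (last <= first) return 0;
      return static_cast<int>((last - first) >> kPageSizeBits);
    }

    int main() {
      // A chunk of five pages starting 100 bytes past a boundary holds four whole pages.
      printf("%d\n", PagesInChunk(kPageSize + 100, 5 * kPageSize));
    }
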
412 Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
414 if (requested_pages <= 0) return Page::FromAddress(NULL);
415 size_t chunk_size = requested_pages * Page::kPageSize;
424 if (requested_pages <= 0) return Page::FromAddress(NULL);
427 if (chunk == NULL) return Page::FromAddress(NULL);
434 return Page::FromAddress(NULL);
444 Page* MemoryAllocator::CommitPages(Address start, size_t size,
453 return Page::FromAddress(NULL);
507 Page
514 Address low = RoundUp(chunk_start, Page::kPageSize);
518 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
520 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
525 Page* p = Page::FromAddress(page_addr);
526 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
528 page_addr += Page::kPageSize;
531 // Set the next page of the last page to 0.
532 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
535 return Page::FromAddress(low);
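
These matches thread the pages of a freshly committed chunk into a singly linked list: each page header packs the address of the following page together with the owning chunk id into a single word, and the last page's next pointer is zeroed. A simplified sketch of that packing, with assumed constants and field names:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 13;  // illustrative page size
    constexpr uintptr_t kChunkIdMask = kPageSize - 1;    // low bits carry the chunk id

    // One word encodes both the next page's address (page aligned, so its low
    // bits are free) and the id of the chunk this page belongs to.
    uintptr_t PackOpaqueHeader(uintptr_t next_page_addr, int chunk_id) {
      assert((next_page_addr & kChunkIdMask) == 0);  // must be page aligned
      assert(static_cast<uintptr_t>(chunk_id) <= kChunkIdMask);
      return next_page_addr | static_cast<uintptr_t>(chunk_id);
    }

    uintptr_t NextPageAddress(uintptr_t header) { return header & ~kChunkIdMask; }
    int ChunkId(uintptr_t header) { return static_cast<int>(header & kChunkIdMask); }

    int main() {
      uintptr_t header = PackOpaqueHeader(4 * kPageSize, 7);
      assert(NextPageAddress(header) == 4 * kPageSize);
      assert(ChunkId(header) == 7);
      // A zero next-page address (as written into the last page) ends the list.
    }
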
539 Page* MemoryAllocator::FreePages(Page* p) {
542 // Find the first page in the same chunk as 'p'
543 Page* first_page = FindFirstPageInSameChunk(p);
544 Page* page_to_return = Page::FromAddress(NULL);
547 // Find the last page in the same chunk as 'prev'.
548 Page* last_page = FindLastPageInSameChunk(p);
549 first_page = GetNextPage(last_page); // first page in next chunk
552 SetNextPage(last_page, Page::FromAddress(NULL));
560 // Find the first page of the next chunk before deleting this chunk.
593 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
597 Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
598 return Page::FromAddress(low);
602 Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
609 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
612 return Page::FromAddress(high - Page::kPageSize);
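
Both chunk lookups reduce to the chunk's bounds: the first page sits at the chunk start rounded up to a page boundary, and the last page begins one full page below the chunk end rounded down. A hedged sketch of the two address computations (the chunk is represented here only by its start and size):

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 13;  // illustrative page size

    uintptr_t RoundDown(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }
    uintptr_t RoundUp(uintptr_t x, uintptr_t a) { return RoundDown(x + a - 1, a); }

    // First page of a chunk: chunk start rounded up to a page boundary.
    uintptr_t FirstPageInChunk(uintptr_t chunk_start) {
      return RoundUp(chunk_start, kPageSize);
    }

    // Last page of a chunk: one full page below the rounded-down chunk end.
    uintptr_t LastPageInChunk(uintptr_t chunk_start, uintptr_t chunk_size) {
      return RoundDown(chunk_start + chunk_size, kPageSize) - kPageSize;
    }

    int main() {
      uintptr_t start = 3 * kPageSize + 64, size = 4 * kPageSize;
      printf("first=%llx last=%llx\n",
             (unsigned long long)FirstPageInChunk(start),
             (unsigned long long)LastPageInChunk(start, size));
    }
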
632 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
633 * Page::kObjectAreaSize;
649 // contain at least one page, ignore it and allocate instead.
652 first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
653 Page::kPageSize * pages_in_chunk,
657 max_capacity_ / Page::kObjectAreaSize);
663 // We are sure that the first page is valid and that we have at least one
664 // page.
667 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
671 // pages and cache the current last page in the space.
672 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
700 Page* page = first_page_;
701 while (page->is_valid()) {
702 MemoryAllocator::ProtectChunkFromPage(page);
703 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
709 Page* page = first_page_;
710 while (page->is_valid()) {
711 MemoryAllocator::UnprotectChunkFromPage(page);
712 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
734 Page* p = Page::FromAddress(addr);
750 bool PagedSpace::IsUsed(Page* page) {
753 if (page == it.next()) return true;
759 void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
767 // Set page indexes.
771 Page* p = it.next();
775 // Set mc_forwarding_info_ to the first page in the space.
786 // page in the page, MCSpaceOffsetForAddress considers it is in the
787 // previous page.
788 if (Page::IsAlignedToPageSize(addr)) {
795 // If addr is at the end of a page, it belongs to previous page
796 Page* p = Page::IsAlignedToPageSize(addr)
797 ? Page::FromAllocationTop(addr)
798 : Page::FromAddress(addr);
800 return (index * Page::kPageSize) + p->Offset(addr);
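
The offset computation above maps a heap address to a linear position within the space: the owning page's index times the page size plus the offset inside that page, with the twist that an address exactly on a page boundary counts as the allocation top of the previous page. A small sketch of that edge case, using hypothetical helpers rather than the real Page interface:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 13;  // illustrative page size

    bool IsAlignedToPageSize(uintptr_t addr) { return (addr & (kPageSize - 1)) == 0; }

    // Start address of the page owning addr; an address exactly on a page
    // boundary belongs to the page below it (it is that page's allocation top).
    uintptr_t OwningPageStart(uintptr_t addr) {
      uintptr_t base = addr & ~(kPageSize - 1);
      return IsAlignedToPageSize(addr) ? base - kPageSize : base;
    }

    // Linear offset of addr within the space, given the owning page's index.
    uintptr_t MCSpaceOffset(uintptr_t addr, int page_index) {
      return static_cast<uintptr_t>(page_index) * kPageSize +
             (addr - OwningPageStart(addr));
    }

    int main() {
      // A boundary address maps to offset kPageSize within the previous page.
      assert(MCSpaceOffset(2 * kPageSize, 0) == kPageSize);
      assert(MCSpaceOffset(2 * kPageSize + 8, 1) == kPageSize + 8);
    }
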
807 Page* current_page = TopPageOf(mc_forwarding_info_);
816 // We do not add the top of page block for current page to the space's
818 // bookkeeping information to it. Instead, we will recover top of page
821 // We do however write the allocation pointer to the page. The encoding
823 // need quick access to the allocation top of each page to decode
831 bool PagedSpace::Expand(Page* last_page) {
832 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
833 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
838 // Last page must be valid and its next page is invalid.
841 int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
845 Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
848 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
854 // new last page in the space.
868 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
878 Page* top_page = AllocationTopPage();
883 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
888 Page* p = MemoryAllocator::FreePages(top_page->next_page());
894 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
899 accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
900 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
907 // Start from the allocation top and loop to the last page in the space.
908 Page* last_page = AllocationTopPage();
909 Page* next_page = last_page->next_page();
936 // The allocation pointer should be valid, and it should be in a page in the
939 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
944 Page* current_page = first_page_;
949 // Unless this is the last page in the space containing allocated
955 // The next page will be above the allocation top.
1819 Page* p = it.next();
1824 // Free the space at the top of the page. We cannot use
1859 // There wasn't enough space in the current page. Let's put the rest
1860 // of the page on the free list and start a fresh page.
1863 Page* reserved_page = TopPageOf(allocation_info_);
1870 bytes_left_to_reserve -= Page::kPageSize;
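
The reservation loop these matches come from works one page at a time: when the current page cannot hold what remains to be reserved, its tail is wasted onto the free list and fresh pages are claimed, shrinking the outstanding byte count by at most a page per iteration. A schematic version (the function and helper names are stand-ins, not the space's real API):

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kPageSize = size_t{1} << 13;  // illustrative page size

    // Placeholder hooks; real versions would touch the free list and page chain.
    void PutRestOfCurrentPageOnFreeList() {}
    bool EnsureNextPage() { return true; }         // false would mean out of pages

    // Guarantee that `bytes` can later be allocated without another slow path,
    // given how much room the current page still has.
    bool ReserveSpace(size_t bytes, size_t room_in_current_page) {
      if (bytes <= room_in_current_page) return true;
      // The current page cannot hold the request: put its tail on the free
      // list and cover the remainder with whole pages, one per iteration.
      PutRestOfCurrentPageOnFreeList();
      size_t bytes_left_to_reserve = bytes;
      while (bytes_left_to_reserve > 0) {
        if (!EnsureNextPage()) return false;
        bytes_left_to_reserve -=
            bytes_left_to_reserve < kPageSize ? bytes_left_to_reserve : kPageSize;
      }
      return true;
    }

    int main() {
      printf("%d\n", ReserveSpace(3 * kPageSize + 100, 500) ? 1 : 0);
    }
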
1889 // page in the space, (2) allocate off the space's free list, (3) expand the
1892 // Linear allocation in this space has failed. If there is another page
1893 // in the space, move to that page and allocate there. This allocation
1894 // should succeed (size_in_bytes should not be greater than a page's
1896 Page* current_page = TopPageOf(allocation_info_);
1901 // There is no next page in this space. Try free list allocation unless that
1913 // Free list allocation failed and there is no next page. Fail if we have
1920 // Try to expand the space and allocate in the new next page.
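
The comments matched here spell out the slow allocation path: fall through to the next page, then the free list, then expansion of the space, failing only when all three are exhausted. A schematic of that control flow (every function below is a placeholder for the corresponding step, not the real interface):

    #include <cstddef>
    #include <cstdio>

    struct HeapObject {};  // opaque stand-in for an allocated object

    // Trivial stand-ins for the three fallback strategies; real versions would
    // manipulate page lists, free lists, and the chunk allocator.
    static HeapObject dummy;
    HeapObject* AllocateInNextPage(size_t) { return &dummy; }     // step 1
    HeapObject* AllocateFromFreeList(size_t) { return nullptr; }  // step 2
    bool ExpandSpace() { return true; }                           // step 3

    // Slow path taken once linear (bump-pointer) allocation in the current page fails.
    HeapObject* SlowAllocateRaw(size_t size_in_bytes, bool has_next_page) {
      if (has_next_page) {
        // (1) Another page already exists in the space: allocate there;
        //     a single object never exceeds one page's object area.
        return AllocateInNextPage(size_in_bytes);
      }
      // (2) No next page: try to satisfy the request from the free list.
      if (HeapObject* result = AllocateFromFreeList(size_in_bytes)) return result;
      // (3) The free list failed too: grow the space and allocate in the new page.
      if (ExpandSpace()) return AllocateInNextPage(size_in_bytes);
      return nullptr;  // caller must fall back to a garbage collection
    }

    int main() {
      printf("%p\n", static_cast<void*>(SlowAllocateRaw(64, false)));
    }
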
1931 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
1941 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
1945 // We use up the rest of the page while preserving this invariant.
1955 // Add the block at the top of the page to the space's free list, set the
1956 // allocation info to the next page (assumed to be one), and allocate
1958 HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2114 Page* p = page_it.next();
2123 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
2248 Page* p = it.next();
2249 PrintF("%s page 0x%x:\n", space_name, p);
2275 // allocation pointer except wasted top-of-page blocks is considered
2295 // Update allocation_top of each page in use and compute waste.
2299 Page* page = it.next();
2300 Address page_top = page->AllocationTop();
2301 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
2304 static_cast<int>(page->ObjectAreaEnd() - page_top));
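
These lines tally, per page in use, the bytes below the allocation top (counted as allocated) and the bytes between the allocation top and the end of the object area (counted as waste). A stripped-down sketch of that bookkeeping, with each page described only by the three addresses that matter:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Minimal view of an in-use page for accounting purposes.
    struct PageInfo {
      uintptr_t object_area_start;
      uintptr_t object_area_end;
      uintptr_t allocation_top;  // everything below this has been handed out
    };

    // Sum allocated bytes and wasted top-of-page bytes across the in-use pages.
    void ComputeSizeAndWaste(const std::vector<PageInfo>& pages,
                             size_t* computed_size, size_t* waste) {
      *computed_size = 0;
      *waste = 0;
      for (const PageInfo& page : pages) {
        *computed_size += page.allocation_top - page.object_area_start;
        *waste += page.object_area_end - page.allocation_top;  // unusable tail
      }
    }

    int main() {
      std::vector<PageInfo> pages = {{0, 8192, 8000}, {8192, 16384, 12000}};
      size_t size, waste;
      ComputeSizeAndWaste(pages, &size, &waste);
      printf("allocated=%zu waste=%zu\n", size, waste);  // allocated=11808 waste=4576
    }
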
2315 // page in the space, (2) allocate off the space's free list, (3) expand the
2319 // Linear allocation in this space has failed. If there is another page
2320 // in the space, move to that page and allocate there. This allocation
2322 Page* current_page = TopPageOf(allocation_info_);
2327 // There is no next page in this space. Try free list allocation unless
2338 // Free list allocation failed and there is no next page. Fail if we have
2345 // Try to expand the space and allocate in the new next page.
2356 // Move to the next page (there is assumed to be one) and allocate there.
2357 // The top of page block is always wasted, because it is too small to hold a
2359 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2382 Page* p = page_it.next();
2391 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
2438 Page* p = it.next();
2515 if (os_alignment < Page::kPageSize)
2516 size_in_bytes += (Page::kPageSize - os_alignment);
2517 return size_in_bytes + Page::kObjectStartOffset;
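
The padding above sizes the memory request for a large object: when the OS allocation alignment is finer than a page, enough slack is added to slide the object area up to the next page boundary, and the page header (Page::kObjectStartOffset) is added on top. A hedged sketch with assumed constants:

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kPageSize = size_t{1} << 13;  // illustrative page size
    constexpr size_t kObjectStartOffset = 256;     // illustrative page-header size

    // Bytes to request from the OS for a large object of size_in_bytes.
    size_t ChunkSizeForLargeObject(size_t size_in_bytes, size_t os_alignment) {
      // Worst case, up to (kPageSize - os_alignment) bytes are lost moving the
      // start of the mapping up to a page boundary.
      if (os_alignment < kPageSize) size_in_bytes += kPageSize - os_alignment;
      // Room for the page header that precedes the object area.
      return size_in_bytes + kObjectStartOffset;
    }

    int main() {
      printf("%zu\n", ChunkSizeForLargeObject(100000, 4096));
    }
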
2599 // Set the object address and size in the page header and clear its
2601 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2602 Address object_address = page->ObjectAreaStart();
2603 // Clear the low order bit of the second word in the page to flag it as a
2604 // large object page. If the chunk_size happened to be written there, its
2607 page->is_normal_page &= ~0x1;
2608 page->ClearRSet();
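
Here a freshly allocated chunk becomes a large-object page: the page begins at the first page boundary inside the chunk, the object is placed at the page's object-area start, and clearing the low bit of the second header word flags the page as a large-object page before its remembered set is wiped. A simplified sketch of the flag manipulation (the two-word header layout is assumed for illustration):

    #include <cassert>
    #include <cstdint>

    // Assumed miniature page header: only the word whose lowest bit encodes
    // whether this is a normal paged-space page.
    struct PageHeader {
      uintptr_t opaque_header;
      uintptr_t is_normal_page;  // bit 0 set => normal page
    };

    void MarkAsLargeObjectPage(PageHeader* page) {
      // Whatever value happens to sit in this word (e.g. a chunk size written
      // earlier), only bit 0 matters: clearing it marks a large-object page.
      page->is_normal_page &= ~uintptr_t{1};
    }

    bool IsLargeObjectPage(const PageHeader* page) {
      return (page->is_normal_page & 1) == 0;
    }

    int main() {
      PageHeader page = {0, 0x2001};  // stale odd value left in the flag word
      MarkAsLargeObjectPage(&page);
      assert(IsLargeObjectPage(&page));
    }
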
2659 ASSERT(Page::is_rset_in_use());
2666 // Clear the normal remembered set region of the page;
2667 Page* page = Page::FromAddress(object->address());
2668 page->ClearRSet();
2680 ASSERT(Page::is_rset_in_use());
2686 Page::kObjectAreaSize / kPointerSize,
2695 // Iterate the normal page remembered set range.
2696 Page* page = Page::FromAddress(object->address());
2698 int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
2699 Min(page->ObjectAreaEnd(), object_end),
2700 page->RSetStart(),
2704 if (object_end > page->ObjectAreaEnd()) {
2705 count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
2751 Page* page = Page::FromAddress(address);
2753 SLOW_ASSERT(!page->IsLargeObjectPage()
2756 return page->IsLargeObjectPage();
2767 // Each chunk contains an object that starts at the large object page's
2770 Page* page = Page::FromAddress(object->address());
2771 ASSERT(object->address() == page->ObjectAreaStart());
2807 ASSERT(Page::IsRSetSet(object->address(),
2855 Page* page = Page::FromAddress(object->address());
2858 PrintF("large page 0x%x:\n", page);
2859 PrintRSetRange(page->RSetStart(), page->RSetEnd(),
2862 int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
2870 + Page::kObjectAreaSize),