
Lines Matching refs:page

21 // You can't actually iterate over the anchor page. It is not a real page,
22 // just an anchor for the double linked page list. Initialize as if we have
23 // reached the end of the anchor page, then the first iteration will move on
24 // to the first page.
35 // You can't actually iterate over the anchor page. It is not a real page,
36 // just an anchor for the double linked page list. Initialize the current
38 // to the first page.
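
The comments at lines 21-24 and 35-38 describe the anchor page as a sentinel in the doubly linked page list: the iterator is initialized as if it had just finished the anchor, so its first advance lands on the first real page. A minimal sketch of that pattern, using hypothetical PageNode/PageIterator names rather than the V8 types:

#include <cassert>

// Hypothetical sentinel-anchored doubly linked page list (illustrative
// names, not the V8 API).
struct PageNode {
  PageNode* prev;
  PageNode* next;
  bool is_anchor;
};

class PageIterator {
 public:
  // Start "at the end of" the anchor page; the first call to next()
  // then moves on to the first real page.
  explicit PageIterator(PageNode* anchor) : anchor_(anchor), current_(anchor) {
    assert(anchor->is_anchor);
  }

  // Returns the next real page, or nullptr once the walk is back at the
  // anchor (the list is circular).
  PageNode* next() {
    current_ = current_->next;
    return current_ == anchor_ ? nullptr : current_;
  }

 private:
  PageNode* anchor_;
  PageNode* current_;
};
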
47 HeapObjectIterator::HeapObjectIterator(Page* page,
49 Space* owner = page->owner();
50 ASSERT(owner == page->heap()->old_pointer_space() ||
51 owner == page->heap()->old_data_space() ||
52 owner == page->heap()->map_space() ||
53 owner == page->heap()->cell_space() ||
54 owner == page->heap()->property_cell_space() ||
55 owner == page->heap()->code_space());
57 page->area_start(),
58 page->area_end(),
61 ASSERT(page->WasSweptPrecisely());
80 // We have hit the end of the page and should advance to the next block of
81 // objects. This happens at the end of the page.
85 Page* cur_page;
89 cur_page = Page::FromAddress(cur_addr_ - 1);
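
The cur_addr_ - 1 at line 89 is the usual trick when the cursor sits exactly on a page's area_end(): that address may already belong to the next page, so stepping back one byte keeps the lookup inside the page that was just finished. Page::FromAddress itself masks off the low bits of the address; a minimal sketch, assuming a power-of-two page size (the constants below are illustrative, not V8's):

#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 20;   // assumed page size
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

// Map an arbitrary address to the start of the page containing it by
// clearing the low alignment bits (what Page::FromAddress does).
inline uintptr_t PageStartFromAddress(uintptr_t addr) {
  return addr & ~kPageAlignmentMask;
}

// For a cursor sitting at area_end(), the page just finished is
// PageStartFromAddress(cur_addr - 1), not PageStartFromAddress(cur_addr).
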
214 if (aligned_requested >= (current.size - Page::kPageSize)) {
280 capacity_ = RoundUp(capacity, Page::kPageSize);
281 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
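
RoundUp at lines 280-281 aligns the configured capacities to whole pages. For a power-of-two alignment the usual formulation is the following sketch (helpers are illustrative; V8 defines its own RoundUp/RoundDown):

#include <cstdint>

// Align x down/up to a power-of-two boundary.
inline uintptr_t RoundDownTo(uintptr_t x, uintptr_t alignment) {
  return x & ~(alignment - 1);
}
inline uintptr_t RoundUpTo(uintptr_t x, uintptr_t alignment) {
  return RoundDownTo(x + alignment - 1, alignment);
}
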
416 void Page::InitializeAsAnchor(PagedSpace* owner) {
427 Address area_end = start + Page::kPageSize;
431 Page::kPageSize,
444 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
445 heap->incremental_marking()->SetNewSpacePageFlags(page);
446 return page;
454 // The flags mark this invalid page as not being in new-space.
606 // +----------------------------+<- aligned at OS page boundary
608 // +----------------------------+<- aligned at OS page boundary
619 // +----------------------------+<- aligned at OS page boundary
683 ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
686 area_start = base + Page::kObjectStartOffset;
714 void Page::ResetFreeListStatistics() {
723 Page* MemoryAllocator::AllocatePage(intptr_t size,
730 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
857 // We are guarding code pages: the first OS page after the header
859 return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
869 // We are guarding code pages: the first OS page after the header
876 // We are guarding code pages: the last OS page will be protected as
878 return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
886 // Commit page header (not executable).
893 // Create guard page after the header.
898 // Commit page body (executable).
905 // Create guard page before the end.
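
The guard-page lines above (857-905) describe the layout of an executable chunk: a non-executable header, a guard page right after it, the executable body, and one more guard page at the very end. A sketch of how those offsets relate, using stand-in constants rather than the real Page::kObjectStartOffset and OS::CommitPageSize():

#include <cstdint>

constexpr uintptr_t kCommitPageSize    = 4 * 1024;              // assumed OS page size
constexpr uintptr_t kPageSize          = uintptr_t{1} << 20;    // assumed V8 page size
constexpr uintptr_t kObjectStartOffset = 2 * 1024;              // assumed header size

constexpr uintptr_t RoundUpTo(uintptr_t x, uintptr_t a) {
  return (x + a - 1) & ~(a - 1);
}

// The first OS page after the header is the leading guard page.
constexpr uintptr_t kGuardStartOffset = RoundUpTo(kObjectStartOffset, kCommitPageSize);
// The executable body starts right after the leading guard page...
constexpr uintptr_t kAreaStartOffset = kGuardStartOffset + kCommitPageSize;
// ...and ends one OS page before the end of the chunk, because the last
// OS page is the trailing guard page.
constexpr uintptr_t kAreaEndOffset = kPageSize - kCommitPageSize;

static_assert(kAreaStartOffset < kAreaEndOffset, "executable body must be non-empty");
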
922 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
945 area_size_ = Page::kPageSize - Page::kObjectStartOffset;
947 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
997 Page* p = Page::FromAddress(addr);
1018 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
1033 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
1095 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
1096 sizes->huge_size_ = page->available_in_huge_free_list();
1097 sizes->small_size_ = page->available_in_small_free_list();
1098 sizes->medium_size_ = page->available_in_medium_free_list();
1099 sizes->large_size_ = page->available_in_large_free_list();
1106 Page* page = page_iterator.next();
1107 page->ResetFreeListStatistics();
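
ObtainFreeListStatistics and ResetFreeListStatistics above read and clear the per-page byte counters kept for the four free-list size classes (small, medium, large, huge). A minimal sketch of that bookkeeping struct (field names mirror the values read at lines 1096-1099, but the type itself is illustrative):

#include <cstdint>

// Per-page free-list byte counters, one per size class.
struct SizeStats {
  intptr_t small_size_  = 0;
  intptr_t medium_size_ = 0;
  intptr_t large_size_  = 0;
  intptr_t huge_size_   = 0;

  intptr_t Total() const {
    return small_size_ + medium_size_ + large_size_ + huge_size_;
  }
  void Reset() {
    small_size_ = medium_size_ = large_size_ = huge_size_ = 0;
  }
};
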
1117 void PagedSpace::ReleasePage(Page* page) {
1118 ASSERT(page->LiveBytes() == 0);
1119 ASSERT(AreaSize() == page->area_size());
1121 if (page->WasSwept()) {
1122 intptr_t size = free_list_.EvictFreeListItems(page);
1126 DecreaseUnsweptFreeBytes(page);
1129 if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
1131 page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
1134 ASSERT(!free_list_.ContainsPageFreeListItems(page));
1136 if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1141 page->Unlink();
1142 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1143 heap()->isolate()->memory_allocator()->Free(page);
1145 heap()->QueueMemoryChunkForFree(page);
1166 Page* page = page_iterator.next();
1167 CHECK(page->owner() == this);
1168 if (page == Page::FromAllocationTop(allocation_info_.top())) {
1171 CHECK(page->WasSweptPrecisely());
1172 HeapObjectIterator it(page, NULL);
1173 Address end_of_previous_object = page->area_start();
1174 Address top = page->area_end();
1201 CHECK_LE(black_size, page->LiveBytes());
1322 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1369 // Normal limit is the end of the current page.
1385 // The current page is already empty. Don't try to make another.
1388 // than what can be stored in a single page.
1394 // Failed to get a new page in to-space.
1398 // Clear remainder of current page.
1429 // Switched to new page. Try allocating again.
1481 // At end of page, switch to next page.
1482 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1483 // Next page should be valid.
1484 CHECK(!page->is_anchor());
1485 current = page->area_start();
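
The new-space comments at lines 1369-1485 describe bump-pointer allocation whose limit is the end of the current page: when a request does not fit, the remainder of the page is abandoned, the allocator switches to the next page, and the allocation is retried. A compact sketch of that loop with hypothetical types:

#include <cstddef>
#include <cstdint>

// Hypothetical bump-pointer allocator over a linked list of pages.
struct ToSpacePage {
  uintptr_t area_start;
  uintptr_t area_end;
  ToSpacePage* next;   // nullptr when to-space has no further page
};

struct AllocationInfo {
  uintptr_t top;    // next free byte on the current page
  uintptr_t limit;  // end of the current page
};

// Returns the address of the newly allocated bytes, or 0 on failure.
uintptr_t AllocateRaw(AllocationInfo* info, ToSpacePage** current,
                      size_t size_in_bytes) {
  while (true) {
    if (info->top + size_in_bytes <= info->limit) {
      uintptr_t result = info->top;   // fast path: fits on the current page
      info->top += size_in_bytes;
      return result;
    }
    // Does not fit: give up the remainder of this page and advance.
    ToSpacePage* next = (*current)->next;
    if (next == nullptr) return 0;    // failed to get a new page in to-space
    *current = next;
    info->top = next->area_start;
    info->limit = next->area_end;
    // Switched to a new page; try allocating again.
  }
}
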
1509 ASSERT(maximum_capacity >= Page::kPageSize);
1510 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1512 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1531 int pages = capacity_ / Page::kPageSize;
1541 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1582 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1585 int pages_before = capacity_ / Page::kPageSize;
1586 int pages_after = new_capacity / Page::kPageSize;
1599 Address page_address = start_ + i * Page::kPageSize;
1605 // Duplicate the flags that were set on the old page.
1615 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1627 int pages_after = new_capacity / Page::kPageSize;
1629 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1650 NewSpacePage* page = anchor_.next_page();
1651 while (page != &anchor_) {
1652 page->set_owner(this);
1653 page->SetFlags(flags, mask);
1655 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1656 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1657 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1658 page->ResetLiveBytes();
1660 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1661 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1663 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1664 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1665 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1666 page = page->next_page();
1687 // Fixup back-pointers to the page list anchor now that its address
1724 NewSpacePage* page = anchor_.next_page();
1726 while (page != &anchor_) {
1727 CHECK(page->semi_space() == this);
1728 CHECK(page->InNewSpace());
1729 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1731 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1733 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1737 if (page->heap()->incremental_marking()->IsMarking()) {
1738 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1740 CHECK(!page->IsFlagSet(
1744 // black marking on the page (if we make it match in new-space).
1746 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1747 CHECK(page->prev_page()->next_page() == page);
1748 page = page->next_page();
1756 NewSpacePage* page = NewSpacePage::FromLimit(start);
1758 SemiSpace* space = page->semi_space();
1760 // Start address is before end address, either on same page,
1761 // or end address is on a later page in the linked list of
1763 if (page == end_page) {
1766 while (page != end_page) {
1767 page = page->next_page();
1768 CHECK_NE(page, space->anchor());
2103 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2108 if (Page::FromAddress((*n)->address()) == p) {
2125 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2128 if (Page::FromAddress(node->address()) == p) return true;
2141 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2227 Page* page = Page::FromAddress(start);
2231 page->add_non_available_small_blocks(size_in_bytes);
2239 page->add_available_in_small_free_list(size_in_bytes);
2242 page->add_available_in_medium_free_list(size_in_bytes);
2245 page->add_available_in_large_free_list(size_in_bytes);
2248 page->add_available_in_huge_free_list(size_in_bytes);
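
FreeList::Free() above dispatches a freed block by size: blocks below the smallest useful size are only counted as wasted (non-available) bytes, everything else goes onto the small, medium, large, or huge list and bumps the matching per-page counter. A sketch of that classification with made-up thresholds (V8's real limits are defined elsewhere):

// Illustrative size-class thresholds, not V8's exact constants.
constexpr int kMinBlockSize  = 32;      // below this the block is wasted
constexpr int kSmallListMax  = 255;
constexpr int kMediumListMax = 2047;
constexpr int kLargeListMax  = 16383;

enum class FreeListClass { kWasted, kSmall, kMedium, kLarge, kHuge };

// Pick the category a freed block of the given size belongs to.
FreeListClass ClassifyFreedBlock(int size_in_bytes) {
  if (size_in_bytes < kMinBlockSize)   return FreeListClass::kWasted;
  if (size_in_bytes <= kSmallListMax)  return FreeListClass::kSmall;
  if (size_in_bytes <= kMediumListMax) return FreeListClass::kMedium;
  if (size_in_bytes <= kLargeListMax)  return FreeListClass::kLarge;
  return FreeListClass::kHuge;
}
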
2258 Page* page = NULL;
2264 page = Page::FromAddress(node->address());
2265 page->add_available_in_small_free_list(-(*node_size));
2275 page = Page::FromAddress(node->address());
2276 page->add_available_in_medium_free_list(-(*node_size));
2286 page = Page::FromAddress(node->address());
2287 page->add_available_in_large_free_list(-(*node_size));
2300 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2303 page = Page::FromAddress(cur_node->address());
2304 page->add_available_in_huge_free_list(-size);
2323 page = Page::FromAddress(node->address());
2324 page->add_available_in_huge_free_list(-size);
2344 page = Page::FromAddress(node->address());
2345 page->add_available_in_small_free_list(-(*node_size));
2351 page = Page::FromAddress(node->address());
2352 page->add_available_in_medium_free_list(-(*node_size));
2358 page = Page::FromAddress(node->address());
2359 page->add_available_in_large_free_list(-(*node_size));
2371 // or allocate a new page before retrying.
2406 // Verify that it did not turn the page of the new node into an evacuation
2447 intptr_t FreeList::EvictFreeListItems(Page* p) {
2464 bool FreeList::ContainsPageFreeListItems(Page* p) {
2567 if (Page::FromAllocationTop(allocation_info_.top())->
2569 // Create filler object to keep page iterable if it was iterable.
2609 // Free list allocation failed and there is no next page. Fail if we have
2620 // Try to expand the space and allocate in the new next page.
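
The slow-path comments at lines 2609-2620 give the order of fallbacks when a free-list allocation fails: reclaim memory (finish sweeping), then expand the space by a page, and only then give up. A hypothetical outline of that control flow, with stand-in callbacks rather than the real PagedSpace methods:

#include <cstddef>

// Stand-in operations for the paged-space slow path.
struct SpaceOps {
  void* (*AllocateFromFreeList)(size_t size);
  bool  (*SweepSomeMore)();    // true if sweeping may have freed memory
  bool  (*ExpandByOnePage)();  // true if a new page was added to the space
};

// Slow path: retry the free list after sweeping, then after expanding.
void* SlowAllocateRaw(const SpaceOps& ops, size_t size_in_bytes) {
  if (void* result = ops.AllocateFromFreeList(size_in_bytes)) return result;
  // Free-list allocation failed: try to reclaim memory by sweeping.
  while (ops.SweepSomeMore()) {
    if (void* result = ops.AllocateFromFreeList(size_in_bytes)) return result;
  }
  // Still nothing: try to expand the space and allocate in the new page.
  if (ops.ExpandByOnePage()) return ops.AllocateFromFreeList(size_in_bytes);
  return nullptr;  // this space is out of memory
}
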
2859 LargePage* page = first_page_;
2861 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2865 space, kAllocationActionFree, page->size());
2866 heap()->isolate()->memory_allocator()->Free(page);
2885 LargePage* page = heap()->isolate()->memory_allocator()->
2887 if (page == NULL) return AllocationResult::Retry(identity());
2888 ASSERT(page->area_size() >= object_size);
2890 size_ += static_cast<int>(page->size());
2893 page->set_next_page(first_page_);
2894 first_page_ = page;
2901 // this large page in the chunk map.
2902 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2903 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2909 entry->value = page;
2912 HeapObject* object = page->GetObject();
2941 LargePage* page = FindPage(a);
2942 if (page != NULL) {
2943 return page->GetObject();
2956 LargePage* page = reinterpret_cast<LargePage*>(e->value);
2957 ASSERT(page->is_valid());
2958 if (page->Contains(a)) {
2959 return page;
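
Lines 2901-2959 show the chunk map for large object space: a large page is registered under every alignment-sized key its memory covers, so an arbitrary interior address can be mapped back to its LargePage with a single hash lookup. A sketch of that scheme using std::unordered_map in place of V8's HashMap (all names are illustrative):

#include <cstddef>
#include <cstdint>
#include <unordered_map>

constexpr uintptr_t kChunkAlignment = uintptr_t{1} << 20;  // assumed MemoryChunk::kAlignment

struct LargePageStub {
  uintptr_t address;  // start of the page, aligned to kChunkAlignment
  size_t size;        // total size of the page in bytes
};

using ChunkMap = std::unordered_map<uintptr_t, LargePageStub*>;

// Register the page under every aligned slot it covers (cf. lines 2902-2909).
void RegisterLargePage(ChunkMap* map, LargePageStub* page) {
  uintptr_t base = page->address / kChunkAlignment;
  uintptr_t limit = base + (page->size - 1) / kChunkAlignment;
  for (uintptr_t key = base; key <= limit; ++key) (*map)[key] = page;
}

// Map an interior address back to its large page, or nullptr (cf. FindPage).
LargePageStub* FindLargePage(const ChunkMap& map, uintptr_t addr) {
  auto it = map.find(addr / kChunkAlignment);
  return it == map.end() ? nullptr : it->second;
}
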
2971 // Can this large page contain pointers to non-trivial objects. No other
2977 Page::FromAddress(object->address())->ResetProgressBar();
2978 Page::FromAddress(object->address())->ResetLiveBytes();
2982 LargePage* page = current;
2994 size_ -= static_cast<int>(page->size());
2998 // Remove entries belonging to this page.
3002 uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
3003 uintptr_t limit = base + (page->size()-1)/alignment;
3010 heap()->QueueMemoryChunkForFree(page);
3012 heap()->isolate()->memory_allocator()->Free(page);
3039 // Each chunk contains an object that starts at the large object page's
3042 Page* page = Page::FromAddress(object->address());
3043 CHECK(object->address() == page->area_start());
3120 void Page::Print() {
3121 // Make a best-effort to print the objects in the page.
3122 PrintF("Page@%p in %s\n",