
Lines Matching refs:Page

43 // You can't actually iterate over the anchor page. It is not a real page,
44 // just an anchor for the doubly linked page list. Initialize as if we have
45 // reached the end of the anchor page, then the first iteration will move on
46 // to the first page.
57 // You can't actually iterate over the anchor page. It is not a real page,
58 // just an anchor for the doubly linked page list. Initialize the current
60 // to the first page.
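
Both constructors above lean on the same sentinel idiom: the anchor is a node in a circular doubly linked list, and the iterator starts positioned "at the end of" the anchor so its first advance lands on the first real page. A minimal self-contained sketch (PageNode and PageListIterator are illustrative stand-ins, not V8's real classes):

    struct PageNode {
      PageNode* next;
      PageNode* prev;
      PageNode() : next(this), prev(this) {}  // empty list: anchor links to itself
      void InsertAfter(PageNode* other) {
        next = other->next;
        prev = other;
        other->next->prev = this;
        other->next = this;
      }
    };

    struct PageListIterator {
      explicit PageListIterator(PageNode* anchor)
          : anchor_(anchor), current_(anchor) {}  // "end of the anchor page"
      PageNode* Next() {
        current_ = current_->next;                // first call moves to page 1
        return current_ == anchor_ ? nullptr : current_;
      }
      PageNode* anchor_;
      PageNode* current_;
    };
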
69 HeapObjectIterator::HeapObjectIterator(Page* page,
71 Space* owner = page->owner();
78 page->area_start(),
79 page->area_end(),
82 ASSERT(page->WasSweptPrecisely());
101 // We have hit the end of the page and should advance to the next block of
102 // objects. This happens at the end of the page.
106 Page* cur_page;
110 cur_page = Page::FromAddress(cur_addr_ - 1);
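
The `cur_addr_ - 1` above works because pages are power-of-two sized and aligned, so the owning page is recoverable from any interior address by masking off the low bits; `cur_addr_` is an end pointer (one past the last object), which may coincide with the start of the next page, hence the decrement. A sketch of the masking trick with an illustrative page size:

    #include <cstdint>

    // Illustrative constant; V8's Page::kPageSize is likewise a power of two.
    const uintptr_t kPageSize = uintptr_t{1} << 20;

    uintptr_t PageBase(uintptr_t addr) {
      return addr & ~(kPageSize - 1);  // clear low bits -> start of the page
    }
    // Masking cur_addr_ - 1 therefore yields the page that the just-finished
    // block of objects actually belongs to, not the page after it.
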
222 if (aligned_requested >= (current.size - Page::kPageSize)) {
274 capacity_ = RoundUp(capacity, Page::kPageSize);
275 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
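
The RoundUp calls above are the usual power-of-two alignment helpers. Minimal versions, valid whenever the alignment is a power of two (as Page::kPageSize is):

    #include <cstddef>

    inline size_t RoundDown(size_t x, size_t alignment) {
      return x & ~(alignment - 1);           // alignment must be a power of two
    }

    inline size_t RoundUp(size_t x, size_t alignment) {
      return RoundDown(x + alignment - 1, alignment);
    }
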
386 void Page::InitializeAsAnchor(PagedSpace* owner) {
397 Address area_end = start + Page::kPageSize;
401 Page::kPageSize,
414 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
415 heap->incremental_marking()->SetNewSpacePageFlags(page);
416 return page;
424 // Flags mark this invalid page as not being in new-space.
550 area_start = base + Page::kObjectStartOffset;
575 Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
583 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
705 // We are guarding code pages: the first OS page after the header
707 return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
717 // We are guarding code pages: the first OS page after the header
724 // We are guarding code pages: the last OS page will be protected as
726 return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
733 // Commit page header (not executable).
740 // Create guard page after the header.
745 // Commit page body (executable).
753 // Create guard page after the allocatable area.
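
Taken together, lines 705-753 describe one layout for an executable page: a non-executable header in the first OS page, a guard page right after it, the committed executable body, and a final guard page at the end. A POSIX sketch of that protection sequence (V8 goes through its own OS abstraction; this assumes the header fits in one OS page, and return values are ignored for brevity):

    #include <sys/mman.h>
    #include <cstddef>

    // [header][guard][executable body][guard]
    void ProtectCodePage(char* base, size_t os_page, size_t page_size) {
      mprotect(base, os_page, PROT_READ | PROT_WRITE);           // header, not executable
      mprotect(base + os_page, os_page, PROT_NONE);              // guard after header
      mprotect(base + 2 * os_page, page_size - 3 * os_page,
               PROT_READ | PROT_WRITE | PROT_EXEC);              // committed body
      mprotect(base + page_size - os_page, os_page, PROT_NONE);  // trailing guard
    }
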
767 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
783 first_unswept_page_(Page::FromAddress(NULL)),
789 area_size_ = Page::kPageSize - Page::kObjectStartOffset;
791 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
829 Page* p = Page::FromAddress(addr);
850 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
858 Page* p = heap()->isolate()->memory_allocator()->
881 void PagedSpace::ReleasePage(Page* page) {
882 ASSERT(page->LiveBytes() == 0);
883 ASSERT(AreaSize() == page->area_size());
885 // Adjust list of unswept pages if the page is the head of the list.
886 if (first_unswept_page_ == page) {
887 first_unswept_page_ = page->next_page();
889 first_unswept_page_ = Page::FromAddress(NULL);
893 if (page->WasSwept()) {
894 intptr_t size = free_list_.EvictFreeListItems(page);
898 DecreaseUnsweptFreeBytes(page);
901 if (Page::FromAllocationTop(allocation_info_.top) == page) {
905 page->Unlink();
906 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
907 heap()->isolate()->memory_allocator()->Free(page);
909 heap()->QueueMemoryChunkForFree(page);
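
The ReleasePage fragments at 881-909 follow a fixed order: verify the page holds no live objects, detach it from the lazy sweeper's work list, purge its free-list entries (or its unswept-bytes accounting), reset the bump pointer if it lives on this page, and only then unlink and free. A toy, self-contained rendering of that order (PageStub and SpaceStub are illustrative, not V8's classes):

    #include <cassert>

    struct PageStub {
      PageStub* next = nullptr;
      int live_bytes = 0;
      bool swept = false;
    };

    struct SpaceStub {
      PageStub* first_unswept = nullptr;

      void ReleasePage(PageStub* page) {
        assert(page->live_bytes == 0);       // only empty pages may be released
        if (first_unswept == page) {         // keep the sweeper's cursor valid
          first_unswept = page->next;
        }
        if (page->swept) {
          // Swept page: evict its free-list nodes so the allocator can never
          // hand out memory inside a released page.
        } else {
          // Unswept page: only the unswept-free-bytes counter needs fixing.
        }
        // Last: unlink from the page list and free (or queue) the memory.
      }
    };
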
921 Page* page = it.next();
922 if (!page->WasSwept()) {
923 if (page->LiveBytes() == 0) ReleasePage(page);
925 HeapObject* obj = HeapObject::FromAddress(page->area_start());
934 // Instead we should verify that the page is fully covered
937 free_list_.CountFreeListItems(page, &sizes);
939 ReleasePage(page);
962 Page* page = page_iterator.next();
963 ASSERT(page->owner() == this);
964 if (page == Page::FromAllocationTop(allocation_info_.top)) {
967 ASSERT(page->WasSweptPrecisely());
968 HeapObjectIterator it(page, NULL);
969 Address end_of_previous_object = page->area_start();
970 Address top = page->area_end();
997 ASSERT_LE(black_size, page->LiveBytes());
1118 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1168 // The current page is already empty. Don't try to make another.
1171 // than what can be stored in a single page.
1177 // Failed to get a new page in to-space.
1181 // Clear remainder of current page.
1213 // Switched to new page. Try allocating again.
1265 // At end of page, switch to next page.
1266 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1267 // Next page should be valid.
1268 CHECK(!page->is_anchor());
1269 current = page->area_start();
1293 ASSERT(maximum_capacity >= Page::kPageSize);
1294 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1296 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1314 int pages = capacity_ / Page::kPageSize;
1316 Address start = end - pages * Page::kPageSize;
1323 NewSpacePage* page = anchor();
1326 NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
1327 new_page->InsertAfter(page);
1328 page = new_page;
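
The setup at 1314-1328 is pure address arithmetic: the semispace ends at `end`, spans `pages * Page::kPageSize` bytes, and the i-th page back from the end starts at `end - i * Page::kPageSize`. A small sketch of that computation (page_size stands in for Page::kPageSize):

    #include <cstdint>
    #include <cstddef>

    size_t PageCount(size_t capacity, size_t page_size) {
      return capacity / page_size;  // capacity is already page-aligned
    }

    uintptr_t NthPageStart(uintptr_t end, size_t i, size_t page_size) {
      return end - i * page_size;   // i = PageCount(...) yields the space's start
    }
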
1355 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1358 int pages_before = capacity_ / Page::kPageSize;
1359 int pages_after = new_capacity / Page::kPageSize;
1374 Address page_address = end - i * Page::kPageSize;
1380 // Duplicate the flags that were set on the old page.
1390 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1407 int pages_after = new_capacity / Page::kPageSize;
1409 NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
1430 NewSpacePage* page = anchor_.next_page();
1431 while (page != &anchor_) {
1432 page->set_owner(this);
1433 page->SetFlags(flags, mask);
1435 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1436 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1437 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1438 page->ResetLiveBytes();
1440 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1441 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1443 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1444 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1445 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1446 page = page->next_page();
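
The loop at 1430-1446 retags every page hanging off the anchor when a semispace changes role between from-space and to-space. A toy version with illustrative flag bits (not MemoryChunk's real bit assignments):

    enum : unsigned { kInFromSpace = 1u << 0, kInToSpace = 1u << 1 };

    struct FlipPage {
      FlipPage* next;
      unsigned flags;
    };

    void FlipSemiSpace(FlipPage* anchor, bool becomes_to_space) {
      for (FlipPage* p = anchor->next; p != anchor; p = p->next) {
        if (becomes_to_space) {
          p->flags = (p->flags & ~kInFromSpace) | kInToSpace;
        } else {
          p->flags = (p->flags & ~kInToSpace) | kInFromSpace;
        }
      }
    }
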
1467 // Fix up back-pointers to the page list anchor now that its address
1495 NewSpacePage* page = anchor_.next_page();
1497 while (page != &anchor_) {
1498 CHECK(page->semi_space() == this);
1499 CHECK(page->InNewSpace());
1500 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1502 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1504 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1508 if (page->heap()->incremental_marking()->IsMarking()) {
1509 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1511 CHECK(!page->IsFlagSet(
1515 // black marking on the page (if we make it match in new-space).
1517 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1518 CHECK(page->prev_page()->next_page() == page);
1519 page = page->next_page();
1526 NewSpacePage* page = NewSpacePage::FromLimit(start);
1528 SemiSpace* space = page->semi_space();
1530 // Start address is before end address, either on the same page,
1531 // or end address is on a later page in the linked list of
1533 if (page == end_page) {
1536 while (page != end_page) {
1537 page = page->next_page();
1538 CHECK_NE(page, space->anchor());
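
The range check at 1526-1538 walks next-page links from the page containing `start`; reaching the anchor before `end_page` would mean the range is malformed. Toy version mirroring that walk:

    #include <cassert>

    struct RangePage {
      RangePage* next;
    };

    void AssertValidRange(RangePage* page, RangePage* end_page,
                          RangePage* anchor) {
      while (page != end_page) {
        page = page->next;
        assert(page != anchor);  // wrapped past the last page: invalid range
      }
    }
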
1904 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1943 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1970 // or allocate a new page before retrying.
2004 // Verify that it did not turn the page of the new node into an evacuation
2040 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
2043 if (Page::FromAddress(n->address()) == p) {
2053 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
2067 static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
2070 if (Page::FromAddress((*n)->address()) == p) {
2082 intptr_t FreeList::EvictFreeListItems(Page* p) {
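
The two helpers at 2040-2082 share one pattern: walk a singly linked free list and act on every node whose address falls on page p. The eviction variant, sketched with a predicate standing in for Page::FromAddress(node) == p and the same pointer-to-pointer unlinking the real signature at 2067 suggests:

    #include <cstdint>

    struct FreeNode {
      FreeNode* next;
      intptr_t size;
    };

    intptr_t EvictNodesOnPage(FreeNode** n, bool (*on_page)(const FreeNode*)) {
      intptr_t evicted = 0;
      while (*n != nullptr) {
        if (on_page(*n)) {
          evicted += (*n)->size;
          *n = (*n)->next;      // unlink in place via the pointer-to-pointer
        } else {
          n = &(*n)->next;
        }
      }
      return evicted;
    }
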
2152 // the end of new space either because there is more space on the next page
2167 // the limit and top have moved on to a new page. In that case we try again.
2183 Page* p = first_unswept_page_;
2198 first_unswept_page_ = Page::FromAddress(NULL);
2242 Page* p = first_unswept_page_;
2244 Page* next_page = p->next_page();
2257 first_unswept_page_ = Page::FromAddress(NULL);
2273 if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
2274 // Create filler object to keep page iterable if it was iterable.
2288 // If there are unswept pages advance lazy sweeper then sweep one page before
2289 // allocating a new page.
2298 // Free list allocation failed and there is no next page. Fail if we have
2306 // Try to expand the space and allocate in the new next page.
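
Fragments 2288-2306 outline the slow allocation path: lazily sweep one more page and retry the free list, then try to grow the space, and only then fail so the caller can trigger a GC. Condensed control flow (helper names are paraphrases of the real calls, declared here only so the sketch is self-contained):

    #include <cstddef>

    // Hypothetical helpers standing in for the real sweeping/expansion calls.
    bool HasUnsweptPages();
    void SweepOnePage();
    void* TryFreeList(size_t size_in_bytes);
    bool CanExpand();
    void* AllocateInNewPage(size_t size_in_bytes);

    void* SlowAllocate(size_t size_in_bytes) {
      if (HasUnsweptPages()) {
        SweepOnePage();                // may return enough memory to the free list
        if (void* r = TryFreeList(size_in_bytes)) return r;
      }
      if (CanExpand()) {               // map a fresh page and allocate there
        if (void* r = AllocateInNewPage(size_in_bytes)) return r;
      }
      return nullptr;                  // caller retries after a GC
    }
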
2477 // allocation pointer except wasted top-of-page blocks is considered
2566 LargePage* page = first_page_;
2568 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2572 space, kAllocationActionFree, page->size());
2573 heap()->isolate()->memory_allocator()->Free(page);
2592 LargePage* page = heap()->isolate()->memory_allocator()->
2594 if (page == NULL) return Failure::RetryAfterGC(identity());
2595 ASSERT(page->area_size() >= object_size);
2597 size_ += static_cast<int>(page->size());
2600 page->set_next_page(first_page_);
2601 first_page_ = page;
2604 // this large page in the chunk map.
2605 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2606 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2612 entry->value = page;
2615 HeapObject* object = page->GetObject();
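
Lines 2604-2612 register every MemoryChunk::kAlignment-sized slot covered by the large page in a hash map, so any interior address can later be resolved to its page with one lookup. An illustrative version using std::unordered_map in place of V8's HashMap:

    #include <cstdint>
    #include <cstddef>
    #include <unordered_map>

    using ChunkMap = std::unordered_map<uintptr_t, void*>;

    void RegisterLargePage(ChunkMap* map, uintptr_t address, size_t size,
                           void* page, size_t alignment) {
      uintptr_t base = address / alignment;
      uintptr_t limit = base + (size - 1) / alignment;
      for (uintptr_t key = base; key <= limit; ++key) {
        (*map)[key] = page;   // every covered slot points at the owning page
      }
    }
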
2631 LargePage* page = FindPage(a);
2632 if (page != NULL) {
2633 return page->GetObject();
2646 LargePage* page = reinterpret_cast<LargePage*>(e->value);
2647 ASSERT(page->is_valid());
2648 if (page->Contains(a)) {
2649 return page;
2661 // Can this large page contain pointers to non-trivial objects? No other
2671 LargePage* page = current;
2683 size_ -= static_cast<int>(page->size());
2687 // Remove entries belonging to this page.
2691 uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
2692 uintptr_t limit = base + (page->size() - 1) / alignment;
2699 heap()->QueueMemoryChunkForFree(page);
2701 heap()->isolate()->memory_allocator()->Free(page);
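
Tear-down at 2687-2699 mirrors registration: recompute the same base/limit key range and erase each slot before the memory is queued or freed. Continuing the registration sketch above (reuses its ChunkMap alias):

    void UnregisterLargePage(ChunkMap* map, uintptr_t address, size_t size,
                             size_t alignment) {
      uintptr_t base = address / alignment;
      uintptr_t limit = base + (size - 1) / alignment;
      for (uintptr_t key = base; key <= limit; ++key) {
        map->erase(key);
      }
    }
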
2728 // Each chunk contains an object that starts at the large object page's
2731 Page* page = Page::FromAddress(object->address());
2732 ASSERT(object->address() == page->area_start());
2807 void Page::Print() {
2808 // Make a best effort to print the objects in the page.
2809 PrintF("Page@%p in %s\n",