
Lines Matching refs:Page

52 // spaces consists of a list of pages. A page has a page header and an object
56 // Page::kMaxHeapObjectSize, so that they do not have to move during
58 // may be larger than the page size.
65 // object maps so if the page belongs to old pointer space or large object
66 // space it is essential to guarantee that the page does not contain any
74 // To enable lazy cleaning of old space pages we can mark chunks of the page
76 // sections are skipped when scanning the page, even if we are otherwise
83 // Each page may have up to one special garbage section. The start of this
91 // Since the top and limit fields are in the space, not the page, only one page
98 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
107 ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
110 ASSERT((Page::kObjectStartOffset <= offset) \
111 && (offset <= Page::kPageSize))
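
The assertions above all reduce to the same low-bits arithmetic: an address's offset within its page is the address masked with Page::kPageAlignmentMask. A minimal sketch of that check, assuming 1MB pages; OffsetFrom and the constants below are stand-ins rather than V8's real declarations:

#include <cassert>
#include <cstdint>

const uintptr_t kPageSize = 1 << 20;             // assumed 1MB pages
const uintptr_t kPageAlignmentMask = kPageSize - 1;

// Stand-in for V8's pointer-to-integer helper of the same name.
uintptr_t OffsetFrom(void* address) {
  return reinterpret_cast<uintptr_t>(address);
}

bool IsPageAligned(void* address) {
  // Aligned iff the offset-within-page bits are all zero.
  return (OffsetFrom(address) & kPageAlignmentMask) == 0;
}

int main() {
  assert(IsPageAligned(reinterpret_cast<void*>(2 * kPageSize)));
  assert(!IsPageAligned(reinterpret_cast<void*>(2 * kPageSize + 8)));
  return 0;
}
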
369 // Checks whether addr can be a limit of addresses in this page.
370 // It's a limit if it's in the page, or if it's just after the
371 // last byte of the page.
497 // The start offset of the object area in a page. Aligned to both maps and
624 // Count of bytes marked black on page.
643 // A page is a memory chunk of size 1MB. Large object pages may be larger.
645 // The only way to get a page pointer is by calling factory methods:
646 // Page* p = Page::FromAddress(addr); or
647 // Page* p = Page::FromAllocationTop(top);
648 class Page : public MemoryChunk {
650 // Returns the page containing a given address. The address ranges
652 // This only works if the object is in fact in a page. See also MemoryChunk::
654 INLINE(static Page* FromAddress(Address a)) {
655 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
658 // Returns the page containing an allocation top. Because an allocation
659 // top address can be the upper bound of the page, we need to subtract
662 INLINE(static Page* FromAllocationTop(Address top)) {
663 Page* p = FromAddress(top - kPointerSize);
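
A minimal sketch of the masking arithmetic behind Page::FromAddress and Page::FromAllocationTop, assuming 1MB pages; the names and constants below are stand-ins, not V8's declarations. Masking clears the offset bits, and the allocation-top variant first steps back one pointer so a top sitting exactly at the page's upper bound still resolves to that page:

#include <cassert>
#include <cstdint>

const uintptr_t kPageSize = 1 << 20;           // assumed 1MB pages
const uintptr_t kPageAlignmentMask = kPageSize - 1;
const uintptr_t kPointerSize = sizeof(void*);

// Page containing an address: clear the offset-within-page bits.
uintptr_t PageFromAddress(uintptr_t a) {
  return a & ~kPageAlignmentMask;
}

// Page containing an allocation top, which may equal the page's end.
uintptr_t PageFromAllocationTop(uintptr_t top) {
  return PageFromAddress(top - kPointerSize);
}

int main() {
  uintptr_t page = 4 * kPageSize;
  // A top at the page's upper bound maps back to this page...
  assert(PageFromAllocationTop(page + kPageSize) == page);
  // ...whereas plain masking would name the next page.
  assert(PageFromAddress(page + kPageSize) == page + kPageSize);
  return 0;
}
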
667 // Returns the next page in the chain of pages owned by a space.
668 inline Page* next_page();
669 inline Page* prev_page();
670 inline void set_next_page(Page* page);
671 inline void set_prev_page(Page* page);
673 // Checks whether an address is page aligned.
678 // Returns the offset of a given address within this page.
684 // Returns the address for a given offset within this page.
692 // Page size in bytes. This must be a multiple of the OS page size.
698 // Maximum object size that fits in a page.
701 // Page size mask.
706 static inline Page* Initialize(Heap* heap,
731 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
744 inline void set_next_page(LargePage* page) {
745 set_next_chunk(page);
852 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
857 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
911 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
915 Page* page = Page::FromAddress(addr);
916 SkipList* list = page->skip_list();
919 page->set_skip_list(list);
928 static const int kSize = Page::kPageSize / kRegionSize;
930 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
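
The skip-list code above divides each page into fixed-size regions and indexes them by shifting the offset within the page. A sketch of that arithmetic under assumed sizes (1MB pages, 8KB regions; both values are hypothetical here):

#include <cassert>
#include <cstdint>

const uintptr_t kPageSize = 1 << 20;             // assumed 1MB pages
const uintptr_t kPageAlignmentMask = kPageSize - 1;
const int kRegionSizeLog2 = 13;                  // assumed 8KB regions
const uintptr_t kRegionSize = uintptr_t{1} << kRegionSizeLog2;
const int kSize = kPageSize / kRegionSize;       // skip list entries per page

static_assert(kPageSize % kRegionSize == 0, "regions must tile the page");

// Index of the region covering addr: offset within the page, divided by
// the region size.
int RegionNumber(uintptr_t addr) {
  return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
}

int main() {
  uintptr_t page = 7 * kPageSize;
  assert(RegionNumber(page) == 0);
  assert(RegionNumber(page + kRegionSize + 16) == 1);
  assert(RegionNumber(page + kPageSize - 1) == kSize - 1);
  return 0;
}
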
953 Page* AllocatePage(PagedSpace* owner, Executability executable);
978 return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
1075 // Initializes pages in a chunk. Returns the first page address.
1077 // collector to rebuild page headers in the from space, which is
1078 // used as a marking stack and its page headers are destroyed.
1079 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1106 // to its top or from the bottom of the given page to its top.
1108 // If objects are allocated in the page during iteration the iterator may
1118 HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1147 // Slow path of next(), goes into the next page. Returns false if the
1168 inline Page* next();
1172 Page* prev_page_; // Previous page returned.
1173 // Next page that will be returned. Cached here so that we can use this
1175 Page* next_page_;
1180 // A space has a circular list of pages. The next page can be accessed via
1181 // the Page::next_page() call.
1183 // An abstraction of allocation and relocation pointers in a page-structured
1195 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
1202 // An abstraction of the accounting statistics of a page-structured space.
1204 // including page bookkeeping structures) currently in the space. The 'size'
1208 // to internal fragmentation, top of page areas in map space), and the bytes
1392 void CountFreeListItems(Page* p, SizeStats* sizes);
1394 intptr_t EvictFreeListItems(Page* p);
1399 static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
1439 // addresses is not big enough to contain a single page-aligned page, a
1457 // iterates over objects in the page containing the address, the cost is
1458 // linear in the number of objects in the page. It may be slow.
1472 // The stats are rebuilt during sweeping by adding each page to the
1526 Page::FromAddress(top) == Page::FromAddress(limit - 1));
1539 // Releases an unused page and shrinks the space.
1540 void ReleasePage(Page* page);
1545 // The dummy page that anchors the linked list of pages.
1546 Page* anchor() { return &anchor_; }
1573 static bool ShouldBeSweptLazily(Page* p) {
1575 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1579 void SetPagesToSweep(Page* first) {
1589 void IncreaseUnsweptFreeBytes(Page* p) {
1594 void DecreaseUnsweptFreeBytes(Page* p) {
1605 Page* FirstPage() { return anchor_.next_page(); }
1606 Page* LastPage() { return anchor_.prev_page(); }
1608 void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
1619 // Returns the size of the allocatable area on a page in this space.
1633 // The dummy page that anchors the doubly linked list of pages.
1634 Page anchor_;
1642 // Bytes of each page that cannot be allocated. Possibly non-zero
1650 // The first page to be swept when the lazy sweeper advances. Is set
1652 Page* first_unswept_page_;
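
The unswept-free-bytes counters above keep a per-space running total of memory that lazy sweeping has not yet reclaimed: each page scheduled for lazy sweeping contributes its dead bytes. A minimal sketch of that bookkeeping, with hypothetical names; in V8 the inputs come from the page's area size and its marking live-bytes counter:

#include <cassert>
#include <cstdint>

// Hypothetical page record for this sketch.
struct PageInfo {
  intptr_t area_size;   // allocatable bytes on the page
  intptr_t live_bytes;  // bytes marked live on the page
};

struct SpaceAccounting {
  intptr_t unswept_free_bytes = 0;

  // Scheduling a page for lazy sweeping adds its dead bytes.
  void IncreaseUnsweptFreeBytes(const PageInfo& p) {
    unswept_free_bytes += p.area_size - p.live_bytes;
  }
  // Sweeping (or evacuating) the page takes the contribution back.
  void DecreaseUnsweptFreeBytes(const PageInfo& p) {
    unswept_free_bytes -= p.area_size - p.live_bytes;
  }
};

int main() {
  SpaceAccounting space;
  PageInfo p{1 << 20, 300 * 1024};
  space.IncreaseUnsweptFreeBytes(p);
  assert(space.unswept_free_bytes == (1 << 20) - 300 * 1024);
  space.DecreaseUnsweptFreeBytes(p);
  assert(space.unswept_free_bytes == 0);
  return 0;
}
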
1728 static const int kAreaSize = Page::kNonCodeObjectAreaSize;
1734 inline void set_next_page(NewSpacePage* page) {
1735 set_next_chunk(page);
1742 inline void set_prev_page(NewSpacePage* page) {
1743 set_prev_chunk(page);
1753 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
1758 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
1769 ~Page::kPageAlignmentMask);
1770 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
1771 return page;
1774 // Find the page for a limit address. A limit address is either an address
1775 // inside a page, or the address right after the last byte of a page.
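
FromLimit above applies the same masking trick as Page::FromAddress, with one wrinkle: a limit address may point one past the last byte of a page, so it is backed up by one byte before masking. A sketch with assumed constants:

#include <cassert>
#include <cstdint>

const uintptr_t kPageSize = 1 << 20;        // assumed page size
const uintptr_t kPageAlignmentMask = kPageSize - 1;

// Page containing an address known to lie inside the page.
uintptr_t PageFromAddress(uintptr_t a) {
  return a & ~kPageAlignmentMask;
}

// Page for a limit address: back up one byte so an end-of-page limit
// still resolves to the page it bounds.
uintptr_t PageFromLimit(uintptr_t limit) {
  return PageFromAddress(limit - 1);
}

int main() {
  uintptr_t page = 3 * kPageSize;
  assert(PageFromLimit(page + 64) == page);         // inside the page
  assert(PageFromLimit(page + kPageSize) == page);  // one past the end
  return 0;
}
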
1804 // A semispace is a contiguous chunk of memory holding page-like memory
1805 // chunks. The mark-compact collector uses the memory of the first page in
1839 // Returns the start address of the first page of the space.
1845 // Returns the start address of the current page of the space.
1855 // Returns one past the end address of the current page of the space.
1867 // Resets the space to using the first page.
1910 // The "from" address must be on a page prior to the "to" address,
1911 // in the linked page order, or it must be earlier on the same page.
1990 NewSpacePage* page = NewSpacePage::FromLimit(current_);
1991 page = page->next_page();
1992 ASSERT(!page->is_anchor());
1993 current_ = page->area_start();
2032 // Make iterator that iterates from the page containing start
2033 // to the page that contains limit in the same semispace.
2040 NewSpacePage* prev_page_; // Previous page returned.
2041 // Next page that will be returned. Cached here so that we can use this
2044 // Last page returned.
2113 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
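
The capacity expression above converts committed semispace bytes into usable bytes: count whole pages, then multiply by each page's allocatable area, which excludes the page header. A sketch with hypothetical sizes:

#include <cassert>
#include <cstdint>

const intptr_t kPageSize = 1 << 20;          // assumed 1MB pages
const intptr_t kAreaSize = kPageSize - 256;  // hypothetical per-page object area

// Usable capacity: whole pages times the allocatable area of each.
intptr_t UsableCapacity(intptr_t committed_bytes) {
  return (committed_bytes / kPageSize) * kAreaSize;
}

int main() {
  assert(UsableCapacity(4 * kPageSize) == 4 * kAreaSize);
  return 0;
}
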
2197 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2220 // Try to switch the active semispace to a new, empty page.
2222 // are no pages, or the current page is already empty), or true
2271 // Update allocation info to match the current to-space page.
2328 // The limit of allocation for a page in this space.
2329 virtual Address PageAllocationLimit(Page* page) {
2330 return page->area_end();
2359 page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
2362 // The limit of allocation for a page in this space.
2363 virtual Address PageAllocationLimit(Page* page) {
2364 return page->area_end() - page_extra_;
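
page_extra_ above is the tail of the page too small to hold one more fixed-size object; subtracting it from area_end() puts the allocation limit on an object boundary. A worked sketch with hypothetical sizes:

#include <cassert>

const int kPageSize = 1 << 20;
const int kObjectStartOffset = 256;  // hypothetical header size
const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;

// Unusable tail of a page in a space of fixed-size objects.
int PageExtra(int object_size_in_bytes) {
  return kNonCodeObjectAreaSize % object_size_in_bytes;
}

int main() {
  const int kObjectSize = 88;  // hypothetical fixed object size
  int usable = kNonCodeObjectAreaSize - PageExtra(kObjectSize);
  assert(usable % kObjectSize == 0);  // the limit lands on an object boundary
  return 0;
}
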
2397 // Given an index, returns the page address.
2415 static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
2417 // Do map space compaction if there is a page gap.
2458 // Large objects (> Page::kMaxHeapObjectSize) are allocated and managed by
2460 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2461 // A large object always starts at Page::kObjectStartOffset within a page.
2476 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2477 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
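
ObjectSizeFor above inverts the large-object chunk layout: a chunk must exceed one page plus the kObjectStartOffset header before any object bytes remain. A sketch of the same shape, constants assumed:

#include <cassert>
#include <cstdint>

const intptr_t kPageSize = 1 << 20;       // assumed
const intptr_t kObjectStartOffset = 256;  // hypothetical header size

// Usable object size in a chunk of the given total size.
intptr_t ObjectSizeFor(intptr_t chunk_size) {
  if (chunk_size <= kPageSize + kObjectStartOffset) return 0;
  return chunk_size - kPageSize - kObjectStartOffset;
}

int main() {
  assert(ObjectSizeFor(kPageSize) == 0);
  assert(ObjectSizeFor(2 * kPageSize) == kPageSize - kObjectStartOffset);
  return 0;
}
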
2505 // Finds a large object page containing the given address, returns NULL
2506 // if such a page doesn't exist.