Lines Matching defs:page
54 // spaces consist of a list of pages. A page has a page header and an object
58 // Page::kMaxHeapObjectSize, so that they do not have to move during
60 // may be larger than the page size.
67 // object maps, so if the page belongs to old pointer space or large object
68 // space it is essential to guarantee that the page does not contain any
76 // To enable lazy cleaning of old space pages we can mark chunks of the page
78 // sections are skipped when scanning the page, even if we are otherwise
85 // Each page may have up to one special garbage section. The start of this
93 // Since the top and limit fields are in the space, not the page, only one page
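The comment above notes that the allocation top and limit live in the space rather than in the page, so only the page currently being allocated into has an up-to-date allocation area. A minimal standalone sketch of that arrangement, using made-up MockPage/MockSpace types rather than V8's real classes:

    #include <cstdint>

    struct MockPage {
      uintptr_t area_start;
      uintptr_t area_end;
    };

    struct MockSpace {
      MockPage* current_page;  // the one page being bump-allocated into
      uintptr_t top;           // next free byte; meaningful only for current_page
      uintptr_t limit;         // end of current_page's allocatable area

      // Moving allocation to another page simply refreshes top/limit; pages
      // left behind keep no private copy of an allocation pointer.
      void AdvanceTo(MockPage* p) {
        current_page = p;
        top = p->area_start;
        limit = p->area_end;
      }
    };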
100 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
106 ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
109 ASSERT((Page::kObjectStartOffset <= offset) \
110 && (offset <= Page::kPageSize))
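The alignment assertion above relies on the page size being a power of two, so the low bits of an address form its offset within the page. A small self-contained sketch of that check; the 1 MB value matches the page size mentioned in the listing, the rest is illustrative:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageSize = uintptr_t{1} << 20;     // 1 MB pages
    const uintptr_t kPageAlignmentMask = kPageSize - 1;

    bool IsPageAligned(uintptr_t address) {
      // The low 20 bits are the offset inside a page; they are all zero only
      // at a page boundary.
      return (address & kPageAlignmentMask) == 0;
    }

    int main() {
      assert(IsPageAligned(3 * kPageSize));
      assert(!IsPageAligned(3 * kPageSize + 8));
    }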
371 // Checks whether addr can be a limit of addresses in this page.
372 // It's a limit if it's in the page, or if it's just after the
373 // last byte of the page.
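A compact sketch of the limit test described above, using a hypothetical free function rather than the real member function: a valid limit is any address inside the page, or exactly one past its last byte.

    #include <cstdint>

    bool CanBeLimit(uintptr_t page_start, uintptr_t page_size, uintptr_t addr) {
      return page_start <= addr && addr <= page_start + page_size;
    }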
403 // Large objects can have a progress bar in their page header. These objects
558 // The start offset of the object area in a page. Aligned to both maps and
693 // Count of bytes marked black on page.
701 // Assuming the initial allocation on a page is sequential,
702 // count highest number of bytes ever allocated on the page.
730 // A page is a memory chunk of size 1 MB. Large object pages may be larger.
732 // The only way to get a page pointer is by calling factory methods:
733 // Page* p = Page::FromAddress(addr); or
734 // Page* p = Page::FromAllocationTop(top);
735 class Page : public MemoryChunk {
737 // Returns the page containing a given address. The address ranges
739 // This only works if the object is in fact in a page. See also MemoryChunk::
741 INLINE(static Page* FromAddress(Address a)) {
742 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
745 // Returns the page containing an allocation top. Because an allocation
746 // top address can be the upper bound of the page, we need to subtract
749 INLINE(static Page* FromAllocationTop(Address top)) {
750 Page* p = FromAddress(top - kPointerSize);
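Both factory methods above recover the page from an address by masking off the low bits; FromAllocationTop first steps back one word because an allocation top may sit exactly at the page end. A standalone sketch with assumed constants, not the real Page class:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageSize = uintptr_t{1} << 20;      // assumed 1 MB page
    const uintptr_t kPageAlignmentMask = kPageSize - 1;
    const uintptr_t kPointerSize = sizeof(void*);

    uintptr_t PageFromAddress(uintptr_t a) {
      return a & ~kPageAlignmentMask;                    // clear the in-page offset
    }

    uintptr_t PageFromAllocationTop(uintptr_t top) {
      // top may be one past the last byte of a full page; step back one word
      // so the mask lands on the page that actually owns the allocation.
      return PageFromAddress(top - kPointerSize);
    }

    int main() {
      uintptr_t page = 7 * kPageSize;
      assert(PageFromAddress(page + 100) == page);
      assert(PageFromAllocationTop(page + kPageSize) == page);  // full page
    }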
754 // Returns the next page in the chain of pages owned by a space.
755 inline Page* next_page();
756 inline Page* prev_page();
757 inline void set_next_page(Page* page);
758 inline void set_prev_page(Page* page);
760 // Checks whether an address is page aligned.
765 // Returns the offset of a given address within this page.
771 // Returns the address for a given offset into this page.
779 // Page size in bytes. This must be a multiple of the OS page size.
785 // Maximum object size that fits in a page. Objects larger than that size are
791 // Maximum object size that fits in a page. Objects larger than that size
798 // Page size mask.
803 static inline Page* Initialize(Heap* heap,
843 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
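A sketch of how the size constants above typically relate to one another; the numbers are assumptions chosen for illustration, not the real values. The page size must be a power of two for the alignment mask to work, and the maximum non-code object size is what remains of a page after the header.

    #include <cstdint>

    const int      kPageSizeBits = 20;
    const intptr_t kPageSize = intptr_t{1} << kPageSizeBits;  // multiple of the OS page size
    const intptr_t kPageAlignmentMask = kPageSize - 1;        // the "page size mask"
    const intptr_t kObjectStartOffset = 256;                  // assumed header size
    const intptr_t kMaxNonCodeHeapObjectSize = kPageSize - kObjectStartOffset;

    static_assert((kPageSize & kPageAlignmentMask) == 0,
                  "page size must be a power of two");
    static_assert(kObjectStartOffset + kMaxNonCodeHeapObjectSize == kPageSize,
                  "header and object area together fill the page");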
856 inline void set_next_page(LargePage* page) {
857 set_next_chunk(page);
963 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
968 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
1022 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1026 Page* page = Page::FromAddress(addr);
1027 SkipList* list = page->skip_list();
1030 page->set_skip_list(list);
1039 static const int kSize = Page::kPageSize / kRegionSize;
1041 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
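The region-number expression above takes the offset inside the page (the low bits) and divides it by the region size, which the STATIC_ASSERT requires to divide the page size evenly. A runnable sketch with assumed region and page sizes:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageSize = uintptr_t{1} << 20;     // assumed 1 MB page
    const uintptr_t kPageAlignmentMask = kPageSize - 1;
    const int       kRegionSizeLog2 = 13;               // assumed 8 KB regions
    const uintptr_t kRegionSize = uintptr_t{1} << kRegionSizeLog2;
    const int       kRegions = static_cast<int>(kPageSize / kRegionSize);

    static_assert(kPageSize % kRegionSize == 0, "every byte maps to exactly one region");

    int RegionNumber(uintptr_t addr) {
      return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
    }

    int main() {
      uintptr_t page = 5 * kPageSize;
      assert(RegionNumber(page) == 0);                             // first byte
      assert(RegionNumber(page + kPageSize - 1) == kRegions - 1);  // last byte
    }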
1064 Page* AllocatePage(
1089 return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
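The return statement above counts only whole pages and assumes each page contributes at most the maximum non-code object size in usable bytes. A standalone restatement with assumed constants:

    #include <cassert>
    #include <cstdint>

    const intptr_t kPageSize = intptr_t{1} << 20;                // assumed 1 MB
    const intptr_t kMaxNonCodeHeapObjectSize = kPageSize - 256;  // assumed per-page overhead

    intptr_t MaxAvailableObjectBytes(intptr_t available_bytes) {
      // Partial pages are useless for allocation, so round down to whole pages
      // first, then charge each page its per-page overhead.
      return (available_bytes / kPageSize) * kMaxNonCodeHeapObjectSize;
    }

    int main() {
      // 2.5 pages of raw memory yields only two pages' worth of object space.
      assert(MaxAvailableObjectBytes(2 * kPageSize + kPageSize / 2) ==
             2 * kMaxNonCodeHeapObjectSize);
    }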
1209 // Initializes pages in a chunk. Returns the first page address.
1211 // collector to rebuild page headers in the from space, which is
1212 // used as a marking stack and its page headers are destroyed.
1213 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1245 // to its top or from the bottom of the given page to its top.
1247 // If objects are allocated in the page during iteration, the iterator may
1257 HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1286 // Slow path of next(), goes into the next page. Returns false if the
1307 inline Page* next();
1311 Page* prev_page_; // Previous page returned.
1312 // Next page that will be returned. Cached here so that we can use this
1314 Page* next_page_;
1319 // A space has a circular list of pages. The next page can be accessed via
1320 // Page::next_page() call.
1322 // An abstraction of allocation and relocation pointers in a page-structured
1363 return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
1376 // An abstraction of the accounting statistics of a page-structured space.
1378 // including page bookkeeping structures) currently in the space. The 'size'
1382 // to internal fragmentation, top of page areas in map space), and the bytes
1529 intptr_t EvictFreeListItemsInList(Page* p);
1623 intptr_t EvictFreeListItems(Page* p);
1633 static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
1668 // addresses is not big enough to contain a single page-aligned page, a
1686 // iterates over objects in the page containing the address, the cost is
1687 // linear in the number of objects in the page. It may be slow.
1721 void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
1725 // The stats are rebuilt during sweeping by adding each page to the
1794 Page::FromAddress(top) == Page::FromAddress(limit - 1));
1815 // Releases an unused page and shrinks the space.
1816 void ReleasePage(Page* page, bool unlink);
1818 // The dummy page that anchors the linked list of pages.
1819 Page* anchor() { return &anchor_; }
1848 static bool ShouldBeSweptLazily(Page* p) {
1850 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1854 void SetPagesToSweep(Page* first) {
1864 void IncreaseUnsweptFreeBytes(Page* p) {
1873 void DecreaseUnsweptFreeBytes(Page* p) {
1893 Page* FirstPage() { return anchor_.next_page(); }
1894 Page* LastPage() { return anchor_.prev_page(); }
1903 // Return size of allocatable area on a page in this space.
1921 // The dummy page that anchors the doubly linked list of pages.
1922 Page anchor_;
1932 // The first page to be swept when the lazy sweeper advances. Is set
1934 Page* first_unswept_page_;
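The anchor_ member above is a dummy page that makes the page list circular and doubly linked, so FirstPage() and LastPage() are just the anchor's neighbours and an empty space needs no special case. A minimal sketch of that sentinel pattern with made-up types:

    struct MockPage {
      MockPage* next;
      MockPage* prev;
      MockPage() : next(this), prev(this) {}   // an anchor starts linked to itself

      // Link *this into the list immediately after `other`.
      void InsertAfter(MockPage* other) {
        next = other->next;
        prev = other;
        other->next->prev = this;
        other->next = this;
      }
    };

    struct MockSpace {
      MockPage anchor_;                          // dummy page; owns no memory
      MockPage* FirstPage() { return anchor_.next; }
      MockPage* LastPage()  { return anchor_.prev; }
      bool IsEmpty()        { return anchor_.next == &anchor_; }
    };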
2011 static const int kAreaSize = Page::kNonCodeObjectAreaSize;
2017 inline void set_next_page(NewSpacePage* page) {
2018 set_next_chunk(page);
2025 inline void set_prev_page(NewSpacePage* page) {
2026 set_prev_chunk(page);
2036 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
2041 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2052 ~Page::kPageAlignmentMask);
2053 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2054 return page;
2057 // Find the page for a limit address. A limit address is either an address
2058 // inside a page, or the address right after the last byte of a page.
2087 // A semispace is a contiguous chunk of memory holding page-like memory
2088 // chunks. The mark-compact collector uses the memory of the first page in
2122 // Returns the start address of the first page of the space.
2128 // Returns the start address of the current page of the space.
2138 // Returns one past the end address of the current page of the space.
2150 // Resets the space to using the first page.
2191 // The "from" address must be on a page prior to the "to" address,
2192 // in the linked page order, or it must be earlier on the same page.
2282 NewSpacePage* page = NewSpacePage::FromLimit(current_);
2283 page = page->next_page();
2284 ASSERT(!page->is_anchor());
2285 current_ = page->area_start();
2324 // Make iterator that iterates from the page containing start
2325 // to the page that contains limit in the same semispace.
2332 NewSpacePage* prev_page_; // Previous page returned.
2333 // Next page that will be returned. Cached here so that we can use this
2336 // Last page returned.
2405 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2505 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2528 // Try to switch the active semispace to a new, empty page.
2530 // are no pages, or the current page is already empty), or true
2575 // Update allocation info to match the current to-space page.
2655 // Given an index, returns the page address.
2671 static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
2673 // Do map space compaction if there is a page gap.
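For the kMapsPerPage constant above, a back-of-the-envelope restatement with assumed sizes (the real Page::kNonCodeObjectAreaSize and Map::kSize may differ): the usable object area of one page divided by the fixed map size, rounding down.

    const int kAssumedObjectAreaSize = (1 << 20) - 256;   // hypothetical usable area per page
    const int kAssumedMapSize = 88;                        // hypothetical Map::kSize
    const int kMapsPerPage = kAssumedObjectAreaSize / kAssumedMapSize;  // 11912 with these numbers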
2739 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2741 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2742 // A large object always starts at Page::kObjectStartOffset within a page.
2757 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2758 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
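The two lines above compute how many object bytes a large-object chunk can hold: a chunk pays a fixed overhead of one page plus the object start offset, and anything at or below that overhead holds nothing. A runnable restatement with assumed constants:

    #include <cassert>
    #include <cstdint>

    const intptr_t kPageSize = intptr_t{1} << 20;   // assumed 1 MB
    const intptr_t kObjectStartOffset = 256;        // assumed header offset

    intptr_t ObjectSizeFor(intptr_t chunk_size) {
      if (chunk_size <= kPageSize + kObjectStartOffset) return 0;
      return chunk_size - kPageSize - kObjectStartOffset;
    }

    int main() {
      assert(ObjectSizeFor(kPageSize) == 0);  // too small to hold a large object
      assert(ObjectSizeFor(3 * kPageSize) == 2 * kPageSize - kObjectStartOffset);
    }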
2797 // Finds a large object page containing the given address, returns NULL
2798 // if such a page doesn't exist.