
Lines Matching defs:Page

53 // spaces consists of a list of pages. A page has a page header and an object
57 // Page::kMaxHeapObjectSize, so that they do not have to move during
59 // may be larger than the page size.
66 // object maps so if the page belongs to old pointer space or large object
67 // space it is essential to guarantee that the page does not contain any
75 // To enable lazy cleaning of old space pages we can mark chunks of the page
77 // sections are skipped when scanning the page, even if we are otherwise
84 // Each page may have up to one special garbage section. The start of this
92 // Since the top and limit fields are in the space, not the page, only one page
99 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
105 ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
108 ASSERT((Page::kObjectStartOffset <= offset) \
109 && (offset <= Page::kPageSize))
370 // Checks whether addr can be a limit of addresses in this page.
371 // It's a limit if it's in the page, or if it's just after the
372 // last byte of the page.
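
The notion of a "limit" above can be illustrated with a small standalone sketch; the function name and the explicit start/size parameters are illustrative, not taken from this header:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Sketch only: an address is a valid "limit" for a page if it lies inside the
// page or exactly one past the page's last byte.
static bool IsValidLimit(uintptr_t page_start, size_t page_size, uintptr_t addr) {
  return addr >= page_start && addr <= page_start + page_size;
}

int main() {
  const uintptr_t start = 0x100000;   // assumed page start
  const size_t size = 0x100000;       // assumed 1 MB page size
  assert(IsValidLimit(start, size, start));              // first byte
  assert(IsValidLimit(start, size, start + size));       // one past the last byte
  assert(!IsValidLimit(start, size, start + size + 1));  // outside the page
}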
402 // Large objects can have a progress bar in their page header. These object
557 // The start offset of the object area in a page. Aligned to both maps and
692 // Count of bytes marked black on page.
700 // Assuming the initial allocation on a page is sequential,
701 // count the highest number of bytes ever allocated on the page.
729 // A page is a memory chunk of size 1 MB. Large object pages may be larger.
731 // The only way to get a page pointer is by calling factory methods:
732 // Page* p = Page::FromAddress(addr); or
733 // Page* p = Page::FromAllocationTop(top);
734 class Page : public MemoryChunk {
736 // Returns the page containing a given address. The address ranges
738 // This only works if the object is in fact in a page. See also MemoryChunk::
740 INLINE(static Page* FromAddress(Address a)) {
741 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
744 // Returns the page containing an allocation top. Because an allocation
745 // top address can be the upper bound of the page, we need to subtract
748 INLINE(static Page* FromAllocationTop(Address top)) {
749 Page* p = FromAddress(top - kPointerSize);
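
Page::FromAddress recovers the page by masking off the low bits of an interior address; FromAllocationTop additionally steps back one pointer so that a top sitting exactly at the page end still maps to that page. A minimal sketch of the arithmetic, assuming the 1 MB page size mentioned above (all constants here are illustrative):

#include <cassert>
#include <cstdint>

const uintptr_t kPageSize = 1 << 20;                // assumed 1 MB pages
const uintptr_t kPageAlignmentMask = kPageSize - 1;
const uintptr_t kPointerSize = sizeof(void*);

// Masking off the low bits of any interior address yields the page start.
uintptr_t PageFromAddress(uintptr_t a) { return a & ~kPageAlignmentMask; }

// An allocation top may equal the page end (one past the last byte), which
// would mask to the next page; stepping back one pointer keeps it on this one.
uintptr_t PageFromAllocationTop(uintptr_t top) {
  return PageFromAddress(top - kPointerSize);
}

int main() {
  uintptr_t page = 5 * kPageSize;                          // some page start
  assert(PageFromAddress(page + 1234) == page);            // interior address
  assert(PageFromAllocationTop(page + kPageSize) == page); // top == page end
}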
753 // Returns the next page in the chain of pages owned by a space.
754 inline Page* next_page();
755 inline Page* prev_page();
756 inline void set_next_page(Page* page);
757 inline void set_prev_page(Page* page);
759 // Checks whether an address is page aligned.
764 // Returns the offset of a given address within this page.
770 // Returns the address for a given offset within this page.
778 // Page size in bytes. This must be a multiple of the OS page size.
784 // Maximum object size that fits in a page. Objects larger than that size
791 // Page size mask.
796 static inline Page* Initialize(Heap* heap,
836 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
849 inline void set_next_page(LargePage* page) {
850 set_next_chunk(page);
956 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
961 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
1015 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1019 Page* page = Page::FromAddress(addr);
1020 SkipList* list = page->skip_list();
1023 page->set_skip_list(list);
1032 static const int kSize = Page::kPageSize / kRegionSize;
1034 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
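
The skip list divides each page into fixed-size regions, and the region index of an address is simply its offset within the page shifted right by the region-size log. A sketch of that computation; the region size chosen here is an assumption, only the 1 MB page size comes from the comment above:

#include <cassert>
#include <cstdint>

const uintptr_t kPageSize = 1 << 20;                 // assumed 1 MB pages
const uintptr_t kPageAlignmentMask = kPageSize - 1;
const int kRegionSizeLog2 = 13;                      // illustrative: 8 KB regions
const uintptr_t kRegionSize = uintptr_t{1} << kRegionSizeLog2;
const int kRegionsPerPage = static_cast<int>(kPageSize / kRegionSize);

// Region index = offset of the address within its page, divided by region size.
int RegionNumber(uintptr_t addr) {
  return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
}

int main() {
  static_assert(kPageSize % kRegionSize == 0, "regions must tile the page exactly");
  uintptr_t page = 7 * kPageSize;
  assert(RegionNumber(page) == 0);
  assert(RegionNumber(page + kRegionSize) == 1);
  assert(RegionNumber(page + kPageSize - 1) == kRegionsPerPage - 1);
}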
1057 Page* AllocatePage(
1082 return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
1185 // Initializes pages in a chunk. Returns the first page address.
1187 // collector to rebuild page headers in the from space, which is
1188 // used as a marking stack and its page headers are destroyed.
1189 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1216 // to its top or from the bottom of the given page to its top.
1218 // If objects are allocated in the page during iteration the iterator may
1228 HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1257 // Slow path of next(); goes into the next page. Returns false if the
1278 inline Page* next();
1282 Page* prev_page_; // Previous page returned.
1283 // Next page that will be returned. Cached here so that we can use this
1285 Page* next_page_;
1290 // A space has a circular list of pages. The next page can be accessed via
1291 // a Page::next_page() call.
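
The page chain is a circular doubly-linked list threaded through a dummy anchor page (see the anchor()/FirstPage()/LastPage() accessors further down). A minimal sketch of that shape; the struct and member names are illustrative, not the header's:

#include <cassert>

// Intrusive circular doubly-linked list with a dummy anchor node, the shape
// used for a space's list of pages. An empty list is the anchor pointing at itself.
struct PageNode {
  PageNode* next;
  PageNode* prev;
  PageNode() : next(this), prev(this) {}
  void InsertBefore(PageNode* other) {   // link *this immediately before `other`
    prev = other->prev;
    next = other;
    other->prev->next = this;
    other->prev = this;
  }
};

int main() {
  PageNode anchor;                  // the dummy page that anchors the list
  PageNode a, b;
  a.InsertBefore(&anchor);          // append a
  b.InsertBefore(&anchor);          // append b
  assert(anchor.next == &a);        // first page
  assert(anchor.prev == &b);        // last page
  assert(a.next == &b && b.prev == &a);
}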
1293 // An abstraction of allocation and relocation pointers in a page-structured
1305 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
1312 // An abstraction of the accounting statistics of a page-structured space.
1314 // including page bookkeeping structures) currently in the space. The 'size'
1318 // to internal fragmentation, top of page areas in map space), and the bytes
1464 intptr_t EvictFreeListItemsInList(Page* p);
1558 intptr_t EvictFreeListItems(Page* p);
1568 static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
1603 // addresses is not big enough to contain a single page-aligned page, a
1621 // iterates over objects in the page containing the address, the cost is
1622 // linear in the number of objects in the page. It may be slow.
1653 void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
1657 // The stats are rebuilt during sweeping by adding each page to the
1722 Page::FromAddress(top) == Page::FromAddress(limit - 1));
1736 // Releases an unused page and shrinks the space.
1737 void ReleasePage(Page* page, bool unlink);
1739 // The dummy page that anchors the linked list of pages.
1740 Page* anchor() { return &anchor_; }
1769 static bool ShouldBeSweptLazily(Page* p) {
1771 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1775 void SetPagesToSweep(Page* first) {
1785 void IncreaseUnsweptFreeBytes(Page* p) {
1794 void DecreaseUnsweptFreeBytes(Page* p) {
1814 Page* FirstPage() { return anchor_.next_page(); }
1815 Page* LastPage() { return anchor_.prev_page(); }
1824 // Returns the size of the allocatable area on a page in this space.
1842 // The dummy page that anchors the doubly-linked list of pages.
1843 Page anchor_;
1851 // Bytes of each page that cannot be allocated. Possibly non-zero
1859 // The first page to be swept when the lazy sweeper advances. Is set
1861 Page* first_unswept_page_;
1938 static const int kAreaSize = Page::kNonCodeObjectAreaSize;
1944 inline void set_next_page(NewSpacePage* page) {
1945 set_next_chunk(page);
1952 inline void set_prev_page(NewSpacePage* page) {
1953 set_prev_chunk(page);
1963 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
1968 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
1979 ~Page::kPageAlignmentMask);
1980 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
1981 return page;
1984 // Find the page for a limit address. A limit address is either an address
1985 // inside a page, or the address right after the last byte of a page.
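
As with Page::FromAllocationTop above, a limit may sit exactly one past the page's last byte, where a plain mask would land on the next page; masking (limit - 1) keeps it on the page it bounds. A sketch with the same illustrative constants:

#include <cassert>
#include <cstdint>

const uintptr_t kPageSize = 1 << 20;                 // assumed 1 MB alignment
const uintptr_t kPageAlignmentMask = kPageSize - 1;

// Masks any interior address down to its page start.
uintptr_t PageFromAddress(uintptr_t addr) { return addr & ~kPageAlignmentMask; }

// A limit may be one past the last byte of the page, so mask (limit - 1).
uintptr_t PageFromLimit(uintptr_t limit) { return PageFromAddress(limit - 1); }

int main() {
  uintptr_t page = 3 * kPageSize;
  assert(PageFromAddress(page + 100) == page);       // interior address
  assert(PageFromLimit(page + kPageSize) == page);   // end-of-page limit
}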
2014 // A semispace is a contiguous chunk of memory holding page-like memory
2015 // chunks. The mark-compact collector uses the memory of the first page in
2049 // Returns the start address of the first page of the space.
2055 // Returns the start address of the current page of the space.
2065 // Returns one past the end address of the current page of the space.
2077 // Resets the space to using the first page.
2123 // The "from" address must be on a page prior to the "to" address,
2124 // in the linked page order, or it must be earlier on the same page.
2206 NewSpacePage* page = NewSpacePage::FromLimit(current_);
2207 page = page->next_page();
2208 ASSERT(!page->is_anchor());
2209 current_ = page->area_start();
2248 // Make an iterator that iterates from the page containing start
2249 // to the page that contains limit in the same semispace.
2256 NewSpacePage* prev_page_; // Previous page returned.
2257 // Next page that will be returned. Cached here so that we can use this
2260 // Last page returned.
2329 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2416 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2439 // Try to switch the active semispace to a new, empty page.
2441 // are no pages, or the current page is already empty), or true
2488 // Update allocation info to match the current to-space page.
2545 // The limit of allocation for a page in this space.
2546 virtual Address PageAllocationLimit(Page* page) {
2547 return page->area_end();
2574 page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
2577 // The limit of allocation for a page in this space.
2578 virtual Address PageAllocationLimit(Page* page) {
2579 return page->area_end() - page_extra_;
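
In a space that holds fixed-size objects, the object area of a page is usually not an exact multiple of the object size; the remainder (page_extra_) can never hold a whole object, so the per-page allocation limit is pulled in by that amount. A worked sketch with illustrative numbers only:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kObjectAreaSize = 1000000;   // assumed usable bytes per page
  const intptr_t kObjectSize = 88;            // assumed fixed object size

  // Bytes at the end of every page that cannot hold a whole object:
  // 1000000 % 88 == 56.
  intptr_t page_extra = kObjectAreaSize % kObjectSize;

  // The allocation limit is the page's area end minus that remainder, so the
  // span between area start and limit is an exact multiple of the object size.
  intptr_t area_start = 0;
  intptr_t area_end = area_start + kObjectAreaSize;
  intptr_t allocation_limit = area_end - page_extra;

  assert((allocation_limit - area_start) % kObjectSize == 0);
}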
2604 // Given an index, returns the page address.
2620 static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
2622 // Do map space compaction if there is a page gap.
2688 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2690 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2691 // A large object always starts at Page::kObjectStartOffset within a page.
2706 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2707 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
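
The object bytes a large-object chunk can carry are the chunk size minus the padding reserved for the page header and object-start alignment; chunks no larger than that padding carry no object at all. A sketch with illustrative constants (the real kObjectStartOffset is defined elsewhere in the Page class):

#include <cassert>
#include <cstdint>

const intptr_t kPageSize = 1 << 20;          // assumed 1 MB pages
const intptr_t kObjectStartOffset = 256;     // assumed header/alignment padding

// How many object bytes a large-object chunk of `chunk_size` can carry.
intptr_t ObjectSizeFor(intptr_t chunk_size) {
  if (chunk_size <= kPageSize + kObjectStartOffset) return 0;
  return chunk_size - kPageSize - kObjectStartOffset;
}

int main() {
  assert(ObjectSizeFor(kPageSize) == 0);                                  // too small
  assert(ObjectSizeFor(2 * kPageSize) == kPageSize - kObjectStartOffset); // padding removed
}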
2742 // Finds a large object page containing the given address; returns NULL
2743 // if such a page doesn't exist.