Home | History | Annotate | Download | only in src

Lines Matching full:page

48 // spaces consists of a list of pages. A page has a page header, a remembered
49 // set area, and an object area. A page size is deliberately chosen as 8K
50 // bytes. The first word of a page is an opaque page header that has the
51 // address of the next page and its ownership information. The second word may
52 // have the allocation top address of this page. The next 248 bytes are
57 // Page::kMaxHeapObjectSize, so that they do not have to move during
69 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
78 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
81 ASSERT((Page::kObjectStartOffset <= offset) \
82 && (offset <= Page::kPageSize))
93 // A page normally has 8K bytes. Large object pages may be larger. A page
94 // address is always aligned to the 8K page size. A page is divided into
96 // bytes are used as remembered set, and the rest of the page is the object
101 // bit position (offset from the start of the page) is calculated by dividing
102 // its page offset by 32. Therefore, the object area in a page starts at the
104 // the first two words (64 bits) in a page can be used for other purposes.
109 // For this reason we add an offset to get room for the Page data at the start.
111 // The mark-compact collector transforms a map pointer into a page index and a
112 // page offset. The exact encoding is described in the comments for
115 // The only way to get a page pointer is by calling factory methods:
116 // Page* p = Page::FromAddress(addr); or
117 // Page* p = Page::FromAllocationTop(top);
118 class Page {
120 // Returns the page containing a given address. The address ranges
127 INLINE(static Page* FromAddress(Address a)) {
128 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
131 // Returns the page containing an allocation top. Because an allocation
132 // top address can be the upper bound of the page, we need to subtract
135 INLINE(static Page* FromAllocationTop(Address top)) {
136 Page* p = FromAddress(top - kPointerSize);
141 // Returns the start address of this page.
144 // Checks whether this is a valid page address.
147 // Returns the next page of this page.
148 inline Page* next_page();
150 // Return the end of allocation in this page. Undefined for unused pages.
153 // Returns the start address of the object area in this page.
156 // Returns the end address (exclusive) of the object area in this page.
157 Address ObjectAreaEnd() { return address() + Page::kPageSize; }
165 // Checks whether an address is page aligned.
170 // True if this page is a large object page.
173 // Returns the offset of a given address to this page.
180 // Returns the address for a given offset to this page.
189 // Clears remembered set in this page.
215 // Page size in bytes. This must be a multiple of the OS page size.
218 // Page size mask.
221 // The offset of the remembered set in a page, in addition to the empty bytes
228 // The end offset of the remembered set in a page
232 // The start offset of the object area in a page.
237 // The start offset of the used part of the remembered set in a page.
244 // Maximum object size that fits in a page.
248 // Page header description.
250 // If a page is not in the large object space, the first word,
251 // opaque_header, encodes the next page address (aligned to kPageSize 8K)
255 // in the current page. If a page is in the large object space, the first
256 // word *may* (if the page start and large object chunk start are the
260 // If the page is not in the large object space, the low-order bit of the
261 // second word is set. If the page is in the large object space, the
262 // second word *may* (if the page start and large object chunk start are
271 // The index of the page in its owner space.
274 // The allocation pointer after relocating objects to this page.
277 // The forwarding address of the first live object in this page.
396 // old space and map space if they are big enough to hold at least one page.
430 // page-aligned page. The call always succeeds, and num_pages is always
432 static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
453 // allocate memory for the OS or cannot allocate a single page, this
454 // function returns an invalid page pointer (NULL). The caller must check
455 // whether the returned page is valid (by calling Page::is_valid()). It is
460 static Page* AllocatePages(int requested_pages, int* allocated_pages,
463 // Frees pages from a given page and after. If 'p' is the first page
465 // invalid page pointer. Otherwise, the function searches a page
466 // after 'p' that is the first page of a chunk. Pages after the
467 // found page are freed and the function returns 'p'.
468 static Page* FreePages(Page* p);
489 return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
493 static inline void SetNextPage(Page* prev, Page* next);
495 // Returns the next page of a given page.
496 static inline Page* GetNextPage(Page* p);
498 // Checks whether a page belongs to a space.
499 static inline bool IsPageInSpace(Page* p, PagedSpace* space);
501 // Returns the space that owns the given page.
502 static inline PagedSpace* PageOwner(Page* page);
504 // Finds the first/last page in the same chunk as a given page.
505 static Page* FindFirstPageInSameChunk(Page* p);
506 static Page* FindLastPageInSameChunk(Page* p);
514 // Protect/unprotect a chunk given a page in the chunk.
515 static inline void ProtectChunkFromPage(Page* page);
516 static inline void UnprotectChunkFromPage(Page* page);
533 static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
584 // Returns the chunk id that a page belongs to.
585 static inline int GetChunkId(Page* p);
590 // Initializes pages in a chunk. Returns the first page address.
592 // collector to rebuild page headers in the from space, which is
593 // used as a marking stack and its page headers are destroyed.
594 static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
660 Address cur_limit_; // current page limit
662 Page* end_page_; // caches the page of the end address
677 // Slow path of next, goes into the next page.
706 // allocation pointer or MC allocation pointer in the last page to
708 // page.
724 inline Page* next();
728 Page* prev_page_; // Previous page returned.
729 Page* stop_page_; // Page to stop at (last page returned by the iterator).
734 // A space has a list of pages. The next page can be accessed via
735 // Page::next_page() call. The next page of the last page is an
736 // invalid page pointer. A space can expand and shrink dynamically.
738 // An abstraction of allocation and relocation pointers in a page-structured
747 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
754 // An abstraction of the accounting statistics of a page-structured space.
756 // including page bookkeeping structures) currently in the space. The 'size'
760 // to internal fragmentation, top of page areas in map space), and the bytes
848 // addresses is not big enough to contain a single page-aligned page, a
866 // iterates over objects in the page containing the address, the cost is
867 // linear in the number of objects in the page. It may be slow.
870 // Checks whether page is currently in use by this space.
871 bool IsUsed(Page* page);
879 virtual Address PageAllocationTop(Page* page) = 0;
915 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
923 // Writes relocation info to the top page.
972 // The first page in this space.
973 Page* first_page_;
975 // The last page in this space. Initially set in Setup, updated in
977 Page* last_page_;
985 // Bytes of each page that cannot be allocated. Possibly non-zero
991 // Sets allocation pointer to a page bottom.
992 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
994 // Returns the top page specified by an allocation info structure.
995 static Page* TopPageOf(AllocationInfo alloc_info) {
996 return Page::FromAllocationTop(alloc_info.limit);
1000 Page* p = Page::FromAllocationTop(allocation_info_.top);
1014 bool Expand(Page* last_page);
1017 // the top page of 'alloc_info'. Returns NULL on failure.
1021 // During normal allocation or deserialization, roll to the next page in
1024 virtual HeapObject* AllocateInNextPage(Page* current_page,
1040 // Returns the page of the allocation pointer.
1041 Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1043 // Returns a pointer to the page of the relocation pointer.
1044 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1525 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1655 // The top of allocation in a page in this space. Undefined if page is unused.
1656 virtual Address PageAllocationTop(Page* page) {
1657 return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
1676 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1690 // the page after current_page (there is assumed to be one).
1691 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1715 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1718 // The top of allocation in a page in this space. Undefined if page is unused.
1719 virtual Address PageAllocationTop(Page* page) {
1720 return page == TopPageOf(allocation_info_) ? top()
1721 : page->ObjectAreaEnd() - page_extra_;
1739 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1754 // the page after current_page (there is assumed to be one).
1755 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1788 // Given an index, returns the page address.
1818 Page* top_page = it.next();
1831 Page* top_page = Page::FromAddress(new_top);
1844 for (Page* p = first_page_; p != top_page; p = p->next_page())
1861 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
1863 // Do map space compaction if there is a page gap.
1870 // An array of page start address in a map space.
1898 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
1900 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
1901 // A large object always starts at Page::kObjectStartOffset to a page.
1904 // A LargeObjectChunk holds exactly one large object page with exactly one
1908 // Allocates a new LargeObjectChunk that contains a large object page
1909 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
1911 // area start of that page. The allocated chunk size is set in the output
1946 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
1947 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;