
Lines Matching defs:page

72 // a multiple of Page::kPageSize.
75 initial_semispace_size_(Page::kPageSize),
163 ASSERT(MB >= Page::kPageSize);
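The fragments above (file lines 72, 75, 163) show the basic sizing invariant: heap and semispace sizes are kept a multiple of the page size, and a megabyte is assumed to be at least one page. A minimal standalone sketch of that invariant follows; kPageSizeSketch is an assumed stand-in for Page::kPageSize (historically 1 MB in this codebase), not the real constant.

#include <cassert>
#include <cstddef>

static const std::size_t kPageSizeSketch = 1 << 20;  // assumed 1 MB stand-in

static bool IsPageAligned(std::size_t size) {
  // Valid because the page size is a power of two.
  return (size & (kPageSizeSketch - 1)) == 0;
}

int main() {
  const std::size_t MB = 1 << 20;
  assert(MB >= kPageSizeSketch);   // mirrors ASSERT(MB >= Page::kPageSize)
  assert(IsPageAligned(2 * MB));   // heap sizes stay a multiple of the page size
  return 0;
}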
1313 MemoryChunk* page,
1315 heap->store_buffer_rebuilder_.Callback(page, event);
1319 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1325 // If this page already overflowed the store buffer during this iteration.
1331 // Did we find too many pointers in the previous page? The heuristic is
1332 // that no page can take more than 1/5 the remaining slots in the store
1337 // In this case the page we scanned took a reasonable number of slots in
1344 current_page_ = page;
1346 // The current page overflowed the store buffer again. Wipe out its entries
1351 // in any particular page, though they are likely to be clustered by the
1355 // Store Buffer overflowed while scanning a particular old space page for
1357 ASSERT(current_page_ == page);
1358 ASSERT(page != NULL);
1373 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1386 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
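The StoreBufferRebuilder::Callback fragments above (file lines 1313-1386) describe the rebuild heuristic: a page whose scan consumes more than 1/5 of the remaining store-buffer slots has its recorded entries wiped and is marked for a full rescan instead. A simplified standalone sketch of that heuristic follows; PageStub, StartPage, and RecordSlot are illustrative placeholder names, not V8's actual API.

#include <cstddef>

struct PageStub { bool scan_on_scavenge = false; };

class StoreBufferRebuilderSketch {
 public:
  explicit StoreBufferRebuilderSketch(std::size_t capacity)
      : remaining_(capacity), used_by_current_page_(0), current_page_(nullptr) {}

  // The collector reports that it is about to emit slots for a new page.
  void StartPage(PageStub* page) {
    if (current_page_ != nullptr && used_by_current_page_ > remaining_ / 5) {
      // Heuristic from the comments above: the previous page took more than
      // 1/5 of the remaining slots, so drop its entries and mark the page
      // for a full rescan on the next scavenge instead.
      current_page_->scan_on_scavenge = true;
      used_by_current_page_ = 0;
    }
    remaining_ -= used_by_current_page_;
    used_by_current_page_ = 0;
    current_page_ = page;
  }

  // One pointer slot was recorded for the current page.
  void RecordSlot() { ++used_by_current_page_; }

 private:
  std::size_t remaining_;             // store-buffer slots still free
  std::size_t used_by_current_page_;  // slots taken by the page being scanned
  PageStub* current_page_;
};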
2225 SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
3001 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
3017 STATIC_ASSERT(Cell::kSize <= Page::kNonCodeObjectAreaSize);
3031 STATIC_ASSERT(PropertyCell::kSize <= Page::kNonCodeObjectAreaSize);
3753 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
4245 // Objects on the first page of each space are never moved.
4247 // Discard the first code allocation, which was on a page where it could be
5540 STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
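The STATIC_ASSERT lines above (file lines 2225-5540) all follow one pattern: a fixed-size heap object must fit within a page's (non-code) object area, so allocating it on a single page can never fail on size grounds. A hedged standalone equivalent, with an assumed area size in place of Page::kNonCodeObjectAreaSize:

#include <cstddef>

static const std::size_t kAssumedObjectAreaSize = 992 * 1024;  // placeholder

struct HeapNumberLike { double value; };  // stand-in for a small heap object

// Same shape as the STATIC_ASSERTs above: prove at compile time that the
// object fits in one page's object area.
static_assert(sizeof(HeapNumberLike) <= kAssumedObjectAreaSize,
              "object must fit in a single page's object area");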
6163 NewSpacePage* page = it.next();
6164 for (Address cursor = page->area_start(), limit = page->area_end();
6292 // scanning a page and ensuring that all pointers to young space are in the
6301 Page* page = pages.next();
6302 Object** current = reinterpret_cast<Object**>(page->area_start());
6304 Address end = page->area_end();
6329 Page* page = pages.next();
6330 Object** current = reinterpret_cast<Object**>(page->area_start());
6332 Address end = page->area_end();
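The loops above (file lines 6163-6332) all scan a page's body from area_start() to area_end(), treating it as an array of object slots and checking each slot that points into young space. A minimal sketch of that scanning shape; PageLike and InNewSpaceStub are illustrative placeholders, not V8 types.

#include <cstdint>

struct PageLike {
  std::uintptr_t area_start;  // first byte of the page's object area
  std::uintptr_t area_end;    // one past the last byte
};

// Hypothetical stand-in for a Heap::InNewSpace-style predicate.
static bool InNewSpaceStub(void* object) {
  (void)object;
  return false;
}

// Walk the page body slot by slot, as the loops above do with
// page->area_start() and page->area_end().
static int CountNewSpacePointers(const PageLike& page) {
  void** current = reinterpret_cast<void**>(page.area_start);
  void** end = reinterpret_cast<void**>(page.area_end);
  int count = 0;
  for (; current < end; ++current) {
    // A real verifier would also check that the store buffer records
    // each such slot; only the scanning shape is shown here.
    if (InNewSpaceStub(*current)) ++count;
  }
  return count;
}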
6484 max_semispace_size_ = Page::kPageSize;
6488 if (max_semispace_size < Page::kPageSize) {
6489 max_semispace_size = Page::kPageSize;
6492 Page::kPageSize >> 10);
6519 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6540 // The old generation is paged and needs at least one page for each space.
6543 Page::kPageSize),
6545 Page::kPageSize));
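The configuration fragments above (file lines 6484-6545) clamp the semispace to at least one page and round budgets up to whole pages with RoundUp(..., Page::kPageSize). A standalone sketch of both operations, assuming a power-of-two page size as a stand-in for the real constant:

#include <algorithm>
#include <cstddef>

static const std::size_t kPageSizeSketch = 1 << 20;  // assumed page size

// Round a byte count up to a whole number of pages, as
// RoundUp(..., Page::kPageSize) does above.
static std::size_t RoundUpToPage(std::size_t size) {
  return (size + kPageSizeSketch - 1) & ~(kPageSizeSketch - 1);
}

// Mirrors the clamp above: a semispace can never be smaller than one page.
static std::size_t ClampSemispaceSize(std::size_t requested) {
  return std::max(requested, kPageSizeSketch);
}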
7882 // these smaller pieces it will treat it as a slot on a normal Page.
7885 chunk->address() + Page::kPageSize);
7891 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7895 inner->set_size(Page::kPageSize);
7899 inner->address() + Page::kPageSize);
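The fragments above (file lines 7882-7899) walk a large chunk as consecutive page-sized inner pieces so the store buffer can treat each piece like a normal Page, trimming the last piece with Min(inner->address() + Page::kPageSize, chunk_end). A runnable sketch of that split, with an assumed page size and plain integer addresses in place of MemoryChunk:

#include <algorithm>
#include <cstdint>
#include <cstdio>

static const std::uintptr_t kPageSizeSketch = 1 << 20;  // assumed page size

// Visit a chunk as consecutive page-sized pieces; the last piece is
// trimmed to the chunk's real end, as in the Min(...) above.
static void ForEachPageSizedPiece(std::uintptr_t chunk_start,
                                  std::uintptr_t chunk_end,
                                  void (*visit)(std::uintptr_t,
                                                std::uintptr_t)) {
  for (std::uintptr_t inner = chunk_start; inner < chunk_end;
       inner += kPageSizeSketch) {
    std::uintptr_t area_end = std::min(inner + kPageSizeSketch, chunk_end);
    visit(inner, area_end);
  }
}

static void PrintPiece(std::uintptr_t start, std::uintptr_t end) {
  std::printf("piece of %lu bytes\n",
              static_cast<unsigned long>(end - start));
}

int main() {
  // A chunk of 2.5 pages yields two full pieces and one half-size piece.
  ForEachPageSizedPiece(0, 2 * kPageSizeSketch + kPageSizeSketch / 2,
                        PrintPiece);
  return 0;
}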
7913 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7914 uintptr_t p = reinterpret_cast<uintptr_t>(page);
7915 // Tag the page pointer to make it findable in the dump file.
7917 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
7919 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
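RememberUnmappedPage (file lines 7913-7919) XORs a recognizable pattern into the low, in-page bits of the pointer, so an unmapped page shows up as a findable tag in a crash dump while its page-base bits stay intact. A standalone sketch of just the tagging, assuming a power-of-two page size; the surrounding ring-buffer bookkeeping is omitted.

#include <cstdint>

static const std::uintptr_t kPageSizeSketch = 1 << 20;  // assumed page size

// Only the offset-within-page bits are flipped, so the page base address
// remains readable; the pattern says why the page was unmapped.
static std::uintptr_t TagUnmappedPage(std::uintptr_t p, bool compacted) {
  if (compacted) {
    return p ^ (0xc1ead & (kPageSizeSketch - 1));  // "Cleared."
  } else {
    return p ^ (0x1d1ed & (kPageSizeSketch - 1));  // "I died."
  }
}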