Lines Matching defs:page
69 // a multiple of Page::kPageSize.
78 reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79 max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80 initial_semispace_size_(Page::kPageSize),
84 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86 initial_semispace_size_(Page::kPageSize),
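Note: lines 78-86 initialize the same three semispace fields twice, so the two groups are presumably alternative build configurations (the guarding #if is not among the matches); in both, the reservation is a whole multiple of the page size and the initial semispace is exactly one page. A small arithmetic sketch with hypothetical values:

    // Hypothetical values, for illustration only.
    const intptr_t kLump = 1 << 20;                 // stand-in for LUMP_OF_MEMORY
    const intptr_t kPage = 1 << 20;                 // stand-in for Page::kPageSize
    intptr_t max_semispace     = 8 * Max(kLump, kPage);  // 8 MB (line 85 variant)
    intptr_t initial_semispace = kPage;                   // one page (lines 80, 86)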
654 // large-object allocations that are only just larger than the page size.
1041 MemoryChunk* page,
1043 heap->store_buffer_rebuilder_.Callback(page, event);
1047 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1053 // If this page already overflowed the store buffer during this iteration.
1059 // Did we find too many pointers in the previous page? The heuristic is
1060 // that no page can take more than 1/5 the remaining slots in the store
1065 // In this case the page we scanned took a reasonable number of slots in
1072 current_page_ = page;
1074 // The current page overflowed the store buffer again. Wipe out its entries
1079 // in any particular page, though they are likely to be clustered by the
1083 // Store Buffer overflowed while scanning a particular old space page for
1085 ASSERT(current_page_ == page);
1086 ASSERT(page != NULL);
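Note: the comments at lines 1053-1086 describe the overflow heuristic used while rebuilding the store buffer page by page. A simplified sketch of that control flow follows; names not visible in the matches above (store_buffer_, start_of_current_page_, the scan_on_scavenge accessors, the event constants) are assumptions here.

    // Sketch only, not the real method body.
    void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
      if (event == kStoreBufferScanningPageEvent) {        // assumed event name
        if (current_page_ != NULL && current_page_->scan_on_scavenge()) {
          // The previous page had already overflowed: drop the entries collected
          // for it and rely on rescanning that page at scavenge time instead.
          store_buffer_->SetTop(start_of_current_page_);
        } else if (store_buffer_->Top() - start_of_current_page_ >
                   (store_buffer_->Limit() - store_buffer_->Top()) / 5) {
          // The previous page used more than 1/5 of the remaining slots:
          // mark it for rescanning and reclaim its entries.
          current_page_->set_scan_on_scavenge(true);
          store_buffer_->SetTop(start_of_current_page_);
        }
        start_of_current_page_ = store_buffer_->Top();
        current_page_ = page;
      } else {  // the buffer overflowed while scanning 'page'
        ASSERT(current_page_ == page);
        page->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      }
    }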
1101 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1114 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1622 (object_size <= Page::kMaxNonCodeHeapObjectSize));
1630 (object_size > Page::kMaxNonCodeHeapObjectSize)) {
2296 STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2317 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2880 STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3298 { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3317 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3797 if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4312 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
4356 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4361 size > Page::kMaxNonCodeHeapObjectSize) {
4392 } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4397 size > Page::kMaxNonCodeHeapObjectSize) {
4536 size > Page::kMaxNonCodeHeapObjectSize) {
4542 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
4669 size > Page::kMaxNonCodeHeapObjectSize) {
4675 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
4804 (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
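Note: lines 3298-4804 all repeat the same allocation idiom: an object whose size exceeds Page::kMaxNonCodeHeapObjectSize cannot live on a regular paged-space page and is routed to the large-object space instead. A condensed sketch of the pattern:

    // The target paged space differs per call site (OLD_POINTER_SPACE,
    // OLD_DATA_SPACE, NEW_SPACE, ...); the size threshold is always the same.
    AllocationSpace space = (size > Page::kMaxNonCodeHeapObjectSize)
        ? LO_SPACE             // too large for a regular page
        : OLD_POINTER_SPACE;   // fits on a normal paged-space page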
5250 NewSpacePage* page = it.next();
5251 for (Address cursor = page->area_start(), limit = page->area_end();
5380 // scanning a page and ensuring that all pointers to young space are in the
5389 Page* page = pages.next();
5390 Object** current = reinterpret_cast<Object**>(page->area_start());
5392 Address end = page->area_end();
5417 Page* page = pages.next();
5418 Object** current = reinterpret_cast<Object**>(page->area_start());
5420 Address end = page->area_end();
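Note: the loops at lines 5250-5251, 5389-5392 and 5417-5420 walk every slot of a page's object area between area_start() and area_end() while verifying that old-to-new pointers are covered by the store buffer (see the comment at line 5380). A minimal sketch of that traversal; VisitPointer is a hypothetical stand-in for the per-slot check:

    Page* page = pages.next();
    Object** current = reinterpret_cast<Object**>(page->area_start());
    Address end = page->area_end();
    while (reinterpret_cast<Address>(current) < end) {
      VisitPointer(current);   // verify any old-to-new pointer is recorded
      current++;
    }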
5562 if (max_semispace_size < Page::kPageSize) {
5563 max_semispace_size = Page::kPageSize;
5566 Page::kPageSize >> 10);
5593 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
5609 // The old generation is paged and needs at least one page for each space.
5612 Page::kPageSize),
5614 Page::kPageSize));
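Note: lines 5562-5614 are the heap-configuration clamps: each configurable size is forced to be at least one page and rounded up to a whole number of pages (the old generation additionally needs at least one page per paged space, line 5609). A condensed sketch; the 1 MB figure in the example is an assumption:

    if (max_semispace_size < Page::kPageSize) {
      max_semispace_size = Page::kPageSize;   // never smaller than one page
    }
    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
    // e.g. if Page::kPageSize were 1 MB, RoundUp(1500 KB, 1 MB) == 2 MB.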
6949 // these smaller pieces it will treat it as a slot on a normal Page.
6952 chunk->address() + Page::kPageSize);
6958 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6962 inner->set_size(Page::kPageSize);
6966 inner->address() + Page::kPageSize);
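Note: lines 6949-6966 handle freeing a large-object chunk: because slot recording thinks in terms of normal pages (comment at line 6949), the chunk is carved into Page::kPageSize-sized pieces, each given a fake header, so an address inside it still maps onto something page-shaped. A rough sketch of that carving loop; chunk_end, FromAddress and SetArea are assumptions not shown in the matches:

    Address chunk_end = chunk->address() + chunk->size();
    MemoryChunk* inner =
        MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
    while (inner->address() < chunk_end) {
      // Each fake page covers at most one page-sized slice of the big chunk.
      Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
      inner->SetArea(inner->address(), area_end);
      inner->set_size(Page::kPageSize);
      inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
    }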
6980 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6981 uintptr_t p = reinterpret_cast<uintptr_t>(page);
6982 // Tag the page pointer to make it findable in the dump file.
6984 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
6986 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
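Note: the final matches show Heap::RememberUnmappedPage tagging the address before it is recorded. A page address is aligned to Page::kPageSize, so its low bits are zero; XOR-ing in a masked constant stamps a recognizable hex marker ("c1ead" for cleared, "1d1ed" for "I died") into those bits while the page-identifying high bits survive. A tiny worked example, assuming a 1 MB page at a made-up address:

    // Assuming Page::kPageSize == 0x100000 and a page at 0x3f500000.
    uintptr_t p = 0x3f500000;
    p ^= 0xc1ead & (0x100000 - 1);   // p is now 0x3f5c1ead: easy to spot in a dump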