Lines Matching refs:Page
47 Page* PageIterator::next() {
57 // Page
59 Page* Page::next_page() {
64 Address Page::AllocationTop() {
70 Address Page::AllocationWatermark() {
79 uint32_t Page::AllocationWatermarkOffset() {
85 void Page::SetAllocationWatermark(Address allocation_watermark) {
95 // value and mark this page as having an invalid watermark.
107 void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
112 Address Page::CachedAllocationWatermark() {
117 uint32_t Page::GetRegionMarks() {
122 void Page::SetRegionMarks(uint32_t marks) {
127 int Page::GetRegionNumberForAddress(Address addr) {
128 // Each page is divided into 256-byte regions. Each region has a corresponding
129 // dirty mark bit in the page header. A region can contain intergenerational
131 // A normal 8K page contains exactly 32 regions so all region marks fit
133 // offset inside page by region size.
134 // A large page can contain more than 32 regions. But we want to avoid
136 // pages so we just ignore the fact that addr points into a large page and
137 // calculate region number as if addr pointed into a normal 8K page. This way
143 // We are using masking with kPageAlignmentMask instead of Page::Offset()
144 // to get an offset to the beginning of the 8K page containing addr, not to the
145 // beginning of the actual page, which can be bigger than 8K.
151 uint32_t Page::GetRegionMaskForAddress(Address addr) {
156 uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
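The comment block above (source lines 128-145) describes the region-marking scheme; the sketch below is a minimal, self-contained illustration of that computation, assuming 8K pages and 256-byte regions. The standalone helper names and constants are illustrative, not the V8 declarations.

#include <cstdint>

// Assumed geometry: 8K pages and 256-byte regions, so 32 regions per page and
// one dirty-mark bit per region fits in a 32-bit field.
static const uintptr_t kPageSize = 1u << 13;
static const uintptr_t kPageAlignmentMask = kPageSize - 1;
static const int kRegionSizeLog2 = 8;

// Region number = (offset of addr inside a normal 8K page) / 256. For large
// pages the offset is taken modulo 8K, so several regions share one mark bit.
static int RegionNumberForAddress(uintptr_t addr) {
  uintptr_t offset_inside_normal_page = addr & kPageAlignmentMask;
  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
}

// Each region owns one bit of the page's 32-bit dirty-regions field.
static uint32_t RegionMaskForAddress(uintptr_t addr) {
  return 1u << RegionNumberForAddress(addr);
}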
183 void Page::MarkRegionDirty(Address address) {
188 bool Page::IsRegionDirty(Address address) {
193 void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
223 void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
228 bool Page::IsWatermarkValid() {
234 void Page::InvalidateWatermark(bool value) {
249 bool Page::GetPageFlag(PageFlag flag) {
254 void Page::SetPageFlag(PageFlag flag, bool value) {
263 void Page::ClearPageFlags() {
268 void Page::ClearGCFields() {
278 bool Page::WasInUseBeforeMC() {
283 void Page::SetWasInUseBeforeMC(bool was_in_use) {
288 bool Page::IsLargeObjectPage() {
293 void Page::SetIsLargeObjectPage(bool is_large_object_page) {
297 bool Page::IsPageExecutable() {
302 void Page::SetIsPageExecutable(bool is_page_executable) {
332 bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
345 Page* MemoryAllocator::GetNextPage(Page* p) {
347 intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
348 return Page::FromAddress(AddressFrom<Address>(raw_addr));
352 int MemoryAllocator::GetChunkId(Page* p) {
354 return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
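GetNextPage, GetChunkId and SetNextPage above all decode or encode a single opaque_header word. A minimal sketch of that packing, assuming the next page's address is page-aligned so its low bits are free to hold the chunk id (the struct and helper names below are illustrative, not the V8 declarations):

#include <cstdint>

static const uintptr_t kPageSize = 1u << 13;   // assumed 8K, page-aligned pages
static const uintptr_t kPageAlignmentMask = kPageSize - 1;

struct PageSketch {
  // High bits: page-aligned address of the next page in the chain.
  // Low bits:  id of the memory chunk this page belongs to.
  uintptr_t opaque_header;
};

static uintptr_t NextPageAddress(const PageSketch* p) {
  return p->opaque_header & ~kPageAlignmentMask;
}

static int ChunkId(const PageSketch* p) {
  return static_cast<int>(p->opaque_header & kPageAlignmentMask);
}

static void SetNextPage(PageSketch* prev, uintptr_t next_page_addr, int chunk_id) {
  // next_page_addr must be page-aligned and chunk_id must fit in the low bits.
  prev->opaque_header = next_page_addr | static_cast<uintptr_t>(chunk_id);
}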
358 void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
366 PagedSpace* MemoryAllocator::PageOwner(Page* page) {
367 int chunk_id = GetChunkId(page);
395 void MemoryAllocator::ProtectChunkFromPage(Page* page) {
396 int id = GetChunkId(page);
401 void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
402 int id = GetChunkId(page);
414 Page* p = Page::FromAddress(addr);
420 // Try linear allocation in the page of alloc_info's allocation top. Does
421 // not contain slow case logic (e.g., move to the next page or try free list
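The comment above describes the bump-pointer fast path only; a minimal sketch of such linear allocation, using an illustrative allocation-info struct with top/limit pointers (not the V8 declarations):

#include <cstddef>
#include <cstdint>

// Illustrative stand-in for the allocation info: a bump pointer (top) and the
// end of the page's usable area (limit).
struct AllocationInfoSketch {
  uint8_t* top;
  uint8_t* limit;
};

// Fast path only: bump top by size_in_bytes if it still fits in the page;
// otherwise return nullptr so the caller can fall back to the slow path
// (move to the next page or try the free list).
static void* AllocateLinearly(AllocationInfoSketch* info, size_t size_in_bytes) {
  uint8_t* current_top = info->top;
  uint8_t* new_top = current_top + size_in_bytes;
  if (new_top > info->limit) return nullptr;
  info->top = new_top;
  return current_top;
}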
469 // Round the chunk address up to the nearest page-aligned address
470 // and return the heap object in that page.
471 Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
472 return page->ObjectAreaStart();
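The rounding mentioned above is the usual align-up-to-a-power-of-two calculation; a minimal sketch assuming 8K pages (RoundUpToPage is illustrative, not the V8 utility):

#include <cstdint>

static const uintptr_t kPageSize = 1u << 13;   // assumed 8K pages

// Round addr up to the next page boundary; addr is returned unchanged if it
// is already page-aligned. The object returned above then lives at that
// page's object area start, just past the page header.
static uintptr_t RoundUpToPage(uintptr_t addr) {
  return (addr + kPageSize - 1) & ~(kPageSize - 1);
}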
478 Page::FromAddress(RoundUp(address(), Page::kPageSize))->heap_->isolate();