
Lines Matching refs:page

24 #include "src/heap/page-parallel-job.h"
122 static void VerifyMarkingBlackPage(Heap* heap, Page* page) {
123 CHECK(page->IsFlagSet(Page::BLACK_PAGE));
125 HeapObjectIterator it(page);
134 // The bottom position is at the start of its page. Allows us to use
135 // page->area_start() as start of range on all pages.
136 CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
140 Page* page = *(it++);
141 Address limit = it != range.end() ? page->area_end() : end;
142 CHECK(limit == end || !page->Contains(end));
143 VerifyMarking(space->heap(), page->area_start(), limit);
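
The matches at 134-143 show how marking verification walks a range of
new-space pages, clamping the limit on the final page to the range's end
address. A minimal reconstruction of that loop, assuming a NewSpacePageRange
named 'range' and its iterator 'it' (the loop header itself is not among the
matches):

    auto it = range.begin();
    while (it != range.end()) {
      Page* page = *(it++);
      // Only the last page is verified up to 'end'; all earlier pages are
      // verified to their full area_end().
      Address limit = it != range.end() ? page->area_end() : end;
      CHECK(limit == end || !page->Contains(end));
      VerifyMarking(space->heap(), page->area_start(), limit);
    }
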
149 for (Page* p : *space) {
150 if (p->IsFlagSet(Page::BLACK_PAGE)) {
191 static void VerifyEvacuation(Page* page) {
193 HeapObjectIterator iterator(page);
208 Page* page = *(it++);
209 Address current = page->area_start();
210 Address limit = it != range.end() ? page->area_end() : space->top();
211 CHECK(limit == space->top() || !page->Contains(space->top()));
225 for (Page* p : *space) {
269 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
357 for (Page* p : *space) {
365 for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
382 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
410 for (Page* p : *space) {
412 if (p->IsFlagSet(Page::BLACK_PAGE)) {
413 p->ClearFlag(Page::BLACK_PAGE);
420 for (Page* page : *space) {
421 Bitmap::Clear(page);
438 if (chunk->IsFlagSet(Page::BLACK_PAGE)) {
439 chunk->ClearFlag(Page::BLACK_PAGE);
481 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
500 Page* page) {
501 if (!page->SweepingDone()) {
502 ParallelSweepPage(page, page->owner()->identity());
503 if (!page->SweepingDone()) {
504 // We were not able to sweep that page, i.e., a concurrent
505 // sweeper thread currently owns this page. Wait for the sweeper
506 // thread to be done with this page.
507 page->WaitUntilSweepingCompleted();
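
Lines 500-507 contain essentially the whole body of a sweep-or-wait helper;
only its name is cut off at 500 (plausibly SweepOrWaitUntilSweepingCompleted
in this revision, but that is an assumption). A hedged reconstruction:

    void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
        Page* page) {  // name assumed, not shown in the matches
      if (!page->SweepingDone()) {
        ParallelSweepPage(page, page->owner()->identity());
        if (!page->SweepingDone()) {
          // A concurrent sweeper thread owns the page; block until it is
          // finished with it.
          page->WaitUntilSweepingCompleted();
        }
      }
    }
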
519 Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
558 for (Page* p : *heap_->new_space()) {
593 Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE))
651 const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
657 const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
658 // Time to take for a single area (= payload of a page). Used as soon as there
670 // and a goal for a single page.
694 // Pairs of (live_bytes_in_page, page).
695 typedef std::pair<int, Page*> LiveBytesPagePair;
699 for (Page* p : *space) {
701 if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
720 Page* p = pages[i].second;
730 Page* p = pages[i].second;
740 // We use two conditions to decide whether a page qualifies as an evacuation
742 // * Target fragmentation: How fragmented is a page, i.e., what is the ratio
743 // between live bytes and capacity of this page (= area).
748 // them starting with the page with the most free memory, adding them to the
763 // - fragmentation of (n+1)-th page does not exceed the specified limit.
779 "compaction-selection-page: space=%s free_bytes_page=%d "
815 for (Page* p : evacuation_candidates_) {
1561 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
1562 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
1602 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
1619 Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
1621 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
1623 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
1716 Page::kPageAlignmentMask & ~kPointerAlignmentMask;
1718 Page::kPageAlignmentMask) == mask) {
1719 Page* page = Page::FromAddress(object->address());
1720 if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1721 page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1723 page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1885 static void MoveToOldSpace(Page* page, PagedSpace* owner) {
1886 page->Unlink();
1887 Page* new_page = Page::ConvertNewToOld(page, owner);
1888 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1891 static void MoveToToSpace(Page* page) {
1892 page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1893 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1925 Page::FromAddress(object->address())->owner()->identity());
1951 for (Page* p : *space) {
1952 if (!p->IsFlagSet(Page::BLACK_PAGE)) {
1962 for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
1963 DiscoverGreyObjectsOnPage(page);
2821 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
2822 Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
2853 Page::FromAddress(heap_obj->address())
2854 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2914 // If we are on a black page, we cannot find the actual object start
2916 if (p->IsFlagSet(Page::BLACK_PAGE)) {
2998 Page* p = Page::FromAddress(slot);
3013 if (p->IsFlagSet(Page::BLACK_PAGE)) {
3041 for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
3072 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
3073 return Page::kAllocatableMemory + kPointerSize;
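
The back-to-back returns at 3072/3073 read like the body of the
page-promotion threshold helper. The guard is not among the matches, so the
FLAG_page_promotion test below is an assumption:

    static int PageEvacuationThreshold() {
      if (FLAG_page_promotion)  // assumed guard, not shown in the matches
        return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
      return Page::kAllocatableMemory + kPointerSize;
    }

Note that the fallback value is one pointer larger than any possible
LiveBytes() of a page, so it effectively disables whole-page promotion when
the flag path is not taken.
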
3088 inline bool EvacuatePage(Page* chunk);
3122 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
3124 DCHECK(page->SweepingDone());
3125 int saved_live_bytes = page->LiveBytes();
3127 Heap* heap = page->heap();
3131 switch (ComputeEvacuationMode(page)) {
3133 success = collector_->VisitLiveObjects(page, &new_space_visitor_,
3136 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3140 success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
3146 new_space_page_visitor.account_semispace_copied(page->LiveBytes());
3151 success = collector_->VisitLiveObjects(page, &old_space_visitor_,
3154 // Aborted compaction page. We have to record slots here, since we
3156 // Note: We mark the page as aborted here to be able to record slots
3158 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3161 collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
3163 page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3165 // We need to return failure here to indicate that we want this page
3170 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3180 "evacuation[%p]: page=%p new_space=%d "
3183 static_cast<void*>(this), static_cast<void*>(page),
3184 page->InNewSpace(),
3185 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
3186 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
3187 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
3188 page->Contains(heap->new_space()->age_mark()),
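
The interesting part of EvacuatePage is the abort handling around 3151-3166:
if object-by-object evacuation of an old-space candidate fails (typically an
allocation failure in the target space), the page is flagged and re-visited
only to record slots. A sketch of that branch, assuming the V8 tree;
'record_visitor' and the kClearMarkbits mode are assumptions based on
3158-3161:

    success = collector_->VisitLiveObjects(page, &old_space_visitor_,
                                           kClearMarkbits);
    if (!success) {
      // Aborted compaction page: flag it so slot recording can tell it
      // apart (3158), then walk the live objects again purely to record
      // slots, keeping the mark bits intact (3161).
      page->SetFlag(Page::COMPACTION_WAS_ABORTED);
      collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
      // 'success' stays false so the caller re-queues this page
      // (3165-3166).
    }
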
3251 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
3257 Page* p = static_cast<Page*>(chunk);
3273 // We have partially compacted the page, i.e., some objects may have
3294 for (Page* page : evacuation_candidates_) {
3295 live_bytes += page->LiveBytes();
3296 job.AddPage(page, &abandoned_pages);
3300 for (Page* page : newspace_evacuation_candidates_) {
3301 live_bytes += page->LiveBytes();
3302 if (!page->NeverEvacuate() &&
3303 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
3304 !page->Contains(age_mark)) {
3305 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3306 EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
3308 EvacuateNewSpacePageVisitor::MoveToToSpace(page);
3312 job.AddPage(page, &abandoned_pages);
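
The condition at 3302-3304 decides whether a new-space page is moved
wholesale instead of being evacuated object by object. Restated as a
predicate (the function name is hypothetical; the condition itself is
verbatim from the matches):

    bool ShouldMovePage(Page* page, Address age_mark) {
      return !page->NeverEvacuate() &&
             page->LiveBytes() > Evacuator::PageEvacuationThreshold() &&
             !page->Contains(age_mark);
    }

Pages already below the age mark (3305-3306) are promoted to old space via
MoveToOldSpace (1885); younger pages stay in new space via MoveToToSpace
(1891).
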
3367 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
3370 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
3376 // Before we sweep objects on the page, we free dead array buffers which
3430 // Clear the mark bits of that page and reset live bytes count.
3446 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3463 Page* page = Page::FromAddress(code->address());
3466 RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
3467 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);
3479 static void VerifyAllBlackObjects(MemoryChunk* page) {
3480 LiveObjectIterator<kAllLiveObjects> it(page);
3489 bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
3492 VerifyAllBlackObjects(page);
3495 LiveObjectIterator<kBlackObjects> it(page);
3501 page->markbits()->ClearRange(
3502 page->AddressToMarkbitIndex(page->area_start()),
3503 page->AddressToMarkbitIndex(object->address()));
3504 if (page->old_to_new_slots() != nullptr) {
3505 page->old_to_new_slots()->RemoveRange(
3506 0, static_cast<int>(object->address() - page->address()));
3508 RecomputeLiveBytes(page);
3514 Bitmap::Clear(page);
3520 void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
3521 LiveObjectIterator<kBlackObjects> it(page);
3527 page->SetLiveBytes(new_live_size);
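
RecomputeLiveBytes at 3520-3527 is missing only its summation loop in the
matches. A reconstruction, assuming LiveObjectIterator exposes Next() and
heap objects their Size():

    void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
      LiveObjectIterator<kBlackObjects> it(page);
      int new_live_size = 0;
      HeapObject* object = nullptr;
      while ((object = it.Next()) != nullptr) {
        new_live_size += object->Size();
      }
      page->SetLiveBytes(new_live_size);
    }
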
3531 void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
3534 VerifyAllBlackObjects(page);
3537 LiveObjectIterator<kBlackObjects> it(page);
3548 Page* page) {
3550 swept_list_[space->identity()].Add(page);
3575 // to still have access to page headers.
3581 for (Page* p : newspace_evacuation_candidates_) {
3582 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3583 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
3585 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3586 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3594 for (Page* p : evacuation_candidates_) {
3600 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3602 p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3620 typedef int PerPageData; // Per page data is not used in this job.
3689 DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address())
3690 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
3691 // Slots can be in "to" space after a page has been moved. Since there is
3729 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3774 for (Page* page : NewSpacePageRange(space_start, space_end)) {
3776 page->Contains(space_start) ? space_start : page->area_start();
3777 Address end = page->Contains(space_end) ? space_end : page->area_end();
3778 job.AddPage(page, std::make_pair(start, end));
3820 for (Page* p : evacuation_candidates_) {
3837 Page* page = nullptr;
3838 while ((page = GetSweepingPageSafe(identity)) != nullptr) {
3839 int freed = ParallelSweepPage(page, identity);
3850 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
3853 if (page->mutex()->TryLock()) {
3854 // If this page was already swept in the meantime, we can return here.
3855 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
3856 page->mutex()->Unlock();
3859 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3862 IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr);
3866 heap_->paged_space(identity), page, nullptr);
3870 heap_->paged_space(identity), page, nullptr);
3874 heap_->paged_space(identity), page, nullptr);
3878 swept_list_[identity].Add(page);
3880 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3881 page->mutex()->Unlock();
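
Lines 3853-3881 show the complete concurrent-sweep handshake: take the page
mutex opportunistically, bail out if another sweeper already claimed the
page, and publish state transitions around the sweep itself. Condensed, with
the per-space RawSweep dispatch (3862-3874) elided:

    int freed = 0;
    if (page->mutex()->TryLock()) {
      if (page->concurrent_sweeping_state().Value() !=
          Page::kSweepingPending) {
        // Someone else already swept (or is sweeping) this page.
        page->mutex()->Unlock();
        return 0;
      }
      page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
      // ... freed = RawSweep<...>(heap_->paged_space(identity), page, ...);
      swept_list_[identity].Add(page);
      page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
      page->mutex()->Unlock();
    }
    return freed;
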
3886 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
3888 PrepareToBeSweptPage(space, page);
3889 sweeping_list_[space].push_back(page);
3893 Page* page) {
3895 PrepareToBeSweptPage(space, page);
3897 AddSweepingPageSafe(space, page);
3901 Page* page) {
3902 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
3903 int to_sweep = page->area_size() - page->LiveBytes();
3908 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
3911 Page* page = nullptr;
3913 page = sweeping_list_[space].front();
3916 return page;
3920 Page* page) {
3922 sweeping_list_[space].push_back(page);
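
The *Safe accessors at 3908-3922 imply the sweeping lists are protected by a
sweeper-wide mutex that never shows up in the matches. A sketch of the getter
under that assumption, with base::LockGuard and a deque-like sweeping_list_
standing in for whatever this revision actually uses:

    Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
        AllocationSpace space) {
      base::LockGuard<base::Mutex> guard(&mutex_);  // assumed member
      Page* page = nullptr;
      if (!sweeping_list_[space].empty()) {
        page = sweeping_list_[space].front();
        sweeping_list_[space].pop_front();
      }
      return page;
    }
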
3932 // Loop needs to support deletion if live bytes == 0 for a page.
3934 Page* p = *(it++);
3945 if (p->IsFlagSet(Page::BLACK_PAGE)) {
3947 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3948 p->ClearFlag(Page::BLACK_PAGE);
3951 // Check if the space top was in this page, which means that the
3961 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3962 // We need to sweep the page to get it into an iterable state again. Note
3966 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
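
The per-page triage at 3932-3966 distinguishes two special cases before a
page is handed to the concurrent sweeper: black pages need no sweeping at all
(every object on them is live), and NEVER_ALLOCATE_ON_PAGE pages are swept
eagerly so they become iterable again. The rough shape of the loop body, with
deletion and accounting elided:

    Page* p = *(it++);
    if (p->IsFlagSet(Page::BLACK_PAGE)) {
      // Everything on a black page is live; just mark sweeping as done.
      p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
      p->ClearFlag(Page::BLACK_PAGE);
      continue;
    }
    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // Sweep immediately to restore iterability instead of queueing the
      // page for the concurrent sweeper.
      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
      // ... immediate sweep ...
      continue;
    }
    // Otherwise the page is queued for (possibly parallel) sweeping.
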
3973 // One unused page is kept; all further ones are released before sweeping them.
3977 PrintIsolate(isolate(), "sweeping: released page: %p",
4047 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4048 Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));