// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/mark-compact.h"

#include <unordered_map>

#include "src/base/utils/random-number-generator.h"
#include "src/cancelable-task.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
#include "src/objects/hash-table-inl.h"
#include "src/transitions-inl.h"
#include "src/utils-inl.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"

namespace v8 {
namespace internal {

const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";

// The following has to hold in order for {MarkingState::MarkBitFrom} to not
// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);

// =============================================================================
// Verifiers
// =============================================================================

#ifdef VERIFY_HEAP
namespace {

class MarkingVerifier : public ObjectVisitor, public RootVisitor {
 public:
  virtual void Run() = 0;

 protected:
  explicit MarkingVerifier(Heap* heap) : heap_(heap) {}

  virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;

  virtual void VerifyPointers(Object** start, Object** end) = 0;
  virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;

  virtual bool IsMarked(HeapObject* object) = 0;

  virtual bool IsBlackOrGrey(HeapObject* object) = 0;

  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    VerifyPointers(start, end);
  }

  void VisitPointers(HeapObject* host, MaybeObject** start,
                     MaybeObject** end) override {
    VerifyPointers(start, end);
  }

  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override {
    VerifyPointers(start, end);
  }

  void VerifyRoots(VisitMode mode);
  void VerifyMarkingOnPage(const Page* page, Address start, Address end);
  void VerifyMarking(NewSpace* new_space);
  void VerifyMarking(PagedSpace* paged_space);

  Heap* heap_;
};

void MarkingVerifier::VerifyRoots(VisitMode mode) {
  heap_->IterateStrongRoots(this, mode);
}

void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
                                          Address end) {
  HeapObject* object;
  Address next_object_must_be_here_or_later = start;
  for (Address current = start; current < end;) {
    object = HeapObject::FromAddress(current);
    // One word fillers at the end of a black area can be grey.
    if (IsBlackOrGrey(object) &&
        object->map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
      CHECK(IsMarked(object));
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(this);
      next_object_must_be_here_or_later = current + object->Size();
      // The object is either part of a black area of black allocation or a
      // regular black object
      CHECK(
          bitmap(page)->AllBitsSetInRange(
              page->AddressToMarkbitIndex(current),
              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
          bitmap(page)->AllBitsClearInRange(
              page->AddressToMarkbitIndex(current + kPointerSize * 2),
              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
      current = next_object_must_be_here_or_later;
    } else {
      current += kPointerSize;
    }
  }
}

void MarkingVerifier::VerifyMarking(NewSpace* space) {
  Address end = space->top();
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->first_allocatable_address(),
           space->first_page()->area_start());

  PageRange range(space->first_allocatable_address(), end);
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address limit = it != range.end() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarkingOnPage(page, page->area_start(), limit);
  }
}

void MarkingVerifier::VerifyMarking(PagedSpace* space) {
  for (Page* p : *space) {
    VerifyMarkingOnPage(p, p->area_start(), p->area_end());
  }
}

class FullMarkingVerifier : public MarkingVerifier {
 public:
  explicit FullMarkingVerifier(Heap* heap)
      : MarkingVerifier(heap),
        marking_state_(
            heap->mark_compact_collector()->non_atomic_marking_state()) {}

  void Run() override {
    VerifyRoots(VISIT_ONLY_STRONG);
    VerifyMarking(heap_->new_space());
    VerifyMarking(heap_->old_space());
    VerifyMarking(heap_->code_space());
    VerifyMarking(heap_->map_space());

    LargeObjectIterator it(heap_->lo_space());
    for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
      if (marking_state_->IsBlackOrGrey(obj)) {
        obj->Iterate(this);
      }
    }
  }

 protected:
  Bitmap* bitmap(const MemoryChunk* chunk) override {
    return marking_state_->bitmap(chunk);
  }

  bool IsMarked(HeapObject* object) override {
    return marking_state_->IsBlack(object);
  }

  bool IsBlackOrGrey(HeapObject* object) override {
    return marking_state_->IsBlackOrGrey(object);
  }

  void VerifyPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(marking_state_->IsBlackOrGrey(object));
      }
    }
  }

  void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
    for (MaybeObject** current = start; current < end; current++) {
      HeapObject* object;
      if ((*current)->ToStrongHeapObject(&object)) {
        CHECK(marking_state_->IsBlackOrGrey(object));
      }
    }
  }

  void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!host->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(host, &p);
    }
  }

 private:
  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
 public:
  virtual void Run() = 0;

  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    VerifyPointers(start, end);
  }

  void VisitPointers(HeapObject* host, MaybeObject** start,
                     MaybeObject** end) override {
    VerifyPointers(start, end);
  }

  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override {
    VerifyPointers(start, end);
  }

 protected:
  explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}

  inline Heap* heap() { return heap_; }

  virtual void VerifyPointers(Object** start, Object** end) = 0;
  virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;

  void VerifyRoots(VisitMode mode);
  void VerifyEvacuationOnPage(Address start, Address end);
  void VerifyEvacuation(NewSpace* new_space);
  void VerifyEvacuation(PagedSpace* paged_space);

  Heap* heap_;
};

void EvacuationVerifier::VerifyRoots(VisitMode mode) {
  heap_->IterateStrongRoots(this, mode);
}

void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
  Address current = start;
  while (current < end) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (!object->IsFiller()) object->Iterate(this);
    current += object->Size();
  }
}

void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
  PageRange range(space->first_allocatable_address(), space->top());
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address current = page->area_start();
    Address limit = it != range.end() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    VerifyEvacuationOnPage(current, limit);
  }
}

void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
  for (Page* p : *space) {
    if (p->IsEvacuationCandidate()) continue;
    if (p->Contains(space->top())) {
      CodePageMemoryModificationScope memory_modification_scope(p);
      heap_->CreateFillerObjectAt(
          space->top(), static_cast<int>(space->limit() - space->top()),
          ClearRecordedSlots::kNo);
    }
    VerifyEvacuationOnPage(p->area_start(), p->area_end());
  }
}

class FullEvacuationVerifier : public EvacuationVerifier {
 public:
  explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}

  void Run() override {
    VerifyRoots(VISIT_ALL);
    VerifyEvacuation(heap_->new_space());
    VerifyEvacuation(heap_->old_space());
    VerifyEvacuation(heap_->code_space());
    VerifyEvacuation(heap_->map_space());
  }

 protected:
  void VerifyPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        if (Heap::InNewSpace(object)) {
          CHECK(Heap::InToSpace(object));
        }
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
  void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
    for (MaybeObject** current = start; current < end; current++) {
      HeapObject* object;
      if ((*current)->ToStrongHeapObject(&object)) {
        if (Heap::InNewSpace(object)) {
          CHECK(Heap::InToSpace(object));
        }
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};

}  // namespace
#endif  // VERIFY_HEAP

// =============================================================================
// MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// =============================================================================

using MarkCompactMarkingVisitor =
    MarkingVisitor<FixedArrayVisitationMode::kRegular,
                   TraceRetainingPathMode::kEnabled,
                   MarkCompactCollector::MarkingState>;

namespace {

int NumberOfAvailableCores() {
  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
  // This number of cores should be greater than zero and never change.
  DCHECK_GE(num_cores, 1);
  DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
  return num_cores;
}

}  // namespace

int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
  DCHECK_GT(pages, 0);
  int tasks =
      FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
  if (!heap_->CanExpandOldGeneration(
          static_cast<size_t>(tasks * Page::kPageSize))) {
    // Optimize for memory usage near the heap limit.
    tasks = 1;
  }
  return tasks;
}

int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
                                                                 int slots) {
  DCHECK_GT(pages, 0);
  // Limit the number of update tasks as task creation often dominates the
  // actual work that is being done.
  const int kMaxPointerUpdateTasks = 8;
  const int kSlotsPerTask = 600;
  const int wanted_tasks =
      (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
  return FLAG_parallel_pointer_update
             ? Min(kMaxPointerUpdateTasks,
                   Min(NumberOfAvailableCores(), wanted_tasks))
             : 1;
}

int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
    int pages) {
  DCHECK_GT(pages, 0);
  // No cap needed because all pages we need to process are fully filled with
  // interesting objects.
  return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
                                      : 1;
}

MarkCompactCollector::MarkCompactCollector(Heap* heap)
    : MarkCompactCollectorBase(heap),
      page_parallel_job_semaphore_(0),
#ifdef DEBUG
      state_(IDLE),
#endif
      was_marked_incrementally_(false),
      evacuation_(false),
      compacting_(false),
      black_allocation_(false),
      have_code_to_deoptimize_(false),
      marking_worklist_(heap),
      sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
  old_to_new_slots_ = -1;
}

MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }

void MarkCompactCollector::SetUp() {
  DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
  DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
  DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
  DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}

void MarkCompactCollector::TearDown() {
  AbortCompaction();
  AbortWeakObjects();
  if (heap()->incremental_marking()->IsMarking()) {
    marking_worklist()->Clear();
  }
}

void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  DCHECK(!p->NeverEvacuate());
  p->MarkEvacuationCandidate();
  evacuation_candidates_.push_back(p);
}

static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}

bool MarkCompactCollector::StartCompaction() {
  if (!compacting_) {
    DCHECK(evacuation_candidates_.empty());

    CollectEvacuationCandidates(heap()->old_space());

    if (FLAG_compact_code_space) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
    }

    compacting_ = !evacuation_candidates_.empty();
  }

  return compacting_;
}

void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

#ifdef ENABLE_MINOR_MC
  heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
#endif  // ENABLE_MINOR_MC

  MarkLiveObjects();
  ClearNonLiveReferences();
  VerifyMarking();

  RecordObjectStats();

  StartSweepSpaces();

  Evacuate();

  Finish();
}

#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
  HeapObjectIterator iterator(space);
  while (HeapObject* object = iterator.Next()) {
    CHECK(non_atomic_marking_state()->IsBlack(object));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  for (Page* p : *space) {
    CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
    CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());
  // Read-only space should always be black since we never collect any objects
  // in it or linked from it.
  VerifyMarkbitsAreDirty(heap_->read_only_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    CHECK(non_atomic_marking_state()->IsWhite(obj));
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
                    MemoryChunk::FromAddress(obj->address())));
  }
}

#endif  // VERIFY_HEAP

void MarkCompactCollector::ClearMarkbitsInPagedSpace(PagedSpace* space) {
  for (Page* p : *space) {
    non_atomic_marking_state()->ClearLiveness(p);
  }
}

void MarkCompactCollector::ClearMarkbitsInNewSpace(NewSpace* space) {
  for (Page* p : *space) {
    non_atomic_marking_state()->ClearLiveness(p);
  }
}

void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_space());
  ClearMarkbitsInNewSpace(heap_->new_space());
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
}

void MarkCompactCollector::EnsureSweepingCompleted() {
  if (!sweeper()->sweeping_in_progress()) return;

  sweeper()->EnsureCompleted();
  heap()->old_space()->RefillFreeList();
  heap()->code_space()->RefillFreeList();
  heap()->map_space()->RefillFreeList();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !evacuation()) {
    FullEvacuationVerifier verifier(heap());
    verifier.Run();
  }
#endif
}

void MarkCompactCollector::ComputeEvacuationHeuristics(
    size_t area_size, int* target_fragmentation_percent,
    size_t* max_evacuated_bytes) {
  // For the memory-reducing and the optimize-for-memory modes we directly
  // define both constants.
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
  const int kTargetFragmentationPercentForOptimizeMemory = 20;
  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;

  // For regular mode (which is latency critical) we define less aggressive
  // defaults to start and switch to a trace-based (using compaction speed)
  // approach as soon as we have enough samples.
  const int kTargetFragmentationPercent = 70;
  const size_t kMaxEvacuatedBytes = 4 * MB;
  // Time to take for a single area (=payload of page). Used as soon as there
  // exist enough compaction speed samples.
  const float kTargetMsPerArea = .5;

  if (heap()->ShouldReduceMemory()) {
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
  } else if (heap()->ShouldOptimizeForMemoryUsage()) {
    *target_fragmentation_percent =
        kTargetFragmentationPercentForOptimizeMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
  } else {
    const double estimated_compaction_speed =
        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    if (estimated_compaction_speed != 0) {
      // Estimate the target fragmentation based on traced compaction speed
      // and a goal for a single page.
      const double estimated_ms_per_area =
          1 + area_size / estimated_compaction_speed;
      *target_fragmentation_percent = static_cast<int>(
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      }
    } else {
      *target_fragmentation_percent = kTargetFragmentationPercent;
    }
    *max_evacuated_bytes = kMaxEvacuatedBytes;
  }
}

void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();
  size_t area_size = space->AreaSize();

  // Pairs of (live_bytes_in_page, page).
  typedef std::pair<size_t, Page*> LiveBytesPagePair;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(number_of_pages);

  DCHECK(!sweeping_in_progress());
  Page* owner_of_linear_allocation_area =
      space->top() == space->limit()
          ? nullptr
          : Page::FromAllocationAreaAddress(space->top());
  for (Page* p : *space) {
    if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
        !p->CanAllocate())
      continue;
    // Invariant: Evacuation candidates are just created when marking is
    // started. This means that sweeping has finished. Furthermore, at the end
    // of a GC all evacuation candidates are cleared and their slot buffers are
    // released.
    CHECK(!p->IsEvacuationCandidate());
    CHECK_NULL(p->slot_set<OLD_TO_OLD>());
    CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
    CHECK(p->SweepingDone());
    DCHECK(p->area_size() == area_size);
    pages.push_back(std::make_pair(p->allocated_bytes(), p));
  }

  int candidate_count = 0;
  size_t total_live_bytes = 0;

  const bool reduce_memory = heap()->ShouldReduceMemory();
  if (FLAG_manual_evacuation_candidates_selection) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
        AddEvacuationCandidate(p);
      }
    }
  } else if (FLAG_stress_compaction_random) {
    double fraction = isolate()->fuzzer_rng()->NextDouble();
    size_t pages_to_mark_count =
        static_cast<size_t>(fraction * (pages.size() + 1));
    for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
             pages.size(), pages_to_mark_count)) {
      candidate_count++;
      total_live_bytes += pages[i].first;
      AddEvacuationCandidate(pages[i].second);
    }
  } else if (FLAG_stress_compaction) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (i % 2 == 0) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        AddEvacuationCandidate(p);
      }
    }
  } else {
    // The following approach determines the pages that should be evacuated.
    //
    // We use two conditions to decide whether a page qualifies as an
    // evacuation candidate, or not:
    // * Target fragmentation: How fragmented is a page, i.e., what is the
    //   ratio between live bytes and capacity of this page (= area).
    // * Evacuation quota: A global quota determining how many bytes should be
    //   compacted.
    //
    // The algorithm sorts all pages by live bytes and then iterates through
    // them starting with the page with the most free memory, adding them to
    // the set of evacuation candidates as long as both conditions
    // (fragmentation and quota) hold.
    size_t max_evacuated_bytes;
    int target_fragmentation_percent;
    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                &max_evacuated_bytes);

    const size_t free_bytes_threshold =
        target_fragmentation_percent * (area_size / 100);

    // Sort pages from the most free to the least free, then select
    // the first n pages for evacuation such that:
    // - the total size of evacuated objects does not exceed the specified
    //   limit.
    // - fragmentation of (n+1)-th page does not exceed the specified limit.
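    // Worked example (editor's illustration, numbers are hypothetical and not
    // taken from this file): with an area_size of 500 KB and a computed
    // target_fragmentation_percent of 70, free_bytes_threshold is
    // 70 * (500 KB / 100) = 350 KB, i.e. a page passes the fragmentation
    // condition (free_bytes >= free_bytes_threshold) only if at least ~70% of
    // its usable area is free.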
    std::sort(pages.begin(), pages.end(),
              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                return a.first < b.first;
              });
    for (size_t i = 0; i < pages.size(); i++) {
      size_t live_bytes = pages[i].first;
      DCHECK_GE(area_size, live_bytes);
      size_t free_bytes = area_size - live_bytes;
      if (FLAG_always_compact ||
          ((free_bytes >= free_bytes_threshold) &&
           ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
        candidate_count++;
        total_live_bytes += live_bytes;
      }
      if (FLAG_trace_fragmentation_verbose) {
        PrintIsolate(isolate(),
                     "compaction-selection-page: space=%s free_bytes_page=%zu "
                     "fragmentation_limit_kb=%" PRIuS
                     " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
                     "compaction_limit_kb=%zu\n",
                     space->name(), free_bytes / KB, free_bytes_threshold / KB,
                     target_fragmentation_percent, total_live_bytes / KB,
                     max_evacuated_bytes / KB);
      }
    }
    // How many pages we will allocate for the evacuated objects
    // in the worst case: ceil(total_live_bytes / area_size)
    int estimated_new_pages =
        static_cast<int>((total_live_bytes + area_size - 1) / area_size);
    DCHECK_LE(estimated_new_pages, candidate_count);
    int estimated_released_pages = candidate_count - estimated_new_pages;
    // Avoid (compact -> expand) cycles.
    if ((estimated_released_pages == 0) && !FLAG_always_compact) {
      candidate_count = 0;
    }
    for (int i = 0; i < candidate_count; i++) {
      AddEvacuationCandidate(pages[i].second);
    }
  }

  if (FLAG_trace_fragmentation) {
    PrintIsolate(isolate(),
                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
                 "total_live_bytes=%zu\n",
                 space->name(), reduce_memory, candidate_count,
                 total_live_bytes / KB);
  }
}

void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    RememberedSet<OLD_TO_OLD>::ClearAll(heap());
    for (Page* p : evacuation_candidates_) {
      p->ClearEvacuationCandidate();
    }
    compacting_ = false;
    evacuation_candidates_.clear();
  }
  DCHECK(evacuation_candidates_.empty());
}

void MarkCompactCollector::Prepare() {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

#ifdef DEBUG
  DCHECK(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  DCHECK(!FLAG_never_compact || !FLAG_always_compact);

  // Instead of waiting we could also abort the sweeper threads here.
  EnsureSweepingCompleted();

  if (heap()->incremental_marking()->IsSweeping()) {
    heap()->incremental_marking()->Stop();
  }

  heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
    heap()->incremental_marking()->Stop();
    heap()->incremental_marking()->AbortBlackAllocation();
    FinishConcurrentMarking(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
    heap()->incremental_marking()->Deactivate();
    ClearMarkbits();
    AbortWeakObjects();
    AbortCompaction();
    heap_->local_embedder_heap_tracer()->AbortTracing();
    marking_worklist()->Clear();
    was_marked_incrementally_ = false;
  }

  if (!was_marked_incrementally_) {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue();
  }

  // Don't start compaction if we are in the middle of an incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction();
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next(); space != nullptr;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }
  heap()->account_external_memory_concurrently_freed();

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}

void MarkCompactCollector::FinishConcurrentMarking(
    ConcurrentMarking::StopRequest stop_request) {
  if (FLAG_concurrent_marking) {
    heap()->concurrent_marking()->Stop(stop_request);
    heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
  }
}

void MarkCompactCollector::VerifyMarking() {
  CHECK(marking_worklist()->IsEmpty());
  DCHECK(heap_->incremental_marking()->IsStopped());
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    FullMarkingVerifier verifier(heap());
    verifier.Run();
  }
#endif
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap()->old_space()->VerifyLiveBytes();
    heap()->map_space()->VerifyLiveBytes();
    heap()->code_space()->VerifyLiveBytes();
  }
#endif
}

void MarkCompactCollector::Finish() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);

#ifdef DEBUG
  heap()->VerifyCountersBeforeConcurrentSweeping();
#endif

  CHECK(weak_objects_.current_ephemerons.IsEmpty());
  CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
  weak_objects_.next_ephemerons.Clear();

  sweeper()->StartSweeperTasks();
  sweeper()->StartIterabilityTasks();

  // Clear the marking state of live large objects.
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();

#ifdef DEBUG
  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  // The stub caches are not traversed during GC; clear them to force
  // their lazy re-initialization. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->load_stub_cache()->Clear();
  isolate()->store_stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }
}

class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
 public:
  explicit RootMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitRootPointer(Root root, const char* description, Object** p) final {
    MarkObjectByPointer(root, p);
  }

  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) final {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(root, p);
  }

 private:
  V8_INLINE void MarkObjectByPointer(Root root, Object** p) {
    if (!(*p)->IsHeapObject()) return;

    collector_->MarkRootObject(root, HeapObject::cast(*p));
  }

  MarkCompactCollector* const collector_;
};

// This visitor is used to visit the body of special objects held alive by
// other roots.
//
// It is currently used for
// - Code held alive by the top optimized frame. This code cannot be
//   deoptimized and thus has to be kept alive in an isolated way, i.e., it
//   should not keep alive other code objects reachable through the weak list,
//   but it should keep alive its embedded pointers (which would otherwise be
//   dropped).
// - Prefix of the string table.
class MarkCompactCollector::CustomRootBodyMarkingVisitor final
    : public ObjectVisitor {
 public:
  explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointer(HeapObject* host, Object** p) final {
    MarkObject(host, *p);
  }

  void VisitPointers(HeapObject* host, Object** start, Object** end) final {
    for (Object** p = start; p < end; p++) {
      DCHECK(!HasWeakHeapObjectTag(*p));
      MarkObject(host, *p);
    }
  }

  void VisitPointers(HeapObject* host, MaybeObject** start,
                     MaybeObject** end) final {
    // At the moment, custom roots cannot contain weak pointers.
    UNREACHABLE();
  }

  // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.

 private:
  void MarkObject(HeapObject* host, Object* object) {
    if (!object->IsHeapObject()) return;
    collector_->MarkObject(host, HeapObject::cast(object));
  }

  MarkCompactCollector* const collector_;
};

class InternalizedStringTableCleaner : public ObjectVisitor {
 public:
  InternalizedStringTableCleaner(Heap* heap, HeapObject* table)
      : heap_(heap), pointers_removed_(0), table_(table) {}

  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    // Visit all HeapObject pointers in [start, end).
    Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
    MarkCompactCollector::NonAtomicMarkingState* marking_state =
        heap_->mark_compact_collector()->non_atomic_marking_state();
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (o->IsHeapObject()) {
        HeapObject* heap_object = HeapObject::cast(o);
        if (marking_state->IsWhite(heap_object)) {
          pointers_removed_++;
          // Set the entry to the_hole_value (as deleted).
          *p = the_hole;
        } else {
          // StringTable contains only old space strings.
          DCHECK(!Heap::InNewSpace(o));
          MarkCompactCollector::RecordSlot(table_, p, heap_object);
        }
      }
    }
  }

  void VisitPointers(HeapObject* host, MaybeObject** start,
                     MaybeObject** end) final {
    UNREACHABLE();
  }

  int PointersRemoved() { return pointers_removed_; }

 private:
  Heap* heap_;
  int pointers_removed_;
  HeapObject* table_;
};

class ExternalStringTableCleaner : public RootVisitor {
 public:
  explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}

  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override {
    // Visit all HeapObject pointers in [start, end).
    MarkCompactCollector::NonAtomicMarkingState* marking_state =
        heap_->mark_compact_collector()->non_atomic_marking_state();
    Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (o->IsHeapObject()) {
        HeapObject* heap_object = HeapObject::cast(o);
        if (marking_state->IsWhite(heap_object)) {
          if (o->IsExternalString()) {
            heap_->FinalizeExternalString(String::cast(*p));
          } else {
            // The original external string may have been internalized.
            DCHECK(o->IsThinString());
          }
          // Set the entry to the_hole_value (as deleted).
          *p = the_hole;
        }
      }
    }
  }

 private:
  Heap* heap_;
};

// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit MarkCompactWeakObjectRetainer(
      MarkCompactCollector::NonAtomicMarkingState* marking_state)
      : marking_state_(marking_state) {}

  virtual Object* RetainAs(Object* object) {
    HeapObject* heap_object = HeapObject::cast(object);
    DCHECK(!marking_state_->IsGrey(heap_object));
    if (marking_state_->IsBlack(heap_object)) {
      return object;
    } else if (object->IsAllocationSite() &&
               !(AllocationSite::cast(object)->IsZombie())) {
      // "dead" AllocationSites need to live long enough for a traversal of new
      // space. These sites get a one-time reprieve.

      Object* nested = object;
      while (nested->IsAllocationSite()) {
        AllocationSite* current_site = AllocationSite::cast(nested);
        // MarkZombie will override the nested_site, read it first before
        // marking
        nested = current_site->nested_site();
        current_site->MarkZombie();
        marking_state_->WhiteToBlack(current_site);
      }

      return object;
    } else {
      return nullptr;
    }
  }

 private:
  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

class RecordMigratedSlotVisitor : public ObjectVisitor {
 public:
  explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  inline void VisitPointer(HeapObject* host, Object** p) final {
    DCHECK(!HasWeakHeapObjectTag(*p));
    RecordMigratedSlot(host, reinterpret_cast<MaybeObject*>(*p),
                       reinterpret_cast<Address>(p));
  }

  inline void VisitPointer(HeapObject* host, MaybeObject** p) final {
    RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
  }

  inline void VisitPointers(HeapObject* host, Object** start,
                            Object** end) final {
    while (start < end) {
      VisitPointer(host, start);
      ++start;
    }
  }

  inline void VisitPointers(HeapObject* host, MaybeObject** start,
                            MaybeObject** end) final {
    while (start < end) {
      VisitPointer(host, start);
      ++start;
    }
  }

  inline void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
    DCHECK_EQ(host, rinfo->host());
    DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    // The target is always in old space, we don't have to record the slot in
    // the old-to-new remembered set.
    DCHECK(!Heap::InNewSpace(target));
    collector_->RecordRelocSlot(host, rinfo, target);
  }

  inline void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    DCHECK_EQ(host, rinfo->host());
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    HeapObject* object = HeapObject::cast(rinfo->target_object());
    GenerationalBarrierForCode(host, rinfo, object);
    collector_->RecordRelocSlot(host, rinfo, object);
  }

  // Entries that are skipped for recording.
  inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {}
  inline void VisitExternalReference(Foreign* host, Address* p) final {}
  inline void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) final {}
  inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}

 protected:
  inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
                                         Address slot) {
    if (value->IsStrongOrWeakHeapObject()) {
      Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
      if (p->InNewSpace()) {
        DCHECK_IMPLIES(p->InToSpace(),
                       p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
        RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
            Page::FromAddress(slot), slot);
      } else if (p->IsEvacuationCandidate()) {
        RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
            Page::FromAddress(slot), slot);
      }
    }
  }

  MarkCompactCollector* collector_;
};

class MigrationObserver {
 public:
  explicit MigrationObserver(Heap* heap) : heap_(heap) {}

  virtual ~MigrationObserver() {}
  virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                    int size) = 0;

 protected:
  Heap* heap_;
};

class ProfilingMigrationObserver final : public MigrationObserver {
 public:
  explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}

  inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                   int size) final {
    if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
      PROFILE(heap_->isolate(),
              CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
    }
    heap_->OnMoveEvent(dst, src, size);
  }
};

class HeapObjectVisitor {
 public:
  virtual ~HeapObjectVisitor() {}
  virtual bool Visit(HeapObject* object, int size) = 0;
};

class EvacuateVisitorBase : public HeapObjectVisitor {
 public:
  void AddObserver(MigrationObserver* observer) {
    migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
    observers_.push_back(observer);
  }

 protected:
  enum MigrationMode { kFast, kObserved };

  typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject* dst,
                                  HeapObject* src, int size,
                                  AllocationSpace dest);

  template <MigrationMode mode>
  static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject* dst,
                               HeapObject* src, int size,
                               AllocationSpace dest) {
    Address dst_addr = dst->address();
    Address src_addr = src->address();
    DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
    DCHECK(dest != LO_SPACE);
    if (dest == OLD_SPACE) {
      DCHECK_OBJECT_SIZE(size);
      DCHECK(IsAligned(size, kPointerSize));
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      if (mode != MigrationMode::kFast)
        base->ExecuteMigrationObservers(dest, src, dst, size);
      dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
    } else if (dest == CODE_SPACE) {
      DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      Code::cast(dst)->Relocate(dst_addr - src_addr);
      if (mode != MigrationMode::kFast)
        base->ExecuteMigrationObservers(dest, src, dst, size);
      dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
    } else {
      DCHECK_OBJECT_SIZE(size);
      DCHECK(dest == NEW_SPACE);
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      if (mode != MigrationMode::kFast)
        base->ExecuteMigrationObservers(dest, src, dst, size);
    }
    base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
                        static_cast<base::AtomicWord>(dst_addr));
  }

  EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
                      RecordMigratedSlotVisitor* record_visitor)
      : heap_(heap),
        local_allocator_(local_allocator),
        record_visitor_(record_visitor) {
    migration_function_ = RawMigrateObject<MigrationMode::kFast>;
  }

  inline bool TryEvacuateObject(AllocationSpace target_space,
                                HeapObject* object, int size,
                                HeapObject** target_object) {
#ifdef VERIFY_HEAP
    if (AbortCompactionForTesting(object)) return false;
#endif  // VERIFY_HEAP
    AllocationAlignment alignment =
        HeapObject::RequiredAlignment(object->map());
    AllocationResult allocation =
        local_allocator_->Allocate(target_space, size, alignment);
    if (allocation.To(target_object)) {
      MigrateObject(*target_object, object, size, target_space);
      return true;
    }
    return false;
  }

  inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject* src,
                                        HeapObject* dst, int size) {
    for (MigrationObserver* obs : observers_) {
      obs->Move(dest, src, dst, size);
    }
  }

  inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                            AllocationSpace dest) {
    migration_function_(this, dst, src, size, dest);
  }

#ifdef VERIFY_HEAP
  bool AbortCompactionForTesting(HeapObject* object) {
    if (FLAG_stress_compaction) {
      const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
                             kPageAlignmentMask & ~kPointerAlignmentMask;
      if ((object->address() & kPageAlignmentMask) == mask) {
        Page* page = Page::FromAddress(object->address());
        if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
          page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
        } else {
          page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
          return true;
        }
      }
    }
    return false;
  }
#endif  // VERIFY_HEAP

  Heap* heap_;
  LocalAllocator* local_allocator_;
  RecordMigratedSlotVisitor* record_visitor_;
  std::vector<MigrationObserver*> observers_;
  MigrateFunction migration_function_;
};

class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
 public:
  explicit EvacuateNewSpaceVisitor(
      Heap* heap, LocalAllocator* local_allocator,
      RecordMigratedSlotVisitor* record_visitor,
      Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
      : EvacuateVisitorBase(heap, local_allocator, record_visitor),
        buffer_(LocalAllocationBuffer::InvalidBuffer()),
        promoted_size_(0),
        semispace_copied_size_(0),
        local_pretenuring_feedback_(local_pretenuring_feedback),
        is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}

  inline bool Visit(HeapObject* object, int size) override {
    if (TryEvacuateWithoutCopy(object)) return true;
    HeapObject* target_object = nullptr;
    if (heap_->ShouldBePromoted(object->address()) &&
        TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
      promoted_size_ += size;
      return true;
    }
    heap_->UpdateAllocationSite(object->map(), object,
                                local_pretenuring_feedback_);
    HeapObject* target = nullptr;
    AllocationSpace space = AllocateTargetObject(object, size, &target);
    MigrateObject(HeapObject::cast(target), object, size, space);
    semispace_copied_size_ += size;
    return true;
  }

  intptr_t promoted_size() { return promoted_size_; }
  intptr_t semispace_copied_size() { return semispace_copied_size_; }

 private:
  inline bool TryEvacuateWithoutCopy(HeapObject* object) {
    if (is_incremental_marking_) return false;

    Map* map = object->map();

    // Some objects can be evacuated without creating a copy.
    if (map->visitor_id() == kVisitThinString) {
      HeapObject* actual = ThinString::cast(object)->unchecked_actual();
      if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
      base::Relaxed_Store(
          reinterpret_cast<base::AtomicWord*>(object->address()),
          reinterpret_cast<base::AtomicWord>(
              MapWord::FromForwardingAddress(actual).ToMap()));
      return true;
    }
    // TODO(mlippautz): Handle ConsString.

    return false;
  }

  inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
                                              HeapObject** target_object) {
    AllocationAlignment alignment =
        HeapObject::RequiredAlignment(old_object->map());
    AllocationSpace space_allocated_in = NEW_SPACE;
    AllocationResult allocation =
        local_allocator_->Allocate(NEW_SPACE, size, alignment);
    if (allocation.IsRetry()) {
      allocation = AllocateInOldSpace(size, alignment);
      space_allocated_in = OLD_SPACE;
    }
    bool ok = allocation.To(target_object);
    DCHECK(ok);
    USE(ok);
    return space_allocated_in;
  }

  inline AllocationResult AllocateInOldSpace(int size_in_bytes,
                                             AllocationAlignment alignment) {
    AllocationResult allocation =
        local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
    if (allocation.IsRetry()) {
      heap_->FatalProcessOutOfMemory(
          "MarkCompactCollector: semi-space copy, fallback in old gen");
    }
    return allocation;
  }

  LocalAllocationBuffer buffer_;
  intptr_t promoted_size_;
  intptr_t semispace_copied_size_;
  Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
  bool is_incremental_marking_;
};

template <PageEvacuationMode mode>
class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
 public:
  explicit EvacuateNewSpacePageVisitor(
      Heap* heap, RecordMigratedSlotVisitor* record_visitor,
      Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
      : heap_(heap),
        record_visitor_(record_visitor),
        moved_bytes_(0),
        local_pretenuring_feedback_(local_pretenuring_feedback) {}

  static void Move(Page* page) {
    switch (mode) {
      case NEW_TO_NEW:
        page->heap()->new_space()->MovePageFromSpaceToSpace(page);
        page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
        break;
      case NEW_TO_OLD: {
        page->heap()->new_space()->from_space().RemovePage(page);
        Page* new_page = Page::ConvertNewToOld(page);
        DCHECK(!new_page->InNewSpace());
        new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
        break;
      }
    }
  }

  inline bool Visit(HeapObject* object, int size) {
    if (mode == NEW_TO_NEW) {
      heap_->UpdateAllocationSite(object->map(), object,
                                  local_pretenuring_feedback_);
    } else if (mode == NEW_TO_OLD) {
      object->IterateBodyFast(record_visitor_);
    }
    return true;
  }

  intptr_t moved_bytes() { return moved_bytes_; }
  void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }

 private:
  Heap* heap_;
  RecordMigratedSlotVisitor* record_visitor_;
  intptr_t moved_bytes_;
  Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
};

class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
 public:
  EvacuateOldSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
                          RecordMigratedSlotVisitor* record_visitor)
      : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}

  inline bool Visit(HeapObject* object, int size) override {
    HeapObject* target_object = nullptr;
    if (TryEvacuateObject(
            Page::FromAddress(object->address())->owner()->identity(), object,
            size, &target_object)) {
      DCHECK(object->map_word().IsForwardingAddress());
      return true;
    }
    return false;
  }
};

class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
 public:
  explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}

  inline bool Visit(HeapObject* object, int size) {
    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
    object->IterateBodyFast(&visitor);
    return true;
  }

 private:
  Heap* heap_;
};

bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, Object** p) {
  Object* o = *p;
  if (!o->IsHeapObject()) return false;
  HeapObject* heap_object = HeapObject::cast(o);
  return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
      heap_object);
}

void MarkCompactCollector::MarkStringTable(
    ObjectVisitor* custom_root_body_visitor) {
  StringTable* string_table = heap()->string_table();
  // Mark the string table itself.
  if (marking_state()->WhiteToBlack(string_table)) {
    // Explicitly mark the prefix.
    string_table->IteratePrefix(custom_root_body_visitor);
  }
}

void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
                                     ObjectVisitor* custom_root_body_visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  heap()->IterateStrongRoots(root_visitor, VISIT_ONLY_STRONG);

  // Custom marking for string table and top optimized frame.
  MarkStringTable(custom_root_body_visitor);
  ProcessTopOptimizedFrame(custom_root_body_visitor);
}

void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
  bool work_to_do = true;
  int iterations = 0;
  int max_iterations = FLAG_ephemeron_fixpoint_iterations;

  while (work_to_do) {
    PerformWrapperTracing();

    if (iterations >= max_iterations) {
      // Give up fixpoint iteration and switch to linear algorithm.
      ProcessEphemeronsLinear();
      break;
    }

    // Move ephemerons from next_ephemerons into current_ephemerons to
    // drain them in this iteration.
    weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
    heap()->concurrent_marking()->set_ephemeron_marked(false);

    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);

      if (FLAG_parallel_marking) {
        DCHECK(FLAG_concurrent_marking);
        heap_->concurrent_marking()->RescheduleTasksIfNeeded();
      }

      work_to_do = ProcessEphemerons();
      FinishConcurrentMarking(
          ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
    }

    CHECK(weak_objects_.current_ephemerons.IsEmpty());
    CHECK(weak_objects_.discovered_ephemerons.IsEmpty());

    work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
                 heap()->concurrent_marking()->ephemeron_marked() ||
                 !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
    ++iterations;
  }

  CHECK(marking_worklist()->IsEmpty());
  CHECK(weak_objects_.current_ephemerons.IsEmpty());
  CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
}

bool MarkCompactCollector::ProcessEphemerons() {
  Ephemeron ephemeron;
  bool ephemeron_marked = false;

  // Drain current_ephemerons and push ephemerons where key and value are still
  // unreachable into next_ephemerons.
  while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
    if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
      ephemeron_marked = true;
    }
  }

  // Drain marking worklist and push discovered ephemerons into
  // discovered_ephemerons.
  ProcessMarkingWorklist();

  // Drain discovered_ephemerons (filled in the drain MarkingWorklist-phase
  // before) and push ephemerons where key and value are still unreachable into
  // next_ephemerons.
  while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
    if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
      ephemeron_marked = true;
    }
  }

  // Flush local ephemerons for main task to global pool.
  weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThread);
  weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);

  return ephemeron_marked;
}

void MarkCompactCollector::ProcessEphemeronsLinear() {
  TRACE_GC(heap()->tracer(),
           GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
  CHECK(heap()->concurrent_marking()->IsStopped());
  std::unordered_multimap<HeapObject*, HeapObject*> key_to_values;
  Ephemeron ephemeron;

  DCHECK(weak_objects_.current_ephemerons.IsEmpty());
  weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);

  while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
    VisitEphemeron(ephemeron.key, ephemeron.value);

    if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
      key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
    }
  }

  ephemeron_marking_.newly_discovered_limit = key_to_values.size();
  bool work_to_do = true;

  while (work_to_do) {
    PerformWrapperTracing();

    ResetNewlyDiscovered();
    ephemeron_marking_.newly_discovered_limit = key_to_values.size();

    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
      // Drain marking worklist and push all discovered objects into
      // newly_discovered.
      ProcessMarkingWorklistInternal<
          MarkCompactCollector::MarkingWorklistProcessingMode::
              kTrackNewlyDiscoveredObjects>();
    }

    while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
      VisitEphemeron(ephemeron.key, ephemeron.value);

      if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
        key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
      }
    }

    if (ephemeron_marking_.newly_discovered_overflowed) {
      // If newly_discovered was overflowed just visit all ephemerons in
      // next_ephemerons.
      weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
        if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
            non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
          marking_worklist()->Push(ephemeron.value);
        }
      });

    } else {
      // This is the good case: newly_discovered stores all discovered
      // objects. Now use key_to_values to see if discovered objects keep more
      // objects alive due to ephemeron semantics.
      for (HeapObject* object : ephemeron_marking_.newly_discovered) {
        auto range = key_to_values.equal_range(object);
        for (auto it = range.first; it != range.second; ++it) {
          HeapObject* value = it->second;
          MarkObject(object, value);
        }
      }
    }

    // Do NOT drain marking worklist here, otherwise the current checks
    // for work_to_do are not sufficient for determining if another iteration
    // is necessary.

    work_to_do = !marking_worklist()->IsEmpty() ||
                 !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
    CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
  }

  ResetNewlyDiscovered();
  ephemeron_marking_.newly_discovered.shrink_to_fit();

  CHECK(marking_worklist()->IsEmpty());
}

void MarkCompactCollector::PerformWrapperTracing() {
  if (heap_->local_embedder_heap_tracer()->InUse()) {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
    heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
    heap_->local_embedder_heap_tracer()->Trace(
        std::numeric_limits<double>::infinity());
  }
}

void MarkCompactCollector::ProcessMarkingWorklist() {
  ProcessMarkingWorklistInternal<
      MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>();
}

template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
void MarkCompactCollector::ProcessMarkingWorklistInternal() {
  HeapObject* object;
  MarkCompactMarkingVisitor visitor(this, marking_state());
  while ((object = marking_worklist()->Pop()) != nullptr) {
    DCHECK(!object->IsFiller());
    DCHECK(object->IsHeapObject());
    DCHECK(heap()->Contains(object));
    DCHECK(!(marking_state()->IsWhite(object)));
    marking_state()->GreyToBlack(object);
    if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
                    kTrackNewlyDiscoveredObjects) {
      AddNewlyDiscovered(object);
    }
    Map* map = object->map();
    MarkObject(object, map);
    visitor.Visit(map, object);
  }
  DCHECK(marking_worklist()->IsBailoutEmpty());
}

bool MarkCompactCollector::VisitEphemeron(HeapObject* key, HeapObject* value) {
  if (marking_state()->IsBlackOrGrey(key)) {
    if (marking_state()->WhiteToGrey(value)) {
      marking_worklist()->Push(value);
      return true;
    }

  } else if (marking_state()->IsWhite(value)) {
weak_objects_.next_ephemerons.Push(kMainThread, Ephemeron{key, value}); 1677 } 1678 1679 return false; 1680 } 1681 1682 void MarkCompactCollector::ProcessEphemeronMarking() { 1683 DCHECK(marking_worklist()->IsEmpty()); 1684 1685 // Incremental marking might leave ephemerons in main task's local 1686 // buffer, flush it into global pool. 1687 weak_objects_.next_ephemerons.FlushToGlobal(kMainThread); 1688 1689 ProcessEphemeronsUntilFixpoint(); 1690 1691 CHECK(marking_worklist()->IsEmpty()); 1692 CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone()); 1693 } 1694 1695 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) { 1696 for (StackFrameIterator it(isolate(), isolate()->thread_local_top()); 1697 !it.done(); it.Advance()) { 1698 if (it.frame()->type() == StackFrame::INTERPRETED) { 1699 return; 1700 } 1701 if (it.frame()->type() == StackFrame::OPTIMIZED) { 1702 Code* code = it.frame()->LookupCode(); 1703 if (!code->CanDeoptAt(it.frame()->pc())) { 1704 Code::BodyDescriptor::IterateBody(code->map(), code, visitor); 1705 } 1706 return; 1707 } 1708 } 1709 } 1710 1711 void MarkCompactCollector::RecordObjectStats() { 1712 if (V8_UNLIKELY(FLAG_gc_stats)) { 1713 heap()->CreateObjectStats(); 1714 ObjectStatsCollector collector(heap(), heap()->live_object_stats_, 1715 heap()->dead_object_stats_); 1716 collector.Collect(); 1717 if (V8_UNLIKELY(FLAG_gc_stats & 1718 v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) { 1719 std::stringstream live, dead; 1720 heap()->live_object_stats_->Dump(live); 1721 heap()->dead_object_stats_->Dump(dead); 1722 TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"), 1723 "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD, 1724 "live", TRACE_STR_COPY(live.str().c_str()), "dead", 1725 TRACE_STR_COPY(dead.str().c_str())); 1726 } 1727 if (FLAG_trace_gc_object_stats) { 1728 heap()->live_object_stats_->PrintJSON("live"); 1729 heap()->dead_object_stats_->PrintJSON("dead"); 1730 } 1731 heap()->live_object_stats_->CheckpointObjectStats(); 1732 heap()->dead_object_stats_->ClearObjectStats(); 1733 } 1734 } 1735 1736 void MarkCompactCollector::MarkLiveObjects() { 1737 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); 1738 // The recursive GC marker detects when it is nearing stack overflow, 1739 // and switches to a different marking system. JS interrupts interfere 1740 // with the C stack limit check. 
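  // (PostponeInterruptsScope keeps interrupt requests from being serviced
  // while it is in scope, so marking is not perturbed by them.)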
1741 PostponeInterruptsScope postpone(isolate()); 1742 1743 { 1744 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL); 1745 IncrementalMarking* incremental_marking = heap_->incremental_marking(); 1746 if (was_marked_incrementally_) { 1747 incremental_marking->Finalize(); 1748 } else { 1749 CHECK(incremental_marking->IsStopped()); 1750 } 1751 } 1752 1753 #ifdef DEBUG 1754 DCHECK(state_ == PREPARE_GC); 1755 state_ = MARK_LIVE_OBJECTS; 1756 #endif 1757 1758 heap_->local_embedder_heap_tracer()->EnterFinalPause(); 1759 1760 RootMarkingVisitor root_visitor(this); 1761 1762 { 1763 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS); 1764 CustomRootBodyMarkingVisitor custom_root_body_visitor(this); 1765 MarkRoots(&root_visitor, &custom_root_body_visitor); 1766 } 1767 1768 { 1769 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN); 1770 if (FLAG_parallel_marking) { 1771 DCHECK(FLAG_concurrent_marking); 1772 heap_->concurrent_marking()->RescheduleTasksIfNeeded(); 1773 } 1774 ProcessMarkingWorklist(); 1775 1776 FinishConcurrentMarking( 1777 ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS); 1778 ProcessMarkingWorklist(); 1779 } 1780 1781 { 1782 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE); 1783 1784 DCHECK(marking_worklist()->IsEmpty()); 1785 1786 // Mark objects reachable through the embedder heap. This phase is 1787 // opportunistic as it may not discover graphs that are only reachable 1788 // through ephemerons. 1789 { 1790 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPERS); 1791 while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone()) { 1792 PerformWrapperTracing(); 1793 ProcessMarkingWorklist(); 1794 } 1795 DCHECK(marking_worklist()->IsEmpty()); 1796 } 1797 1798 // The objects reachable from the roots are marked, yet unreachable objects 1799 // are unmarked. Mark objects reachable due to embedder heap tracing or 1800 // harmony weak maps. 1801 { 1802 TRACE_GC(heap()->tracer(), 1803 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON); 1804 ProcessEphemeronMarking(); 1805 DCHECK(marking_worklist()->IsEmpty()); 1806 } 1807 1808 // The objects reachable from the roots, weak maps, and embedder heap 1809 // tracing are marked. Objects pointed to only by weak global handles cannot 1810 // be immediately reclaimed. Instead, we have to mark them as pending and 1811 // mark objects reachable from them. 1812 // 1813 // First we identify nonlive weak handles and mark them as pending 1814 // destruction. 1815 { 1816 TRACE_GC(heap()->tracer(), 1817 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES); 1818 heap()->isolate()->global_handles()->IdentifyWeakHandles( 1819 &IsUnmarkedHeapObject); 1820 ProcessMarkingWorklist(); 1821 } 1822 1823 // Process finalizers, effectively keeping them alive until the next 1824 // garbage collection. 1825 { 1826 TRACE_GC(heap()->tracer(), 1827 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS); 1828 heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers( 1829 &root_visitor); 1830 ProcessMarkingWorklist(); 1831 } 1832 1833 // Repeat ephemeron processing from the newly marked objects. 
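    // Objects kept alive by finalizers above may be (or reach) ephemeron keys
    // whose values have not been marked yet, so the weak-map closure has to
    // be recomputed. Illustration of the WeakMap semantics involved:
    //   const wm = new WeakMap();
    //   wm.set(key, value);  // |value| must be marked iff |key| is marked.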
1834 { 1835 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY); 1836 ProcessEphemeronMarking(); 1837 { 1838 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE); 1839 heap()->local_embedder_heap_tracer()->TraceEpilogue(); 1840 } 1841 DCHECK(marking_worklist()->IsEmpty()); 1842 } 1843 1844 { 1845 heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles( 1846 &IsUnmarkedHeapObject); 1847 } 1848 } 1849 1850 if (was_marked_incrementally_) { 1851 heap()->incremental_marking()->Deactivate(); 1852 } 1853 } 1854 1855 void MarkCompactCollector::ClearNonLiveReferences() { 1856 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR); 1857 1858 { 1859 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE); 1860 1861 // Prune the string table removing all strings only pointed to by the 1862 // string table. Cannot use string_table() here because the string 1863 // table is marked. 1864 StringTable* string_table = heap()->string_table(); 1865 InternalizedStringTableCleaner internalized_visitor(heap(), string_table); 1866 string_table->IterateElements(&internalized_visitor); 1867 string_table->ElementsRemoved(internalized_visitor.PointersRemoved()); 1868 1869 ExternalStringTableCleaner external_visitor(heap()); 1870 heap()->external_string_table_.IterateAll(&external_visitor); 1871 heap()->external_string_table_.CleanUpAll(); 1872 } 1873 1874 { 1875 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS); 1876 // Process the weak references. 1877 MarkCompactWeakObjectRetainer mark_compact_object_retainer( 1878 non_atomic_marking_state()); 1879 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer); 1880 } 1881 1882 { 1883 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS); 1884 // ClearFullMapTransitions must be called before weak references are 1885 // cleared. 
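    // (It still relies on the weak transition targets being intact; see the
    // "Weak pointers aren't cleared yet" DCHECK in ClearFullMapTransitions.)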
1886 ClearFullMapTransitions(); 1887 } 1888 ClearWeakReferences(); 1889 MarkDependentCodeForDeoptimization(); 1890 1891 ClearWeakCollections(); 1892 1893 DCHECK(weak_objects_.transition_arrays.IsEmpty()); 1894 DCHECK(weak_objects_.weak_references.IsEmpty()); 1895 DCHECK(weak_objects_.weak_objects_in_code.IsEmpty()); 1896 } 1897 1898 void MarkCompactCollector::MarkDependentCodeForDeoptimization() { 1899 std::pair<HeapObject*, Code*> weak_object_in_code; 1900 while (weak_objects_.weak_objects_in_code.Pop(kMainThread, 1901 &weak_object_in_code)) { 1902 HeapObject* object = weak_object_in_code.first; 1903 Code* code = weak_object_in_code.second; 1904 if (!non_atomic_marking_state()->IsBlackOrGrey(object) && 1905 !code->marked_for_deoptimization()) { 1906 code->SetMarkedForDeoptimization("weak objects"); 1907 code->InvalidateEmbeddedObjects(heap_); 1908 have_code_to_deoptimize_ = true; 1909 } 1910 } 1911 } 1912 1913 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) { 1914 DCHECK(non_atomic_marking_state()->IsWhite(dead_target)); 1915 Object* potential_parent = dead_target->constructor_or_backpointer(); 1916 if (potential_parent->IsMap()) { 1917 Map* parent = Map::cast(potential_parent); 1918 DisallowHeapAllocation no_gc_obviously; 1919 if (non_atomic_marking_state()->IsBlackOrGrey(parent) && 1920 TransitionsAccessor(isolate(), parent, &no_gc_obviously) 1921 .HasSimpleTransitionTo(dead_target)) { 1922 ClearPotentialSimpleMapTransition(parent, dead_target); 1923 } 1924 } 1925 } 1926 1927 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* map, 1928 Map* dead_target) { 1929 DCHECK(!map->is_prototype_map()); 1930 DCHECK(!dead_target->is_prototype_map()); 1931 DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target)); 1932 // Take ownership of the descriptor array. 1933 int number_of_own_descriptors = map->NumberOfOwnDescriptors(); 1934 DescriptorArray* descriptors = map->instance_descriptors(); 1935 if (descriptors == dead_target->instance_descriptors() && 1936 number_of_own_descriptors > 0) { 1937 TrimDescriptorArray(map, descriptors); 1938 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors); 1939 } 1940 } 1941 1942 void MarkCompactCollector::ClearFullMapTransitions() { 1943 TransitionArray* array; 1944 while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) { 1945 int num_transitions = array->number_of_entries(); 1946 if (num_transitions > 0) { 1947 Map* map; 1948 // The array might contain "undefined" elements because it's not yet 1949 // filled. Allow it. 1950 if (array->GetTargetIfExists(0, isolate(), &map)) { 1951 DCHECK_NOT_NULL(map); // Weak pointers aren't cleared yet. 1952 Map* parent = Map::cast(map->constructor_or_backpointer()); 1953 bool parent_is_alive = 1954 non_atomic_marking_state()->IsBlackOrGrey(parent); 1955 DescriptorArray* descriptors = 1956 parent_is_alive ? parent->instance_descriptors() : nullptr; 1957 bool descriptors_owner_died = 1958 CompactTransitionArray(parent, array, descriptors); 1959 if (descriptors_owner_died) { 1960 TrimDescriptorArray(parent, descriptors); 1961 } 1962 } 1963 } 1964 } 1965 } 1966 1967 bool MarkCompactCollector::CompactTransitionArray( 1968 Map* map, TransitionArray* transitions, DescriptorArray* descriptors) { 1969 DCHECK(!map->is_prototype_map()); 1970 int num_transitions = transitions->number_of_entries(); 1971 bool descriptors_owner_died = false; 1972 int transition_index = 0; 1973 // Compact all live transitions to the left. 
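  // Entries whose target map is unmarked (white) are dropped; surviving
  // entries are shifted left and their key/target slots re-recorded, e.g.
  //   [A, dead, B, dead]  ->  [A, B]
  // The array is right-trimmed to the new length further below.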
1974 for (int i = 0; i < num_transitions; ++i) { 1975 Map* target = transitions->GetTarget(i); 1976 DCHECK_EQ(target->constructor_or_backpointer(), map); 1977 if (non_atomic_marking_state()->IsWhite(target)) { 1978 if (descriptors != nullptr && 1979 target->instance_descriptors() == descriptors) { 1980 DCHECK(!target->is_prototype_map()); 1981 descriptors_owner_died = true; 1982 } 1983 } else { 1984 if (i != transition_index) { 1985 Name* key = transitions->GetKey(i); 1986 transitions->SetKey(transition_index, key); 1987 HeapObjectReference** key_slot = 1988 transitions->GetKeySlot(transition_index); 1989 RecordSlot(transitions, key_slot, key); 1990 MaybeObject* raw_target = transitions->GetRawTarget(i); 1991 transitions->SetRawTarget(transition_index, raw_target); 1992 HeapObjectReference** target_slot = 1993 transitions->GetTargetSlot(transition_index); 1994 RecordSlot(transitions, target_slot, raw_target->GetHeapObject()); 1995 } 1996 transition_index++; 1997 } 1998 } 1999 // If there are no transitions to be cleared, return. 2000 if (transition_index == num_transitions) { 2001 DCHECK(!descriptors_owner_died); 2002 return false; 2003 } 2004 // Note that we never eliminate a transition array, though we might right-trim 2005 // such that number_of_transitions() == 0. If this assumption changes, 2006 // TransitionArray::Insert() will need to deal with the case that a transition 2007 // array disappeared during GC. 2008 int trim = transitions->Capacity() - transition_index; 2009 if (trim > 0) { 2010 heap_->RightTrimWeakFixedArray(transitions, 2011 trim * TransitionArray::kEntrySize); 2012 transitions->SetNumberOfTransitions(transition_index); 2013 } 2014 return descriptors_owner_died; 2015 } 2016 2017 void MarkCompactCollector::TrimDescriptorArray(Map* map, 2018 DescriptorArray* descriptors) { 2019 int number_of_own_descriptors = map->NumberOfOwnDescriptors(); 2020 if (number_of_own_descriptors == 0) { 2021 DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array()); 2022 return; 2023 } 2024 2025 int number_of_descriptors = descriptors->number_of_descriptors_storage(); 2026 int to_trim = number_of_descriptors - number_of_own_descriptors; 2027 if (to_trim > 0) { 2028 heap_->RightTrimWeakFixedArray(descriptors, 2029 to_trim * DescriptorArray::kEntrySize); 2030 descriptors->SetNumberOfDescriptors(number_of_own_descriptors); 2031 2032 TrimEnumCache(map, descriptors); 2033 descriptors->Sort(); 2034 2035 if (FLAG_unbox_double_fields) { 2036 LayoutDescriptor* layout_descriptor = map->layout_descriptor(); 2037 layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors, 2038 number_of_own_descriptors); 2039 SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true)); 2040 } 2041 } 2042 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors); 2043 map->set_owns_descriptors(true); 2044 } 2045 2046 void MarkCompactCollector::TrimEnumCache(Map* map, 2047 DescriptorArray* descriptors) { 2048 int live_enum = map->EnumLength(); 2049 if (live_enum == kInvalidEnumCacheSentinel) { 2050 live_enum = map->NumberOfEnumerableProperties(); 2051 } 2052 if (live_enum == 0) return descriptors->ClearEnumCache(); 2053 EnumCache* enum_cache = descriptors->GetEnumCache(); 2054 2055 FixedArray* keys = enum_cache->keys(); 2056 int to_trim = keys->length() - live_enum; 2057 if (to_trim <= 0) return; 2058 heap_->RightTrimFixedArray(keys, to_trim); 2059 2060 FixedArray* indices = enum_cache->indices(); 2061 to_trim = indices->length() - live_enum; 2062 if (to_trim <= 0) return; 2063 
heap_->RightTrimFixedArray(indices, to_trim); 2064 } 2065 2066 void MarkCompactCollector::ClearWeakCollections() { 2067 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS); 2068 EphemeronHashTable* table; 2069 2070 while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) { 2071 for (int i = 0; i < table->Capacity(); i++) { 2072 HeapObject* key = HeapObject::cast(table->KeyAt(i)); 2073 #ifdef VERIFY_HEAP 2074 Object* value = table->ValueAt(i); 2075 2076 if (value->IsHeapObject()) { 2077 CHECK_IMPLIES( 2078 non_atomic_marking_state()->IsBlackOrGrey(key), 2079 non_atomic_marking_state()->IsBlackOrGrey(HeapObject::cast(value))); 2080 } 2081 #endif 2082 if (!non_atomic_marking_state()->IsBlackOrGrey(key)) { 2083 table->RemoveEntry(i); 2084 } 2085 } 2086 } 2087 } 2088 2089 void MarkCompactCollector::ClearWeakReferences() { 2090 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES); 2091 std::pair<HeapObject*, HeapObjectReference**> slot; 2092 while (weak_objects_.weak_references.Pop(kMainThread, &slot)) { 2093 HeapObject* value; 2094 HeapObjectReference** location = slot.second; 2095 if ((*location)->ToWeakHeapObject(&value)) { 2096 DCHECK(!value->IsCell()); 2097 if (non_atomic_marking_state()->IsBlackOrGrey(value)) { 2098 // The value of the weak reference is alive. 2099 RecordSlot(slot.first, location, value); 2100 } else { 2101 if (value->IsMap()) { 2102 // The map is non-live. 2103 ClearPotentialSimpleMapTransition(Map::cast(value)); 2104 } 2105 *location = HeapObjectReference::ClearedValue(); 2106 } 2107 } 2108 } 2109 } 2110 2111 void MarkCompactCollector::AbortWeakObjects() { 2112 weak_objects_.transition_arrays.Clear(); 2113 weak_objects_.ephemeron_hash_tables.Clear(); 2114 weak_objects_.current_ephemerons.Clear(); 2115 weak_objects_.next_ephemerons.Clear(); 2116 weak_objects_.discovered_ephemerons.Clear(); 2117 weak_objects_.weak_references.Clear(); 2118 weak_objects_.weak_objects_in_code.Clear(); 2119 } 2120 2121 void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo, 2122 Object* target) { 2123 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target)); 2124 Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host)); 2125 if (target_page->IsEvacuationCandidate() && 2126 (rinfo->host() == nullptr || 2127 !source_page->ShouldSkipEvacuationSlotRecording())) { 2128 RelocInfo::Mode rmode = rinfo->rmode(); 2129 Address addr = rinfo->pc(); 2130 SlotType slot_type = SlotTypeForRelocInfoMode(rmode); 2131 if (rinfo->IsInConstantPool()) { 2132 addr = rinfo->constant_pool_entry_address(); 2133 if (RelocInfo::IsCodeTargetMode(rmode)) { 2134 slot_type = CODE_ENTRY_SLOT; 2135 } else { 2136 DCHECK(RelocInfo::IsEmbeddedObject(rmode)); 2137 slot_type = OBJECT_SLOT; 2138 } 2139 } 2140 RememberedSet<OLD_TO_OLD>::InsertTyped( 2141 source_page, reinterpret_cast<Address>(host), slot_type, addr); 2142 } 2143 } 2144 2145 template <AccessMode access_mode> 2146 static inline SlotCallbackResult UpdateSlot( 2147 MaybeObject** slot, MaybeObject* old, HeapObject* heap_obj, 2148 HeapObjectReferenceType reference_type) { 2149 MapWord map_word = heap_obj->map_word(); 2150 if (map_word.IsForwardingAddress()) { 2151 DCHECK(Heap::InFromSpace(heap_obj) || 2152 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) || 2153 Page::FromAddress(heap_obj->address()) 2154 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED)); 2155 MaybeObject* target = 2156 reference_type == HeapObjectReferenceType::WEAK 2157 ? 
HeapObjectReference::Weak(map_word.ToForwardingAddress())
2158 : HeapObjectReference::Strong(map_word.ToForwardingAddress());
2159 if (access_mode == AccessMode::NON_ATOMIC) {
2160 *slot = target;
2161 } else {
2162 base::AsAtomicPointer::Release_CompareAndSwap(slot, old, target);
2163 }
2164 DCHECK(!Heap::InFromSpace(target));
2165 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
2166 } else {
2167 DCHECK(heap_obj->map()->IsMap());
2168 }
2169 // OLD_TO_OLD slots are always removed after updating.
2170 return REMOVE_SLOT;
2171 }
2172
2173 template <AccessMode access_mode>
2174 static inline SlotCallbackResult UpdateSlot(MaybeObject** slot) {
2175 MaybeObject* obj = base::AsAtomicPointer::Relaxed_Load(slot);
2176 HeapObject* heap_obj;
2177 if (obj->ToWeakHeapObject(&heap_obj)) {
2178 UpdateSlot<access_mode>(slot, obj, heap_obj, HeapObjectReferenceType::WEAK);
2179 } else if (obj->ToStrongHeapObject(&heap_obj)) {
2180 return UpdateSlot<access_mode>(slot, obj, heap_obj,
2181 HeapObjectReferenceType::STRONG);
2182 }
2183 return REMOVE_SLOT;
2184 }
2185
2186 template <AccessMode access_mode>
2187 static inline SlotCallbackResult UpdateStrongSlot(MaybeObject** maybe_slot) {
2188 DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrongHeapObject());
2189 Object** slot = reinterpret_cast<Object**>(maybe_slot);
2190 Object* obj = base::AsAtomicPointer::Relaxed_Load(slot);
2191 if (obj->IsHeapObject()) {
2192 HeapObject* heap_obj = HeapObject::cast(obj);
2193 return UpdateSlot<access_mode>(maybe_slot, MaybeObject::FromObject(obj),
2194 heap_obj, HeapObjectReferenceType::STRONG);
2195 }
2196 return REMOVE_SLOT;
2197 }
2198
2199 // Visitor for updating root pointers and to-space pointers.
2200 // It does not expect to encounter pointers to dead objects.
2201 // TODO(ulan): Remove code object specific functions. This visitor
2202 // never visits code objects.
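// Strong slots are updated in place; weak slots are updated while keeping
// their weak tag (see UpdateSlot above).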
2203 class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor { 2204 public: 2205 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {} 2206 2207 void VisitPointer(HeapObject* host, Object** p) override { 2208 UpdateStrongSlotInternal(p); 2209 } 2210 2211 void VisitPointer(HeapObject* host, MaybeObject** p) override { 2212 UpdateSlotInternal(p); 2213 } 2214 2215 void VisitPointers(HeapObject* host, Object** start, Object** end) override { 2216 for (Object** p = start; p < end; p++) { 2217 UpdateStrongSlotInternal(p); 2218 } 2219 } 2220 2221 void VisitPointers(HeapObject* host, MaybeObject** start, 2222 MaybeObject** end) final { 2223 for (MaybeObject** p = start; p < end; p++) { 2224 UpdateSlotInternal(p); 2225 } 2226 } 2227 2228 void VisitRootPointer(Root root, const char* description, 2229 Object** p) override { 2230 UpdateStrongSlotInternal(p); 2231 } 2232 2233 void VisitRootPointers(Root root, const char* description, Object** start, 2234 Object** end) override { 2235 for (Object** p = start; p < end; p++) UpdateStrongSlotInternal(p); 2236 } 2237 2238 void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override { 2239 UpdateTypedSlotHelper::UpdateEmbeddedPointer( 2240 heap_, rinfo, UpdateStrongMaybeObjectSlotInternal); 2241 } 2242 2243 void VisitCodeTarget(Code* host, RelocInfo* rinfo) override { 2244 UpdateTypedSlotHelper::UpdateCodeTarget( 2245 rinfo, UpdateStrongMaybeObjectSlotInternal); 2246 } 2247 2248 private: 2249 static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal( 2250 MaybeObject** slot) { 2251 DCHECK(!(*slot)->IsWeakHeapObject()); 2252 DCHECK(!(*slot)->IsClearedWeakHeapObject()); 2253 return UpdateStrongSlotInternal(reinterpret_cast<Object**>(slot)); 2254 } 2255 2256 static inline SlotCallbackResult UpdateStrongSlotInternal(Object** slot) { 2257 DCHECK(!HasWeakHeapObjectTag(*slot)); 2258 return UpdateStrongSlot<AccessMode::NON_ATOMIC>( 2259 reinterpret_cast<MaybeObject**>(slot)); 2260 } 2261 2262 static inline SlotCallbackResult UpdateSlotInternal(MaybeObject** slot) { 2263 return UpdateSlot<AccessMode::NON_ATOMIC>(slot); 2264 } 2265 2266 Heap* heap_; 2267 }; 2268 2269 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap, 2270 Object** p) { 2271 MapWord map_word = HeapObject::cast(*p)->map_word(); 2272 2273 if (map_word.IsForwardingAddress()) { 2274 String* new_string = String::cast(map_word.ToForwardingAddress()); 2275 2276 if (new_string->IsExternalString()) { 2277 heap->ProcessMovedExternalString( 2278 Page::FromAddress(reinterpret_cast<Address>(*p)), 2279 Page::FromHeapObject(new_string), ExternalString::cast(new_string)); 2280 } 2281 return new_string; 2282 } 2283 2284 return String::cast(*p); 2285 } 2286 2287 void MarkCompactCollector::EvacuatePrologue() { 2288 // New space. 2289 NewSpace* new_space = heap()->new_space(); 2290 // Append the list of new space pages to be processed. 2291 for (Page* p : 2292 PageRange(new_space->first_allocatable_address(), new_space->top())) { 2293 new_space_evacuation_pages_.push_back(p); 2294 } 2295 new_space->Flip(); 2296 new_space->ResetLinearAllocationArea(); 2297 2298 // Old space. 2299 DCHECK(old_space_evacuation_pages_.empty()); 2300 old_space_evacuation_pages_ = std::move(evacuation_candidates_); 2301 evacuation_candidates_.clear(); 2302 DCHECK(evacuation_candidates_.empty()); 2303 } 2304 2305 void MarkCompactCollector::EvacuateEpilogue() { 2306 aborted_evacuation_candidates_.clear(); 2307 // New space. 
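  // With the age mark at top(), every object that survived this cycle sits
  // below the mark and can be considered for promotion by the next scavenge.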
2308 heap()->new_space()->set_age_mark(heap()->new_space()->top()); 2309 // Deallocate unmarked large objects. 2310 heap()->lo_space()->FreeUnmarkedObjects(); 2311 // Old space. Deallocate evacuated candidate pages. 2312 ReleaseEvacuationCandidates(); 2313 // Give pages that are queued to be freed back to the OS. 2314 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); 2315 #ifdef DEBUG 2316 // Old-to-old slot sets must be empty after evacuation. 2317 for (Page* p : *heap()->old_space()) { 2318 DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>())); 2319 DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>())); 2320 DCHECK_NULL(p->invalidated_slots()); 2321 } 2322 #endif 2323 } 2324 2325 class Evacuator : public Malloced { 2326 public: 2327 enum EvacuationMode { 2328 kObjectsNewToOld, 2329 kPageNewToOld, 2330 kObjectsOldToOld, 2331 kPageNewToNew, 2332 }; 2333 2334 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { 2335 // Note: The order of checks is important in this function. 2336 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) 2337 return kPageNewToOld; 2338 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION)) 2339 return kPageNewToNew; 2340 if (chunk->InNewSpace()) return kObjectsNewToOld; 2341 return kObjectsOldToOld; 2342 } 2343 2344 // NewSpacePages with more live bytes than this threshold qualify for fast 2345 // evacuation. 2346 static int PageEvacuationThreshold() { 2347 if (FLAG_page_promotion) 2348 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; 2349 return Page::kAllocatableMemory + kPointerSize; 2350 } 2351 2352 Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor) 2353 : heap_(heap), 2354 local_allocator_(heap_), 2355 local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity), 2356 new_space_visitor_(heap_, &local_allocator_, record_visitor, 2357 &local_pretenuring_feedback_), 2358 new_to_new_page_visitor_(heap_, record_visitor, 2359 &local_pretenuring_feedback_), 2360 new_to_old_page_visitor_(heap_, record_visitor, 2361 &local_pretenuring_feedback_), 2362 2363 old_space_visitor_(heap_, &local_allocator_, record_visitor), 2364 duration_(0.0), 2365 bytes_compacted_(0) {} 2366 2367 virtual ~Evacuator() {} 2368 2369 void EvacuatePage(Page* page); 2370 2371 void AddObserver(MigrationObserver* observer) { 2372 new_space_visitor_.AddObserver(observer); 2373 old_space_visitor_.AddObserver(observer); 2374 } 2375 2376 // Merge back locally cached info sequentially. Note that this method needs 2377 // to be called from the main thread. 2378 inline void Finalize(); 2379 2380 virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0; 2381 2382 protected: 2383 static const int kInitialLocalPretenuringFeedbackCapacity = 256; 2384 2385 // |saved_live_bytes| returns the live bytes of the page that was processed. 2386 virtual void RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0; 2387 2388 inline Heap* heap() { return heap_; } 2389 2390 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { 2391 duration_ += duration; 2392 bytes_compacted_ += bytes_compacted; 2393 } 2394 2395 Heap* heap_; 2396 2397 // Locally cached collector data. 2398 LocalAllocator local_allocator_; 2399 Heap::PretenuringFeedbackMap local_pretenuring_feedback_; 2400 2401 // Visitors for the corresponding spaces. 
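  // Each visitor only touches this evacuator's local allocator and
  // pretenuring feedback, so evacuators can run on separate threads; the
  // results are merged back on the main thread in Finalize().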
2402 EvacuateNewSpaceVisitor new_space_visitor_; 2403 EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW> 2404 new_to_new_page_visitor_; 2405 EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD> 2406 new_to_old_page_visitor_; 2407 EvacuateOldSpaceVisitor old_space_visitor_; 2408 2409 // Book keeping info. 2410 double duration_; 2411 intptr_t bytes_compacted_; 2412 }; 2413 2414 void Evacuator::EvacuatePage(Page* page) { 2415 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage"); 2416 DCHECK(page->SweepingDone()); 2417 intptr_t saved_live_bytes = 0; 2418 double evacuation_time = 0.0; 2419 { 2420 AlwaysAllocateScope always_allocate(heap()->isolate()); 2421 TimedScope timed_scope(&evacuation_time); 2422 RawEvacuatePage(page, &saved_live_bytes); 2423 } 2424 ReportCompactionProgress(evacuation_time, saved_live_bytes); 2425 if (FLAG_trace_evacuation) { 2426 PrintIsolate( 2427 heap()->isolate(), 2428 "evacuation[%p]: page=%p new_space=%d " 2429 "page_evacuation=%d executable=%d contains_age_mark=%d " 2430 "live_bytes=%" V8PRIdPTR " time=%f success=%d\n", 2431 static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(), 2432 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) || 2433 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION), 2434 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), 2435 page->Contains(heap()->new_space()->age_mark()), saved_live_bytes, 2436 evacuation_time, page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)); 2437 } 2438 } 2439 2440 void Evacuator::Finalize() { 2441 local_allocator_.Finalize(); 2442 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); 2443 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + 2444 new_to_old_page_visitor_.moved_bytes()); 2445 heap()->IncrementSemiSpaceCopiedObjectSize( 2446 new_space_visitor_.semispace_copied_size() + 2447 new_to_new_page_visitor_.moved_bytes()); 2448 heap()->IncrementYoungSurvivorsCounter( 2449 new_space_visitor_.promoted_size() + 2450 new_space_visitor_.semispace_copied_size() + 2451 new_to_old_page_visitor_.moved_bytes() + 2452 new_to_new_page_visitor_.moved_bytes()); 2453 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); 2454 } 2455 2456 class FullEvacuator : public Evacuator { 2457 public: 2458 FullEvacuator(MarkCompactCollector* collector, 2459 RecordMigratedSlotVisitor* record_visitor) 2460 : Evacuator(collector->heap(), record_visitor), collector_(collector) {} 2461 2462 GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override { 2463 return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY; 2464 } 2465 2466 protected: 2467 void RawEvacuatePage(Page* page, intptr_t* live_bytes) override; 2468 2469 MarkCompactCollector* collector_; 2470 }; 2471 2472 void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) { 2473 const EvacuationMode evacuation_mode = ComputeEvacuationMode(page); 2474 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"), 2475 "FullEvacuator::RawEvacuatePage", "evacuation_mode", 2476 evacuation_mode); 2477 MarkCompactCollector::NonAtomicMarkingState* marking_state = 2478 collector_->non_atomic_marking_state(); 2479 *live_bytes = marking_state->live_bytes(page); 2480 HeapObject* failed_object = nullptr; 2481 switch (evacuation_mode) { 2482 case kObjectsNewToOld: 2483 LiveObjectVisitor::VisitBlackObjectsNoFail( 2484 page, marking_state, &new_space_visitor_, 2485 LiveObjectVisitor::kClearMarkbits); 2486 // ArrayBufferTracker will be updated during pointers updating. 
2487 break; 2488 case kPageNewToOld: 2489 LiveObjectVisitor::VisitBlackObjectsNoFail( 2490 page, marking_state, &new_to_old_page_visitor_, 2491 LiveObjectVisitor::kKeepMarking); 2492 new_to_old_page_visitor_.account_moved_bytes( 2493 marking_state->live_bytes(page)); 2494 // ArrayBufferTracker will be updated during sweeping. 2495 break; 2496 case kPageNewToNew: 2497 LiveObjectVisitor::VisitBlackObjectsNoFail( 2498 page, marking_state, &new_to_new_page_visitor_, 2499 LiveObjectVisitor::kKeepMarking); 2500 new_to_new_page_visitor_.account_moved_bytes( 2501 marking_state->live_bytes(page)); 2502 // ArrayBufferTracker will be updated during sweeping. 2503 break; 2504 case kObjectsOldToOld: { 2505 const bool success = LiveObjectVisitor::VisitBlackObjects( 2506 page, marking_state, &old_space_visitor_, 2507 LiveObjectVisitor::kClearMarkbits, &failed_object); 2508 if (!success) { 2509 // Aborted compaction page. Actual processing happens on the main 2510 // thread for simplicity reasons. 2511 collector_->ReportAbortedEvacuationCandidate(failed_object, page); 2512 } else { 2513 // ArrayBufferTracker will be updated during pointers updating. 2514 } 2515 break; 2516 } 2517 } 2518 } 2519 2520 class PageEvacuationItem : public ItemParallelJob::Item { 2521 public: 2522 explicit PageEvacuationItem(Page* page) : page_(page) {} 2523 virtual ~PageEvacuationItem() {} 2524 Page* page() const { return page_; } 2525 2526 private: 2527 Page* page_; 2528 }; 2529 2530 class PageEvacuationTask : public ItemParallelJob::Task { 2531 public: 2532 PageEvacuationTask(Isolate* isolate, Evacuator* evacuator) 2533 : ItemParallelJob::Task(isolate), 2534 evacuator_(evacuator), 2535 tracer_(isolate->heap()->tracer()) {} 2536 2537 void RunInParallel() override { 2538 TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope()); 2539 PageEvacuationItem* item = nullptr; 2540 while ((item = GetItem<PageEvacuationItem>()) != nullptr) { 2541 evacuator_->EvacuatePage(item->page()); 2542 item->MarkFinished(); 2543 } 2544 }; 2545 2546 private: 2547 Evacuator* evacuator_; 2548 GCTracer* tracer_; 2549 }; 2550 2551 template <class Evacuator, class Collector> 2552 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( 2553 Collector* collector, ItemParallelJob* job, 2554 RecordMigratedSlotVisitor* record_visitor, 2555 MigrationObserver* migration_observer, const intptr_t live_bytes) { 2556 // Used for trace summary. 
2557 double compaction_speed = 0; 2558 if (FLAG_trace_evacuation) { 2559 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); 2560 } 2561 2562 const bool profiling = 2563 heap()->isolate()->is_profiling() || 2564 heap()->isolate()->logger()->is_listening_to_code_events() || 2565 heap()->isolate()->heap_profiler()->is_tracking_object_moves() || 2566 heap()->has_heap_object_allocation_tracker(); 2567 ProfilingMigrationObserver profiling_observer(heap()); 2568 2569 const int wanted_num_tasks = 2570 NumberOfParallelCompactionTasks(job->NumberOfItems()); 2571 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; 2572 for (int i = 0; i < wanted_num_tasks; i++) { 2573 evacuators[i] = new Evacuator(collector, record_visitor); 2574 if (profiling) evacuators[i]->AddObserver(&profiling_observer); 2575 if (migration_observer != nullptr) 2576 evacuators[i]->AddObserver(migration_observer); 2577 job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i])); 2578 } 2579 job->Run(isolate()->async_counters()); 2580 for (int i = 0; i < wanted_num_tasks; i++) { 2581 evacuators[i]->Finalize(); 2582 delete evacuators[i]; 2583 } 2584 delete[] evacuators; 2585 2586 if (FLAG_trace_evacuation) { 2587 PrintIsolate(isolate(), 2588 "%8.0f ms: evacuation-summary: parallel=%s pages=%d " 2589 "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR 2590 " compaction_speed=%.f\n", 2591 isolate()->time_millis_since_init(), 2592 FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(), 2593 wanted_num_tasks, job->NumberOfTasks(), 2594 V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1, 2595 live_bytes, compaction_speed); 2596 } 2597 } 2598 2599 bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) { 2600 const bool reduce_memory = heap()->ShouldReduceMemory(); 2601 const Address age_mark = heap()->new_space()->age_mark(); 2602 return !reduce_memory && !p->NeverEvacuate() && 2603 (live_bytes > Evacuator::PageEvacuationThreshold()) && 2604 !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes); 2605 } 2606 2607 void MarkCompactCollector::EvacuatePagesInParallel() { 2608 ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(), 2609 &page_parallel_job_semaphore_); 2610 intptr_t live_bytes = 0; 2611 2612 for (Page* page : old_space_evacuation_pages_) { 2613 live_bytes += non_atomic_marking_state()->live_bytes(page); 2614 evacuation_job.AddItem(new PageEvacuationItem(page)); 2615 } 2616 2617 for (Page* page : new_space_evacuation_pages_) { 2618 intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page); 2619 if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue; 2620 live_bytes += live_bytes_on_page; 2621 if (ShouldMovePage(page, live_bytes_on_page)) { 2622 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { 2623 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); 2624 DCHECK_EQ(heap()->old_space(), page->owner()); 2625 // The move added page->allocated_bytes to the old space, but we are 2626 // going to sweep the page and add page->live_byte_count. 
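        // Decreasing by the full allocated_bytes keeps the old-space
        // accounting from double counting once the sweeper re-adds the
        // page's live bytes.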
2627 heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(), 2628 page); 2629 } else { 2630 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); 2631 } 2632 } 2633 evacuation_job.AddItem(new PageEvacuationItem(page)); 2634 } 2635 if (evacuation_job.NumberOfItems() == 0) return; 2636 2637 RecordMigratedSlotVisitor record_visitor(this); 2638 CreateAndExecuteEvacuationTasks<FullEvacuator>( 2639 this, &evacuation_job, &record_visitor, nullptr, live_bytes); 2640 PostProcessEvacuationCandidates(); 2641 } 2642 2643 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { 2644 public: 2645 virtual Object* RetainAs(Object* object) { 2646 if (object->IsHeapObject()) { 2647 HeapObject* heap_object = HeapObject::cast(object); 2648 MapWord map_word = heap_object->map_word(); 2649 if (map_word.IsForwardingAddress()) { 2650 return map_word.ToForwardingAddress(); 2651 } 2652 } 2653 return object; 2654 } 2655 }; 2656 2657 // Return true if the given code is deoptimized or will be deoptimized. 2658 bool MarkCompactCollector::WillBeDeoptimized(Code* code) { 2659 return code->is_optimized_code() && code->marked_for_deoptimization(); 2660 } 2661 2662 void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) { 2663 EvacuateRecordOnlyVisitor visitor(heap()); 2664 LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(), 2665 &visitor, 2666 LiveObjectVisitor::kKeepMarking); 2667 } 2668 2669 template <class Visitor, typename MarkingState> 2670 bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk, 2671 MarkingState* marking_state, 2672 Visitor* visitor, 2673 IterationMode iteration_mode, 2674 HeapObject** failed_object) { 2675 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), 2676 "LiveObjectVisitor::VisitBlackObjects"); 2677 for (auto object_and_size : 2678 LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) { 2679 HeapObject* const object = object_and_size.first; 2680 if (!visitor->Visit(object, object_and_size.second)) { 2681 if (iteration_mode == kClearMarkbits) { 2682 marking_state->bitmap(chunk)->ClearRange( 2683 chunk->AddressToMarkbitIndex(chunk->area_start()), 2684 chunk->AddressToMarkbitIndex(object->address())); 2685 *failed_object = object; 2686 } 2687 return false; 2688 } 2689 } 2690 if (iteration_mode == kClearMarkbits) { 2691 marking_state->ClearLiveness(chunk); 2692 } 2693 return true; 2694 } 2695 2696 template <class Visitor, typename MarkingState> 2697 void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk, 2698 MarkingState* marking_state, 2699 Visitor* visitor, 2700 IterationMode iteration_mode) { 2701 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), 2702 "LiveObjectVisitor::VisitBlackObjectsNoFail"); 2703 for (auto object_and_size : 2704 LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) { 2705 HeapObject* const object = object_and_size.first; 2706 DCHECK(marking_state->IsBlack(object)); 2707 const bool success = visitor->Visit(object, object_and_size.second); 2708 USE(success); 2709 DCHECK(success); 2710 } 2711 if (iteration_mode == kClearMarkbits) { 2712 marking_state->ClearLiveness(chunk); 2713 } 2714 } 2715 2716 template <class Visitor, typename MarkingState> 2717 void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk, 2718 MarkingState* marking_state, 2719 Visitor* visitor, 2720 IterationMode iteration_mode) { 2721 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), 2722 "LiveObjectVisitor::VisitGreyObjectsNoFail"); 2723 for (auto object_and_size : 2724 
LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) { 2725 HeapObject* const object = object_and_size.first; 2726 DCHECK(marking_state->IsGrey(object)); 2727 const bool success = visitor->Visit(object, object_and_size.second); 2728 USE(success); 2729 DCHECK(success); 2730 } 2731 if (iteration_mode == kClearMarkbits) { 2732 marking_state->ClearLiveness(chunk); 2733 } 2734 } 2735 2736 template <typename MarkingState> 2737 void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk, 2738 MarkingState* marking_state) { 2739 int new_live_size = 0; 2740 for (auto object_and_size : 2741 LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) { 2742 new_live_size += object_and_size.second; 2743 } 2744 marking_state->SetLiveBytes(chunk, new_live_size); 2745 } 2746 2747 void MarkCompactCollector::Evacuate() { 2748 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); 2749 base::LockGuard<base::Mutex> guard(heap()->relocation_mutex()); 2750 2751 { 2752 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); 2753 EvacuatePrologue(); 2754 } 2755 2756 { 2757 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); 2758 EvacuationScope evacuation_scope(this); 2759 EvacuatePagesInParallel(); 2760 } 2761 2762 UpdatePointersAfterEvacuation(); 2763 2764 { 2765 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE); 2766 if (!heap()->new_space()->Rebalance()) { 2767 heap()->FatalProcessOutOfMemory("NewSpace::Rebalance"); 2768 } 2769 } 2770 2771 // Give pages that are queued to be freed back to the OS. Note that filtering 2772 // slots only handles old space (for unboxed doubles), and thus map space can 2773 // still contain stale pointers. We only free the chunks after pointer updates 2774 // to still have access to page headers. 2775 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); 2776 2777 { 2778 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); 2779 2780 for (Page* p : new_space_evacuation_pages_) { 2781 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { 2782 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); 2783 sweeper()->AddPageForIterability(p); 2784 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { 2785 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); 2786 DCHECK_EQ(OLD_SPACE, p->owner()->identity()); 2787 sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR); 2788 } 2789 } 2790 new_space_evacuation_pages_.clear(); 2791 2792 for (Page* p : old_space_evacuation_pages_) { 2793 // Important: skip list should be cleared only after roots were updated 2794 // because root iteration traverses the stack and might have to find 2795 // code objects from non-updated pc pointing into evacuation candidate. 
2796 SkipList* list = p->skip_list(); 2797 if (list != nullptr) list->Clear(); 2798 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { 2799 sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR); 2800 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); 2801 } 2802 } 2803 } 2804 2805 { 2806 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE); 2807 EvacuateEpilogue(); 2808 } 2809 2810 #ifdef VERIFY_HEAP 2811 if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) { 2812 FullEvacuationVerifier verifier(heap()); 2813 verifier.Run(); 2814 } 2815 #endif 2816 } 2817 2818 class UpdatingItem : public ItemParallelJob::Item { 2819 public: 2820 virtual ~UpdatingItem() {} 2821 virtual void Process() = 0; 2822 }; 2823 2824 class PointersUpdatingTask : public ItemParallelJob::Task { 2825 public: 2826 explicit PointersUpdatingTask(Isolate* isolate, 2827 GCTracer::BackgroundScope::ScopeId scope) 2828 : ItemParallelJob::Task(isolate), 2829 tracer_(isolate->heap()->tracer()), 2830 scope_(scope) {} 2831 2832 void RunInParallel() override { 2833 TRACE_BACKGROUND_GC(tracer_, scope_); 2834 UpdatingItem* item = nullptr; 2835 while ((item = GetItem<UpdatingItem>()) != nullptr) { 2836 item->Process(); 2837 item->MarkFinished(); 2838 } 2839 }; 2840 2841 private: 2842 GCTracer* tracer_; 2843 GCTracer::BackgroundScope::ScopeId scope_; 2844 }; 2845 2846 template <typename MarkingState> 2847 class ToSpaceUpdatingItem : public UpdatingItem { 2848 public: 2849 explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end, 2850 MarkingState* marking_state) 2851 : chunk_(chunk), 2852 start_(start), 2853 end_(end), 2854 marking_state_(marking_state) {} 2855 virtual ~ToSpaceUpdatingItem() {} 2856 2857 void Process() override { 2858 if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { 2859 // New->new promoted pages contain garbage so they require iteration using 2860 // markbits. 2861 ProcessVisitLive(); 2862 } else { 2863 ProcessVisitAll(); 2864 } 2865 } 2866 2867 private: 2868 void ProcessVisitAll() { 2869 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), 2870 "ToSpaceUpdatingItem::ProcessVisitAll"); 2871 PointersUpdatingVisitor visitor(chunk_->heap()); 2872 for (Address cur = start_; cur < end_;) { 2873 HeapObject* object = HeapObject::FromAddress(cur); 2874 Map* map = object->map(); 2875 int size = object->SizeFromMap(map); 2876 object->IterateBodyFast(map, size, &visitor); 2877 cur += size; 2878 } 2879 } 2880 2881 void ProcessVisitLive() { 2882 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), 2883 "ToSpaceUpdatingItem::ProcessVisitLive"); 2884 // For young generation evacuations we want to visit grey objects, for 2885 // full MC, we need to visit black objects. 
2886 PointersUpdatingVisitor visitor(chunk_->heap()); 2887 for (auto object_and_size : LiveObjectRange<kAllLiveObjects>( 2888 chunk_, marking_state_->bitmap(chunk_))) { 2889 object_and_size.first->IterateBodyFast(&visitor); 2890 } 2891 } 2892 2893 MemoryChunk* chunk_; 2894 Address start_; 2895 Address end_; 2896 MarkingState* marking_state_; 2897 }; 2898 2899 template <typename MarkingState> 2900 class RememberedSetUpdatingItem : public UpdatingItem { 2901 public: 2902 explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state, 2903 MemoryChunk* chunk, 2904 RememberedSetUpdatingMode updating_mode) 2905 : heap_(heap), 2906 marking_state_(marking_state), 2907 chunk_(chunk), 2908 updating_mode_(updating_mode) {} 2909 virtual ~RememberedSetUpdatingItem() {} 2910 2911 void Process() override { 2912 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), 2913 "RememberedSetUpdatingItem::Process"); 2914 base::LockGuard<base::Mutex> guard(chunk_->mutex()); 2915 CodePageMemoryModificationScope memory_modification_scope(chunk_); 2916 UpdateUntypedPointers(); 2917 UpdateTypedPointers(); 2918 } 2919 2920 private: 2921 inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) { 2922 MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address); 2923 HeapObject* heap_object; 2924 if (!(*slot)->ToStrongOrWeakHeapObject(&heap_object)) { 2925 return REMOVE_SLOT; 2926 } 2927 if (Heap::InFromSpace(heap_object)) { 2928 MapWord map_word = heap_object->map_word(); 2929 if (map_word.IsForwardingAddress()) { 2930 HeapObjectReference::Update( 2931 reinterpret_cast<HeapObjectReference**>(slot), 2932 map_word.ToForwardingAddress()); 2933 } 2934 bool success = (*slot)->ToStrongOrWeakHeapObject(&heap_object); 2935 USE(success); 2936 DCHECK(success); 2937 // If the object was in from space before and is after executing the 2938 // callback in to space, the object is still live. 2939 // Unfortunately, we do not know about the slot. It could be in a 2940 // just freed free space object. 2941 if (Heap::InToSpace(heap_object)) { 2942 return KEEP_SLOT; 2943 } 2944 } else if (Heap::InToSpace(heap_object)) { 2945 // Slots can point to "to" space if the page has been moved, or if the 2946 // slot has been recorded multiple times in the remembered set, or 2947 // if the slot was already updated during old->old updating. 2948 // In case the page has been moved, check markbits to determine liveness 2949 // of the slot. In the other case, the slot can just be kept. 
2950 if (Page::FromAddress(heap_object->address()) 2951 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { 2952 // IsBlackOrGrey is required because objects are marked as grey for 2953 // the young generation collector while they are black for the full 2954 // MC.); 2955 if (marking_state_->IsBlackOrGrey(heap_object)) { 2956 return KEEP_SLOT; 2957 } else { 2958 return REMOVE_SLOT; 2959 } 2960 } 2961 return KEEP_SLOT; 2962 } else { 2963 DCHECK(!Heap::InNewSpace(heap_object)); 2964 } 2965 return REMOVE_SLOT; 2966 } 2967 2968 void UpdateUntypedPointers() { 2969 if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) { 2970 RememberedSet<OLD_TO_NEW>::Iterate( 2971 chunk_, 2972 [this](Address slot) { return CheckAndUpdateOldToNewSlot(slot); }, 2973 SlotSet::PREFREE_EMPTY_BUCKETS); 2974 } 2975 if ((updating_mode_ == RememberedSetUpdatingMode::ALL) && 2976 (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) { 2977 InvalidatedSlotsFilter filter(chunk_); 2978 RememberedSet<OLD_TO_OLD>::Iterate( 2979 chunk_, 2980 [&filter](Address slot) { 2981 if (!filter.IsValid(slot)) return REMOVE_SLOT; 2982 return UpdateSlot<AccessMode::NON_ATOMIC>( 2983 reinterpret_cast<MaybeObject**>(slot)); 2984 }, 2985 SlotSet::PREFREE_EMPTY_BUCKETS); 2986 } 2987 if ((updating_mode_ == RememberedSetUpdatingMode::ALL) && 2988 chunk_->invalidated_slots() != nullptr) { 2989 #ifdef DEBUG 2990 for (auto object_size : *chunk_->invalidated_slots()) { 2991 HeapObject* object = object_size.first; 2992 int size = object_size.second; 2993 DCHECK_LE(object