      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/v8.h"
      6 
      7 #include "src/base/atomicops.h"
      8 #include "src/code-stubs.h"
      9 #include "src/compilation-cache.h"
     10 #include "src/cpu-profiler.h"
     11 #include "src/deoptimizer.h"
     12 #include "src/execution.h"
     13 #include "src/gdb-jit.h"
     14 #include "src/global-handles.h"
     15 #include "src/heap-profiler.h"
     16 #include "src/ic-inl.h"
     17 #include "src/incremental-marking.h"
     18 #include "src/mark-compact.h"
     19 #include "src/objects-visiting.h"
     20 #include "src/objects-visiting-inl.h"
     21 #include "src/spaces-inl.h"
     22 #include "src/stub-cache.h"
     23 #include "src/sweeper-thread.h"
     24 
     25 namespace v8 {
     26 namespace internal {
     27 
     28 
     29 const char* Marking::kWhiteBitPattern = "00";
     30 const char* Marking::kBlackBitPattern = "10";
     31 const char* Marking::kGreyBitPattern = "11";
     32 const char* Marking::kImpossibleBitPattern = "01";
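
         // An illustrative note (not collector code): the two characters in each
         // pattern are an object's mark bit and the bit that follows it in the
         // mark bitmap, so the colors decode roughly as sketched below.
         //
         //   // mark bit  next bit   color
         //   //    0         0       white       (unmarked)
         //   //    1         0       black       (marked)
         //   //    1         1       grey        (marked, queued for later scanning)
         //   //    0         1       impossible  (never produced by the collector)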
     33 
     34 
     35 // -------------------------------------------------------------------------
     36 // MarkCompactCollector
     37 
     38 MarkCompactCollector::MarkCompactCollector(Heap* heap) :  // NOLINT
     39 #ifdef DEBUG
     40       state_(IDLE),
     41 #endif
     42       sweep_precisely_(false),
     43       reduce_memory_footprint_(false),
     44       abort_incremental_marking_(false),
     45       marking_parity_(ODD_MARKING_PARITY),
     46       compacting_(false),
     47       was_marked_incrementally_(false),
     48       sweeping_pending_(false),
     49       pending_sweeper_jobs_semaphore_(0),
     50       sequential_sweeping_(false),
     51       tracer_(NULL),
     52       migration_slots_buffer_(NULL),
     53       heap_(heap),
     54       code_flusher_(NULL),
     55       have_code_to_deoptimize_(false) { }
     56 
     57 #ifdef VERIFY_HEAP
     58 class VerifyMarkingVisitor: public ObjectVisitor {
     59  public:
     60   explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
     61 
     62   void VisitPointers(Object** start, Object** end) {
     63     for (Object** current = start; current < end; current++) {
     64       if ((*current)->IsHeapObject()) {
     65         HeapObject* object = HeapObject::cast(*current);
     66         CHECK(heap_->mark_compact_collector()->IsMarked(object));
     67       }
     68     }
     69   }
     70 
     71   void VisitEmbeddedPointer(RelocInfo* rinfo) {
     72     ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
     73     if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
     74       Object* p = rinfo->target_object();
     75       VisitPointer(&p);
     76     }
     77   }
     78 
     79   void VisitCell(RelocInfo* rinfo) {
     80     Code* code = rinfo->host();
     81     ASSERT(rinfo->rmode() == RelocInfo::CELL);
     82     if (!code->IsWeakObject(rinfo->target_cell())) {
     83       ObjectVisitor::VisitCell(rinfo);
     84     }
     85   }
     86 
     87  private:
     88   Heap* heap_;
     89 };
     90 
     91 
     92 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
     93   VerifyMarkingVisitor visitor(heap);
     94   HeapObject* object;
     95   Address next_object_must_be_here_or_later = bottom;
     96 
     97   for (Address current = bottom;
     98        current < top;
     99        current += kPointerSize) {
    100     object = HeapObject::FromAddress(current);
    101     if (MarkCompactCollector::IsMarked(object)) {
    102       CHECK(current >= next_object_must_be_here_or_later);
    103       object->Iterate(&visitor);
    104       next_object_must_be_here_or_later = current + object->Size();
    105     }
    106   }
    107 }
    108 
    109 
    110 static void VerifyMarking(NewSpace* space) {
    111   Address end = space->top();
    112   NewSpacePageIterator it(space->bottom(), end);
     113   // The bottom position is at the start of its page, which allows us to use
     114   // page->area_start() as the start of the range on all pages.
    115   CHECK_EQ(space->bottom(),
    116             NewSpacePage::FromAddress(space->bottom())->area_start());
    117   while (it.has_next()) {
    118     NewSpacePage* page = it.next();
    119     Address limit = it.has_next() ? page->area_end() : end;
    120     CHECK(limit == end || !page->Contains(end));
    121     VerifyMarking(space->heap(), page->area_start(), limit);
    122   }
    123 }
    124 
    125 
    126 static void VerifyMarking(PagedSpace* space) {
    127   PageIterator it(space);
    128 
    129   while (it.has_next()) {
    130     Page* p = it.next();
    131     VerifyMarking(space->heap(), p->area_start(), p->area_end());
    132   }
    133 }
    134 
    135 
    136 static void VerifyMarking(Heap* heap) {
    137   VerifyMarking(heap->old_pointer_space());
    138   VerifyMarking(heap->old_data_space());
    139   VerifyMarking(heap->code_space());
    140   VerifyMarking(heap->cell_space());
    141   VerifyMarking(heap->property_cell_space());
    142   VerifyMarking(heap->map_space());
    143   VerifyMarking(heap->new_space());
    144 
    145   VerifyMarkingVisitor visitor(heap);
    146 
    147   LargeObjectIterator it(heap->lo_space());
    148   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    149     if (MarkCompactCollector::IsMarked(obj)) {
    150       obj->Iterate(&visitor);
    151     }
    152   }
    153 
    154   heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
    155 }
    156 
    157 
    158 class VerifyEvacuationVisitor: public ObjectVisitor {
    159  public:
    160   void VisitPointers(Object** start, Object** end) {
    161     for (Object** current = start; current < end; current++) {
    162       if ((*current)->IsHeapObject()) {
    163         HeapObject* object = HeapObject::cast(*current);
    164         CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
    165       }
    166     }
    167   }
    168 };
    169 
    170 
    171 static void VerifyEvacuation(Address bottom, Address top) {
    172   VerifyEvacuationVisitor visitor;
    173   HeapObject* object;
    174   Address next_object_must_be_here_or_later = bottom;
    175 
    176   for (Address current = bottom;
    177        current < top;
    178        current += kPointerSize) {
    179     object = HeapObject::FromAddress(current);
    180     if (MarkCompactCollector::IsMarked(object)) {
    181       CHECK(current >= next_object_must_be_here_or_later);
    182       object->Iterate(&visitor);
    183       next_object_must_be_here_or_later = current + object->Size();
    184     }
    185   }
    186 }
    187 
    188 
    189 static void VerifyEvacuation(NewSpace* space) {
    190   NewSpacePageIterator it(space->bottom(), space->top());
    191   VerifyEvacuationVisitor visitor;
    192 
    193   while (it.has_next()) {
    194     NewSpacePage* page = it.next();
    195     Address current = page->area_start();
    196     Address limit = it.has_next() ? page->area_end() : space->top();
    197     CHECK(limit == space->top() || !page->Contains(space->top()));
    198     while (current < limit) {
    199       HeapObject* object = HeapObject::FromAddress(current);
    200       object->Iterate(&visitor);
    201       current += object->Size();
    202     }
    203   }
    204 }
    205 
    206 
    207 static void VerifyEvacuation(PagedSpace* space) {
     208   // TODO(hpayer): Bring back VerifyEvacuation for pages swept in parallel
     209   // or concurrently.
    210   if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
    211       space->was_swept_conservatively()) return;
    212   PageIterator it(space);
    213 
    214   while (it.has_next()) {
    215     Page* p = it.next();
    216     if (p->IsEvacuationCandidate()) continue;
    217     VerifyEvacuation(p->area_start(), p->area_end());
    218   }
    219 }
    220 
    221 
    222 static void VerifyEvacuation(Heap* heap) {
    223   VerifyEvacuation(heap->old_pointer_space());
    224   VerifyEvacuation(heap->old_data_space());
    225   VerifyEvacuation(heap->code_space());
    226   VerifyEvacuation(heap->cell_space());
    227   VerifyEvacuation(heap->property_cell_space());
    228   VerifyEvacuation(heap->map_space());
    229   VerifyEvacuation(heap->new_space());
    230 
    231   VerifyEvacuationVisitor visitor;
    232   heap->IterateStrongRoots(&visitor, VISIT_ALL);
    233 }
    234 #endif  // VERIFY_HEAP
    235 
    236 
    237 #ifdef DEBUG
    238 class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
    239  public:
    240   VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
    241 
    242   void VisitPointers(Object** start, Object** end) {
    243     for (Object** current = start; current < end; current++) {
    244       if ((*current)->IsHeapObject()) {
    245         HeapObject* object = HeapObject::cast(*current);
    246         if (object->IsString()) continue;
    247         switch (object->map()->instance_type()) {
    248           case JS_FUNCTION_TYPE:
    249             CheckContext(JSFunction::cast(object)->context());
    250             break;
    251           case JS_GLOBAL_PROXY_TYPE:
    252             CheckContext(JSGlobalProxy::cast(object)->native_context());
    253             break;
    254           case JS_GLOBAL_OBJECT_TYPE:
    255           case JS_BUILTINS_OBJECT_TYPE:
    256             CheckContext(GlobalObject::cast(object)->native_context());
    257             break;
    258           case JS_ARRAY_TYPE:
    259           case JS_DATE_TYPE:
    260           case JS_OBJECT_TYPE:
    261           case JS_REGEXP_TYPE:
    262             VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
    263             break;
    264           case MAP_TYPE:
    265             VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
    266             VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
    267             break;
    268           case FIXED_ARRAY_TYPE:
    269             if (object->IsContext()) {
    270               CheckContext(object);
    271             } else {
    272               FixedArray* array = FixedArray::cast(object);
    273               int length = array->length();
    274               // Set array length to zero to prevent cycles while iterating
     275               // over array bodies; this is easier than intrusive marking.
    276               array->set_length(0);
    277               array->IterateBody(
    278                   FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
    279               array->set_length(length);
    280             }
    281             break;
    282           case CELL_TYPE:
    283           case JS_PROXY_TYPE:
    284           case JS_VALUE_TYPE:
    285           case TYPE_FEEDBACK_INFO_TYPE:
    286             object->Iterate(this);
    287             break;
    288           case DECLARED_ACCESSOR_INFO_TYPE:
    289           case EXECUTABLE_ACCESSOR_INFO_TYPE:
    290           case BYTE_ARRAY_TYPE:
    291           case CALL_HANDLER_INFO_TYPE:
    292           case CODE_TYPE:
    293           case FIXED_DOUBLE_ARRAY_TYPE:
    294           case HEAP_NUMBER_TYPE:
    295           case INTERCEPTOR_INFO_TYPE:
    296           case ODDBALL_TYPE:
    297           case SCRIPT_TYPE:
    298           case SHARED_FUNCTION_INFO_TYPE:
    299             break;
    300           default:
    301             UNREACHABLE();
    302         }
    303       }
    304     }
    305   }
    306 
    307  private:
    308   void CheckContext(Object* context) {
    309     if (!context->IsContext()) return;
    310     Context* native_context = Context::cast(context)->native_context();
    311     if (current_native_context_ == NULL) {
    312       current_native_context_ = native_context;
    313     } else {
    314       CHECK_EQ(current_native_context_, native_context);
    315     }
    316   }
    317 
    318   Context* current_native_context_;
    319 };
    320 
    321 
    322 static void VerifyNativeContextSeparation(Heap* heap) {
    323   HeapObjectIterator it(heap->code_space());
    324 
    325   for (Object* object = it.Next(); object != NULL; object = it.Next()) {
    326     VerifyNativeContextSeparationVisitor visitor;
    327     Code::cast(object)->CodeIterateBody(&visitor);
    328   }
    329 }
    330 #endif
    331 
    332 
    333 void MarkCompactCollector::SetUp() {
    334   free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
    335   free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
    336 }
    337 
    338 
    339 void MarkCompactCollector::TearDown() {
    340   AbortCompaction();
    341 }
    342 
    343 
    344 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
    345   p->MarkEvacuationCandidate();
    346   evacuation_candidates_.Add(p);
    347 }
    348 
    349 
    350 static void TraceFragmentation(PagedSpace* space) {
    351   int number_of_pages = space->CountTotalPages();
    352   intptr_t reserved = (number_of_pages * space->AreaSize());
    353   intptr_t free = reserved - space->SizeOfObjects();
    354   PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
    355          AllocationSpaceName(space->identity()),
    356          number_of_pages,
    357          static_cast<int>(free),
    358          static_cast<double>(free) * 100 / reserved);
    359 }
    360 
    361 
    362 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
    363   if (!compacting_) {
    364     ASSERT(evacuation_candidates_.length() == 0);
    365 
    366 #ifdef ENABLE_GDB_JIT_INTERFACE
     367     // If the GDBJIT interface is active, disable compaction.
    368     if (FLAG_gdbjit) return false;
    369 #endif
    370 
    371     CollectEvacuationCandidates(heap()->old_pointer_space());
    372     CollectEvacuationCandidates(heap()->old_data_space());
    373 
    374     if (FLAG_compact_code_space &&
    375         (mode == NON_INCREMENTAL_COMPACTION ||
    376          FLAG_incremental_code_compaction)) {
    377       CollectEvacuationCandidates(heap()->code_space());
    378     } else if (FLAG_trace_fragmentation) {
    379       TraceFragmentation(heap()->code_space());
    380     }
    381 
    382     if (FLAG_trace_fragmentation) {
    383       TraceFragmentation(heap()->map_space());
    384       TraceFragmentation(heap()->cell_space());
    385       TraceFragmentation(heap()->property_cell_space());
    386     }
    387 
    388     heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
    389     heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
    390     heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
    391 
    392     compacting_ = evacuation_candidates_.length() > 0;
    393   }
    394 
    395   return compacting_;
    396 }
    397 
    398 
    399 void MarkCompactCollector::CollectGarbage() {
    400   // Make sure that Prepare() has been called. The individual steps below will
    401   // update the state as they proceed.
    402   ASSERT(state_ == PREPARE_GC);
    403 
    404   MarkLiveObjects();
    405   ASSERT(heap_->incremental_marking()->IsStopped());
    406 
    407   if (FLAG_collect_maps) ClearNonLiveReferences();
    408 
    409   ClearWeakCollections();
    410 
    411 #ifdef VERIFY_HEAP
    412   if (FLAG_verify_heap) {
    413     VerifyMarking(heap_);
    414   }
    415 #endif
    416 
    417   SweepSpaces();
    418 
    419 #ifdef DEBUG
    420   if (FLAG_verify_native_context_separation) {
    421     VerifyNativeContextSeparation(heap_);
    422   }
    423 #endif
    424 
    425 #ifdef VERIFY_HEAP
    426   if (heap()->weak_embedded_objects_verification_enabled()) {
    427     VerifyWeakEmbeddedObjectsInCode();
    428   }
    429   if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
    430     VerifyOmittedMapChecks();
    431   }
    432 #endif
    433 
    434   Finish();
    435 
    436   if (marking_parity_ == EVEN_MARKING_PARITY) {
    437     marking_parity_ = ODD_MARKING_PARITY;
    438   } else {
    439     ASSERT(marking_parity_ == ODD_MARKING_PARITY);
    440     marking_parity_ = EVEN_MARKING_PARITY;
    441   }
    442 
    443   tracer_ = NULL;
    444 }
    445 
    446 
    447 #ifdef VERIFY_HEAP
    448 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
    449   PageIterator it(space);
    450 
    451   while (it.has_next()) {
    452     Page* p = it.next();
    453     CHECK(p->markbits()->IsClean());
    454     CHECK_EQ(0, p->LiveBytes());
    455   }
    456 }
    457 
    458 
    459 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
    460   NewSpacePageIterator it(space->bottom(), space->top());
    461 
    462   while (it.has_next()) {
    463     NewSpacePage* p = it.next();
    464     CHECK(p->markbits()->IsClean());
    465     CHECK_EQ(0, p->LiveBytes());
    466   }
    467 }
    468 
    469 
    470 void MarkCompactCollector::VerifyMarkbitsAreClean() {
    471   VerifyMarkbitsAreClean(heap_->old_pointer_space());
    472   VerifyMarkbitsAreClean(heap_->old_data_space());
    473   VerifyMarkbitsAreClean(heap_->code_space());
    474   VerifyMarkbitsAreClean(heap_->cell_space());
    475   VerifyMarkbitsAreClean(heap_->property_cell_space());
    476   VerifyMarkbitsAreClean(heap_->map_space());
    477   VerifyMarkbitsAreClean(heap_->new_space());
    478 
    479   LargeObjectIterator it(heap_->lo_space());
    480   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    481     MarkBit mark_bit = Marking::MarkBitFrom(obj);
    482     CHECK(Marking::IsWhite(mark_bit));
    483     CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
    484   }
    485 }
    486 
    487 
    488 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
    489   HeapObjectIterator code_iterator(heap()->code_space());
    490   for (HeapObject* obj = code_iterator.Next();
    491        obj != NULL;
    492        obj = code_iterator.Next()) {
    493     Code* code = Code::cast(obj);
    494     if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
    495     if (WillBeDeoptimized(code)) continue;
    496     code->VerifyEmbeddedObjectsDependency();
    497   }
    498 }
    499 
    500 
    501 void MarkCompactCollector::VerifyOmittedMapChecks() {
    502   HeapObjectIterator iterator(heap()->map_space());
    503   for (HeapObject* obj = iterator.Next();
    504        obj != NULL;
    505        obj = iterator.Next()) {
    506     Map* map = Map::cast(obj);
    507     map->VerifyOmittedMapChecks();
    508   }
    509 }
    510 #endif  // VERIFY_HEAP
    511 
    512 
    513 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
    514   PageIterator it(space);
    515 
    516   while (it.has_next()) {
    517     Bitmap::Clear(it.next());
    518   }
    519 }
    520 
    521 
    522 static void ClearMarkbitsInNewSpace(NewSpace* space) {
    523   NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
    524 
    525   while (it.has_next()) {
    526     Bitmap::Clear(it.next());
    527   }
    528 }
    529 
    530 
    531 void MarkCompactCollector::ClearMarkbits() {
    532   ClearMarkbitsInPagedSpace(heap_->code_space());
    533   ClearMarkbitsInPagedSpace(heap_->map_space());
    534   ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
    535   ClearMarkbitsInPagedSpace(heap_->old_data_space());
    536   ClearMarkbitsInPagedSpace(heap_->cell_space());
    537   ClearMarkbitsInPagedSpace(heap_->property_cell_space());
    538   ClearMarkbitsInNewSpace(heap_->new_space());
    539 
    540   LargeObjectIterator it(heap_->lo_space());
    541   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    542     MarkBit mark_bit = Marking::MarkBitFrom(obj);
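             // Clear both bits of the object's two-bit color (see the bit
             // patterns at the top of this file), leaving the object white.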
    543     mark_bit.Clear();
    544     mark_bit.Next().Clear();
    545     Page::FromAddress(obj->address())->ResetProgressBar();
    546     Page::FromAddress(obj->address())->ResetLiveBytes();
    547   }
    548 }
    549 
    550 
    551 class MarkCompactCollector::SweeperTask : public v8::Task {
    552  public:
    553   SweeperTask(Heap* heap, PagedSpace* space)
    554     : heap_(heap), space_(space) {}
    555 
    556   virtual ~SweeperTask() {}
    557 
    558  private:
    559   // v8::Task overrides.
    560   virtual void Run() V8_OVERRIDE {
    561     heap_->mark_compact_collector()->SweepInParallel(space_);
    562     heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
    563   }
    564 
    565   Heap* heap_;
    566   PagedSpace* space_;
    567 
    568   DISALLOW_COPY_AND_ASSIGN(SweeperTask);
    569 };
    570 
    571 
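         // Parallel sweeping is driven either by dedicated sweeper threads or, when
         // FLAG_job_based_sweeping is set, by two background SweeperTasks (one for
         // the old data space and one for the old pointer space) whose completion
         // is signalled through pending_sweeper_jobs_semaphore_.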
    572 void MarkCompactCollector::StartSweeperThreads() {
    573   ASSERT(free_list_old_pointer_space_.get()->IsEmpty());
    574   ASSERT(free_list_old_data_space_.get()->IsEmpty());
    575   sweeping_pending_ = true;
    576   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
    577     isolate()->sweeper_threads()[i]->StartSweeping();
    578   }
    579   if (FLAG_job_based_sweeping) {
    580     V8::GetCurrentPlatform()->CallOnBackgroundThread(
    581         new SweeperTask(heap(), heap()->old_data_space()),
    582         v8::Platform::kShortRunningTask);
    583     V8::GetCurrentPlatform()->CallOnBackgroundThread(
    584         new SweeperTask(heap(), heap()->old_pointer_space()),
    585         v8::Platform::kShortRunningTask);
    586   }
    587 }
    588 
    589 
    590 void MarkCompactCollector::WaitUntilSweepingCompleted() {
    591   ASSERT(sweeping_pending_ == true);
    592   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
    593     isolate()->sweeper_threads()[i]->WaitForSweeperThread();
    594   }
    595   if (FLAG_job_based_sweeping) {
     596     // Wait once for each of the two sweeper jobs.
    597     pending_sweeper_jobs_semaphore_.Wait();
    598     pending_sweeper_jobs_semaphore_.Wait();
    599   }
    600   ParallelSweepSpacesComplete();
    601   sweeping_pending_ = false;
    602   RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
    603   RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
    604   heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
    605   heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
    606 }
    607 
    608 
    609 bool MarkCompactCollector::IsSweepingCompleted() {
    610   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
    611     if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
    612       return false;
    613     }
    614   }
    615   if (FLAG_job_based_sweeping) {
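             // Non-blocking probe: try to take one signal without waiting.  If no
             // signal is available yet, report sweeping as not completed; otherwise
             // return the signal so WaitUntilSweepingCompleted can still consume it.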
    616     if (!pending_sweeper_jobs_semaphore_.WaitFor(TimeDelta::FromSeconds(0))) {
    617       return false;
    618     }
    619     pending_sweeper_jobs_semaphore_.Signal();
    620   }
    621   return true;
    622 }
    623 
    624 
    625 void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
    626   FreeList* free_list;
    627 
    628   if (space == heap()->old_pointer_space()) {
    629     free_list = free_list_old_pointer_space_.get();
    630   } else if (space == heap()->old_data_space()) {
    631     free_list = free_list_old_data_space_.get();
    632   } else {
     633     // Any PagedSpace might invoke RefillFreeList, so make sure we only
     634     // refill free lists for the old data and old pointer spaces.
    635     return;
    636   }
    637 
    638   intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
    639   space->AddToAccountingStats(freed_bytes);
    640   space->DecrementUnsweptFreeBytes(freed_bytes);
    641 }
    642 
    643 
    644 bool MarkCompactCollector::AreSweeperThreadsActivated() {
    645   return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
    646 }
    647 
    648 
    649 bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
    650   return sweeping_pending_;
    651 }
    652 
    653 
    654 void Marking::TransferMark(Address old_start, Address new_start) {
    655   // This is only used when resizing an object.
    656   ASSERT(MemoryChunk::FromAddress(old_start) ==
    657          MemoryChunk::FromAddress(new_start));
    658 
    659   if (!heap_->incremental_marking()->IsMarking()) return;
    660 
    661   // If the mark doesn't move, we don't check the color of the object.
    662   // It doesn't matter whether the object is black, since it hasn't changed
    663   // size, so the adjustment to the live data count will be zero anyway.
    664   if (old_start == new_start) return;
    665 
    666   MarkBit new_mark_bit = MarkBitFrom(new_start);
    667   MarkBit old_mark_bit = MarkBitFrom(old_start);
    668 
    669 #ifdef DEBUG
    670   ObjectColor old_color = Color(old_mark_bit);
    671 #endif
    672 
    673   if (Marking::IsBlack(old_mark_bit)) {
    674     old_mark_bit.Clear();
    675     ASSERT(IsWhite(old_mark_bit));
    676     Marking::MarkBlack(new_mark_bit);
    677     return;
    678   } else if (Marking::IsGrey(old_mark_bit)) {
    679     old_mark_bit.Clear();
    680     old_mark_bit.Next().Clear();
    681     ASSERT(IsWhite(old_mark_bit));
    682     heap_->incremental_marking()->WhiteToGreyAndPush(
    683         HeapObject::FromAddress(new_start), new_mark_bit);
    684     heap_->incremental_marking()->RestartIfNotMarking();
    685   }
    686 
    687 #ifdef DEBUG
    688   ObjectColor new_color = Color(new_mark_bit);
    689   ASSERT(new_color == old_color);
    690 #endif
    691 }
    692 
    693 
    694 const char* AllocationSpaceName(AllocationSpace space) {
    695   switch (space) {
    696     case NEW_SPACE: return "NEW_SPACE";
    697     case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    698     case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    699     case CODE_SPACE: return "CODE_SPACE";
    700     case MAP_SPACE: return "MAP_SPACE";
    701     case CELL_SPACE: return "CELL_SPACE";
    702     case PROPERTY_CELL_SPACE:
    703       return "PROPERTY_CELL_SPACE";
    704     case LO_SPACE: return "LO_SPACE";
    705     default:
    706       UNREACHABLE();
    707   }
    708 
    709   return NULL;
    710 }
    711 
    712 
     713 // Returns zero for pages that have so little fragmentation that it is not
     714 // worth defragmenting them.  Otherwise returns a positive integer that
     715 // gives an estimate of fragmentation on an arbitrary scale.
    716 static int FreeListFragmentation(PagedSpace* space, Page* p) {
    717   // If page was not swept then there are no free list items on it.
    718   if (!p->WasSwept()) {
    719     if (FLAG_trace_fragmentation) {
    720       PrintF("%p [%s]: %d bytes live (unswept)\n",
    721              reinterpret_cast<void*>(p),
    722              AllocationSpaceName(space->identity()),
    723              p->LiveBytes());
    724     }
    725     return 0;
    726   }
    727 
    728   PagedSpace::SizeStats sizes;
    729   space->ObtainFreeListStatistics(p, &sizes);
    730 
    731   intptr_t ratio;
    732   intptr_t ratio_threshold;
    733   intptr_t area_size = space->AreaSize();
    734   if (space->identity() == CODE_SPACE) {
    735     ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
    736         area_size;
    737     ratio_threshold = 10;
    738   } else {
    739     ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
    740         area_size;
    741     ratio_threshold = 15;
    742   }
    743 
    744   if (FLAG_trace_fragmentation) {
    745     PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
    746            reinterpret_cast<void*>(p),
    747            AllocationSpaceName(space->identity()),
    748            static_cast<int>(sizes.small_size_),
    749            static_cast<double>(sizes.small_size_ * 100) /
    750            area_size,
    751            static_cast<int>(sizes.medium_size_),
    752            static_cast<double>(sizes.medium_size_ * 100) /
    753            area_size,
    754            static_cast<int>(sizes.large_size_),
    755            static_cast<double>(sizes.large_size_ * 100) /
    756            area_size,
    757            static_cast<int>(sizes.huge_size_),
    758            static_cast<double>(sizes.huge_size_ * 100) /
    759            area_size,
    760            (ratio > ratio_threshold) ? "[fragmented]" : "");
    761   }
    762 
    763   if (FLAG_always_compact && sizes.Total() != area_size) {
    764     return 1;
    765   }
    766 
    767   if (ratio <= ratio_threshold) return 0;  // Not fragmented.
    768 
    769   return static_cast<int>(ratio - ratio_threshold);
    770 }
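
         // A worked example with purely illustrative numbers: for a code-space page
         // with area_size = 1,000,000 bytes, medium_size_ = 50,000 and
         // large_size_ = 100,000, the ratio is
         // (50,000 * 10 + 100,000 * 2) * 100 / 1,000,000 = 70.  That exceeds the
         // code-space threshold of 10, so the page is reported as fragmented with
         // an estimate of 70 - 10 = 60.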
    771 
    772 
    773 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
    774   ASSERT(space->identity() == OLD_POINTER_SPACE ||
    775          space->identity() == OLD_DATA_SPACE ||
    776          space->identity() == CODE_SPACE);
    777 
    778   static const int kMaxMaxEvacuationCandidates = 1000;
    779   int number_of_pages = space->CountTotalPages();
    780   int max_evacuation_candidates =
    781       static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
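           // For example (illustrative numbers only): a space with 200 pages yields
           // sqrt(200 / 2) + 1 = 11 candidates, unless overridden below.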
    782 
    783   if (FLAG_stress_compaction || FLAG_always_compact) {
    784     max_evacuation_candidates = kMaxMaxEvacuationCandidates;
    785   }
    786 
    787   class Candidate {
    788    public:
    789     Candidate() : fragmentation_(0), page_(NULL) { }
    790     Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
    791 
    792     int fragmentation() { return fragmentation_; }
    793     Page* page() { return page_; }
    794 
    795    private:
    796     int fragmentation_;
    797     Page* page_;
    798   };
    799 
    800   enum CompactionMode {
    801     COMPACT_FREE_LISTS,
    802     REDUCE_MEMORY_FOOTPRINT
    803   };
    804 
    805   CompactionMode mode = COMPACT_FREE_LISTS;
    806 
    807   intptr_t reserved = number_of_pages * space->AreaSize();
    808   intptr_t over_reserved = reserved - space->SizeOfObjects();
    809   static const intptr_t kFreenessThreshold = 50;
    810 
    811   if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
    812     // If reduction of memory footprint was requested, we are aggressive
     813     // about choosing pages to free.  We expect that half-empty pages
     814     // are easier to compact, so we slightly bump the limit.
    815     mode = REDUCE_MEMORY_FOOTPRINT;
    816     max_evacuation_candidates += 2;
    817   }
    818 
    819 
    820   if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
    821     // If over-usage is very high (more than a third of the space), we
    822     // try to free all mostly empty pages.  We expect that almost empty
     823     // pages are even easier to compact, so we bump the limit even more.
    824     mode = REDUCE_MEMORY_FOOTPRINT;
    825     max_evacuation_candidates *= 2;
    826   }
    827 
    828   if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
    829     PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
    830            "evacuation candidate limit: %d\n",
    831            static_cast<double>(over_reserved) / MB,
    832            static_cast<double>(reserved) / MB,
    833            static_cast<int>(kFreenessThreshold),
    834            max_evacuation_candidates);
    835   }
    836 
    837   intptr_t estimated_release = 0;
    838 
    839   Candidate candidates[kMaxMaxEvacuationCandidates];
    840 
    841   max_evacuation_candidates =
    842       Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
    843 
    844   int count = 0;
    845   int fragmentation = 0;
    846   Candidate* least = NULL;
    847 
    848   PageIterator it(space);
    849   if (it.has_next()) it.next();  // Never compact the first page.
    850 
    851   while (it.has_next()) {
    852     Page* p = it.next();
    853     p->ClearEvacuationCandidate();
    854 
    855     if (FLAG_stress_compaction) {
    856       unsigned int counter = space->heap()->ms_count();
    857       uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
    858       if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    859     } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
    860       // Don't try to release too many pages.
    861       if (estimated_release >= over_reserved) {
    862         continue;
    863       }
    864 
    865       intptr_t free_bytes = 0;
    866 
    867       if (!p->WasSwept()) {
    868         free_bytes = (p->area_size() - p->LiveBytes());
    869       } else {
    870         PagedSpace::SizeStats sizes;
    871         space->ObtainFreeListStatistics(p, &sizes);
    872         free_bytes = sizes.Total();
    873       }
    874 
    875       int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
    876 
    877       if (free_pct >= kFreenessThreshold) {
    878         estimated_release += free_bytes;
    879         fragmentation = free_pct;
    880       } else {
    881         fragmentation = 0;
    882       }
    883 
    884       if (FLAG_trace_fragmentation) {
    885         PrintF("%p [%s]: %d (%.2f%%) free %s\n",
    886                reinterpret_cast<void*>(p),
    887                AllocationSpaceName(space->identity()),
    888                static_cast<int>(free_bytes),
    889                static_cast<double>(free_bytes * 100) / p->area_size(),
    890                (fragmentation > 0) ? "[fragmented]" : "");
    891       }
    892     } else {
    893       fragmentation = FreeListFragmentation(space, p);
    894     }
    895 
    896     if (fragmentation != 0) {
    897       if (count < max_evacuation_candidates) {
    898         candidates[count++] = Candidate(fragmentation, p);
    899       } else {
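                 // The candidate array is full: find the least fragmented candidate
                 // currently kept and replace it if this page is more fragmented.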
    900         if (least == NULL) {
    901           for (int i = 0; i < max_evacuation_candidates; i++) {
    902             if (least == NULL ||
    903                 candidates[i].fragmentation() < least->fragmentation()) {
    904               least = candidates + i;
    905             }
    906           }
    907         }
    908         if (least->fragmentation() < fragmentation) {
    909           *least = Candidate(fragmentation, p);
    910           least = NULL;
    911         }
    912       }
    913     }
    914   }
    915 
    916   for (int i = 0; i < count; i++) {
    917     AddEvacuationCandidate(candidates[i].page());
    918   }
    919 
    920   if (count > 0 && FLAG_trace_fragmentation) {
    921     PrintF("Collected %d evacuation candidates for space %s\n",
    922            count,
    923            AllocationSpaceName(space->identity()));
    924   }
    925 }
    926 
    927 
    928 void MarkCompactCollector::AbortCompaction() {
    929   if (compacting_) {
    930     int npages = evacuation_candidates_.length();
    931     for (int i = 0; i < npages; i++) {
    932       Page* p = evacuation_candidates_[i];
    933       slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
    934       p->ClearEvacuationCandidate();
    935       p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    936     }
    937     compacting_ = false;
    938     evacuation_candidates_.Rewind(0);
    939     invalidated_code_.Rewind(0);
    940   }
    941   ASSERT_EQ(0, evacuation_candidates_.length());
    942 }
    943 
    944 
    945 void MarkCompactCollector::Prepare(GCTracer* tracer) {
    946   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
    947 
     948   // Rather than passing the tracer around, we stash it in a member
     949   // variable.
    950   tracer_ = tracer;
    951 
    952 #ifdef DEBUG
    953   ASSERT(state_ == IDLE);
    954   state_ = PREPARE_GC;
    955 #endif
    956 
    957   ASSERT(!FLAG_never_compact || !FLAG_always_compact);
    958 
    959   if (IsConcurrentSweepingInProgress()) {
    960     // Instead of waiting we could also abort the sweeper threads here.
    961     WaitUntilSweepingCompleted();
    962   }
    963 
    964   // Clear marking bits if incremental marking is aborted.
    965   if (was_marked_incrementally_ && abort_incremental_marking_) {
    966     heap()->incremental_marking()->Abort();
    967     ClearMarkbits();
    968     AbortCompaction();
    969     was_marked_incrementally_ = false;
    970   }
    971 
     972   // Don't start compaction if we are in the middle of an incremental
     973   // marking cycle; we did not collect any slots while it was running.
    974   if (!FLAG_never_compact && !was_marked_incrementally_) {
    975     StartCompaction(NON_INCREMENTAL_COMPACTION);
    976   }
    977 
    978   PagedSpaces spaces(heap());
    979   for (PagedSpace* space = spaces.next();
    980        space != NULL;
    981        space = spaces.next()) {
    982     space->PrepareForMarkCompact();
    983   }
    984 
    985 #ifdef VERIFY_HEAP
    986   if (!was_marked_incrementally_ && FLAG_verify_heap) {
    987     VerifyMarkbitsAreClean();
    988   }
    989 #endif
    990 }
    991 
    992 
    993 void MarkCompactCollector::Finish() {
    994 #ifdef DEBUG
    995   ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
    996   state_ = IDLE;
    997 #endif
    998   // The stub cache is not traversed during GC; clear the cache to
    999   // force lazy re-initialization of it. This must be done after the
    1000   // GC, because it relies on the new addresses of certain old space
   1001   // objects (empty string, illegal builtin).
   1002   isolate()->stub_cache()->Clear();
   1003 
   1004   if (have_code_to_deoptimize_) {
   1005     // Some code objects were marked for deoptimization during the GC.
   1006     Deoptimizer::DeoptimizeMarkedCode(isolate());
   1007     have_code_to_deoptimize_ = false;
   1008   }
   1009 }
   1010 
   1011 
   1012 // -------------------------------------------------------------------------
   1013 // Phase 1: tracing and marking live objects.
   1014 //   before: all objects are in normal state.
    1015 //   after: live objects are marked black ("10") in the mark bitmap.
   1016 
   1017 // Marking all live objects in the heap as part of mark-sweep or mark-compact
   1018 // collection.  Before marking, all objects are in their normal state.  After
    1019 // marking, live objects are marked in the mark bitmap, indicating that they
    1020 // have been found reachable.
   1021 //
   1022 // The marking algorithm is a (mostly) depth-first (because of possible stack
   1023 // overflow) traversal of the graph of objects reachable from the roots.  It
   1024 // uses an explicit stack of pointers rather than recursion.  The young
   1025 // generation's inactive ('from') space is used as a marking stack.  The
   1026 // objects in the marking stack are the ones that have been reached and marked
   1027 // but their children have not yet been visited.
   1028 //
   1029 // The marking stack can overflow during traversal.  In that case, we set an
   1030 // overflow flag.  When the overflow flag is set, we continue marking objects
   1031 // reachable from the objects on the marking stack, but no longer push them on
   1032 // the marking stack.  Instead, we mark them as both marked and overflowed.
   1033 // When the stack is in the overflowed state, objects marked as overflowed
   1034 // have been reached and marked but their children have not been visited yet.
   1035 // After emptying the marking stack, we clear the overflow flag and traverse
   1036 // the heap looking for objects marked as overflowed, push them on the stack,
   1037 // and continue with marking.  This process repeats until all reachable
   1038 // objects have been marked.
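         //
         // A minimal sketch of that loop (pseudocode only; MarkRoots, marking_deque_
         // and RefillMarkingDeque stand in for the real helpers):
         //
         //   // MarkRoots();                         // mark and push the roots
         //   // do {
         //   //   while (!marking_deque_.IsEmpty()) {
         //   //     HeapObject* obj = marking_deque_.Pop();
         //   //     VisitChildren(obj);              // children are marked and pushed,
         //   //                                      // or flagged as overflowed when
         //   //                                      // the marking stack is full
         //   //   }
         //   //   if (marking_deque_.overflowed()) {
         //   //     marking_deque_.ClearOverflowed();
         //   //     RefillMarkingDeque();            // linear heap scan that re-pushes
         //   //   }                                  // objects marked as overflowed
         //   // } while (!marking_deque_.IsEmpty() || marking_deque_.overflowed());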
   1039 
   1040 void CodeFlusher::ProcessJSFunctionCandidates() {
   1041   Code* lazy_compile =
   1042       isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
   1043   Object* undefined = isolate_->heap()->undefined_value();
   1044 
   1045   JSFunction* candidate = jsfunction_candidates_head_;
   1046   JSFunction* next_candidate;
   1047   while (candidate != NULL) {
   1048     next_candidate = GetNextCandidate(candidate);
   1049     ClearNextCandidate(candidate, undefined);
   1050 
   1051     SharedFunctionInfo* shared = candidate->shared();
   1052 
   1053     Code* code = shared->code();
   1054     MarkBit code_mark = Marking::MarkBitFrom(code);
   1055     if (!code_mark.Get()) {
   1056       if (FLAG_trace_code_flushing && shared->is_compiled()) {
   1057         PrintF("[code-flushing clears: ");
   1058         shared->ShortPrint();
   1059         PrintF(" - age: %d]\n", code->GetAge());
   1060       }
   1061       shared->set_code(lazy_compile);
   1062       candidate->set_code(lazy_compile);
   1063     } else {
   1064       candidate->set_code(code);
   1065     }
   1066 
   1067     // We are in the middle of a GC cycle so the write barrier in the code
   1068     // setter did not record the slot update and we have to do that manually.
   1069     Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
   1070     Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
   1071     isolate_->heap()->mark_compact_collector()->
   1072         RecordCodeEntrySlot(slot, target);
   1073 
   1074     Object** shared_code_slot =
   1075         HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
   1076     isolate_->heap()->mark_compact_collector()->
   1077         RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
   1078 
   1079     candidate = next_candidate;
   1080   }
   1081 
   1082   jsfunction_candidates_head_ = NULL;
   1083 }
   1084 
   1085 
   1086 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
   1087   Code* lazy_compile =
   1088       isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
   1089 
   1090   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
   1091   SharedFunctionInfo* next_candidate;
   1092   while (candidate != NULL) {
   1093     next_candidate = GetNextCandidate(candidate);
   1094     ClearNextCandidate(candidate);
   1095 
   1096     Code* code = candidate->code();
   1097     MarkBit code_mark = Marking::MarkBitFrom(code);
   1098     if (!code_mark.Get()) {
   1099       if (FLAG_trace_code_flushing && candidate->is_compiled()) {
   1100         PrintF("[code-flushing clears: ");
   1101         candidate->ShortPrint();
   1102         PrintF(" - age: %d]\n", code->GetAge());
   1103       }
   1104       candidate->set_code(lazy_compile);
   1105     }
   1106 
   1107     Object** code_slot =
   1108         HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
   1109     isolate_->heap()->mark_compact_collector()->
   1110         RecordSlot(code_slot, code_slot, *code_slot);
   1111 
   1112     candidate = next_candidate;
   1113   }
   1114 
   1115   shared_function_info_candidates_head_ = NULL;
   1116 }
   1117 
   1118 
   1119 void CodeFlusher::ProcessOptimizedCodeMaps() {
   1120   STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
   1121 
   1122   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
   1123   SharedFunctionInfo* next_holder;
   1124 
   1125   while (holder != NULL) {
   1126     next_holder = GetNextCodeMap(holder);
   1127     ClearNextCodeMap(holder);
   1128 
   1129     FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
   1130     int new_length = SharedFunctionInfo::kEntriesStart;
   1131     int old_length = code_map->length();
   1132     for (int i = SharedFunctionInfo::kEntriesStart;
   1133          i < old_length;
   1134          i += SharedFunctionInfo::kEntryLength) {
   1135       Code* code =
   1136           Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
   1137       if (!Marking::MarkBitFrom(code).Get()) continue;
   1138 
   1139       // Move every slot in the entry.
   1140       for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
   1141         int dst_index = new_length++;
   1142         Object** slot = code_map->RawFieldOfElementAt(dst_index);
   1143         Object* object = code_map->get(i + j);
   1144         code_map->set(dst_index, object);
   1145         if (j == SharedFunctionInfo::kOsrAstIdOffset) {
   1146           ASSERT(object->IsSmi());
   1147         } else {
   1148           ASSERT(Marking::IsBlack(
   1149               Marking::MarkBitFrom(HeapObject::cast(*slot))));
   1150           isolate_->heap()->mark_compact_collector()->
   1151               RecordSlot(slot, slot, *slot);
   1152         }
   1153       }
   1154     }
   1155 
   1156     // Trim the optimized code map if entries have been removed.
   1157     if (new_length < old_length) {
   1158       holder->TrimOptimizedCodeMap(old_length - new_length);
   1159     }
   1160 
   1161     holder = next_holder;
   1162   }
   1163 
   1164   optimized_code_map_holder_head_ = NULL;
   1165 }
   1166 
   1167 
   1168 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
   1169   // Make sure previous flushing decisions are revisited.
   1170   isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
   1171 
   1172   if (FLAG_trace_code_flushing) {
   1173     PrintF("[code-flushing abandons function-info: ");
   1174     shared_info->ShortPrint();
   1175     PrintF("]\n");
   1176   }
   1177 
   1178   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
   1179   SharedFunctionInfo* next_candidate;
   1180   if (candidate == shared_info) {
   1181     next_candidate = GetNextCandidate(shared_info);
   1182     shared_function_info_candidates_head_ = next_candidate;
   1183     ClearNextCandidate(shared_info);
   1184   } else {
   1185     while (candidate != NULL) {
   1186       next_candidate = GetNextCandidate(candidate);
   1187 
   1188       if (next_candidate == shared_info) {
   1189         next_candidate = GetNextCandidate(shared_info);
   1190         SetNextCandidate(candidate, next_candidate);
   1191         ClearNextCandidate(shared_info);
   1192         break;
   1193       }
   1194 
   1195       candidate = next_candidate;
   1196     }
   1197   }
   1198 }
   1199 
   1200 
   1201 void CodeFlusher::EvictCandidate(JSFunction* function) {
   1202   ASSERT(!function->next_function_link()->IsUndefined());
   1203   Object* undefined = isolate_->heap()->undefined_value();
   1204 
   1205   // Make sure previous flushing decisions are revisited.
   1206   isolate_->heap()->incremental_marking()->RecordWrites(function);
   1207   isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
   1208 
   1209   if (FLAG_trace_code_flushing) {
   1210     PrintF("[code-flushing abandons closure: ");
   1211     function->shared()->ShortPrint();
   1212     PrintF("]\n");
   1213   }
   1214 
   1215   JSFunction* candidate = jsfunction_candidates_head_;
   1216   JSFunction* next_candidate;
   1217   if (candidate == function) {
   1218     next_candidate = GetNextCandidate(function);
   1219     jsfunction_candidates_head_ = next_candidate;
   1220     ClearNextCandidate(function, undefined);
   1221   } else {
   1222     while (candidate != NULL) {
   1223       next_candidate = GetNextCandidate(candidate);
   1224 
   1225       if (next_candidate == function) {
   1226         next_candidate = GetNextCandidate(function);
   1227         SetNextCandidate(candidate, next_candidate);
   1228         ClearNextCandidate(function, undefined);
   1229         break;
   1230       }
   1231 
   1232       candidate = next_candidate;
   1233     }
   1234   }
   1235 }
   1236 
   1237 
   1238 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
   1239   ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
   1240          get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
   1241 
   1242   // Make sure previous flushing decisions are revisited.
   1243   isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
   1244 
   1245   if (FLAG_trace_code_flushing) {
   1246     PrintF("[code-flushing abandons code-map: ");
   1247     code_map_holder->ShortPrint();
   1248     PrintF("]\n");
   1249   }
   1250 
   1251   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
   1252   SharedFunctionInfo* next_holder;
   1253   if (holder == code_map_holder) {
   1254     next_holder = GetNextCodeMap(code_map_holder);
   1255     optimized_code_map_holder_head_ = next_holder;
   1256     ClearNextCodeMap(code_map_holder);
   1257   } else {
   1258     while (holder != NULL) {
   1259       next_holder = GetNextCodeMap(holder);
   1260 
   1261       if (next_holder == code_map_holder) {
   1262         next_holder = GetNextCodeMap(code_map_holder);
   1263         SetNextCodeMap(holder, next_holder);
   1264         ClearNextCodeMap(code_map_holder);
   1265         break;
   1266       }
   1267 
   1268       holder = next_holder;
   1269     }
   1270   }
   1271 }
   1272 
   1273 
   1274 void CodeFlusher::EvictJSFunctionCandidates() {
   1275   JSFunction* candidate = jsfunction_candidates_head_;
   1276   JSFunction* next_candidate;
   1277   while (candidate != NULL) {
   1278     next_candidate = GetNextCandidate(candidate);
   1279     EvictCandidate(candidate);
   1280     candidate = next_candidate;
   1281   }
   1282   ASSERT(jsfunction_candidates_head_ == NULL);
   1283 }
   1284 
   1285 
   1286 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
   1287   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
   1288   SharedFunctionInfo* next_candidate;
   1289   while (candidate != NULL) {
   1290     next_candidate = GetNextCandidate(candidate);
   1291     EvictCandidate(candidate);
   1292     candidate = next_candidate;
   1293   }
   1294   ASSERT(shared_function_info_candidates_head_ == NULL);
   1295 }
   1296 
   1297 
   1298 void CodeFlusher::EvictOptimizedCodeMaps() {
   1299   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
   1300   SharedFunctionInfo* next_holder;
   1301   while (holder != NULL) {
   1302     next_holder = GetNextCodeMap(holder);
   1303     EvictOptimizedCodeMap(holder);
   1304     holder = next_holder;
   1305   }
   1306   ASSERT(optimized_code_map_holder_head_ == NULL);
   1307 }
   1308 
   1309 
   1310 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
   1311   Heap* heap = isolate_->heap();
   1312 
   1313   JSFunction** slot = &jsfunction_candidates_head_;
   1314   JSFunction* candidate = jsfunction_candidates_head_;
   1315   while (candidate != NULL) {
   1316     if (heap->InFromSpace(candidate)) {
   1317       v->VisitPointer(reinterpret_cast<Object**>(slot));
   1318     }
   1319     candidate = GetNextCandidate(*slot);
   1320     slot = GetNextCandidateSlot(*slot);
   1321   }
   1322 }
   1323 
   1324 
   1325 MarkCompactCollector::~MarkCompactCollector() {
   1326   if (code_flusher_ != NULL) {
   1327     delete code_flusher_;
   1328     code_flusher_ = NULL;
   1329   }
   1330 }
   1331 
   1332 
   1333 static inline HeapObject* ShortCircuitConsString(Object** p) {
   1334   // Optimization: If the heap object pointed to by p is a non-internalized
   1335   // cons string whose right substring is HEAP->empty_string, update
   1336   // it in place to its left substring.  Return the updated value.
   1337   //
   1338   // Here we assume that if we change *p, we replace it with a heap object
   1339   // (i.e., the left substring of a cons string is always a heap object).
   1340   //
   1341   // The check performed is:
   1342   //   object->IsConsString() && !object->IsInternalizedString() &&
   1343   //   (ConsString::cast(object)->second() == HEAP->empty_string())
   1344   // except the maps for the object and its possible substrings might be
   1345   // marked.
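           //
           // For example (illustrative only): a cons string whose parts are "foo" and
           // the empty string is shortcut in place to the "foo" string itself.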
   1346   HeapObject* object = HeapObject::cast(*p);
   1347   if (!FLAG_clever_optimizations) return object;
   1348   Map* map = object->map();
   1349   InstanceType type = map->instance_type();
   1350   if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
   1351 
   1352   Object* second = reinterpret_cast<ConsString*>(object)->second();
   1353   Heap* heap = map->GetHeap();
   1354   if (second != heap->empty_string()) {
   1355     return object;
   1356   }
   1357 
   1358   // Since we don't have the object's start, it is impossible to update the
   1359   // page dirty marks. Therefore, we only replace the string with its left
   1360   // substring when page dirty marks do not change.
   1361   Object* first = reinterpret_cast<ConsString*>(object)->first();
   1362   if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
   1363 
   1364   *p = first;
   1365   return HeapObject::cast(first);
   1366 }
   1367 
   1368 
   1369 class MarkCompactMarkingVisitor
   1370     : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
   1371  public:
   1372   static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
   1373                                    Map* map, HeapObject* obj);
   1374 
   1375   static void ObjectStatsCountFixedArray(
   1376       FixedArrayBase* fixed_array,
   1377       FixedArraySubInstanceType fast_type,
   1378       FixedArraySubInstanceType dictionary_type);
   1379 
   1380   template<MarkCompactMarkingVisitor::VisitorId id>
   1381   class ObjectStatsTracker {
   1382    public:
   1383     static inline void Visit(Map* map, HeapObject* obj);
   1384   };
   1385 
   1386   static void Initialize();
   1387 
   1388   INLINE(static void VisitPointer(Heap* heap, Object** p)) {
   1389     MarkObjectByPointer(heap->mark_compact_collector(), p, p);
   1390   }
   1391 
   1392   INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
   1393     // Mark all objects pointed to in [start, end).
   1394     const int kMinRangeForMarkingRecursion = 64;
   1395     if (end - start >= kMinRangeForMarkingRecursion) {
   1396       if (VisitUnmarkedObjects(heap, start, end)) return;
   1397       // We are close to a stack overflow, so just mark the objects.
   1398     }
   1399     MarkCompactCollector* collector = heap->mark_compact_collector();
   1400     for (Object** p = start; p < end; p++) {
   1401       MarkObjectByPointer(collector, start, p);
   1402     }
   1403   }
   1404 
   1405   // Marks the object black and pushes it on the marking stack.
   1406   INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
   1407     MarkBit mark = Marking::MarkBitFrom(object);
   1408     heap->mark_compact_collector()->MarkObject(object, mark);
   1409   }
   1410 
   1411   // Marks the object black without pushing it on the marking stack.
   1412   // Returns true if object needed marking and false otherwise.
   1413   INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
   1414     MarkBit mark_bit = Marking::MarkBitFrom(object);
   1415     if (!mark_bit.Get()) {
   1416       heap->mark_compact_collector()->SetMark(object, mark_bit);
   1417       return true;
   1418     }
   1419     return false;
   1420   }
   1421 
   1422   // Mark object pointed to by p.
   1423   INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
   1424                                          Object** anchor_slot,
   1425                                          Object** p)) {
   1426     if (!(*p)->IsHeapObject()) return;
   1427     HeapObject* object = ShortCircuitConsString(p);
   1428     collector->RecordSlot(anchor_slot, p, object);
   1429     MarkBit mark = Marking::MarkBitFrom(object);
   1430     collector->MarkObject(object, mark);
   1431   }
   1432 
   1433 
   1434   // Visit an unmarked object.
   1435   INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
   1436                                          HeapObject* obj)) {
   1437 #ifdef DEBUG
   1438     ASSERT(collector->heap()->Contains(obj));
   1439     ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
   1440 #endif
   1441     Map* map = obj->map();
   1442     Heap* heap = obj->GetHeap();
   1443     MarkBit mark = Marking::MarkBitFrom(obj);
   1444     heap->mark_compact_collector()->SetMark(obj, mark);
   1445     // Mark the map pointer and the body.
   1446     MarkBit map_mark = Marking::MarkBitFrom(map);
   1447     heap->mark_compact_collector()->MarkObject(map, map_mark);
   1448     IterateBody(map, obj);
   1449   }
   1450 
   1451   // Visit all unmarked objects pointed to by [start, end).
   1452   // Returns false if the operation fails (lack of stack space).
   1453   INLINE(static bool VisitUnmarkedObjects(Heap* heap,
   1454                                           Object** start,
   1455                                           Object** end)) {
   1456     // Return false if we are close to the stack limit.
   1457     StackLimitCheck check(heap->isolate());
   1458     if (check.HasOverflowed()) return false;
   1459 
   1460     MarkCompactCollector* collector = heap->mark_compact_collector();
   1461     // Visit the unmarked objects.
   1462     for (Object** p = start; p < end; p++) {
   1463       Object* o = *p;
   1464       if (!o->IsHeapObject()) continue;
   1465       collector->RecordSlot(start, p, o);
   1466       HeapObject* obj = HeapObject::cast(o);
   1467       MarkBit mark = Marking::MarkBitFrom(obj);
   1468       if (mark.Get()) continue;
   1469       VisitUnmarkedObject(collector, obj);
   1470     }
   1471     return true;
   1472   }
   1473 
   1474  private:
   1475   template<int id>
   1476   static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
   1477 
   1478   // Code flushing support.
   1479 
   1480   static const int kRegExpCodeThreshold = 5;
   1481 
   1482   static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
   1483                                           JSRegExp* re,
   1484                                           bool is_ascii) {
   1485     // Make sure that the fixed array is in fact initialized on the RegExp.
   1486     // We could potentially trigger a GC when initializing the RegExp.
   1487     if (HeapObject::cast(re->data())->map()->instance_type() !=
   1488             FIXED_ARRAY_TYPE) return;
   1489 
   1490     // Make sure this is a RegExp that actually contains code.
   1491     if (re->TypeTag() != JSRegExp::IRREGEXP) return;
   1492 
   1493     Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
   1494     if (!code->IsSmi() &&
   1495         HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
   1496       // Save a copy that can be reinstated if we need the code again.
   1497       re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
   1498 
   1499       // Saving a copy might create a pointer into a compaction candidate
   1500       // that was not observed by the marker.  This might happen if the
   1501       // JSRegExp data was marked through the compilation cache before the
   1502       // marker reached the JSRegExp object.
   1503       FixedArray* data = FixedArray::cast(re->data());
   1504       Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
   1505       heap->mark_compact_collector()->
   1506           RecordSlot(slot, slot, code);
   1507 
   1508       // Set a number in the 0-255 range to guarantee no smi overflow.
   1509       re->SetDataAt(JSRegExp::code_index(is_ascii),
   1510                     Smi::FromInt(heap->sweep_generation() & 0xff));
   1511     } else if (code->IsSmi()) {
   1512       int value = Smi::cast(code)->value();
   1513       // The regexp has not been compiled yet or there was a compilation error.
   1514       if (value == JSRegExp::kUninitializedValue ||
   1515           value == JSRegExp::kCompilationErrorValue) {
   1516         return;
   1517       }
   1518 
   1519       // Check if we should flush now.
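              // The stored value is the sweep_generation (mod 256) at which the
              // code was last present; e.g. a value stored at generation 42 is
              // flushed during the collection with sweep_generation 47
              // (threshold 5), unless the RegExp was recompiled in the meantime.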
   1520       if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
   1521         re->SetDataAt(JSRegExp::code_index(is_ascii),
   1522                       Smi::FromInt(JSRegExp::kUninitializedValue));
   1523         re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
   1524                       Smi::FromInt(JSRegExp::kUninitializedValue));
   1525       }
   1526     }
   1527   }
   1528 
   1529 
   1530   // Works by setting the current sweep_generation (as a smi) in the
   1531   // code slot of the RegExp's data array and keeping a copy around
   1532   // that can be reinstated if we reuse the RegExp before flushing.
   1533   // If the code has not been used for kRegExpCodeThreshold mark-sweep
   1534   // GCs, we flush it.
   1535   static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
   1536     Heap* heap = map->GetHeap();
   1537     MarkCompactCollector* collector = heap->mark_compact_collector();
   1538     if (!collector->is_code_flushing_enabled()) {
   1539       VisitJSRegExp(map, object);
   1540       return;
   1541     }
   1542     JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
   1543     // Flush code or set age on both ASCII and two byte code.
   1544     UpdateRegExpCodeAgeAndFlush(heap, re, true);
   1545     UpdateRegExpCodeAgeAndFlush(heap, re, false);
   1546     // Visit the fields of the RegExp, including the updated FixedArray.
   1547     VisitJSRegExp(map, object);
   1548   }
   1549 
   1550   static VisitorDispatchTable<Callback> non_count_table_;
   1551 };
   1552 
   1553 
   1554 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
   1555     FixedArrayBase* fixed_array,
   1556     FixedArraySubInstanceType fast_type,
   1557     FixedArraySubInstanceType dictionary_type) {
   1558   Heap* heap = fixed_array->map()->GetHeap();
   1559   if (fixed_array->map() != heap->fixed_cow_array_map() &&
   1560       fixed_array->map() != heap->fixed_double_array_map() &&
   1561       fixed_array != heap->empty_fixed_array()) {
   1562     if (fixed_array->IsDictionary()) {
   1563       heap->RecordFixedArraySubTypeStats(dictionary_type,
   1564                                          fixed_array->Size());
   1565     } else {
   1566       heap->RecordFixedArraySubTypeStats(fast_type,
   1567                                          fixed_array->Size());
   1568     }
   1569   }
   1570 }
   1571 
   1572 
   1573 void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
   1574     MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
   1575   Heap* heap = map->GetHeap();
   1576   int object_size = obj->Size();
   1577   heap->RecordObjectStats(map->instance_type(), object_size);
   1578   non_count_table_.GetVisitorById(id)(map, obj);
   1579   if (obj->IsJSObject()) {
   1580     JSObject* object = JSObject::cast(obj);
   1581     ObjectStatsCountFixedArray(object->elements(),
   1582                                DICTIONARY_ELEMENTS_SUB_TYPE,
   1583                                FAST_ELEMENTS_SUB_TYPE);
   1584     ObjectStatsCountFixedArray(object->properties(),
   1585                                DICTIONARY_PROPERTIES_SUB_TYPE,
   1586                                FAST_PROPERTIES_SUB_TYPE);
   1587   }
   1588 }
   1589 
   1590 
   1591 template<MarkCompactMarkingVisitor::VisitorId id>
   1592 void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
   1593     Map* map, HeapObject* obj) {
   1594   ObjectStatsVisitBase(id, map, obj);
   1595 }
   1596 
   1597 
   1598 template<>
   1599 class MarkCompactMarkingVisitor::ObjectStatsTracker<
   1600     MarkCompactMarkingVisitor::kVisitMap> {
   1601  public:
   1602   static inline void Visit(Map* map, HeapObject* obj) {
   1603     Heap* heap = map->GetHeap();
   1604     Map* map_obj = Map::cast(obj);
   1605     ASSERT(map->instance_type() == MAP_TYPE);
   1606     DescriptorArray* array = map_obj->instance_descriptors();
   1607     if (map_obj->owns_descriptors() &&
   1608         array != heap->empty_descriptor_array()) {
   1609       int fixed_array_size = array->Size();
   1610       heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
   1611                                          fixed_array_size);
   1612     }
   1613     if (map_obj->HasTransitionArray()) {
   1614       int fixed_array_size = map_obj->transitions()->Size();
   1615       heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
   1616                                          fixed_array_size);
   1617     }
   1618     if (map_obj->has_code_cache()) {
   1619       CodeCache* cache = CodeCache::cast(map_obj->code_cache());
   1620       heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
   1621                                          cache->default_cache()->Size());
   1622       if (!cache->normal_type_cache()->IsUndefined()) {
   1623         heap->RecordFixedArraySubTypeStats(
   1624             MAP_CODE_CACHE_SUB_TYPE,
   1625             FixedArray::cast(cache->normal_type_cache())->Size());
   1626       }
   1627     }
   1628     ObjectStatsVisitBase(kVisitMap, map, obj);
   1629   }
   1630 };
   1631 
   1632 
   1633 template<>
   1634 class MarkCompactMarkingVisitor::ObjectStatsTracker<
   1635     MarkCompactMarkingVisitor::kVisitCode> {
   1636  public:
   1637   static inline void Visit(Map* map, HeapObject* obj) {
   1638     Heap* heap = map->GetHeap();
   1639     int object_size = obj->Size();
   1640     ASSERT(map->instance_type() == CODE_TYPE);
   1641     Code* code_obj = Code::cast(obj);
   1642     heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
   1643                                  object_size);
   1644     ObjectStatsVisitBase(kVisitCode, map, obj);
   1645   }
   1646 };
   1647 
   1648 
   1649 template<>
   1650 class MarkCompactMarkingVisitor::ObjectStatsTracker<
   1651     MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
   1652  public:
   1653   static inline void Visit(Map* map, HeapObject* obj) {
   1654     Heap* heap = map->GetHeap();
   1655     SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
   1656     if (sfi->scope_info() != heap->empty_fixed_array()) {
   1657       heap->RecordFixedArraySubTypeStats(
   1658           SCOPE_INFO_SUB_TYPE,
   1659           FixedArray::cast(sfi->scope_info())->Size());
   1660     }
   1661     ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
   1662   }
   1663 };
   1664 
   1665 
   1666 template<>
   1667 class MarkCompactMarkingVisitor::ObjectStatsTracker<
   1668     MarkCompactMarkingVisitor::kVisitFixedArray> {
   1669  public:
   1670   static inline void Visit(Map* map, HeapObject* obj) {
   1671     Heap* heap = map->GetHeap();
   1672     FixedArray* fixed_array = FixedArray::cast(obj);
   1673     if (fixed_array == heap->string_table()) {
   1674       heap->RecordFixedArraySubTypeStats(
   1675           STRING_TABLE_SUB_TYPE,
   1676           fixed_array->Size());
   1677     }
   1678     ObjectStatsVisitBase(kVisitFixedArray, map, obj);
   1679   }
   1680 };
   1681 
   1682 
   1683 void MarkCompactMarkingVisitor::Initialize() {
   1684   StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
   1685 
   1686   table_.Register(kVisitJSRegExp,
   1687                   &VisitRegExpAndFlushCode);
   1688 
   1689   if (FLAG_track_gc_object_stats) {
   1690     // Copy the visitor table to make call-through possible.
   1691     non_count_table_.CopyFrom(&table_);
   1692 #define VISITOR_ID_COUNT_FUNCTION(id)                                   \
   1693     table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
   1694     VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
   1695 #undef VISITOR_ID_COUNT_FUNCTION
   1696   }
   1697 }
   1698 
   1699 
   1700 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
   1701     MarkCompactMarkingVisitor::non_count_table_;
   1702 
   1703 
   1704 class CodeMarkingVisitor : public ThreadVisitor {
   1705  public:
   1706   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
   1707       : collector_(collector) {}
   1708 
   1709   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
   1710     collector_->PrepareThreadForCodeFlushing(isolate, top);
   1711   }
   1712 
   1713  private:
   1714   MarkCompactCollector* collector_;
   1715 };
   1716 
   1717 
   1718 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
   1719  public:
   1720   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
   1721       : collector_(collector) {}
   1722 
   1723   void VisitPointers(Object** start, Object** end) {
   1724     for (Object** p = start; p < end; p++) VisitPointer(p);
   1725   }
   1726 
   1727   void VisitPointer(Object** slot) {
   1728     Object* obj = *slot;
   1729     if (obj->IsSharedFunctionInfo()) {
   1730       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
   1731       MarkBit shared_mark = Marking::MarkBitFrom(shared);
   1732       MarkBit code_mark = Marking::MarkBitFrom(shared->code());
   1733       collector_->MarkObject(shared->code(), code_mark);
   1734       collector_->MarkObject(shared, shared_mark);
   1735     }
   1736   }
   1737 
   1738  private:
   1739   MarkCompactCollector* collector_;
   1740 };
   1741 
   1742 
   1743 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
   1744                                                         ThreadLocalTop* top) {
   1745   for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
   1746     // Note: for a frame that has a pending lazy deoptimization,
   1747     // StackFrame::unchecked_code will return a non-optimized code object for
   1748     // the outermost function, while StackFrame::LookupCode will return the
   1749     // actual optimized code object.
   1750     StackFrame* frame = it.frame();
   1751     Code* code = frame->unchecked_code();
   1752     MarkBit code_mark = Marking::MarkBitFrom(code);
   1753     MarkObject(code, code_mark);
   1754     if (frame->is_optimized()) {
   1755       MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
   1756                                                           frame->LookupCode());
   1757     }
   1758   }
   1759 }
   1760 
   1761 
   1762 void MarkCompactCollector::PrepareForCodeFlushing() {
   1763   // Enable code flushing for non-incremental cycles.
   1764   if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
   1765     EnableCodeFlushing(!was_marked_incrementally_);
   1766   }
   1767 
   1768   // If code flushing is disabled, there is no need to prepare for it.
   1769   if (!is_code_flushing_enabled()) return;
   1770 
   1771   // Ensure that the empty descriptor array is marked.  MarkDescriptorArray
   1772   // relies on it being marked before any other descriptor array.
   1773   HeapObject* descriptor_array = heap()->empty_descriptor_array();
   1774   MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
   1775   MarkObject(descriptor_array, descriptor_array_mark);
   1776 
   1777   // Make sure we are not referencing the code from the stack.
   1778   ASSERT(this == heap()->mark_compact_collector());
   1779   PrepareThreadForCodeFlushing(heap()->isolate(),
   1780                                heap()->isolate()->thread_local_top());
   1781 
   1782   // Iterate the archived stacks in all threads to check if
   1783   // the code is referenced.
   1784   CodeMarkingVisitor code_marking_visitor(this);
   1785   heap()->isolate()->thread_manager()->IterateArchivedThreads(
   1786       &code_marking_visitor);
   1787 
   1788   SharedFunctionInfoMarkingVisitor visitor(this);
   1789   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
   1790   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
   1791 
   1792   ProcessMarkingDeque();
   1793 }
   1794 
   1795 
   1796 // Visitor class for marking heap roots.
   1797 class RootMarkingVisitor : public ObjectVisitor {
   1798  public:
   1799   explicit RootMarkingVisitor(Heap* heap)
   1800     : collector_(heap->mark_compact_collector()) { }
   1801 
   1802   void VisitPointer(Object** p) {
   1803     MarkObjectByPointer(p);
   1804   }
   1805 
   1806   void VisitPointers(Object** start, Object** end) {
   1807     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
   1808   }
   1809 
   1810   // Skip the weak next code link in a code object, which is visited in
   1811   // ProcessTopOptimizedFrame.
   1812   void VisitNextCodeLink(Object** p) { }
   1813 
   1814  private:
   1815   void MarkObjectByPointer(Object** p) {
   1816     if (!(*p)->IsHeapObject()) return;
   1817 
   1818     // Replace flat cons strings in place.
   1819     HeapObject* object = ShortCircuitConsString(p);
   1820     MarkBit mark_bit = Marking::MarkBitFrom(object);
   1821     if (mark_bit.Get()) return;
   1822 
   1823     Map* map = object->map();
   1824     // Mark the object.
   1825     collector_->SetMark(object, mark_bit);
   1826 
   1827     // Mark the map pointer and body, and push them on the marking stack.
   1828     MarkBit map_mark = Marking::MarkBitFrom(map);
   1829     collector_->MarkObject(map, map_mark);
   1830     MarkCompactMarkingVisitor::IterateBody(map, object);
   1831 
   1832     // Mark all the objects reachable from the map and body.  May leave
   1833     // overflowed objects in the heap.
   1834     collector_->EmptyMarkingDeque();
   1835   }
   1836 
   1837   MarkCompactCollector* collector_;
   1838 };
   1839 
   1840 
   1841 // Helper class for pruning the string table.
   1842 template<bool finalize_external_strings>
   1843 class StringTableCleaner : public ObjectVisitor {
   1844  public:
   1845   explicit StringTableCleaner(Heap* heap)
   1846     : heap_(heap), pointers_removed_(0) { }
   1847 
   1848   virtual void VisitPointers(Object** start, Object** end) {
   1849     // Visit all HeapObject pointers in [start, end).
   1850     for (Object** p = start; p < end; p++) {
   1851       Object* o = *p;
   1852       if (o->IsHeapObject() &&
   1853           !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
   1854         if (finalize_external_strings) {
   1855           ASSERT(o->IsExternalString());
   1856           heap_->FinalizeExternalString(String::cast(*p));
   1857         } else {
   1858           pointers_removed_++;
   1859         }
   1860         // Set the entry to the_hole_value (as deleted).
   1861         *p = heap_->the_hole_value();
   1862       }
   1863     }
   1864   }
   1865 
   1866   int PointersRemoved() {
   1867     ASSERT(!finalize_external_strings);
   1868     return pointers_removed_;
   1869   }
   1870 
   1871  private:
   1872   Heap* heap_;
   1873   int pointers_removed_;
   1874 };
   1875 
   1876 
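        // The internalized-string cleaner counts the entries it removes so the
        // string table size can be adjusted afterwards; the external-string
        // cleaner instead finalizes each dead external string before clearing
        // its slot.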
   1877 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
   1878 typedef StringTableCleaner<true> ExternalStringTableCleaner;
   1879 
   1880 
   1881 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
   1882 // are retained.
   1883 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
   1884  public:
   1885   virtual Object* RetainAs(Object* object) {
   1886     if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
   1887       return object;
   1888     } else if (object->IsAllocationSite() &&
   1889                !(AllocationSite::cast(object)->IsZombie())) {
   1890       // "dead" AllocationSites need to live long enough for a traversal of new
   1891       // space. These sites get a one-time reprieve.
   1892       AllocationSite* site = AllocationSite::cast(object);
   1893       site->MarkZombie();
   1894       site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
   1895       return object;
   1896     } else {
   1897       return NULL;
   1898     }
   1899   }
   1900 };
   1901 
   1902 
   1903 // Fill the marking stack with overflowed objects returned by the given
   1904 // iterator.  Stop when the marking stack is filled or the end of the space
   1905 // is reached, whichever comes first.
   1906 template<class T>
   1907 static void DiscoverGreyObjectsWithIterator(Heap* heap,
   1908                                             MarkingDeque* marking_deque,
   1909                                             T* it) {
   1910   // The caller should ensure that the marking stack is initially not full,
   1911   // so that we don't waste effort pointlessly scanning for objects.
   1912   ASSERT(!marking_deque->IsFull());
   1913 
   1914   Map* filler_map = heap->one_pointer_filler_map();
   1915   for (HeapObject* object = it->Next();
   1916        object != NULL;
   1917        object = it->Next()) {
   1918     MarkBit markbit = Marking::MarkBitFrom(object);
   1919     if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
   1920       Marking::GreyToBlack(markbit);
   1921       MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
   1922       marking_deque->PushBlack(object);
   1923       if (marking_deque->IsFull()) return;
   1924     }
   1925   }
   1926 }
   1927 
   1928 
   1929 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
   1930 
   1931 
   1932 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
   1933                                       MemoryChunk* p) {
   1934   ASSERT(!marking_deque->IsFull());
   1935   ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
   1936   ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
   1937   ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
   1938   ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
   1939 
   1940   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
   1941     Address cell_base = it.CurrentCellBase();
   1942     MarkBit::CellType* cell = it.CurrentCell();
   1943 
   1944     const MarkBit::CellType current_cell = *cell;
   1945     if (current_cell == 0) continue;
   1946 
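            // Grey objects have both of their mark bits set ("11"), so a bit is
            // set in grey_objects exactly where an object's mark bit and the bit
            // after it are both set.  The second bit of the last object in this
            // cell lives in the next cell, which is why a bit from next_cell is
            // shifted in when one exists.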
   1947     MarkBit::CellType grey_objects;
   1948     if (it.HasNext()) {
   1949       const MarkBit::CellType next_cell = *(cell+1);
   1950       grey_objects = current_cell &
   1951           ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
   1952     } else {
   1953       grey_objects = current_cell & (current_cell >> 1);
   1954     }
   1955 
   1956     int offset = 0;
   1957     while (grey_objects != 0) {
   1958       int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
   1959       grey_objects >>= trailing_zeros;
   1960       offset += trailing_zeros;
   1961       MarkBit markbit(cell, 1 << offset, false);
   1962       ASSERT(Marking::IsGrey(markbit));
   1963       Marking::GreyToBlack(markbit);
   1964       Address addr = cell_base + offset * kPointerSize;
   1965       HeapObject* object = HeapObject::FromAddress(addr);
   1966       MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
   1967       marking_deque->PushBlack(object);
   1968       if (marking_deque->IsFull()) return;
   1969       offset += 2;
   1970       grey_objects >>= 2;
   1971     }
   1972 
   1973     grey_objects >>= (Bitmap::kBitsPerCell - 1);
   1974   }
   1975 }
   1976 
   1977 
   1978 int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
   1979     NewSpace* new_space,
   1980     NewSpacePage* p) {
   1981   ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
   1982   ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
   1983   ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
   1984   ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
   1985 
   1986   MarkBit::CellType* cells = p->markbits()->cells();
   1987   int survivors_size = 0;
   1988 
   1989   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
   1990     Address cell_base = it.CurrentCellBase();
   1991     MarkBit::CellType* cell = it.CurrentCell();
   1992 
   1993     MarkBit::CellType current_cell = *cell;
   1994     if (current_cell == 0) continue;
   1995 
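            // At this point all live objects are black, so every set bit in the
            // cell marks the start of a live object; its address is recovered
            // from the bit's offset within the cell.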
   1996     int offset = 0;
   1997     while (current_cell != 0) {
   1998       int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
   1999       current_cell >>= trailing_zeros;
   2000       offset += trailing_zeros;
   2001       Address address = cell_base + offset * kPointerSize;
   2002       HeapObject* object = HeapObject::FromAddress(address);
   2003 
   2004       int size = object->Size();
   2005       survivors_size += size;
   2006 
   2007       Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
   2008 
   2009       offset++;
   2010       current_cell >>= 1;
   2011       // Aggressively promote young survivors to the old space.
   2012       if (TryPromoteObject(object, size)) {
   2013         continue;
   2014       }
   2015 
   2016       // Promotion failed. Just migrate object to another semispace.
   2017       AllocationResult allocation = new_space->AllocateRaw(size);
   2018       if (allocation.IsRetry()) {
   2019         if (!new_space->AddFreshPage()) {
   2020           // Shouldn't happen. We are sweeping linearly, and to-space
   2021           // has the same number of pages as from-space, so there is
   2022           // always room.
   2023           UNREACHABLE();
   2024         }
   2025         allocation = new_space->AllocateRaw(size);
   2026         ASSERT(!allocation.IsRetry());
   2027       }
   2028       Object* target = allocation.ToObjectChecked();
   2029 
   2030       MigrateObject(HeapObject::cast(target),
   2031                     object,
   2032                     size,
   2033                     NEW_SPACE);
   2034       heap()->IncrementSemiSpaceCopiedObjectSize(size);
   2035     }
   2036     *cells = 0;
   2037   }
   2038   return survivors_size;
   2039 }
   2040 
   2041 
   2042 static void DiscoverGreyObjectsInSpace(Heap* heap,
   2043                                        MarkingDeque* marking_deque,
   2044                                        PagedSpace* space) {
   2045   PageIterator it(space);
   2046   while (it.has_next()) {
   2047     Page* p = it.next();
   2048     DiscoverGreyObjectsOnPage(marking_deque, p);
   2049     if (marking_deque->IsFull()) return;
   2050   }
   2051 }
   2052 
   2053 
   2054 static void DiscoverGreyObjectsInNewSpace(Heap* heap,
   2055                                           MarkingDeque* marking_deque) {
   2056   NewSpace* space = heap->new_space();
   2057   NewSpacePageIterator it(space->bottom(), space->top());
   2058   while (it.has_next()) {
   2059     NewSpacePage* page = it.next();
   2060     DiscoverGreyObjectsOnPage(marking_deque, page);
   2061     if (marking_deque->IsFull()) return;
   2062   }
   2063 }
   2064 
   2065 
   2066 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
   2067   Object* o = *p;
   2068   if (!o->IsHeapObject()) return false;
   2069   HeapObject* heap_object = HeapObject::cast(o);
   2070   MarkBit mark = Marking::MarkBitFrom(heap_object);
   2071   return !mark.Get();
   2072 }
   2073 
   2074 
   2075 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
   2076                                                         Object** p) {
   2077   Object* o = *p;
   2078   ASSERT(o->IsHeapObject());
   2079   HeapObject* heap_object = HeapObject::cast(o);
   2080   MarkBit mark = Marking::MarkBitFrom(heap_object);
   2081   return !mark.Get();
   2082 }
   2083 
   2084 
   2085 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
   2086   StringTable* string_table = heap()->string_table();
   2087   // Mark the string table itself.
   2088   MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
   2089   if (!string_table_mark.Get()) {
   2090     // String table could have already been marked by visiting the handles list.
   2091     SetMark(string_table, string_table_mark);
   2092   }
   2093   // Explicitly mark the prefix.
   2094   string_table->IteratePrefix(visitor);
   2095   ProcessMarkingDeque();
   2096 }
   2097 
   2098 
   2099 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
   2100   MarkBit mark_bit = Marking::MarkBitFrom(site);
   2101   SetMark(site, mark_bit);
   2102 }
   2103 
   2104 
   2105 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
   2106   // Mark the heap roots including global variables, stack variables,
   2107   // etc., and all objects reachable from them.
   2108   heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
   2109 
   2110   // Handle the string table specially.
   2111   MarkStringTable(visitor);
   2112 
   2113   MarkWeakObjectToCodeTable();
   2114 
   2115   // There may be overflowed objects in the heap.  Visit them now.
   2116   while (marking_deque_.overflowed()) {
   2117     RefillMarkingDeque();
   2118     EmptyMarkingDeque();
   2119   }
   2120 }
   2121 
   2122 
   2123 void MarkCompactCollector::MarkImplicitRefGroups() {
   2124   List<ImplicitRefGroup*>* ref_groups =
   2125       isolate()->global_handles()->implicit_ref_groups();
   2126 
   2127   int last = 0;
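          // Groups whose parent is not marked are kept for a later GC by
          // compacting them to the front of the list; groups with a marked
          // parent have their children marked and are then disposed of.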
   2128   for (int i = 0; i < ref_groups->length(); i++) {
   2129     ImplicitRefGroup* entry = ref_groups->at(i);
   2130     ASSERT(entry != NULL);
   2131 
   2132     if (!IsMarked(*entry->parent)) {
   2133       (*ref_groups)[last++] = entry;
   2134       continue;
   2135     }
   2136 
   2137     Object*** children = entry->children;
   2138     // A parent object is marked, so mark all child heap objects.
   2139     for (size_t j = 0; j < entry->length; ++j) {
   2140       if ((*children[j])->IsHeapObject()) {
   2141         HeapObject* child = HeapObject::cast(*children[j]);
   2142         MarkBit mark = Marking::MarkBitFrom(child);
   2143         MarkObject(child, mark);
   2144       }
   2145     }
   2146 
   2147     // Once the entire group has been marked, dispose of it because it's
   2148     // not needed anymore.
   2149     delete entry;
   2150   }
   2151   ref_groups->Rewind(last);
   2152 }
   2153 
   2154 
   2155 void MarkCompactCollector::MarkWeakObjectToCodeTable() {
   2156   HeapObject* weak_object_to_code_table =
   2157       HeapObject::cast(heap()->weak_object_to_code_table());
   2158   if (!IsMarked(weak_object_to_code_table)) {
   2159     MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
   2160     SetMark(weak_object_to_code_table, mark);
   2161   }
   2162 }
   2163 
   2164 
   2165 // Mark all objects reachable from the objects on the marking stack.
   2166 // Before: the marking stack contains zero or more heap object pointers.
   2167 // After: the marking stack is empty, and all objects reachable from the
   2168 // marking stack have been marked, or are overflowed in the heap.
   2169 void MarkCompactCollector::EmptyMarkingDeque() {
   2170   while (!marking_deque_.IsEmpty()) {
   2171     HeapObject* object = marking_deque_.Pop();
   2172     ASSERT(object->IsHeapObject());
   2173     ASSERT(heap()->Contains(object));
   2174     ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
   2175 
   2176     Map* map = object->map();
   2177     MarkBit map_mark = Marking::MarkBitFrom(map);
   2178     MarkObject(map, map_mark);
   2179 
   2180     MarkCompactMarkingVisitor::IterateBody(map, object);
   2181   }
   2182 }
   2183 
   2184 
   2185 // Sweep the heap for overflowed objects, clear their overflow bits, and
   2186 // push them on the marking stack.  Stop early if the marking stack fills
   2187 // before sweeping completes.  If sweeping completes, there are no remaining
   2188 // overflowed objects in the heap, so the overflow flag on the marking stack
   2189 // is cleared.
   2190 void MarkCompactCollector::RefillMarkingDeque() {
   2191   ASSERT(marking_deque_.overflowed());
   2192 
   2193   DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
   2194   if (marking_deque_.IsFull()) return;
   2195 
   2196   DiscoverGreyObjectsInSpace(heap(),
   2197                              &marking_deque_,
   2198                              heap()->old_pointer_space());
   2199   if (marking_deque_.IsFull()) return;
   2200 
   2201   DiscoverGreyObjectsInSpace(heap(),
   2202                              &marking_deque_,
   2203                              heap()->old_data_space());
   2204   if (marking_deque_.IsFull()) return;
   2205 
   2206   DiscoverGreyObjectsInSpace(heap(),
   2207                              &marking_deque_,
   2208                              heap()->code_space());
   2209   if (marking_deque_.IsFull()) return;
   2210 
   2211   DiscoverGreyObjectsInSpace(heap(),
   2212                              &marking_deque_,
   2213                              heap()->map_space());
   2214   if (marking_deque_.IsFull()) return;
   2215 
   2216   DiscoverGreyObjectsInSpace(heap(),
   2217                              &marking_deque_,
   2218                              heap()->cell_space());
   2219   if (marking_deque_.IsFull()) return;
   2220 
   2221   DiscoverGreyObjectsInSpace(heap(),
   2222                              &marking_deque_,
   2223                              heap()->property_cell_space());
   2224   if (marking_deque_.IsFull()) return;
   2225 
   2226   LargeObjectIterator lo_it(heap()->lo_space());
   2227   DiscoverGreyObjectsWithIterator(heap(),
   2228                                   &marking_deque_,
   2229                                   &lo_it);
   2230   if (marking_deque_.IsFull()) return;
   2231 
   2232   marking_deque_.ClearOverflowed();
   2233 }
   2234 
   2235 
   2236 // Mark all objects reachable (transitively) from objects on the marking
   2237 // stack.  Before: the marking stack contains zero or more heap object
   2238 // pointers.  After: the marking stack is empty and there are no overflowed
   2239 // objects in the heap.
   2240 void MarkCompactCollector::ProcessMarkingDeque() {
   2241   EmptyMarkingDeque();
   2242   while (marking_deque_.overflowed()) {
   2243     RefillMarkingDeque();
   2244     EmptyMarkingDeque();
   2245   }
   2246 }
   2247 
   2248 
   2249 // Mark all objects reachable (transitively) from objects on the marking
   2250 // stack including references only considered in the atomic marking pause.
   2251 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
   2252   bool work_to_do = true;
   2253   ASSERT(marking_deque_.IsEmpty());
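          // Iterate to a fixed point: marking object group children and weak
          // collection values can make new objects reachable, which in turn may
          // make further groups and ephemeron entries eligible on the next round.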
   2254   while (work_to_do) {
   2255     isolate()->global_handles()->IterateObjectGroups(
   2256         visitor, &IsUnmarkedHeapObjectWithHeap);
   2257     MarkImplicitRefGroups();
   2258     ProcessWeakCollections();
   2259     work_to_do = !marking_deque_.IsEmpty();
   2260     ProcessMarkingDeque();
   2261   }
   2262 }
   2263 
   2264 
   2265 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
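          // Only the topmost JavaScript frame matters: an unoptimized frame on
          // top means there is nothing to do, while for an optimized frame the
          // code object is treated as a strong root unless it can be deoptimized
          // at the current pc.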
   2266   for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
   2267        !it.done(); it.Advance()) {
   2268     if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
   2269       return;
   2270     }
   2271     if (it.frame()->type() == StackFrame::OPTIMIZED) {
   2272       Code* code = it.frame()->LookupCode();
   2273       if (!code->CanDeoptAt(it.frame()->pc())) {
   2274         code->CodeIterateBody(visitor);
   2275       }
   2276       ProcessMarkingDeque();
   2277       return;
   2278     }
   2279   }
   2280 }
   2281 
   2282 
   2283 void MarkCompactCollector::MarkLiveObjects() {
   2284   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
   2285   // The recursive GC marker detects when it is nearing stack overflow,
   2286   // and switches to a different marking system.  JS interrupts interfere
   2287   // with the C stack limit check.
   2288   PostponeInterruptsScope postpone(isolate());
   2289 
   2290   bool incremental_marking_overflowed = false;
   2291   IncrementalMarking* incremental_marking = heap_->incremental_marking();
   2292   if (was_marked_incrementally_) {
   2293     // Finalize the incremental marking and check whether we had an overflow.
   2294     // Both markers use grey color to mark overflowed objects so
   2295     // Both markers use the grey color to mark overflowed objects, so the
   2296     // non-incremental marker can deal with them as if the overflow
   2297     // occurred during normal marking.  But the incremental marker uses a
   2298     // separate marking deque, so we have to explicitly copy its overflow
   2299     // state.
   2300     incremental_marking_overflowed =
   2301         incremental_marking->marking_deque()->overflowed();
   2302     incremental_marking->marking_deque()->ClearOverflowed();
   2303   } else {
   2304     // Abort any pending incremental activities e.g. incremental sweeping.
   2305     incremental_marking->Abort();
   2306   }
   2307 
   2308 #ifdef DEBUG
   2309   ASSERT(state_ == PREPARE_GC);
   2310   state_ = MARK_LIVE_OBJECTS;
   2311 #endif
   2312   // The to space contains live objects; a page in from space is used as a
   2313   // marking stack.
   2314   Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
   2315   Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
   2316   if (FLAG_force_marking_deque_overflows) {
   2317     marking_deque_end = marking_deque_start + 64 * kPointerSize;
   2318   }
   2319   marking_deque_.Initialize(marking_deque_start,
   2320                             marking_deque_end);
   2321   ASSERT(!marking_deque_.overflowed());
   2322 
   2323   if (incremental_marking_overflowed) {
   2324     // There are overflowed objects left in the heap after incremental marking.
   2325     marking_deque_.SetOverflowed();
   2326   }
   2327 
   2328   PrepareForCodeFlushing();
   2329 
   2330   if (was_marked_incrementally_) {
   2331     // There is no write barrier on cells so we have to scan them now at the end
   2332     // of the incremental marking.
   2333     {
   2334       HeapObjectIterator cell_iterator(heap()->cell_space());
   2335       HeapObject* cell;
   2336       while ((cell = cell_iterator.Next()) != NULL) {
   2337         ASSERT(cell->IsCell());
   2338         if (IsMarked(cell)) {
   2339           int offset = Cell::kValueOffset;
   2340           MarkCompactMarkingVisitor::VisitPointer(
   2341               heap(),
   2342               reinterpret_cast<Object**>(cell->address() + offset));
   2343         }
   2344       }
   2345     }
   2346     {
   2347       HeapObjectIterator js_global_property_cell_iterator(
   2348           heap()->property_cell_space());
   2349       HeapObject* cell;
   2350       while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
   2351         ASSERT(cell->IsPropertyCell());
   2352         if (IsMarked(cell)) {
   2353           MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
   2354         }
   2355       }
   2356     }
   2357   }
   2358 
   2359   RootMarkingVisitor root_visitor(heap());
   2360   MarkRoots(&root_visitor);
   2361 
   2362   ProcessTopOptimizedFrame(&root_visitor);
   2363 
   2364   // The objects reachable from the roots are marked, yet unreachable
   2365   // objects are unmarked.  Mark objects reachable due to host
   2366   // application specific logic or through Harmony weak maps.
   2367   ProcessEphemeralMarking(&root_visitor);
   2368 
   2369   // The objects reachable from the roots, weak maps or object groups
   2370   // are marked, yet unreachable objects are unmarked.  Mark objects
   2371   // reachable only from weak global handles.
   2372   //
   2373   // First we identify nonlive weak handles and mark them as pending
   2374   // destruction.
   2375   heap()->isolate()->global_handles()->IdentifyWeakHandles(
   2376       &IsUnmarkedHeapObject);
   2377   // Then we mark the objects and process the transitive closure.
   2378   heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
   2379   while (marking_deque_.overflowed()) {
   2380     RefillMarkingDeque();
   2381     EmptyMarkingDeque();
   2382   }
   2383 
   2384   // Repeat host application specific and Harmony weak maps marking to
   2385   // mark unmarked objects reachable from the weak roots.
   2386   ProcessEphemeralMarking(&root_visitor);
   2387 
   2388   AfterMarking();
   2389 }
   2390 
   2391 
   2392 void MarkCompactCollector::AfterMarking() {
   2393   // Object literal map caches reference strings (cache keys) and maps
   2394   // (cache values). At this point, still-useful maps have already been
   2395   // marked. Mark the keys for the live values before we process the
   2396   // string table.
   2397   ProcessMapCaches();
   2398 
   2399   // Prune the string table removing all strings only pointed to by the
   2400   // string table.  Cannot use string_table() here because the string
   2401   // table is marked.
   2402   StringTable* string_table = heap()->string_table();
   2403   InternalizedStringTableCleaner internalized_visitor(heap());
   2404   string_table->IterateElements(&internalized_visitor);
   2405   string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
   2406 
   2407   ExternalStringTableCleaner external_visitor(heap());
   2408   heap()->external_string_table_.Iterate(&external_visitor);
   2409   heap()->external_string_table_.CleanUp();
   2410 
   2411   // Process the weak references.
   2412   MarkCompactWeakObjectRetainer mark_compact_object_retainer;
   2413   heap()->ProcessWeakReferences(&mark_compact_object_retainer);
   2414 
   2415   // Remove object groups after marking phase.
   2416   heap()->isolate()->global_handles()->RemoveObjectGroups();
   2417   heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
   2418 
   2419   // Flush code from collected candidates.
   2420   if (is_code_flushing_enabled()) {
   2421     code_flusher_->ProcessCandidates();
   2422     // If incremental marker does not support code flushing, we need to
   2423     // disable it before incremental marking steps for next cycle.
   2424     if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
   2425       EnableCodeFlushing(false);
   2426     }
   2427   }
   2428 
   2429   if (FLAG_track_gc_object_stats) {
   2430     heap()->CheckpointObjectStats();
   2431   }
   2432 }
   2433 
   2434 
   2435 void MarkCompactCollector::ProcessMapCaches() {
   2436   Object* raw_context = heap()->native_contexts_list();
   2437   while (raw_context != heap()->undefined_value()) {
   2438     Context* context = reinterpret_cast<Context*>(raw_context);
   2439     if (IsMarked(context)) {
   2440       HeapObject* raw_map_cache =
   2441           HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
   2442       // A map cache may be reachable from the stack. In this case
   2443       // it's already transitively marked and it's too late to clean
   2444       // up its parts.
   2445       if (!IsMarked(raw_map_cache) &&
   2446           raw_map_cache != heap()->undefined_value()) {
   2447         MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
   2448         int existing_elements = map_cache->NumberOfElements();
   2449         int used_elements = 0;
   2450         for (int i = MapCache::kElementsStartIndex;
   2451              i < map_cache->length();
   2452              i += MapCache::kEntrySize) {
   2453           Object* raw_key = map_cache->get(i);
   2454           if (raw_key == heap()->undefined_value() ||
   2455               raw_key == heap()->the_hole_value()) continue;
   2456           STATIC_ASSERT(MapCache::kEntrySize == 2);
   2457           Object* raw_map = map_cache->get(i + 1);
   2458           if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
   2459             ++used_elements;
   2460           } else {
   2461             // Delete useless entries with unmarked maps.
   2462             ASSERT(raw_map->IsMap());
   2463             map_cache->set_the_hole(i);
   2464             map_cache->set_the_hole(i + 1);
   2465           }
   2466         }
   2467         if (used_elements == 0) {
   2468           context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
   2469         } else {
   2470           // Note: we don't actually shrink the cache here to avoid
   2471           // extra complexity during GC. We rely on subsequent cache
   2472           // usages (EnsureCapacity) to do this.
   2473           map_cache->ElementsRemoved(existing_elements - used_elements);
   2474           MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
   2475           MarkObject(map_cache, map_cache_markbit);
   2476         }
   2477       }
   2478     }
   2479     // Move to next element in the list.
   2480     raw_context = context->get(Context::NEXT_CONTEXT_LINK);
   2481   }
   2482   ProcessMarkingDeque();
   2483 }
   2484 
   2485 
   2486 void MarkCompactCollector::ClearNonLiveReferences() {
   2487   // Iterate over the map space, setting map transitions that go from
   2488   // a marked map to an unmarked map to null transitions.  This action
   2489   // is carried out only on maps of JSObjects and related subtypes.
   2490   HeapObjectIterator map_iterator(heap()->map_space());
   2491   for (HeapObject* obj = map_iterator.Next();
   2492        obj != NULL;
   2493        obj = map_iterator.Next()) {
   2494     Map* map = Map::cast(obj);
   2495 
   2496     if (!map->CanTransition()) continue;
   2497 
   2498     MarkBit map_mark = Marking::MarkBitFrom(map);
   2499     ClearNonLivePrototypeTransitions(map);
   2500     ClearNonLiveMapTransitions(map, map_mark);
   2501 
   2502     if (map_mark.Get()) {
   2503       ClearNonLiveDependentCode(map->dependent_code());
   2504     } else {
   2505       ClearDependentCode(map->dependent_code());
   2506       map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
   2507     }
   2508   }
   2509 
   2510   // Iterate over property cell space, removing dependent code that is not
   2511   // otherwise kept alive by strong references.
   2512   HeapObjectIterator cell_iterator(heap_->property_cell_space());
   2513   for (HeapObject* cell = cell_iterator.Next();
   2514        cell != NULL;
   2515        cell = cell_iterator.Next()) {
   2516     if (IsMarked(cell)) {
   2517       ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
   2518     }
   2519   }
   2520 
   2521   // Iterate over allocation sites, removing dependent code that is not
   2522   // otherwise kept alive by strong references.
   2523   Object* undefined = heap()->undefined_value();
   2524   for (Object* site = heap()->allocation_sites_list();
   2525        site != undefined;
   2526        site = AllocationSite::cast(site)->weak_next()) {
   2527     if (IsMarked(site)) {
   2528       ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
   2529     }
   2530   }
   2531 
   2532   if (heap_->weak_object_to_code_table()->IsHashTable()) {
   2533     WeakHashTable* table =
   2534         WeakHashTable::cast(heap_->weak_object_to_code_table());
   2535     uint32_t capacity = table->Capacity();
   2536     for (uint32_t i = 0; i < capacity; i++) {
   2537       uint32_t key_index = table->EntryToIndex(i);
   2538       Object* key = table->get(key_index);
   2539       if (!table->IsKey(key)) continue;
   2540       uint32_t value_index = table->EntryToValueIndex(i);
   2541       Object* value = table->get(value_index);
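              // An unmarked Cell key whose value is still live is revived here so
              // that its dependent code entry is not dropped below; entries with
              // dead keys have their dependent code cleared and are removed from
              // the table.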
   2542       if (key->IsCell() && !IsMarked(key)) {
   2543         Cell* cell = Cell::cast(key);
   2544         Object* object = cell->value();
   2545         if (IsMarked(object)) {
   2546           MarkBit mark = Marking::MarkBitFrom(cell);
   2547           SetMark(cell, mark);
   2548           Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
   2549           RecordSlot(value_slot, value_slot, *value_slot);
   2550         }
   2551       }
   2552       if (IsMarked(key)) {
   2553         if (!IsMarked(value)) {
   2554           HeapObject* obj = HeapObject::cast(value);
   2555           MarkBit mark = Marking::MarkBitFrom(obj);
   2556           SetMark(obj, mark);
   2557         }
   2558         ClearNonLiveDependentCode(DependentCode::cast(value));
   2559       } else {
   2560         ClearDependentCode(DependentCode::cast(value));
   2561         table->set(key_index, heap_->the_hole_value());
   2562         table->set(value_index, heap_->the_hole_value());
   2563         table->ElementRemoved();
   2564       }
   2565     }
   2566   }
   2567 }
   2568 
   2569 
   2570 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
   2571   int number_of_transitions = map->NumberOfProtoTransitions();
   2572   FixedArray* prototype_transitions = map->GetPrototypeTransitions();
   2573 
   2574   int new_number_of_transitions = 0;
   2575   const int header = Map::kProtoTransitionHeaderSize;
   2576   const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
   2577   const int map_offset = header + Map::kProtoTransitionMapOffset;
   2578   const int step = Map::kProtoTransitionElementsPerEntry;
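          // Compact surviving (prototype, cached map) pairs toward the front of
          // the array, recording the retained prototype slots for later pointer
          // updating.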
   2579   for (int i = 0; i < number_of_transitions; i++) {
   2580     Object* prototype = prototype_transitions->get(proto_offset + i * step);
   2581     Object* cached_map = prototype_transitions->get(map_offset + i * step);
   2582     if (IsMarked(prototype) && IsMarked(cached_map)) {
   2583       ASSERT(!prototype->IsUndefined());
   2584       int proto_index = proto_offset + new_number_of_transitions * step;
   2585       int map_index = map_offset + new_number_of_transitions * step;
   2586       if (new_number_of_transitions != i) {
   2587         prototype_transitions->set(
   2588             proto_index,
   2589             prototype,
   2590             UPDATE_WRITE_BARRIER);
   2591         prototype_transitions->set(
   2592             map_index,
   2593             cached_map,
   2594             SKIP_WRITE_BARRIER);
   2595       }
   2596       Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
   2597       RecordSlot(slot, slot, prototype);
   2598       new_number_of_transitions++;
   2599     }
   2600   }
   2601 
   2602   if (new_number_of_transitions != number_of_transitions) {
   2603     map->SetNumberOfProtoTransitions(new_number_of_transitions);
   2604   }
   2605 
   2606   // Fill slots that became free with undefined value.
   2607   for (int i = new_number_of_transitions * step;
   2608        i < number_of_transitions * step;
   2609        i++) {
   2610     prototype_transitions->set_undefined(header + i);
   2611   }
   2612 }
   2613 
   2614 
   2615 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
   2616                                                       MarkBit map_mark) {
   2617   Object* potential_parent = map->GetBackPointer();
   2618   if (!potential_parent->IsMap()) return;
   2619   Map* parent = Map::cast(potential_parent);
   2620 
   2621   // Follow the back pointer to check whether this is a transition from a
   2622   // live map to a dead path; if so, clear the parent's non-live transitions.
   2623   bool current_is_alive = map_mark.Get();
   2624   bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
   2625   if (!current_is_alive && parent_is_alive) {
   2626     parent->ClearNonLiveTransitions(heap());
   2627   }
   2628 }
   2629 
   2630 
   2631 void MarkCompactCollector::ClearDependentICList(Object* head) {
   2632   Object* current = head;
   2633   Object* undefined = heap()->undefined_value();
   2634   while (current != undefined) {
   2635     Code* code = Code::cast(current);
   2636     if (IsMarked(code)) {
   2637       ASSERT(code->is_weak_stub());
   2638       IC::InvalidateMaps(code);
   2639     }
   2640     current = code->next_code_link();
   2641     code->set_next_code_link(undefined);
   2642   }
   2643 }
   2644 
   2645 
   2646 void MarkCompactCollector::ClearDependentCode(
   2647     DependentCode* entries) {
   2648   DisallowHeapAllocation no_allocation;
   2649   DependentCode::GroupStartIndexes starts(entries);
   2650   int number_of_entries = starts.number_of_entries();
   2651   if (number_of_entries == 0) return;
   2652   int g = DependentCode::kWeakICGroup;
   2653   if (starts.at(g) != starts.at(g + 1)) {
   2654     int i = starts.at(g);
   2655     ASSERT(i + 1 == starts.at(g + 1));
   2656     Object* head = entries->object_at(i);
   2657     ClearDependentICList(head);
   2658   }
   2659   g = DependentCode::kWeakCodeGroup;
   2660   for (int i = starts.at(g); i < starts.at(g + 1); i++) {
   2661     // If the entry is compilation info then the map must be alive,
   2662     // and ClearDependentCode shouldn't be called.
   2663     ASSERT(entries->is_code_at(i));
   2664     Code* code = entries->code_at(i);
   2665     if (IsMarked(code) && !code->marked_for_deoptimization()) {
   2666       code->set_marked_for_deoptimization(true);
   2667       code->InvalidateEmbeddedObjects();
   2668       have_code_to_deoptimize_ = true;
   2669     }
   2670   }
   2671   for (int i = 0; i < number_of_entries; i++) {
   2672     entries->clear_at(i);
   2673   }
   2674 }
   2675 
   2676 
   2677 int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
   2678     DependentCode* entries, int group, int start, int end, int new_start) {
   2679   int survived = 0;
   2680   if (group == DependentCode::kWeakICGroup) {
   2681     // Dependent weak IC stubs form a linked list and only the head is stored
   2682     // in the dependent code array.
   2683     if (start != end) {
   2684       ASSERT(start + 1 == end);
   2685       Object* old_head = entries->object_at(start);
   2686       MarkCompactWeakObjectRetainer retainer;
   2687       Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
   2688       entries->set_object_at(new_start, head);
   2689       Object** slot = entries->slot_at(new_start);
   2690       RecordSlot(slot, slot, head);
   2691       // We do not compact this group even if the head is undefined, since
   2692       // more dependent ICs are likely to be added later.
   2693       survived = 1;
   2694     }
   2695   } else {
   2696     for (int i = start; i < end; i++) {
   2697       Object* obj = entries->object_at(i);
   2698       ASSERT(obj->IsCode() || IsMarked(obj));
   2699       if (IsMarked(obj) &&
   2700           (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
   2701         if (new_start + survived != i) {
   2702           entries->set_object_at(new_start + survived, obj);
   2703         }
   2704         Object** slot = entries->slot_at(new_start + survived);
   2705         RecordSlot(slot, slot, obj);
   2706         survived++;
   2707       }
   2708     }
   2709   }
   2710   entries->set_number_of_entries(
   2711       static_cast<DependentCode::DependencyGroup>(group), survived);
   2712   return survived;
   2713 }
   2714 
   2715 
   2716 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
   2717   DisallowHeapAllocation no_allocation;
   2718   DependentCode::GroupStartIndexes starts(entries);
   2719   int number_of_entries = starts.number_of_entries();
   2720   if (number_of_entries == 0) return;
   2721   int new_number_of_entries = 0;
   2722   // Go through all groups, remove dead codes and compact.
   2723   for (int g = 0; g < DependentCode::kGroupCount; g++) {
   2724     int survived = ClearNonLiveDependentCodeInGroup(
   2725         entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
   2726     new_number_of_entries += survived;
   2727   }
   2728   for (int i = new_number_of_entries; i < number_of_entries; i++) {
   2729     entries->clear_at(i);
   2730   }
   2731 }
   2732 
   2733 
   2734 void MarkCompactCollector::ProcessWeakCollections() {
   2735   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
   2736   Object* weak_collection_obj = heap()->encountered_weak_collections();
   2737   while (weak_collection_obj != Smi::FromInt(0)) {
   2738     JSWeakCollection* weak_collection =
   2739         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
   2740     ASSERT(MarkCompactCollector::IsMarked(weak_collection));
   2741     if (weak_collection->table()->IsHashTable()) {
   2742       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
   2743       Object** anchor = reinterpret_cast<Object**>(table->address());
   2744       for (int i = 0; i < table->Capacity(); i++) {
   2745         if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
   2746           Object** key_slot =
   2747               table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
   2748           RecordSlot(anchor, key_slot, *key_slot);
   2749           Object** value_slot =
   2750               table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
   2751           MarkCompactMarkingVisitor::MarkObjectByPointer(
   2752               this, anchor, value_slot);
   2753         }
   2754       }
   2755     }
   2756     weak_collection_obj = weak_collection->next();
   2757   }
   2758 }
   2759 
   2760 
   2761 void MarkCompactCollector::ClearWeakCollections() {
   2762   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
   2763   Object* weak_collection_obj = heap()->encountered_weak_collections();
   2764   while (weak_collection_obj != Smi::FromInt(0)) {
   2765     JSWeakCollection* weak_collection =
   2766         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
   2767     ASSERT(MarkCompactCollector::IsMarked(weak_collection));
   2768     if (weak_collection->table()->IsHashTable()) {
   2769       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
   2770       for (int i = 0; i < table->Capacity(); i++) {
   2771         HeapObject* key = HeapObject::cast(table->KeyAt(i));
   2772         if (!MarkCompactCollector::IsMarked(key)) {
   2773           table->RemoveEntry(i);
   2774         }
   2775       }
   2776     }
   2777     weak_collection_obj = weak_collection->next();
   2778     weak_collection->set_next(heap()->undefined_value());
   2779   }
   2780   heap()->set_encountered_weak_collections(Smi::FromInt(0));
   2781 }
   2782 
   2783 
    2784 // We scavenge new space simultaneously with sweeping. This is done in two
    2785 // passes.
    2786 //
    2787 // The first pass migrates all live objects from one semispace to the other or
    2788 // promotes them to old space.  The forwarding address is written directly into
    2789 // the first word of the object without any encoding.  If the object is dead,
    2790 // we write NULL as the forwarding address.
    2791 //
    2792 // The second pass updates pointers to new space in all spaces.  It is possible
    2793 // to encounter pointers to dead new space objects during traversal of pointers
    2794 // to new space.  We must clear them to avoid encountering them again during
    2795 // the next pointer iteration.  This is an issue if the store buffer overflows
    2796 // and we have to scan the entire old space, including dead objects, looking
    2797 // for pointers to new space.
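         //
         // For example, after the first pass a live object that moved from src to
         // dst has Memory::Address_at(src) == dst, while a dead object's first word
         // is NULL, so the second pass can clear any slot that still points at it.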
   2798 void MarkCompactCollector::MigrateObject(HeapObject* dst,
   2799                                          HeapObject* src,
   2800                                          int size,
   2801                                          AllocationSpace dest) {
   2802   Address dst_addr = dst->address();
   2803   Address src_addr = src->address();
   2804   HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
   2805   if (heap_profiler->is_tracking_object_moves()) {
   2806     heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
   2807   }
   2808   ASSERT(heap()->AllowedToBeMigrated(src, dest));
   2809   ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
   2810   if (dest == OLD_POINTER_SPACE) {
   2811     Address src_slot = src_addr;
   2812     Address dst_slot = dst_addr;
   2813     ASSERT(IsAligned(size, kPointerSize));
   2814 
   2815     for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
   2816       Object* value = Memory::Object_at(src_slot);
   2817 
   2818       Memory::Object_at(dst_slot) = value;
   2819 
   2820       if (heap_->InNewSpace(value)) {
   2821         heap_->store_buffer()->Mark(dst_slot);
   2822       } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
   2823         SlotsBuffer::AddTo(&slots_buffer_allocator_,
   2824                            &migration_slots_buffer_,
   2825                            reinterpret_cast<Object**>(dst_slot),
   2826                            SlotsBuffer::IGNORE_OVERFLOW);
   2827       }
   2828 
   2829       src_slot += kPointerSize;
   2830       dst_slot += kPointerSize;
   2831     }
   2832 
   2833     if (compacting_ && dst->IsJSFunction()) {
   2834       Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
   2835       Address code_entry = Memory::Address_at(code_entry_slot);
   2836 
   2837       if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
   2838         SlotsBuffer::AddTo(&slots_buffer_allocator_,
   2839                            &migration_slots_buffer_,
   2840                            SlotsBuffer::CODE_ENTRY_SLOT,
   2841                            code_entry_slot,
   2842                            SlotsBuffer::IGNORE_OVERFLOW);
   2843       }
   2844     } else if (compacting_ && dst->IsConstantPoolArray()) {
   2845       ConstantPoolArray* array = ConstantPoolArray::cast(dst);
   2846       ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
   2847       while (!code_iter.is_finished()) {
   2848         Address code_entry_slot =
   2849             dst_addr + array->OffsetOfElementAt(code_iter.next_index());
   2850         Address code_entry = Memory::Address_at(code_entry_slot);
   2851 
   2852         if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
   2853           SlotsBuffer::AddTo(&slots_buffer_allocator_,
   2854                              &migration_slots_buffer_,
   2855                              SlotsBuffer::CODE_ENTRY_SLOT,
   2856                              code_entry_slot,
   2857                              SlotsBuffer::IGNORE_OVERFLOW);
   2858         }
   2859       }
   2860     }
   2861   } else if (dest == CODE_SPACE) {
   2862     PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
   2863     heap()->MoveBlock(dst_addr, src_addr, size);
   2864     SlotsBuffer::AddTo(&slots_buffer_allocator_,
   2865                        &migration_slots_buffer_,
   2866                        SlotsBuffer::RELOCATED_CODE_OBJECT,
   2867                        dst_addr,
   2868                        SlotsBuffer::IGNORE_OVERFLOW);
   2869     Code::cast(dst)->Relocate(dst_addr - src_addr);
   2870   } else {
   2871     ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
   2872     heap()->MoveBlock(dst_addr, src_addr, size);
   2873   }
   2874   Memory::Address_at(src_addr) = dst_addr;
   2875 }
   2876 
   2877 
   2878 // Visitor for updating pointers from live objects in old spaces to new space.
   2879 // It does not expect to encounter pointers to dead objects.
   2880 class PointersUpdatingVisitor: public ObjectVisitor {
   2881  public:
   2882   explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
   2883 
   2884   void VisitPointer(Object** p) {
   2885     UpdatePointer(p);
   2886   }
   2887 
   2888   void VisitPointers(Object** start, Object** end) {
   2889     for (Object** p = start; p < end; p++) UpdatePointer(p);
   2890   }
   2891 
   2892   void VisitEmbeddedPointer(RelocInfo* rinfo) {
   2893     ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
   2894     Object* target = rinfo->target_object();
   2895     Object* old_target = target;
   2896     VisitPointer(&target);
    2897     // Avoid unnecessary changes that might unnecessarily flush the
    2898     // instruction cache.
   2899     if (target != old_target) {
   2900       rinfo->set_target_object(target);
   2901     }
   2902   }
   2903 
   2904   void VisitCodeTarget(RelocInfo* rinfo) {
   2905     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
   2906     Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
   2907     Object* old_target = target;
   2908     VisitPointer(&target);
   2909     if (target != old_target) {
   2910       rinfo->set_target_address(Code::cast(target)->instruction_start());
   2911     }
   2912   }
   2913 
   2914   void VisitCodeAgeSequence(RelocInfo* rinfo) {
   2915     ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
   2916     Object* stub = rinfo->code_age_stub();
   2917     ASSERT(stub != NULL);
   2918     VisitPointer(&stub);
   2919     if (stub != rinfo->code_age_stub()) {
   2920       rinfo->set_code_age_stub(Code::cast(stub));
   2921     }
   2922   }
   2923 
   2924   void VisitDebugTarget(RelocInfo* rinfo) {
   2925     ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
   2926             rinfo->IsPatchedReturnSequence()) ||
   2927            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
   2928             rinfo->IsPatchedDebugBreakSlotSequence()));
   2929     Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
   2930     VisitPointer(&target);
   2931     rinfo->set_call_address(Code::cast(target)->instruction_start());
   2932   }
   2933 
   2934   static inline void UpdateSlot(Heap* heap, Object** slot) {
   2935     Object* obj = *slot;
   2936 
   2937     if (!obj->IsHeapObject()) return;
   2938 
   2939     HeapObject* heap_obj = HeapObject::cast(obj);
   2940 
   2941     MapWord map_word = heap_obj->map_word();
   2942     if (map_word.IsForwardingAddress()) {
   2943       ASSERT(heap->InFromSpace(heap_obj) ||
   2944              MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
   2945       HeapObject* target = map_word.ToForwardingAddress();
   2946       *slot = target;
   2947       ASSERT(!heap->InFromSpace(target) &&
   2948              !MarkCompactCollector::IsOnEvacuationCandidate(target));
   2949     }
   2950   }
   2951 
   2952  private:
   2953   inline void UpdatePointer(Object** p) {
   2954     UpdateSlot(heap_, p);
   2955   }
   2956 
   2957   Heap* heap_;
   2958 };
   2959 
   2960 
   2961 static void UpdatePointer(HeapObject** address, HeapObject* object) {
   2962   Address new_addr = Memory::Address_at(object->address());
   2963 
   2964   // The new space sweep will overwrite the map word of dead objects
   2965   // with NULL. In this case we do not need to transfer this entry to
   2966   // the store buffer which we are rebuilding.
   2967   // We perform the pointer update with a no barrier compare-and-swap. The
   2968   // compare and swap may fail in the case where the pointer update tries to
   2969   // update garbage memory which was concurrently accessed by the sweeper.
   2970   if (new_addr != NULL) {
   2971     base::NoBarrier_CompareAndSwap(
   2972         reinterpret_cast<base::AtomicWord*>(address),
   2973         reinterpret_cast<base::AtomicWord>(object),
   2974         reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
   2975   } else {
    2976     // We have to zap this pointer, because the store buffer may overflow
    2977     // later and we would then have to scan the entire heap; we don't want to
    2978     // find spurious new space pointers in the old space.
   2979     // TODO(mstarzinger): This was changed to a sentinel value to track down
   2980     // rare crashes, change it back to Smi::FromInt(0) later.
   2981     base::NoBarrier_CompareAndSwap(
   2982         reinterpret_cast<base::AtomicWord*>(address),
   2983         reinterpret_cast<base::AtomicWord>(object),
   2984         reinterpret_cast<base::AtomicWord>(Smi::FromInt(0x0f100d00 >> 1)));
   2985   }
   2986 }
   2987 
   2988 
   2989 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
   2990                                                          Object** p) {
   2991   MapWord map_word = HeapObject::cast(*p)->map_word();
   2992 
   2993   if (map_word.IsForwardingAddress()) {
   2994     return String::cast(map_word.ToForwardingAddress());
   2995   }
   2996 
   2997   return String::cast(*p);
   2998 }
   2999 
   3000 
   3001 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
   3002                                             int object_size) {
   3003   ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
   3004 
   3005   OldSpace* target_space = heap()->TargetSpace(object);
   3006 
   3007   ASSERT(target_space == heap()->old_pointer_space() ||
   3008          target_space == heap()->old_data_space());
   3009   HeapObject* target;
   3010   AllocationResult allocation = target_space->AllocateRaw(object_size);
   3011   if (allocation.To(&target)) {
   3012     MigrateObject(target,
   3013                   object,
   3014                   object_size,
   3015                   target_space->identity());
   3016     heap()->IncrementPromotedObjectsSize(object_size);
   3017     return true;
   3018   }
   3019 
   3020   return false;
   3021 }
   3022 
   3023 
   3024 void MarkCompactCollector::EvacuateNewSpace() {
    3025   // There are soft limits in the allocation code, designed to trigger a
    3026   // mark-sweep collection by failing allocations.  But since we are already
    3027   // in a mark-sweep allocation, there is no sense in trying to trigger one.
   3028   AlwaysAllocateScope scope(isolate());
   3029 
   3030   NewSpace* new_space = heap()->new_space();
   3031 
   3032   // Store allocation range before flipping semispaces.
   3033   Address from_bottom = new_space->bottom();
   3034   Address from_top = new_space->top();
   3035 
   3036   // Flip the semispaces.  After flipping, to space is empty, from space has
   3037   // live objects.
   3038   new_space->Flip();
   3039   new_space->ResetAllocationInfo();
   3040 
   3041   int survivors_size = 0;
   3042 
   3043   // First pass: traverse all objects in inactive semispace, remove marks,
   3044   // migrate live objects and write forwarding addresses.  This stage puts
   3045   // new entries in the store buffer and may cause some pages to be marked
   3046   // scan-on-scavenge.
   3047   NewSpacePageIterator it(from_bottom, from_top);
   3048   while (it.has_next()) {
   3049     NewSpacePage* p = it.next();
   3050     survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
   3051   }
   3052 
   3053   heap_->IncrementYoungSurvivorsCounter(survivors_size);
   3054   new_space->set_age_mark(new_space->top());
   3055 }
   3056 
   3057 
   3058 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
   3059   AlwaysAllocateScope always_allocate(isolate());
   3060   PagedSpace* space = static_cast<PagedSpace*>(p->owner());
   3061   ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
   3062   p->MarkSweptPrecisely();
   3063 
   3064   int offsets[16];
   3065 
   3066   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
   3067     Address cell_base = it.CurrentCellBase();
   3068     MarkBit::CellType* cell = it.CurrentCell();
   3069 
   3070     if (*cell == 0) continue;
   3071 
   3072     int live_objects = MarkWordToObjectStarts(*cell, offsets);
   3073     for (int i = 0; i < live_objects; i++) {
   3074       Address object_addr = cell_base + offsets[i] * kPointerSize;
   3075       HeapObject* object = HeapObject::FromAddress(object_addr);
   3076       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
   3077 
   3078       int size = object->Size();
   3079 
   3080       HeapObject* target_object;
   3081       AllocationResult allocation = space->AllocateRaw(size);
   3082       if (!allocation.To(&target_object)) {
   3083         // OS refused to give us memory.
   3084         V8::FatalProcessOutOfMemory("Evacuation");
   3085         return;
   3086       }
   3087 
   3088       MigrateObject(target_object, object, size, space->identity());
   3089       ASSERT(object->map_word().IsForwardingAddress());
   3090     }
   3091 
   3092     // Clear marking bits for current cell.
   3093     *cell = 0;
   3094   }
   3095   p->ResetLiveBytes();
   3096 }
   3097 
   3098 
   3099 void MarkCompactCollector::EvacuatePages() {
   3100   int npages = evacuation_candidates_.length();
   3101   for (int i = 0; i < npages; i++) {
   3102     Page* p = evacuation_candidates_[i];
   3103     ASSERT(p->IsEvacuationCandidate() ||
   3104            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
   3105     ASSERT(static_cast<int>(p->parallel_sweeping()) ==
   3106            MemoryChunk::PARALLEL_SWEEPING_DONE);
   3107     if (p->IsEvacuationCandidate()) {
   3108       // During compaction we might have to request a new page.
    3109       // Check that the space still has room for that.
   3110       if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
   3111         EvacuateLiveObjectsFromPage(p);
   3112       } else {
    3113         // Without room for expansion, evacuation is not guaranteed to succeed.
   3114         // Pessimistically abandon unevacuated pages.
   3115         for (int j = i; j < npages; j++) {
   3116           Page* page = evacuation_candidates_[j];
   3117           slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
   3118           page->ClearEvacuationCandidate();
   3119           page->SetFlag(Page::RESCAN_ON_EVACUATION);
   3120         }
   3121         return;
   3122       }
   3123     }
   3124   }
   3125 }
   3126 
   3127 
   3128 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
   3129  public:
   3130   virtual Object* RetainAs(Object* object) {
   3131     if (object->IsHeapObject()) {
   3132       HeapObject* heap_object = HeapObject::cast(object);
   3133       MapWord map_word = heap_object->map_word();
   3134       if (map_word.IsForwardingAddress()) {
   3135         return map_word.ToForwardingAddress();
   3136       }
   3137     }
   3138     return object;
   3139   }
   3140 };
   3141 
   3142 
   3143 static inline void UpdateSlot(Isolate* isolate,
   3144                               ObjectVisitor* v,
   3145                               SlotsBuffer::SlotType slot_type,
   3146                               Address addr) {
   3147   switch (slot_type) {
   3148     case SlotsBuffer::CODE_TARGET_SLOT: {
   3149       RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
   3150       rinfo.Visit(isolate, v);
   3151       break;
   3152     }
   3153     case SlotsBuffer::CODE_ENTRY_SLOT: {
   3154       v->VisitCodeEntry(addr);
   3155       break;
   3156     }
   3157     case SlotsBuffer::RELOCATED_CODE_OBJECT: {
   3158       HeapObject* obj = HeapObject::FromAddress(addr);
   3159       Code::cast(obj)->CodeIterateBody(v);
   3160       break;
   3161     }
   3162     case SlotsBuffer::DEBUG_TARGET_SLOT: {
   3163       RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
   3164       if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
   3165       break;
   3166     }
   3167     case SlotsBuffer::JS_RETURN_SLOT: {
   3168       RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
   3169       if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
   3170       break;
   3171     }
   3172     case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
   3173       RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
   3174       rinfo.Visit(isolate, v);
   3175       break;
   3176     }
   3177     default:
   3178       UNREACHABLE();
   3179       break;
   3180   }
   3181 }
   3182 
   3183 
   3184 enum SweepingMode {
   3185   SWEEP_ONLY,
   3186   SWEEP_AND_VISIT_LIVE_OBJECTS
   3187 };
   3188 
   3189 
   3190 enum SkipListRebuildingMode {
   3191   REBUILD_SKIP_LIST,
   3192   IGNORE_SKIP_LIST
   3193 };
   3194 
   3195 
   3196 enum FreeSpaceTreatmentMode {
   3197   IGNORE_FREE_SPACE,
   3198   ZAP_FREE_SPACE
   3199 };
   3200 
   3201 
   3202 // Sweep a space precisely.  After this has been done the space can
   3203 // be iterated precisely, hitting only the live objects.  Code space
   3204 // is always swept precisely because we want to be able to iterate
   3205 // over it.  Map space is swept precisely, because it is not compacted.
   3206 // Slots in live objects pointing into evacuation candidates are updated
   3207 // if requested.
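         //
         // For example, code space with FLAG_zap_code_space is swept further below as
         //   SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, p, NULL).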
   3208 template<SweepingMode sweeping_mode,
   3209          SkipListRebuildingMode skip_list_mode,
   3210          FreeSpaceTreatmentMode free_space_mode>
   3211 static void SweepPrecisely(PagedSpace* space,
   3212                            Page* p,
   3213                            ObjectVisitor* v) {
   3214   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
   3215   ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
   3216             space->identity() == CODE_SPACE);
   3217   ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
   3218 
   3219   double start_time = 0.0;
   3220   if (FLAG_print_cumulative_gc_stat) {
   3221     start_time = OS::TimeCurrentMillis();
   3222   }
   3223 
   3224   p->MarkSweptPrecisely();
   3225 
   3226   Address free_start = p->area_start();
   3227   ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
   3228   int offsets[16];
   3229 
   3230   SkipList* skip_list = p->skip_list();
   3231   int curr_region = -1;
   3232   if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
   3233     skip_list->Clear();
   3234   }
   3235 
   3236   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
   3237     Address cell_base = it.CurrentCellBase();
   3238     MarkBit::CellType* cell = it.CurrentCell();
   3239     int live_objects = MarkWordToObjectStarts(*cell, offsets);
   3240     int live_index = 0;
   3241     for ( ; live_objects != 0; live_objects--) {
   3242       Address free_end = cell_base + offsets[live_index++] * kPointerSize;
   3243       if (free_end != free_start) {
   3244         if (free_space_mode == ZAP_FREE_SPACE) {
   3245           memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
   3246         }
   3247         space->Free(free_start, static_cast<int>(free_end - free_start));
   3248 #ifdef ENABLE_GDB_JIT_INTERFACE
   3249         if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
   3250           GDBJITInterface::RemoveCodeRange(free_start, free_end);
   3251         }
   3252 #endif
   3253       }
   3254       HeapObject* live_object = HeapObject::FromAddress(free_end);
   3255       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
   3256       Map* map = live_object->map();
   3257       int size = live_object->SizeFromMap(map);
   3258       if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
   3259         live_object->IterateBody(map->instance_type(), size, v);
   3260       }
   3261       if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
   3262         int new_region_start =
   3263             SkipList::RegionNumber(free_end);
   3264         int new_region_end =
   3265             SkipList::RegionNumber(free_end + size - kPointerSize);
   3266         if (new_region_start != curr_region ||
   3267             new_region_end != curr_region) {
   3268           skip_list->AddObject(free_end, size);
   3269           curr_region = new_region_end;
   3270         }
   3271       }
   3272       free_start = free_end + size;
   3273     }
   3274     // Clear marking bits for current cell.
   3275     *cell = 0;
   3276   }
   3277   if (free_start != p->area_end()) {
   3278     if (free_space_mode == ZAP_FREE_SPACE) {
   3279       memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
   3280     }
   3281     space->Free(free_start, static_cast<int>(p->area_end() - free_start));
   3282 #ifdef ENABLE_GDB_JIT_INTERFACE
   3283     if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
   3284       GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
   3285     }
   3286 #endif
   3287   }
   3288   p->ResetLiveBytes();
   3289   if (FLAG_print_cumulative_gc_stat) {
   3290     space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
   3291   }
   3292 }
   3293 
   3294 
   3295 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
   3296   Page* p = Page::FromAddress(code->address());
   3297 
   3298   if (p->IsEvacuationCandidate() ||
   3299       p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
   3300     return false;
   3301   }
   3302 
   3303   Address code_start = code->address();
   3304   Address code_end = code_start + code->Size();
   3305 
   3306   uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
   3307   uint32_t end_index =
   3308       MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
   3309 
   3310   Bitmap* b = p->markbits();
   3311 
   3312   MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
   3313   MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
   3314 
   3315   MarkBit::CellType* start_cell = start_mark_bit.cell();
   3316   MarkBit::CellType* end_cell = end_mark_bit.cell();
   3317 
   3318   if (value) {
   3319     MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
   3320     MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
   3321 
   3322     if (start_cell == end_cell) {
   3323       *start_cell |= start_mask & end_mask;
   3324     } else {
   3325       *start_cell |= start_mask;
   3326       for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
   3327         *cell = ~0;
   3328       }
   3329       *end_cell |= end_mask;
   3330     }
   3331   } else {
   3332     for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
   3333       *cell = 0;
   3334     }
   3335   }
   3336 
   3337   return true;
   3338 }
   3339 
   3340 
   3341 static bool IsOnInvalidatedCodeObject(Address addr) {
    3342   // We did not record any slots in large objects, thus
    3343   // we can safely go to the page from the slot address.
   3344   Page* p = Page::FromAddress(addr);
   3345 
   3346   // First check owner's identity because old pointer and old data spaces
   3347   // are swept lazily and might still have non-zero mark-bits on some
   3348   // pages.
   3349   if (p->owner()->identity() != CODE_SPACE) return false;
   3350 
    3351   // In code space the only non-zero mark bits are on evacuation candidates
    3352   // (we don't record any slots on them) and under invalidated code objects.
   3353   MarkBit mark_bit =
   3354       p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
   3355 
   3356   return mark_bit.Get();
   3357 }
   3358 
   3359 
   3360 void MarkCompactCollector::InvalidateCode(Code* code) {
   3361   if (heap_->incremental_marking()->IsCompacting() &&
   3362       !ShouldSkipEvacuationSlotRecording(code)) {
   3363     ASSERT(compacting_);
   3364 
    3365     // If the object is white then no slots were recorded on it yet.
   3366     MarkBit mark_bit = Marking::MarkBitFrom(code);
   3367     if (Marking::IsWhite(mark_bit)) return;
   3368 
   3369     invalidated_code_.Add(code);
   3370   }
   3371 }
   3372 
   3373 
   3374 // Return true if the given code is deoptimized or will be deoptimized.
   3375 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
   3376   return code->is_optimized_code() && code->marked_for_deoptimization();
   3377 }
   3378 
   3379 
   3380 bool MarkCompactCollector::MarkInvalidatedCode() {
   3381   bool code_marked = false;
   3382 
   3383   int length = invalidated_code_.length();
   3384   for (int i = 0; i < length; i++) {
   3385     Code* code = invalidated_code_[i];
   3386 
   3387     if (SetMarkBitsUnderInvalidatedCode(code, true)) {
   3388       code_marked = true;
   3389     }
   3390   }
   3391 
   3392   return code_marked;
   3393 }
   3394 
   3395 
   3396 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
   3397   int length = invalidated_code_.length();
   3398   for (int i = 0; i < length; i++) {
   3399     if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
   3400   }
   3401 }
   3402 
   3403 
   3404 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
   3405   int length = invalidated_code_.length();
   3406   for (int i = 0; i < length; i++) {
   3407     Code* code = invalidated_code_[i];
   3408     if (code != NULL) {
   3409       code->Iterate(visitor);
   3410       SetMarkBitsUnderInvalidatedCode(code, false);
   3411     }
   3412   }
   3413   invalidated_code_.Rewind(0);
   3414 }
   3415 
   3416 
   3417 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   3418   Heap::RelocationLock relocation_lock(heap());
   3419 
   3420   bool code_slots_filtering_required;
   3421   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
   3422     code_slots_filtering_required = MarkInvalidatedCode();
   3423     EvacuateNewSpace();
   3424   }
   3425 
   3426   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
   3427     EvacuatePages();
   3428   }
   3429 
   3430   // Second pass: find pointers to new space and update them.
   3431   PointersUpdatingVisitor updating_visitor(heap());
   3432 
   3433   { GCTracer::Scope gc_scope(tracer_,
   3434                              GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
   3435     // Update pointers in to space.
   3436     SemiSpaceIterator to_it(heap()->new_space()->bottom(),
   3437                             heap()->new_space()->top());
   3438     for (HeapObject* object = to_it.Next();
   3439          object != NULL;
   3440          object = to_it.Next()) {
   3441       Map* map = object->map();
   3442       object->IterateBody(map->instance_type(),
   3443                           object->SizeFromMap(map),
   3444                           &updating_visitor);
   3445     }
   3446   }
   3447 
   3448   { GCTracer::Scope gc_scope(tracer_,
   3449                              GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
   3450     // Update roots.
   3451     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
   3452   }
   3453 
   3454   { GCTracer::Scope gc_scope(tracer_,
   3455                              GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
   3456     StoreBufferRebuildScope scope(heap_,
   3457                                   heap_->store_buffer(),
   3458                                   &Heap::ScavengeStoreBufferCallback);
   3459     heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
   3460         &UpdatePointer);
   3461   }
   3462 
   3463   { GCTracer::Scope gc_scope(tracer_,
   3464                              GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
   3465     SlotsBuffer::UpdateSlotsRecordedIn(heap_,
   3466                                        migration_slots_buffer_,
   3467                                        code_slots_filtering_required);
   3468     if (FLAG_trace_fragmentation) {
   3469       PrintF("  migration slots buffer: %d\n",
   3470              SlotsBuffer::SizeOfChain(migration_slots_buffer_));
   3471     }
   3472 
   3473     if (compacting_ && was_marked_incrementally_) {
   3474       // It's difficult to filter out slots recorded for large objects.
   3475       LargeObjectIterator it(heap_->lo_space());
   3476       for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    3477         // LargeObjectSpace is not swept yet, thus we have to skip
    3478         // dead objects explicitly.
   3479         if (!IsMarked(obj)) continue;
   3480 
   3481         Page* p = Page::FromAddress(obj->address());
   3482         if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
   3483           obj->Iterate(&updating_visitor);
   3484           p->ClearFlag(Page::RESCAN_ON_EVACUATION);
   3485         }
   3486       }
   3487     }
   3488   }
   3489 
   3490   int npages = evacuation_candidates_.length();
   3491   { GCTracer::Scope gc_scope(
   3492       tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
   3493     for (int i = 0; i < npages; i++) {
   3494       Page* p = evacuation_candidates_[i];
   3495       ASSERT(p->IsEvacuationCandidate() ||
   3496              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
   3497 
   3498       if (p->IsEvacuationCandidate()) {
   3499         SlotsBuffer::UpdateSlotsRecordedIn(heap_,
   3500                                            p->slots_buffer(),
   3501                                            code_slots_filtering_required);
   3502         if (FLAG_trace_fragmentation) {
   3503           PrintF("  page %p slots buffer: %d\n",
   3504                  reinterpret_cast<void*>(p),
   3505                  SlotsBuffer::SizeOfChain(p->slots_buffer()));
   3506         }
   3507 
    3508         // Important: skip list should be cleared only after roots were updated,
    3509         // because root iteration traverses the stack and might have to find code
    3510         // objects from a non-updated pc pointing into an evacuation candidate.
   3511         SkipList* list = p->skip_list();
   3512         if (list != NULL) list->Clear();
   3513       } else {
   3514         if (FLAG_gc_verbose) {
   3515           PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
   3516                  reinterpret_cast<intptr_t>(p));
   3517         }
   3518         PagedSpace* space = static_cast<PagedSpace*>(p->owner());
   3519         p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
   3520 
   3521         switch (space->identity()) {
   3522           case OLD_DATA_SPACE:
   3523             SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
   3524             break;
   3525           case OLD_POINTER_SPACE:
   3526             SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
   3527                            IGNORE_SKIP_LIST,
   3528                            IGNORE_FREE_SPACE>(
   3529                 space, p, &updating_visitor);
   3530             break;
   3531           case CODE_SPACE:
   3532             if (FLAG_zap_code_space) {
   3533               SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
   3534                              REBUILD_SKIP_LIST,
   3535                              ZAP_FREE_SPACE>(
   3536                   space, p, &updating_visitor);
   3537             } else {
   3538               SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
   3539                              REBUILD_SKIP_LIST,
   3540                              IGNORE_FREE_SPACE>(
   3541                   space, p, &updating_visitor);
   3542             }
   3543             break;
   3544           default:
   3545             UNREACHABLE();
   3546             break;
   3547         }
   3548       }
   3549     }
   3550   }
   3551 
   3552   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
   3553 
   3554   // Update pointers from cells.
   3555   HeapObjectIterator cell_iterator(heap_->cell_space());
   3556   for (HeapObject* cell = cell_iterator.Next();
   3557        cell != NULL;
   3558        cell = cell_iterator.Next()) {
   3559     if (cell->IsCell()) {
   3560       Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
   3561     }
   3562   }
   3563 
   3564   HeapObjectIterator js_global_property_cell_iterator(
   3565       heap_->property_cell_space());
   3566   for (HeapObject* cell = js_global_property_cell_iterator.Next();
   3567        cell != NULL;
   3568        cell = js_global_property_cell_iterator.Next()) {
   3569     if (cell->IsPropertyCell()) {
   3570       PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
   3571     }
   3572   }
   3573 
   3574   heap_->string_table()->Iterate(&updating_visitor);
   3575   updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
   3576   if (heap_->weak_object_to_code_table()->IsHashTable()) {
   3577     WeakHashTable* table =
   3578         WeakHashTable::cast(heap_->weak_object_to_code_table());
   3579     table->Iterate(&updating_visitor);
   3580     table->Rehash(heap_->isolate()->factory()->undefined_value());
   3581   }
   3582 
   3583   // Update pointers from external string table.
   3584   heap_->UpdateReferencesInExternalStringTable(
   3585       &UpdateReferenceInExternalStringTableEntry);
   3586 
   3587   EvacuationWeakObjectRetainer evacuation_object_retainer;
   3588   heap()->ProcessWeakReferences(&evacuation_object_retainer);
   3589 
   3590   // Visit invalidated code (we ignored all slots on it) and clear mark-bits
   3591   // under it.
   3592   ProcessInvalidatedCode(&updating_visitor);
   3593 
   3594   heap_->isolate()->inner_pointer_to_code_cache()->Flush();
   3595 
   3596 #ifdef VERIFY_HEAP
   3597   if (FLAG_verify_heap) {
   3598     VerifyEvacuation(heap_);
   3599   }
   3600 #endif
   3601 
   3602   slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
   3603   ASSERT(migration_slots_buffer_ == NULL);
   3604 }
   3605 
   3606 
   3607 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
   3608   int npages = evacuation_candidates_.length();
   3609   for (int i = 0; i < npages; i++) {
   3610     Page* p = evacuation_candidates_[i];
   3611     if (!p->IsEvacuationCandidate()) continue;
   3612     p->Unlink();
   3613     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
   3614     p->InsertAfter(space->LastPage());
   3615   }
   3616 }
   3617 
   3618 
   3619 void MarkCompactCollector::ReleaseEvacuationCandidates() {
   3620   int npages = evacuation_candidates_.length();
   3621   for (int i = 0; i < npages; i++) {
   3622     Page* p = evacuation_candidates_[i];
   3623     if (!p->IsEvacuationCandidate()) continue;
   3624     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
   3625     space->Free(p->area_start(), p->area_size());
   3626     p->set_scan_on_scavenge(false);
   3627     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
   3628     p->ResetLiveBytes();
   3629     space->ReleasePage(p);
   3630   }
   3631   evacuation_candidates_.Rewind(0);
   3632   compacting_ = false;
   3633   heap()->FreeQueuedChunks();
   3634 }
   3635 
   3636 
   3637 static const int kStartTableEntriesPerLine = 5;
   3638 static const int kStartTableLines = 171;
   3639 static const int kStartTableInvalidLine = 127;
   3640 static const int kStartTableUnusedEntry = 126;
   3641 
   3642 #define _ kStartTableUnusedEntry
   3643 #define X kStartTableInvalidLine
   3644 // Mark-bit to object start offset table.
   3645 //
    3646 // Each line is indexed by the mark bits in a byte.  The first number on
    3647 // the line gives the number of live object starts for that byte and the
    3648 // other numbers on the line give the offsets (in words) of those object
    3649 // starts.
    3650 //
    3651 // Since objects are at least two words long we don't have entries for two
    3652 // consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
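         //
         // For example, the byte 0b10010001 (145) has mark bits at positions 0, 4
         // and 7, so line 145 below reads "3, 0, 4, 7": three object starts at
         // word offsets 0, 4 and 7.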
   3653 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
   3654   0, _, _, _, _,  // 0
   3655   1, 0, _, _, _,  // 1
   3656   1, 1, _, _, _,  // 2
   3657   X, _, _, _, _,  // 3
   3658   1, 2, _, _, _,  // 4
   3659   2, 0, 2, _, _,  // 5
   3660   X, _, _, _, _,  // 6
   3661   X, _, _, _, _,  // 7
   3662   1, 3, _, _, _,  // 8
   3663   2, 0, 3, _, _,  // 9
   3664   2, 1, 3, _, _,  // 10
   3665   X, _, _, _, _,  // 11
   3666   X, _, _, _, _,  // 12
   3667   X, _, _, _, _,  // 13
   3668   X, _, _, _, _,  // 14
   3669   X, _, _, _, _,  // 15
   3670   1, 4, _, _, _,  // 16
   3671   2, 0, 4, _, _,  // 17
   3672   2, 1, 4, _, _,  // 18
   3673   X, _, _, _, _,  // 19
   3674   2, 2, 4, _, _,  // 20
   3675   3, 0, 2, 4, _,  // 21
   3676   X, _, _, _, _,  // 22
   3677   X, _, _, _, _,  // 23
   3678   X, _, _, _, _,  // 24
   3679   X, _, _, _, _,  // 25
   3680   X, _, _, _, _,  // 26
   3681   X, _, _, _, _,  // 27
   3682   X, _, _, _, _,  // 28
   3683   X, _, _, _, _,  // 29
   3684   X, _, _, _, _,  // 30
   3685   X, _, _, _, _,  // 31
   3686   1, 5, _, _, _,  // 32
   3687   2, 0, 5, _, _,  // 33
   3688   2, 1, 5, _, _,  // 34
   3689   X, _, _, _, _,  // 35
   3690   2, 2, 5, _, _,  // 36
   3691   3, 0, 2, 5, _,  // 37
   3692   X, _, _, _, _,  // 38
   3693   X, _, _, _, _,  // 39
   3694   2, 3, 5, _, _,  // 40
   3695   3, 0, 3, 5, _,  // 41
   3696   3, 1, 3, 5, _,  // 42
   3697   X, _, _, _, _,  // 43
   3698   X, _, _, _, _,  // 44
   3699   X, _, _, _, _,  // 45
   3700   X, _, _, _, _,  // 46
   3701   X, _, _, _, _,  // 47
   3702   X, _, _, _, _,  // 48
   3703   X, _, _, _, _,  // 49
   3704   X, _, _, _, _,  // 50
   3705   X, _, _, _, _,  // 51
   3706   X, _, _, _, _,  // 52
   3707   X, _, _, _, _,  // 53
   3708   X, _, _, _, _,  // 54
   3709   X, _, _, _, _,  // 55
   3710   X, _, _, _, _,  // 56
   3711   X, _, _, _, _,  // 57
   3712   X, _, _, _, _,  // 58
   3713   X, _, _, _, _,  // 59
   3714   X, _, _, _, _,  // 60
   3715   X, _, _, _, _,  // 61
   3716   X, _, _, _, _,  // 62
   3717   X, _, _, _, _,  // 63
   3718   1, 6, _, _, _,  // 64
   3719   2, 0, 6, _, _,  // 65
   3720   2, 1, 6, _, _,  // 66
   3721   X, _, _, _, _,  // 67
   3722   2, 2, 6, _, _,  // 68
   3723   3, 0, 2, 6, _,  // 69
   3724   X, _, _, _, _,  // 70
   3725   X, _, _, _, _,  // 71
   3726   2, 3, 6, _, _,  // 72
   3727   3, 0, 3, 6, _,  // 73
   3728   3, 1, 3, 6, _,  // 74
   3729   X, _, _, _, _,  // 75
   3730   X, _, _, _, _,  // 76
   3731   X, _, _, _, _,  // 77
   3732   X, _, _, _, _,  // 78
   3733   X, _, _, _, _,  // 79
   3734   2, 4, 6, _, _,  // 80
   3735   3, 0, 4, 6, _,  // 81
   3736   3, 1, 4, 6, _,  // 82
   3737   X, _, _, _, _,  // 83
   3738   3, 2, 4, 6, _,  // 84
   3739   4, 0, 2, 4, 6,  // 85
   3740   X, _, _, _, _,  // 86
   3741   X, _, _, _, _,  // 87
   3742   X, _, _, _, _,  // 88
   3743   X, _, _, _, _,  // 89
   3744   X, _, _, _, _,  // 90
   3745   X, _, _, _, _,  // 91
   3746   X, _, _, _, _,  // 92
   3747   X, _, _, _, _,  // 93
   3748   X, _, _, _, _,  // 94
   3749   X, _, _, _, _,  // 95
   3750   X, _, _, _, _,  // 96
   3751   X, _, _, _, _,  // 97
   3752   X, _, _, _, _,  // 98
   3753   X, _, _, _, _,  // 99
   3754   X, _, _, _, _,  // 100
   3755   X, _, _, _, _,  // 101
   3756   X, _, _, _, _,  // 102
   3757   X, _, _, _, _,  // 103
   3758   X, _, _, _, _,  // 104
   3759   X, _, _, _, _,  // 105
   3760   X, _, _, _, _,  // 106
   3761   X, _, _, _, _,  // 107
   3762   X, _, _, _, _,  // 108
   3763   X, _, _, _, _,  // 109
   3764   X, _, _, _, _,  // 110
   3765   X, _, _, _, _,  // 111
   3766   X, _, _, _, _,  // 112
   3767   X, _, _, _, _,  // 113
   3768   X, _, _, _, _,  // 114
   3769   X, _, _, _, _,  // 115
   3770   X, _, _, _, _,  // 116
   3771   X, _, _, _, _,  // 117
   3772   X, _, _, _, _,  // 118
   3773   X, _, _, _, _,  // 119
   3774   X, _, _, _, _,  // 120
   3775   X, _, _, _, _,  // 121
   3776   X, _, _, _, _,  // 122
   3777   X, _, _, _, _,  // 123
   3778   X, _, _, _, _,  // 124
   3779   X, _, _, _, _,  // 125
   3780   X, _, _, _, _,  // 126
   3781   X, _, _, _, _,  // 127
   3782   1, 7, _, _, _,  // 128
   3783   2, 0, 7, _, _,  // 129
   3784   2, 1, 7, _, _,  // 130
   3785   X, _, _, _, _,  // 131
   3786   2, 2, 7, _, _,  // 132
   3787   3, 0, 2, 7, _,  // 133
   3788   X, _, _, _, _,  // 134
   3789   X, _, _, _, _,  // 135
   3790   2, 3, 7, _, _,  // 136
   3791   3, 0, 3, 7, _,  // 137
   3792   3, 1, 3, 7, _,  // 138
   3793   X, _, _, _, _,  // 139
   3794   X, _, _, _, _,  // 140
   3795   X, _, _, _, _,  // 141
   3796   X, _, _, _, _,  // 142
   3797   X, _, _, _, _,  // 143
   3798   2, 4, 7, _, _,  // 144
   3799   3, 0, 4, 7, _,  // 145
   3800   3, 1, 4, 7, _,  // 146
   3801   X, _, _, _, _,  // 147
   3802   3, 2, 4, 7, _,  // 148
   3803   4, 0, 2, 4, 7,  // 149
   3804   X, _, _, _, _,  // 150
   3805   X, _, _, _, _,  // 151
   3806   X, _, _, _, _,  // 152
   3807   X, _, _, _, _,  // 153
   3808   X, _, _, _, _,  // 154
   3809   X, _, _, _, _,  // 155
   3810   X, _, _, _, _,  // 156
   3811   X, _, _, _, _,  // 157
   3812   X, _, _, _, _,  // 158
   3813   X, _, _, _, _,  // 159
   3814   2, 5, 7, _, _,  // 160
   3815   3, 0, 5, 7, _,  // 161
   3816   3, 1, 5, 7, _,  // 162
   3817   X, _, _, _, _,  // 163
   3818   3, 2, 5, 7, _,  // 164
   3819   4, 0, 2, 5, 7,  // 165
   3820   X, _, _, _, _,  // 166
   3821   X, _, _, _, _,  // 167
   3822   3, 3, 5, 7, _,  // 168
   3823   4, 0, 3, 5, 7,  // 169
   3824   4, 1, 3, 5, 7   // 170
   3825 };
   3826 #undef _
   3827 #undef X
   3828 
   3829 
    3830 // Takes a word of mark bits.  Returns the number of objects that start in the
    3831 // range and puts their word offsets into the supplied array.
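         // For example, mark_bits == 0x9111 yields 5 objects with word offsets
         // 0, 4, 8, 12 and 15 (low byte 0x11 -> 0 and 4; next byte 0x91 ->
         // 8 + 0, 8 + 4 and 8 + 7).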
   3832 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
   3833   int objects = 0;
   3834   int offset = 0;
   3835 
   3836   // No consecutive 1 bits.
   3837   ASSERT((mark_bits & 0x180) != 0x180);
   3838   ASSERT((mark_bits & 0x18000) != 0x18000);
   3839   ASSERT((mark_bits & 0x1800000) != 0x1800000);
   3840 
   3841   while (mark_bits != 0) {
   3842     int byte = (mark_bits & 0xff);
   3843     mark_bits >>= 8;
   3844     if (byte != 0) {
   3845       ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
   3846       char* table = kStartTable + byte * kStartTableEntriesPerLine;
   3847       int objects_in_these_8_words = table[0];
   3848       ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
   3849       ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
   3850       for (int i = 0; i < objects_in_these_8_words; i++) {
   3851         starts[objects++] = offset + table[1 + i];
   3852       }
   3853     }
   3854     offset += 8;
   3855   }
   3856   return objects;
   3857 }
   3858 
   3859 
   3860 static inline Address DigestFreeStart(Address approximate_free_start,
   3861                                       uint32_t free_start_cell) {
   3862   ASSERT(free_start_cell != 0);
   3863 
   3864   // No consecutive 1 bits.
   3865   ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
   3866 
   3867   int offsets[16];
   3868   uint32_t cell = free_start_cell;
   3869   int offset_of_last_live;
   3870   if ((cell & 0x80000000u) != 0) {
   3871     // This case would overflow below.
   3872     offset_of_last_live = 31;
   3873   } else {
   3874     // Remove all but one bit, the most significant.  This is an optimization
   3875     // that may or may not be worthwhile.
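             //
             // For example, cell == 0b10100: the shifts smear the bits to 0b11111,
             // and (0b11111 + 1) >> 1 then leaves only the most significant bit,
             // 0b10000.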
   3876     cell |= cell >> 16;
   3877     cell |= cell >> 8;
   3878     cell |= cell >> 4;
   3879     cell |= cell >> 2;
   3880     cell |= cell >> 1;
   3881     cell = (cell + 1) >> 1;
   3882     int live_objects = MarkWordToObjectStarts(cell, offsets);
   3883     ASSERT(live_objects == 1);
   3884     offset_of_last_live = offsets[live_objects - 1];
   3885   }
   3886   Address last_live_start =
   3887       approximate_free_start + offset_of_last_live * kPointerSize;
   3888   HeapObject* last_live = HeapObject::FromAddress(last_live_start);
   3889   Address free_start = last_live_start + last_live->Size();
   3890   return free_start;
   3891 }
   3892 
   3893 
   3894 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
   3895   ASSERT(cell != 0);
   3896 
   3897   // No consecutive 1 bits.
   3898   ASSERT((cell & (cell << 1)) == 0);
   3899 
   3900   int offsets[16];
   3901   if (cell == 0x80000000u) {  // Avoid overflow below.
   3902     return block_address + 31 * kPointerSize;
   3903   }
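           // Isolate the lowest set bit, e.g. cell == 0b10100: cell - 1 == 0b10011,
           // xor == 0b00111, + 1 == 0b01000, >> 1 == 0b00100.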
   3904   uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
   3905   ASSERT((first_set_bit & cell) == first_set_bit);
   3906   int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
   3907   ASSERT(live_objects == 1);
   3908   USE(live_objects);
   3909   return block_address + offsets[0] * kPointerSize;
   3910 }
   3911 
   3912 
   3913 template<MarkCompactCollector::SweepingParallelism mode>
   3914 static intptr_t Free(PagedSpace* space,
   3915                      FreeList* free_list,
   3916                      Address start,
   3917                      int size) {
   3918   if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
   3919     return space->Free(start, size);
   3920   } else {
   3921     return size - free_list->Free(start, size);
   3922   }
   3923 }
   3924 
   3925 
   3926 // Force instantiation of templatized SweepConservatively method for
   3927 // SWEEP_SEQUENTIALLY mode.
   3928 template intptr_t MarkCompactCollector::
   3929     SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
   3930         PagedSpace*, FreeList*, Page*);
   3931 
   3932 
   3933 // Force instantiation of templatized SweepConservatively method for
   3934 // SWEEP_IN_PARALLEL mode.
   3935 template intptr_t MarkCompactCollector::
   3936     SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
   3937         PagedSpace*, FreeList*, Page*);
   3938 
   3939 
   3940 // Sweeps a space conservatively.  After this has been done the larger free
   3941 // spaces have been put on the free list and the smaller ones have been
   3942 // ignored and left untouched.  A free space is always either ignored or put
   3943 // on the free list, never split up into two parts.  This is important
   3944 // because it means that any FreeSpace maps left actually describe a region of
   3945 // memory that can be ignored when scanning.  Dead objects other than free
   3946 // spaces will not contain the free space map.
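         //
         // Roughly: a dead gap of more than 32 words between two live objects is
         // digested and put on the free list as a single FreeSpace, while smaller
         // gaps are left with their stale contents and simply skipped when scanning.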
   3947 template<MarkCompactCollector::SweepingParallelism mode>
   3948 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   3949                                                    FreeList* free_list,
   3950                                                    Page* p) {
   3951   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
   3952   ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
   3953          free_list != NULL) ||
   3954          (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
   3955          free_list == NULL));
   3956 
   3957   // When parallel sweeping is active, the page will be marked after
   3958   // sweeping by the main thread.
   3959   if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
   3960     p->MarkSweptConservatively();
   3961   }
   3962 
   3963   intptr_t freed_bytes = 0;
   3964   size_t size = 0;
   3965 
   3966   // Skip over all the dead objects at the start of the page and mark them free.
   3967   Address cell_base = 0;
   3968   MarkBit::CellType* cell = NULL;
   3969   MarkBitCellIterator it(p);
   3970   for (; !it.Done(); it.Advance()) {
   3971     cell_base = it.CurrentCellBase();
   3972     cell = it.CurrentCell();
   3973     if (*cell != 0) break;
   3974   }
   3975 
   3976   if (it.Done()) {
   3977     size = p->area_end() - p->area_start();
   3978     freed_bytes += Free<mode>(space, free_list, p->area_start(),
   3979                               static_cast<int>(size));
   3980     ASSERT_EQ(0, p->LiveBytes());
   3981     return freed_bytes;
   3982   }
   3983 
   3984   // Grow the size of the start-of-page free space a little to get up to the
   3985   // first live object.
   3986   Address free_end = StartOfLiveObject(cell_base, *cell);
   3987   // Free the first free space.
   3988   size = free_end - p->area_start();
   3989   freed_bytes += Free<mode>(space, free_list, p->area_start(),
   3990                             static_cast<int>(size));
   3991 
   3992   // The start of the current free area is represented in undigested form by
   3993   // the address of the last 32-word section that contained a live object and
   3994   // the marking bitmap for that cell, which describes where the live object
   3995   // started.  Unless we find a large free space in the bitmap we will not
   3996   // digest this pair into a real address.  We start the iteration here at the
   3997   // first word in the marking bit map that indicates a live object.
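           // (Digesting means asking DigestFreeStart for the address just past the
           // last live object in that 32-word section; we only do this when the gap
           // appears to be larger than 32 words.)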
   3998   Address free_start = cell_base;
   3999   MarkBit::CellType free_start_cell = *cell;
   4000 
   4001   for (; !it.Done(); it.Advance()) {
   4002     cell_base = it.CurrentCellBase();
   4003     cell = it.CurrentCell();
   4004     if (*cell != 0) {
   4005       // We have a live object.  Check approximately whether it is more than 32
   4006       // words since the last live object.
   4007       if (cell_base - free_start > 32 * kPointerSize) {
   4008         free_start = DigestFreeStart(free_start, free_start_cell);
   4009         if (cell_base - free_start > 32 * kPointerSize) {
    4010           // Now that we know the exact start of the free space, it still looks
    4011           // like we have a large enough free space to be worth bothering with,
    4012           // so now we need to find the start of the first live object at the
    4013           // end of the free space.
   4014           free_end = StartOfLiveObject(cell_base, *cell);
   4015           freed_bytes += Free<mode>(space, free_list, free_start,
   4016                                     static_cast<int>(free_end - free_start));
   4017         }
   4018       }
   4019       // Update our undigested record of where the current free area started.
   4020       free_start = cell_base;
   4021       free_start_cell = *cell;
   4022       // Clear marking bits for current cell.
   4023       *cell = 0;
   4024     }
   4025   }
   4026 
   4027   // Handle the free space at the end of the page.
   4028   if (cell_base - free_start > 32 * kPointerSize) {
   4029     free_start = DigestFreeStart(free_start, free_start_cell);
   4030     freed_bytes += Free<mode>(space, free_list, free_start,
   4031                               static_cast<int>(p->area_end() - free_start));
   4032   }
   4033 
   4034   p->ResetLiveBytes();
   4035   return freed_bytes;
   4036 }
   4037 
   4038 
   4039 void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
   4040   PageIterator it(space);
   4041   FreeList* free_list = space == heap()->old_pointer_space()
   4042                             ? free_list_old_pointer_space_.get()
   4043                             : free_list_old_data_space_.get();
   4044   FreeList private_free_list(space);
   4045   while (it.has_next()) {
   4046     Page* p = it.next();
   4047 
   4048     if (p->TryParallelSweeping()) {
   4049       SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
   4050       free_list->Concatenate(&private_free_list);
   4051       p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
   4052     }
   4053     if (p == space->end_of_unswept_pages()) break;
   4054   }
   4055 }
   4056 
   4057 
   4058 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   4059   space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
   4060                                       sweeper == PARALLEL_CONSERVATIVE ||
   4061                                       sweeper == CONCURRENT_CONSERVATIVE);
   4062   space->ClearStats();
   4063 
   4064   // We defensively initialize end_of_unswept_pages_ here with the first page
   4065   // of the pages list.
   4066   space->set_end_of_unswept_pages(space->FirstPage());
   4067 
   4068   PageIterator it(space);
   4069 
   4070   int pages_swept = 0;
   4071   bool unused_page_present = false;
   4072   bool parallel_sweeping_active = false;
   4073 
   4074   while (it.has_next()) {
   4075     Page* p = it.next();
   4076     ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
   4077 
   4078     // Clear sweeping flags indicating that marking bits are still intact.
   4079     p->ClearSweptPrecisely();
   4080     p->ClearSweptConservatively();
   4081 
   4082     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
   4083         p->IsEvacuationCandidate()) {
   4084       // Will be processed in EvacuateNewSpaceAndCandidates.
   4085       ASSERT(evacuation_candidates_.length() > 0);
   4086       continue;
   4087     }
   4088 
    4089     // One unused page is kept; all further ones are released before being swept.
   4090     if (p->LiveBytes() == 0) {
   4091       if (unused_page_present) {
   4092         if (FLAG_gc_verbose) {
   4093           PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
   4094                  reinterpret_cast<intptr_t>(p));
   4095         }
   4096         // Adjust unswept free bytes because releasing a page expects said
   4097         // counter to be accurate for unswept pages.
   4098         space->IncreaseUnsweptFreeBytes(p);
   4099         space->ReleasePage(p);
   4100         continue;
   4101       }
   4102       unused_page_present = true;
   4103     }
   4104 
   4105     switch (sweeper) {
   4106       case CONSERVATIVE: {
   4107         if (FLAG_gc_verbose) {
   4108           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
   4109                  reinterpret_cast<intptr_t>(p));
   4110         }
   4111         SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
   4112         pages_swept++;
   4113         break;
   4114       }
   4115       case CONCURRENT_CONSERVATIVE:
   4116       case PARALLEL_CONSERVATIVE: {
   4117         if (!parallel_sweeping_active) {
   4118           if (FLAG_gc_verbose) {
   4119             PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
   4120                    reinterpret_cast<intptr_t>(p));
   4121           }
   4122           SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
   4123           pages_swept++;
   4124           parallel_sweeping_active = true;
   4125         } else {
   4126           if (FLAG_gc_verbose) {
   4127             PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
   4128                    reinterpret_cast<intptr_t>(p));
   4129           }
   4130           p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
   4131           space->IncreaseUnsweptFreeBytes(p);
   4132         }
   4133         space->set_end_of_unswept_pages(p);
   4134         break;
   4135       }
   4136       case PRECISE: {
   4137         if (FLAG_gc_verbose) {
   4138           PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
   4139                  reinterpret_cast<intptr_t>(p));
   4140         }
   4141         if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
   4142           SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
   4143               space, p, NULL);
   4144         } else if (space->identity() == CODE_SPACE) {
   4145           SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
   4146               space, p, NULL);
   4147         } else {
   4148           SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
   4149               space, p, NULL);
   4150         }
   4151         pages_swept++;
   4152         break;
   4153       }
   4154       default: {
   4155         UNREACHABLE();
   4156       }
   4157     }
   4158   }
   4159 
   4160   if (FLAG_gc_verbose) {
   4161     PrintF("SweepSpace: %s (%d pages swept)\n",
   4162            AllocationSpaceName(space->identity()),
   4163            pages_swept);
   4164   }
   4165 
    4166   // Give back to the OS any pages that have been queued for freeing.
   4167   heap()->FreeQueuedChunks();
   4168 }
   4169 
   4170 
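         // Sweeps all paged spaces. The sweeper defaults to CONSERVATIVE, is
         // upgraded to PARALLEL_CONSERVATIVE or CONCURRENT_CONSERVATIVE when
         // sweeper threads are activated and the corresponding flag is set, and
         // is forced to PRECISE when sweep_precisely_ is set. The code, cell,
         // property cell and map spaces are always swept precisely.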
   4171 void MarkCompactCollector::SweepSpaces() {
   4172   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
   4173 #ifdef DEBUG
   4174   state_ = SWEEP_SPACES;
   4175 #endif
   4176   SweeperType how_to_sweep = CONSERVATIVE;
   4177   if (AreSweeperThreadsActivated()) {
   4178     if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
   4179     if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
   4180   }
   4181   if (sweep_precisely_) how_to_sweep = PRECISE;
   4182 
   4183   MoveEvacuationCandidatesToEndOfPagesList();
   4184 
   4185   // Noncompacting collections simply sweep the spaces to clear the mark
    4186   // bits and free the non-live blocks (for old and map spaces).  We sweep
   4187   // the map space last because freeing non-live maps overwrites them and
   4188   // the other spaces rely on possibly non-live maps to get the sizes for
   4189   // non-live objects.
   4190   { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
   4191     { SequentialSweepingScope scope(this);
   4192       SweepSpace(heap()->old_pointer_space(), how_to_sweep);
   4193       SweepSpace(heap()->old_data_space(), how_to_sweep);
   4194     }
   4195 
   4196     if (how_to_sweep == PARALLEL_CONSERVATIVE ||
   4197         how_to_sweep == CONCURRENT_CONSERVATIVE) {
   4198       StartSweeperThreads();
   4199     }
   4200 
   4201     if (how_to_sweep == PARALLEL_CONSERVATIVE) {
   4202       WaitUntilSweepingCompleted();
   4203     }
   4204   }
   4205   RemoveDeadInvalidatedCode();
   4206   SweepSpace(heap()->code_space(), PRECISE);
   4207 
   4208   SweepSpace(heap()->cell_space(), PRECISE);
   4209   SweepSpace(heap()->property_cell_space(), PRECISE);
   4210 
   4211   EvacuateNewSpaceAndCandidates();
   4212 
    4213   // ClearNonLiveTransitions depends on precise sweeping of the map space to
    4214   // detect whether an unmarked map became dead in this collection or in one
    4215   // of the previous ones.
   4216   SweepSpace(heap()->map_space(), PRECISE);
   4217 
   4218   // Deallocate unmarked objects and clear marked bits for marked objects.
   4219   heap_->lo_space()->FreeUnmarkedObjects();
   4220 
   4221   // Deallocate evacuated candidate pages.
   4222   ReleaseEvacuationCandidates();
   4223 }
   4224 
   4225 
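         // Finalizes pages of |space| that were swept in parallel: every page
         // in the PARALLEL_SWEEPING_FINALIZE state is marked as conservatively
         // swept and moved to the PARALLEL_SWEEPING_DONE state.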
   4226 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
   4227   PageIterator it(space);
   4228   while (it.has_next()) {
   4229     Page* p = it.next();
   4230     if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
   4231       p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
   4232       p->MarkSweptConservatively();
   4233     }
   4234     ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
   4235   }
   4236 }
   4237 
   4238 
   4239 void MarkCompactCollector::ParallelSweepSpacesComplete() {
   4240   ParallelSweepSpaceComplete(heap()->old_pointer_space());
   4241   ParallelSweepSpaceComplete(heap()->old_data_space());
   4242 }
   4243 
   4244 
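         // Turns code flushing on or off. Flushing is forced off while the
         // debugger is loaded or has break points. Enabling lazily allocates
         // the CodeFlusher; disabling evicts all flushing candidates and
         // deletes it.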
   4245 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
   4246   if (isolate()->debug()->is_loaded() ||
   4247       isolate()->debug()->has_break_points()) {
   4248     enable = false;
   4249   }
   4250 
   4251   if (enable) {
   4252     if (code_flusher_ != NULL) return;
   4253     code_flusher_ = new CodeFlusher(isolate());
   4254   } else {
   4255     if (code_flusher_ == NULL) return;
   4256     code_flusher_->EvictAllCandidates();
   4257     delete code_flusher_;
   4258     code_flusher_ = NULL;
   4259   }
   4260 
   4261   if (FLAG_trace_code_flushing) {
   4262     PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
   4263   }
   4264 }
   4265 
   4266 
    4267 // TODO(1466): ReportDeleteIfNeeded is currently not called.
    4268 // Our profiling tools do not expect intersections between
    4269 // code objects. We should either re-enable it or change our tools.
   4270 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
   4271                                                 Isolate* isolate) {
   4272 #ifdef ENABLE_GDB_JIT_INTERFACE
   4273   if (obj->IsCode()) {
   4274     GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
   4275   }
   4276 #endif
   4277   if (obj->IsCode()) {
   4278     PROFILE(isolate, CodeDeleteEvent(obj->address()));
   4279   }
   4280 }
   4281 
   4282 
   4283 Isolate* MarkCompactCollector::isolate() const {
   4284   return heap_->isolate();
   4285 }
   4286 
   4287 
   4288 void MarkCompactCollector::Initialize() {
   4289   MarkCompactMarkingVisitor::Initialize();
   4290   IncrementalMarking::Initialize();
   4291 }
   4292 
   4293 
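         // A slots buffer entry is either a raw slot address (Object**) or a
         // typed slot, which occupies two consecutive entries: a small integer
         // tag (the SlotType) followed by the slot's address. Because every
         // real slot address is larger than NUMBER_OF_SLOT_TYPES, the tag can
         // be told apart from a raw slot by a simple comparison.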
   4294 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
   4295   return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
   4296 }
   4297 
   4298 
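         // Appends a typed slot as a (type, address) pair, allocating a new
         // buffer at the head of the chain if the current one is full. With
         // FAIL_ON_OVERFLOW, a chain that has reached its length threshold is
         // deallocated and false is returned; callers such as RecordRelocSlot()
         // react by evicting the evacuation candidate.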
   4299 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
   4300                         SlotsBuffer** buffer_address,
   4301                         SlotType type,
   4302                         Address addr,
   4303                         AdditionMode mode) {
   4304   SlotsBuffer* buffer = *buffer_address;
   4305   if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
   4306     if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
   4307       allocator->DeallocateChain(buffer_address);
   4308       return false;
   4309     }
   4310     buffer = allocator->AllocateBuffer(buffer);
   4311     *buffer_address = buffer;
   4312   }
   4313   ASSERT(buffer->HasSpaceForTypedSlot());
   4314   buffer->Add(reinterpret_cast<ObjectSlot>(type));
   4315   buffer->Add(reinterpret_cast<ObjectSlot>(addr));
   4316   return true;
   4317 }
   4318 
   4319 
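         // Maps a RelocInfo mode to the corresponding slots buffer slot type.
         // Only code targets, embedded objects, debug break slots and JS
         // returns are expected here.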
   4320 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
   4321   if (RelocInfo::IsCodeTarget(rmode)) {
   4322     return SlotsBuffer::CODE_TARGET_SLOT;
   4323   } else if (RelocInfo::IsEmbeddedObject(rmode)) {
   4324     return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
   4325   } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
   4326     return SlotsBuffer::DEBUG_TARGET_SLOT;
   4327   } else if (RelocInfo::IsJSReturn(rmode)) {
   4328     return SlotsBuffer::JS_RETURN_SLOT;
   4329   }
   4330   UNREACHABLE();
   4331   return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
   4332 }
   4333 
   4334 
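         // Records the slot behind |rinfo| if |target| lies on an evacuation
         // candidate and the host code object (if any) is not exempt from slot
         // recording. Embedded objects in the constant pool are recorded as
         // untyped heap slots, code targets in the constant pool as code entry
         // slots, and everything else as a typed slot derived from the reloc
         // mode. If the slots buffer overflows, the candidate page is evicted.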
   4335 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
   4336   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
   4337   RelocInfo::Mode rmode = rinfo->rmode();
   4338   if (target_page->IsEvacuationCandidate() &&
   4339       (rinfo->host() == NULL ||
   4340        !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
   4341     bool success;
   4342     if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
   4343       // This doesn't need to be typed since it is just a normal heap pointer.
   4344       Object** target_pointer =
   4345           reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
   4346       success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
   4347                                    target_page->slots_buffer_address(),
   4348                                    target_pointer,
   4349                                    SlotsBuffer::FAIL_ON_OVERFLOW);
   4350     } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
   4351       success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
   4352                                    target_page->slots_buffer_address(),
   4353                                    SlotsBuffer::CODE_ENTRY_SLOT,
   4354                                    rinfo->constant_pool_entry_address(),
   4355                                    SlotsBuffer::FAIL_ON_OVERFLOW);
   4356     } else {
   4357       success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
    4358                                    target_page->slots_buffer_address(),
    4359                                    SlotTypeForRMode(rmode),
    4360                                    rinfo->pc(),
    4361                                    SlotsBuffer::FAIL_ON_OVERFLOW);
   4362     }
   4363     if (!success) {
   4364       EvictEvacuationCandidate(target_page);
   4365     }
   4366   }
   4367 }
   4368 
   4369 
   4370 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
   4371   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
   4372   if (target_page->IsEvacuationCandidate() &&
   4373       !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
   4374     if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
   4375                             target_page->slots_buffer_address(),
   4376                             SlotsBuffer::CODE_ENTRY_SLOT,
   4377                             slot,
   4378                             SlotsBuffer::FAIL_ON_OVERFLOW)) {
   4379       EvictEvacuationCandidate(target_page);
   4380     }
   4381   }
   4382 }
   4383 
   4384 
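         // Records a code target slot for a patch applied while compacting.
         // The host code object is found through the inner-pointer-to-code
         // cache, and the slot is recorded only if the host is already marked
         // black.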
   4385 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
   4386   ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
   4387   if (is_compacting()) {
   4388     Code* host = isolate()->inner_pointer_to_code_cache()->
   4389         GcSafeFindCodeForInnerPointer(pc);
   4390     MarkBit mark_bit = Marking::MarkBitFrom(host);
   4391     if (Marking::IsBlack(mark_bit)) {
   4392       RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
   4393       RecordRelocSlot(&rinfo, target);
   4394     }
   4395   }
   4396 }
   4397 
   4398 
   4399 static inline SlotsBuffer::SlotType DecodeSlotType(
   4400     SlotsBuffer::ObjectSlot slot) {
   4401   return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
   4402 }
   4403 
   4404 
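         // Updates every slot recorded in this buffer. Untyped entries are
         // updated in place; a typed entry consumes the following entry as the
         // slot's address and is dispatched on its slot type.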
   4405 void SlotsBuffer::UpdateSlots(Heap* heap) {
   4406   PointersUpdatingVisitor v(heap);
   4407 
   4408   for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
   4409     ObjectSlot slot = slots_[slot_idx];
   4410     if (!IsTypedSlot(slot)) {
   4411       PointersUpdatingVisitor::UpdateSlot(heap, slot);
   4412     } else {
   4413       ++slot_idx;
   4414       ASSERT(slot_idx < idx_);
   4415       UpdateSlot(heap->isolate(),
   4416                  &v,
   4417                  DecodeSlotType(slot),
   4418                  reinterpret_cast<Address>(slots_[slot_idx]));
   4419     }
   4420   }
   4421 }
   4422 
   4423 
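         // Same as UpdateSlots(), but entries that point into invalidated code
         // objects are skipped.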
   4424 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
   4425   PointersUpdatingVisitor v(heap);
   4426 
   4427   for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
   4428     ObjectSlot slot = slots_[slot_idx];
   4429     if (!IsTypedSlot(slot)) {
   4430       if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
   4431         PointersUpdatingVisitor::UpdateSlot(heap, slot);
   4432       }
   4433     } else {
   4434       ++slot_idx;
   4435       ASSERT(slot_idx < idx_);
   4436       Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
   4437       if (!IsOnInvalidatedCodeObject(pc)) {
   4438         UpdateSlot(heap->isolate(),
   4439                    &v,
   4440                    DecodeSlotType(slot),
   4441                    reinterpret_cast<Address>(slots_[slot_idx]));
   4442       }
   4443     }
   4444   }
   4445 }
   4446 
   4447 
   4448 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
   4449   return new SlotsBuffer(next_buffer);
   4450 }
   4451 
   4452 
   4453 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
   4454   delete buffer;
   4455 }
   4456 
   4457 
   4458 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
   4459   SlotsBuffer* buffer = *buffer_address;
   4460   while (buffer != NULL) {
   4461     SlotsBuffer* next_buffer = buffer->next();
   4462     DeallocateBuffer(buffer);
   4463     buffer = next_buffer;
   4464   }
   4465   *buffer_address = NULL;
   4466 }
   4467 
   4468 
   4469 } }  // namespace v8::internal
   4470