// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "incremental-marking.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "v8conversions.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      steps_count_(0),
      steps_took_(0),
      longest_step_(0.0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0) {
}


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned.  We need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}


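// Entry point for the write barrier invoked from generated code while
// incremental marking (without compaction) is active. The per-chunk write
// barrier counter provides a rough count of barrier invocations since the
// last step; Step() uses it to scale marking work for mutators that write
// a lot without allocating.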
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object** slot,
                                             Isolate* isolate) {
  ASSERT(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(!marking->is_compacting_);

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
            chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                          Object** slot,
                                                          Isolate* isolate) {
  ASSERT(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(marking->is_compacting_);

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
            chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    ASSERT(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white.  It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned.  We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}


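// Marks an object grey without pushing it on the marking deque. If the
// object was already black, its size is subtracted from the live bytes of
// its chunk so that it is not counted twice when it is blackened again.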
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


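// Marks a white object black and accounts for its live bytes. Objects that
// are already grey (queued for scanning) or black are left unchanged.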
static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit,
                                       int size) {
  ASSERT(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  ASSERT(Marking::IsBlack(mark_bit));
}


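// Marks a white or grey object black and accounts for its live bytes.
// Objects that are already black are left unchanged.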
static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit,
                                        int size) {
  ASSERT(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  ASSERT(Marking::IsBlack(mark_bit));
}


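// Marking visitor used while incremental marking is in progress. It differs
// from the full mark-compact visitor mainly in how large fixed arrays are
// handled: on large-object-space pages they are scanned in chunks, with the
// progress recorded in the page's progress bar.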
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case this
      // fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
                             chunk->progress_bar());
      int end_offset = Min(object_size,
                           start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap,
                                HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark the cache black in a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
    VisitNativeContext(map, context);
  }

  static void VisitWeakCollection(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    VisitPointers(heap,
                  HeapObject::RawField(object,
                                       JSWeakCollection::kPropertiesOffset),
                  HeapObject::RawField(object, JSWeakCollection::kSize));
  }

  static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->NonFailureIsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->NonFailureIsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap,
                                             Object** anchor,
                                             Object** start,
                                             Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->NonFailureIsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};


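// Visitor used to mark the strong roots at the start of incremental marking.
// Pointer-containing objects are marked grey and pushed on the marking deque;
// data-only objects are marked black immediately.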
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  IncrementalMarking* incremental_marking_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


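// Sets up the page flags that control the incremental write barrier. While
// marking is active both "pointers to here" and "pointers from here" are
// interesting, so stores into these pages go through the barrier.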
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when expose GC is
  // deactivated, 2) when incremental marking is turned on, 3) when we are
  // currently not in a GC, and 4) when we are currently not serializing
  // or deserializing the heap.
  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
      FLAG_incremental_marking_steps &&
      heap_->gc_state() == Heap::NOT_IN_GC &&
      !Serializer::enabled() &&
      heap_->isolate()->IsInitialized() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  ASSERT(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


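// Switches every RecordWrite stub in the code stub cache to the given mode so
// that generated code uses the write barrier variant matching the current
// marking state.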
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


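// The marking deque is backed by a lazily allocated 4 MB virtual memory
// reservation. It is committed when marking starts and uncommitted again
// once marking has stopped, so the memory is only paid for while it is
// actually needed.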
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}


void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}


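// Starts incremental marking, or enters the SWEEPING state first if lazy
// sweeping from the previous collection has not finished yet. Lowering the
// new-space inline allocation limit makes allocation take the slow path
// regularly, which is what drives the marking steps.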
void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);
  ASSERT(FLAG_incremental_marking_steps);
  ASSERT(state_ == STOPPED);
  ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
  ASSERT(!Serializer::enabled());
  ASSERT(heap_->isolate()->IsInitialized());

  ResetStepCounters();

  if (heap_->IsSweepingComplete()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark the cache black in a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


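// After a scavenge, entries on the marking deque may point to objects that
// have been moved or collected. Rewrite the deque in place: follow
// forwarding addresses of surviving new-space objects and drop entries for
// dead objects and one-word fillers.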
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
               (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
                Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
  longest_step_ = 0.0;
}


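// Visits a single object from the marking deque: makes sure its map is
// marked, scans its body for pointers and finally marks the object itself
// black.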
void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#ifdef DEBUG
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


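// Drains the marking deque until roughly bytes_to_process bytes of object
// bodies have been visited. For large fixed arrays scanned via the progress
// bar, only the portion actually scanned is charged against the budget.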
void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_to_process -= (size - unscanned_bytes_of_large_object_);
  }
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty()) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


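// Finishes marking as quickly as possible by draining the marking deque
// completely, then blackens the caches that were only marked grey during
// incremental marking.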
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now.  This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context).  If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental, so
  // we set the should-hurry flag to indicate that there can't be much work
  // left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


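// Called on old-space allocation: starts incremental marking when it is
// worthwhile, otherwise performs a scaled-down marking step.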
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


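// Performs one increment of marking work. The budget is derived from the
// bytes allocated since the last step and from the number of write barrier
// invocations, both scaled by the adaptive marking_speed_. The heuristics
// below increase marking_speed_ when the marker appears to be falling
// behind the mutator or old space is running out.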
void IncrementalMarking::Step(intptr_t allocated_bytes,
                              CompletionAction action) {
  if (heap_->gc_state() != Heap::NOT_IN_GC ||
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  // The marking speed is driven either by the allocation rate or by the rate
  // at which we are having to check the color of objects in the write barrier.
  // It is possible for a tight non-allocating loop to run a lot of write
  // barriers before we get here and check them (marking can only take place on
  // allocation), so to reduce the lumpiness we don't use the write barriers
  // invoked since last step directly to determine the amount of work to do.
  intptr_t bytes_to_process =
      marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
  allocated_ = 0;
  write_barriers_invoked_since_last_step_ = 0;

  bytes_scanned_ += bytes_to_process;

  double start = 0;

  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
      FLAG_print_cumulative_gc_stat) {
    start = OS::TimeCurrentMillis();
  }

  if (state_ == SWEEPING) {
    if (heap_->EnsureSweepersProgressed(static_cast<int>(bytes_to_process))) {
      bytes_scanned_ = 0;
      StartMarking(PREVENT_COMPACTION);
    }
  } else if (state_ == MARKING) {
    ProcessMarkingDeque(bytes_to_process);
    if (marking_deque_.IsEmpty()) MarkingComplete(action);
  }

  steps_count_++;
  steps_count_since_last_gc_++;

  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
          old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice as fast as we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed,
              static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", marking_speed_);
      }
    }
  }

  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
      FLAG_print_cumulative_gc_stat) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
    heap_->AddMarkingTime(delta);
  }
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  steps_took_ = 0;
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

} }  // namespace v8::internal