// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/store-buffer.h"

#include <algorithm>

#include "src/counters.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/store-buffer-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {}


void StoreBuffer::SetUp() {
  // Allocate 3x the buffer size, so that we can start the new store buffer
  // aligned to 2x the size.  This lets us use a bit test to detect the end of
  // the area.
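  // Example (assuming a 64-bit build where kStoreBufferSize is 128 KB; the
  // actual constants live in store-buffer.h): we reserve 384 KB and round
  // start_ up to the next 256 KB boundary inside the reservation.  Every
  // slot address in [start_, limit_) then has kStoreBufferOverflowBit clear,
  // while limit_ itself has it set, so a single bit test on the top pointer
  // detects overflow.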
  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  // Reserve space for the larger old buffer.
  old_virtual_memory_ =
      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // We don't know the OS's allocation alignment, but it is certainly at
  // least 4K, so the reservation must be 0x1000-aligned.
  CHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  CHECK(kStoreBufferSize >= base::OS::CommitPageSize());
  // Initial size of the old buffer is as big as the buffer for new pointers.
  // This means even if we later fail to enlarge the old buffer due to OOM from
  // the OS, we will still be able to empty the new pointer buffer into the old
  // buffer.
  int initial_length = static_cast<int>(kStoreBufferSize / kPointerSize);
  CHECK(initial_length > 0);
  CHECK(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
                                   (old_limit_ - old_start_) * kPointerSize,
                                   false)) {
    V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }

  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  DCHECK(start_ <= vm_limit);
  DCHECK(limit_ <= vm_limit);
  USE(vm_limit);
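  // Sanity-check the overflow-bit invariant set up above: limit_ is the
  // first address with kStoreBufferOverflowBit set, while the last slot of
  // the buffer (limit_ - 1) still has it clear.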
  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                               kStoreBufferSize,
                               false)) {  // Not executable.
    V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }
  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
}


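// Handles overflow of the buffer of new pointers: compacts the buffer into
// the old buffer and bumps the overflow counter.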
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}


bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}


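// Ensures there is room for space_needed more entries in the old buffer,
// escalating through progressively more drastic measures: (1) commit more of
// the reserved old buffer, doubling it each time; (2) compact, then filter
// out entries on scan-on-scavenge pages; (3) exempt the most popular pages
// from the store buffer entirely (marking them scan-on-scavenge) at ever
// finer sampling rates until enough space is free.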
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    if (old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                    grow * kPointerSize, false)) {
      old_limit_ += grow;
    } else {
      break;
    }
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  DCHECK(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample the store buffer at progressively finer rates and exempt the
  // pages whose estimated density of new-space pointers exceeds the
  // threshold; e.g. the first pass samples 1 entry in 97 and filters out the
  // pages where we estimate that more than 1 in 8 pointers are to new space.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
        {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
        {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
        {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
        {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
        {1, 0}};
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store
    // buffer.
    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
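// Pages whose sampled entry count reaches the threshold are marked
// scan-on-scavenge, and their entries are then dropped from the buffer via
// Filter(MemoryChunk::SCAN_ON_SCAVENGE).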
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter >= threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
    heap_->isolate()->CountUsage(
        v8::Isolate::UseCounterFeature::kStoreBufferOverflow);
  }
  old_buffer_is_filtered_ = true;
}


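// Removes from the old buffer every entry whose containing chunk has the
// given flag set, compacting the survivors toward old_start_.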
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


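// Compacts the buffer and drops entries on scan-on-scavenge pages, since
// those pages will be scanned in full anyway.  Returns whether any such
// pages exist.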
bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


#ifdef VERIFY_HEAP
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
        Object* pointee = *slot;
        heap_->InNewSpace(pointee);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


void StoreBuffer::ProcessOldToNewSlot(Address slot_address,
                                      ObjectSlotCallback slot_callback) {
  Object** slot = reinterpret_cast<Object**>(slot_address);
  Object* object = *slot;

  // If the object is not in from space, it must be a duplicate store buffer
  // entry and the slot was already updated.
  if (heap_->InFromSpace(object)) {
    HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
    DCHECK(heap_object->IsHeapObject());
    slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
    object = *slot;
    // If the object was in from space before the callback and is in to
    // space afterwards, it survived the scavenge and is still live.
    // Unfortunately, we know nothing about the slot itself; it could be
    // inside a just-freed free space object.
    if (heap_->InToSpace(object)) {
      EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
    }
  }
}


void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback) {
  for (Address slot_address = start; slot_address < end;
       slot_address += kPointerSize) {
    ProcessOldToNewSlot(slot_address, slot_callback);
  }
}


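// Processes every slot recorded in the old buffer, resetting old_top_ first
// so that surviving slots are re-entered behind the cursor.  Entries must
// not be moved while we iterate, hence the scope below; each processed slot
// may re-add at most itself, which the DCHECK in the loop verifies.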
void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      ProcessOldToNewSlot(*current, slot_callback);
      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


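// Drops store buffer entries whose slot no longer holds a pointer to a live
// new-space object or whose slot lies inside a dead object, so that stale
// slots are not visited later.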
void StoreBuffer::ClearInvalidStoreBufferEntries() {
  Compact();
  Address* new_top = old_start_;
  for (Address* current = old_start_; current < old_top_; current++) {
    Address addr = *current;
    Object** slot = reinterpret_cast<Object**>(addr);
    Object* object = *slot;
    if (heap_->InNewSpace(object) && object->IsHeapObject()) {
      // Keep the entry only if the target object is black (live) and the
      // slot itself lies within a live object; if the target is not black,
      // the source slot must be part of a non-black (dead) object.
      HeapObject* heap_object = HeapObject::cast(object);
      if (Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
          heap_->mark_compact_collector()->IsSlotInLiveObject(addr)) {
        *new_top++ = addr;
      }
    }
  }
  old_top_ = new_top;
  ClearFilteringHashSets();

  // Clear the scan-on-scavenge flag of dead (white) large objects so we
  // don't scan them during scavenge.
  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    if (chunk->scan_on_scavenge() &&
        Marking::IsWhite(Marking::MarkBitFrom(object))) {
      chunk->set_scan_on_scavenge(false);
    }
  }
}


void StoreBuffer::VerifyValidStoreBufferEntries() {
  for (Address* current = old_start_; current < old_top_; current++) {
    Object** slot = reinterpret_cast<Object**>(*current);
    Object* object = *slot;
    CHECK(object->IsHeapObject());
    CHECK(heap_->InNewSpace(object));
    heap_->mark_compact_collector()->VerifyIsSlotInLiveObject(
        reinterpret_cast<Address>(slot), HeapObject::cast(object));
  }
}


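// Visits the pointer fields of an object and funnels each region through
// FindPointersToNewSpaceInRegion, which records or updates old-to-new
// pointers.  Code entry slots never point to new space and are skipped.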
class FindPointersToNewSpaceVisitor final : public ObjectVisitor {
 public:
  FindPointersToNewSpaceVisitor(StoreBuffer* store_buffer,
                                ObjectSlotCallback callback)
      : store_buffer_(store_buffer), callback_(callback) {}

  V8_INLINE void VisitPointers(Object** start, Object** end) override {
    store_buffer_->FindPointersToNewSpaceInRegion(
        reinterpret_cast<Address>(start), reinterpret_cast<Address>(end),
        callback_);
  }

  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}

 private:
  StoreBuffer* store_buffer_;
  ObjectSlotCallback callback_;
};


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  // We do not sort or remove duplicated entries from the store buffer
  // because we expect that the callback will rebuild the store buffer,
  // thus removing all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer.  We must scan them now.  As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer.  If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer.  If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    FindPointersToNewSpaceVisitor visitor(this, slot_callback);
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          DCHECK(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          if (owner == heap_->map_space()) {
            DCHECK(page->WasSwept());
            HeapObjectIterator iterator(page);
            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                 heap_object = iterator.Next()) {
              // We skip free space objects.
              if (!heap_object->IsFiller()) {
                DCHECK(heap_object->IsMap());
                FindPointersToNewSpaceInRegion(
                    heap_object->address() + Map::kPointerFieldsBeginOffset,
                    heap_object->address() + Map::kPointerFieldsEndOffset,
                    slot_callback);
              }
            }
          } else {
            if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
              // Aborted pages require iterating using mark bits because they
              // don't have an iterable object layout before sweeping (which
              // can only happen later). Note that we can never reach an
              // aborted page through the scavenger.
              DCHECK_EQ(heap_->gc_state(), Heap::MARK_COMPACT);
              heap_->mark_compact_collector()->VisitLiveObjectsBody(page,
                                                                    &visitor);
            } else {
              heap_->mark_compact_collector()
                  ->SweepOrWaitUntilSweepingCompleted(page);
              HeapObjectIterator iterator(page);
              for (HeapObject* heap_object = iterator.Next();
                   heap_object != nullptr; heap_object = iterator.Next()) {
                // We iterate over objects that contain new space pointers only.
                heap_object->IterateBody(&visitor);
              }
            }
          }
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}


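// Empties the buffer of new pointers into the old buffer.  Two small hash
// sets with different hash functions give cheap, lossy duplicate
// elimination: an entry already present in either set is skipped, and on a
// double collision we simply overwrite, so some duplicates survive.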
void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  DCHECK(top <= limit_);
  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
  EnsureSpace(top - start_);
  DCHECK(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates.  In the interest of speed this is a lossy operation.  Some
  // duplicates will remain.  We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    DCHECK(!heap_->code_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries.  This will
      // cause some duplicates to remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    DCHECK(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}


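// Reacts to the events emitted while scan-on-scavenge pages are rescanned:
// it snapshots the store buffer top at the start of each page and decides,
// once the page is done (or overflows), whether to keep the page's new
// entries in the buffer or to discard them and re-mark the page
// scan-on-scavenge.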
void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
  if (event == kStoreBufferStartScanningPagesEvent) {
    start_of_current_page_ = NULL;
    current_page_ = NULL;
  } else if (event == kStoreBufferScanningPageEvent) {
    if (current_page_ != NULL) {
      if (current_page_->scan_on_scavenge()) {
        // This page already overflowed the store buffer during this
        // iteration, so wipe out the entries that were added for it.
        store_buffer_->SetTop(start_of_current_page_);
      } else if (store_buffer_->Top() - start_of_current_page_ >=
                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page?  The heuristic
        // is that no page can take more than 1/5 the remaining slots in the
        // store buffer.
        current_page_->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      } else {
        // In this case the page we scanned took a reasonable number of slots
        // in the store buffer.  It has now been rehabilitated and is no
        // longer marked scan_on_scavenge.
        DCHECK(!current_page_->scan_on_scavenge());
      }
    }
    start_of_current_page_ = store_buffer_->Top();
    current_page_ = page;
  } else if (event == kStoreBufferFullEvent) {
    // The current page overflowed the store buffer again.  Wipe out its
    // entries in the store buffer and mark it scan-on-scavenge again.  This
    // may happen several times while scanning.
    if (current_page_ == NULL) {
      // The store buffer overflowed while scanning promoted objects.  These
      // are not in any particular page, though they are likely to be
      // clustered by the allocation routines.
      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
    } else {
      // The store buffer overflowed while scanning a particular old space
      // page for pointers to new space.
      DCHECK(current_page_ == page);
      DCHECK(page != NULL);
      current_page_->set_scan_on_scavenge(true);
      DCHECK(start_of_current_page_ != store_buffer_->Top());
      store_buffer_->SetTop(start_of_current_page_);
    }
  } else {
    UNREACHABLE();
  }
}

}  // namespace internal
}  // namespace v8