// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/counters.h"
#include "src/heap/store-buffer-inl.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {}


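// A sketch of the layout this sets up (assuming the constants declared in
// the header): the "hot" buffer written by generated code lives inside a
// reservation of three times its size so that start_ can be rounded up to
// a two-buffer-size boundary. With that alignment, limit_ has the
// kStoreBufferOverflowBit set while every slot below it does not, so the
// overflow check reduces to a single bit test on the top pointer (see the
// DCHECKs below). The larger "old" buffer is reserved in full but
// committed incrementally, starting with a single OS page and doubling in
// EnsureSpace().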
void StoreBuffer::SetUp() {
  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // We don't know the exact alignment guarantees of the OS allocator, but
  // it is certainly at least 4K, so the base address must be page-aligned.
  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length =
      static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
  DCHECK(initial_length > 0);
  DCHECK(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
                                    (old_limit_ - old_start_) * kPointerSize,
                                    false));

  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  DCHECK(start_ <= vm_limit);
  DCHECK(limit_ <= vm_limit);
  USE(vm_limit);
  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}


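// Slow path invoked when the store buffer top pointer reaches limit_:
// drain the hot buffer into the old buffer and count the overflow.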
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}


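// Note that this only removes *adjacent* duplicates, so full deduplication
// requires the buffer to be sorted first, as SortUniq() does.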
void StoreBuffer::Uniq() {
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  DCHECK(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}


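// Tries progressively more drastic measures until space_needed slots are
// available in the old buffer: (1) commit more of the reserved region,
// doubling each time; (2) compact the hot buffer and drop entries on pages
// already marked scan-on-scavenge; (3) mark the pages holding the most
// entries as scan-on-scavenge and filter their entries out, at
// increasingly permissive sampling thresholds. The final {1, 0} sample
// exempts every page, so the loop below cannot fail to make space.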
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize, false));
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  DCHECK(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
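  // For example, assuming 1 MB pages and 8-byte pointers, a page holds
  // 131072 slots. If more than 1 in 8 of those slots (16384) are in the
  // store buffer, sampling every 97th buffer entry yields about
  // 16384 / 97 ~= 168 hits on that page, which is exactly the threshold
  // computed by the {97, ...} row: ((131072 / 97) / 8) == 168.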
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
        {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
        {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
        {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
        {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
        {1, 0}};
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store buffer.
    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter >= threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}


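// Drops every entry whose page has the given flag set. In practice the
// flag is SCAN_ON_SCAVENGE: such pages are rescanned wholesale during a
// scavenge, so their individual slots need not be tracked here.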
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  std::sort(old_start_, old_top_);
  Uniq();

  old_buffer_is_sorted_ = true;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


#ifdef DEBUG
void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes things that no longer point to new space.
  EnsureSpace(kStoreBufferSize / 2);
}


static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
#endif


void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


#ifdef VERIFY_HEAP
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC, the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
        Object* object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        heap_->InNewSpace(object);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


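// Visits each slot in [start, end) that points into new space. The slot is
// re-read after the callback has run; if it still points into new space
// (i.e. the object was not promoted), the slot is re-recorded via
// EnterDirectlyIntoStoreBuffer.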
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback,
    bool clear_maps) {
  for (Address slot_address = start; slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    Object* object = reinterpret_cast<Object*>(
        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
    if (heap_->InNewSpace(object)) {
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
      DCHECK(heap_object->IsHeapObject());
      // The new space object was not promoted if it still contains a map
      // pointer. Clear the map field now lazily.
      if (clear_maps) ClearDeadObject(heap_object);
      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
      object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InNewSpace(object)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}


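// old_top_ is reset to old_start_ before iterating, so entries that still
// point into new space after the callback runs are re-added behind the
// read cursor. The DCHECK at the bottom of the loop verifies that each
// iteration appends at most one entry, so the write cursor can never
// overtake the read cursor.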
void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                               bool clear_maps) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        // The new space object was not promoted if it still contains a map
        // pointer. Clear the map field now lazily.
        if (clear_maps) ClearDeadObject(heap_object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        if (heap_->InNewSpace(object)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, false);
}


void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
    ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, true);
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
                                            bool clear_maps) {
  // We do not sort or remove duplicate entries from the store buffer because
  // we expect the callback to rebuild the store buffer, thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback, clear_maps);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer.  We must scan them now.  As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer.  If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer.  If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          DCHECK(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          if (owner == heap_->map_space()) {
            DCHECK(page->WasSwept());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                 heap_object = iterator.Next()) {
              // We skip free space objects.
              if (!heap_object->IsFiller()) {
                DCHECK(heap_object->IsMap());
                FindPointersToNewSpaceInRegion(
                    heap_object->address() + Map::kPointerFieldsBeginOffset,
                    heap_object->address() + Map::kPointerFieldsEndOffset,
                    slot_callback, clear_maps);
              }
            }
          } else {
            if (!page->SweepingCompleted()) {
              heap_->mark_compact_collector()->SweepInParallel(page, owner);
              if (!page->SweepingCompleted()) {
                // We were not able to sweep that page, i.e., a concurrent
                // sweeper thread currently owns this page.
                // TODO(hpayer): This may introduce a huge pause here. We
                // only care about finishing the sweeping of the
                // scan-on-scavenge page.
                heap_->mark_compact_collector()->EnsureSweepingCompleted();
              }
            }
            CHECK(page->owner() == heap_->old_pointer_space());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                 heap_object = iterator.Next()) {
              // We only iterate over objects that may contain new space
              // pointers.
              if (!heap_object->MayContainRawValues()) {
                FindPointersToNewSpaceInRegion(
                    heap_object->address() + HeapObject::kHeaderSize,
                    heap_object->address() + heap_object->Size(), slot_callback,
                    clear_maps);
              }
            }
          }
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}


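// Drains the hot buffer (start_ to the current top) into the old buffer,
// using two small hash sets with different hash functions as a lossy
// duplicate filter: an address found in either set is skipped, and on a
// double collision an older entry is simply evicted, trading missed
// duplicates for speed.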
void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  DCHECK(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  DCHECK(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates.  In the interest of speed this is a lossy operation.  Some
  // duplicates will remain.  We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    DCHECK(!heap_->cell_space()->Contains(*current));
    DCHECK(!heap_->code_space()->Contains(*current));
    DCHECK(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries.  This will
      // cause some duplicates to remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    DCHECK(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}
}  // namespace internal
}  // namespace v8