// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/store-buffer.h"

#include <algorithm>

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/counters.h"
#include "src/store-buffer-inl.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {
}


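// Reserves and commits the memory for both buffers.  The reservation of
// 3 * kStoreBufferSize leaves room to align the new-space buffer to twice its
// size, so that the limit address has kStoreBufferOverflowBit set while
// in-buffer addresses do not (see the ASSERTs below).  The old (promoted)
// buffer starts with a single committed OS page and grows on demand in
// EnsureSpace.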
void StoreBuffer::SetUp() {
  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // Don't know the alignment requirements of the OS, but it is certainly not
  // less than 0xfff.
  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
  ASSERT(initial_length > 0);
  ASSERT(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  CHECK(old_virtual_memory_->Commit(
            reinterpret_cast<void*>(old_start_),
            (old_limit_ - old_start_) * kPointerSize,
            false));

  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
          virtual_memory_->size());
  ASSERT(start_ <= vm_limit);
  ASSERT(limit_ <= vm_limit);
  USE(vm_limit);
  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


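// Frees both buffer reservations and the filtering hash sets, and clears the
// heap's store buffer top pointer.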
void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}


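// Handles an overflow of the new-space store buffer: folds its contents into
// the old buffer via Compact and bumps the overflow counter.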
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}


void StoreBuffer::Uniq() {
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  ASSERT(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}


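// Guarantees room for at least |space_needed| more entries in the old buffer:
// first commits more of the reserved region (doubling the committed size),
// then compacts and filters out entries on scan-on-scavenge pages, and
// finally exempts the most popular pages at progressively coarser sampling
// rates, which is guaranteed to free enough space.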
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize,
                                      false));
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  ASSERT(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] =  {
    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
    { 1, 0}
  };
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store buffer.
    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter >= threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}


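// Drops every entry whose containing chunk has |flag| set.  Within this file
// the flag is always MemoryChunk::SCAN_ON_SCAVENGE: such pages are rescanned
// wholesale at scavenge time, so their individual entries are redundant.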
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


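// Compacts the new-space buffer into the old buffer, then sorts the old
// buffer and removes duplicates and entries that no longer point into new
// space.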
void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  std::sort(old_start_, old_top_);
  Uniq();

  old_buffer_is_sorted_ = true;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


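// Compacts the buffer and drops entries on scan-on-scavenge pages.  Returns
// true if any such pages exist, in which case the caller must scan those
// pages directly as well.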
bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


#ifdef DEBUG
void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes things that no longer point to new space.
  EnsureSpace(kStoreBufferSize / 2);
}


static Address* in_store_buffer_1_element_cache = NULL;


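// Debug-only linear search of both buffers for |cell_address|, caching the
// last hit.  Always answers true when slow asserts are disabled, so the
// calling assert passes trivially.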
bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
#endif


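// Zeroes the two hash sets that Compact uses to skip duplicates.  Called
// whenever the hash sets can no longer be trusted to mirror the old buffer
// (e.g. after filtering, sorting, or at the start of a GC).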
void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


#ifdef VERIFY_HEAP
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
        Object* object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        heap_->InNewSpace(object);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


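// Visits every slot in [start, end) that points into new space and hands it
// to |slot_callback|.  If the slot still points into new space afterwards
// (i.e. the object was not promoted), it is re-recorded directly in the old
// buffer.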
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  for (Address slot_address = start;
       slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    Object* object = reinterpret_cast<Object*>(
        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
    if (heap_->InNewSpace(object)) {
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
      ASSERT(heap_object->IsHeapObject());
      // The new space object was not promoted if it still contains a map
      // pointer. Clear the map field now lazily.
      if (clear_maps) ClearDeadObject(heap_object);
      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
      object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InNewSpace(object)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}


// Compute the start address of the first map following the given addr.
static inline Address MapStartAlign(Address addr) {
  Address page = Page::FromAddress(addr)->area_start();
  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}


// Compute the end address of the first map preceding the given addr.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->area_start();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}


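// Walks the Map objects laid out back to back in [start, end), which must be
// Map-size aligned, and scans only their pointer fields for new-space
// references.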
void StoreBuffer::FindPointersToNewSpaceInMaps(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  while (map_address < end) {
    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
    ASSERT(Memory::Object_at(map_address)->IsMap());

    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

    FindPointersToNewSpaceInRegion(pointer_fields_start,
                                   pointer_fields_end,
                                   slot_callback,
                                   clear_maps);
    map_address += Map::kSize;
  }
}


void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end   = MapEndAlign(end);

  ASSERT(map_aligned_start == start);
  ASSERT(map_aligned_start <= map_aligned_end && map_aligned_end <= end);

  FindPointersToNewSpaceInMaps(map_aligned_start,
                               map_aligned_end,
                               slot_callback,
                               clear_maps);
}


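// Processes every entry currently in the old buffer.  The buffer is logically
// emptied first so that slots which still point into new space after the
// callback can be re-entered; the DEBUG assert below checks that each entry
// re-enters at most one slot.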
void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        // The new space object was not promoted if it still contains a map
        // pointer. Clear the map field now lazily.
        if (clear_maps) ClearDeadObject(heap_object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        if (heap_->InNewSpace(object)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, false);
}


void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
    ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, true);
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
                                            bool clear_maps) {
  // We do not sort or remove duplicated entries from the store buffer because
  // we expect that the callback will rebuild the store buffer, thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from the slot address
  // because the slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback, clear_maps);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer.  We must scan them now.  As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer.  If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer.  If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          ASSERT(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          Address start = page->area_start();
          Address end = page->area_end();
          if (owner == heap_->map_space()) {
            FindPointersToNewSpaceInMapsRegion(
                start, end, slot_callback, clear_maps);
          } else {
            FindPointersToNewSpaceInRegion(
                start, end, slot_callback, clear_maps);
          }
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}


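// Empties the new-space store buffer into the old buffer, skipping (most)
// duplicates via the two filtering hash sets, and resets the new-space
// buffer's top pointer.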
void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  ASSERT(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  ASSERT(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates.  In the interest of speed this is a lossy operation.  Some
  // duplicates will remain.  We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    ASSERT(!heap_->cell_space()->Contains(*current));
    ASSERT(!heap_->code_space()->Contains(*current));
    ASSERT(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries.  This will
      // cause some duplicates to remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    ASSERT(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}

} }  // namespace v8::internal