// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "store-buffer.h"

#include <algorithm>

#include "v8.h"
#include "store-buffer-inl.h"
#include "v8-counters.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {
}


void StoreBuffer::SetUp() {
  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // Don't know the alignment requirements of the OS, but it is certainly not
  // less than 0xfff.
  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
  ASSERT(initial_length > 0);
  ASSERT(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  CHECK(old_virtual_memory_->Commit(
            reinterpret_cast<void*>(old_start_),
            (old_limit_ - old_start_) * kPointerSize,
            false));

  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  ASSERT(start_ <= vm_limit);
  ASSERT(limit_ <= vm_limit);
  USE(vm_limit);
  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}


void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}


void StoreBuffer::Uniq() {
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  ASSERT(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}


void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
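    // Commit the next chunk of the reserved region so the old buffer doubles
    // in committed size on each iteration.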
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize,
                                      false));
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  ASSERT(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
    { 1, 0}
  };
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store
    // buffer.
    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter >= threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}


void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  std::sort(old_start_, old_top_);
  Uniq();

  old_buffer_is_sorted_ = true;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


#ifdef DEBUG
void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes things that no longer point to new space.
  EnsureSpace(kStoreBufferSize / 2);
}


static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
#endif


void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


#ifdef VERIFY_HEAP
static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
  // Do nothing.
}


void StoreBuffer::VerifyPointers(PagedSpace* space,
                                 RegionCallback region_callback) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* page = it.next();
    FindPointersToNewSpaceOnPage(
        reinterpret_cast<PagedSpace*>(page->owner()),
        page,
        region_callback,
        &DummyScavengePointer,
        false);
  }
}


void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
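        // The result is deliberately ignored; the call is made only for the
        // check described above.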
        heap_->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
  VerifyPointers(heap_->old_pointer_space(),
                 &StoreBuffer::FindPointersToNewSpaceInRegion);
  VerifyPointers(heap_->map_space(),
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  for (Address slot_address = start;
       slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap_->InNewSpace(*slot)) {
      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
      ASSERT(object->IsHeapObject());
      // The new space object was not promoted if it still contains a map
      // pointer. Clear the map field now lazily.
      if (clear_maps) ClearDeadObject(object);
      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
      if (heap_->InNewSpace(*slot)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}


// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
  Address page = Page::FromAddress(addr)->area_start();
  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}


// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->area_start();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}


void StoreBuffer::FindPointersToNewSpaceInMaps(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  while (map_address < end) {
    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
    ASSERT(Memory::Object_at(map_address)->IsMap());

    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

    FindPointersToNewSpaceInRegion(pointer_fields_start,
                                   pointer_fields_end,
                                   slot_callback,
                                   clear_maps);
    map_address += Map::kSize;
  }
}


void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  ASSERT(map_aligned_start == start);
  ASSERT(map_aligned_end == end);

  FindPointersToNewSpaceInMaps(map_aligned_start,
                               map_aligned_end,
                               slot_callback,
                               clear_maps);
}


// This function iterates over all the pointers in a paged space in the heap,
// looking for pointers into new space.  Within the pages there may be dead
// objects that have not been overwritten by free spaces or fillers because of
// lazy sweeping.  These dead objects may not contain pointers to new space.
// The garbage areas that have been swept properly (these will normally be the
// large ones) will be marked with free space and filler map words.  In
// addition any area that has never been used at all for object allocation must
// be marked with a free space or filler.  Because the free space and filler
// maps do not move we can always recognize these even after a compaction.
// Normal objects like FixedArrays and JSObjects should not contain references
// to these maps.  The special garbage section (see comment in spaces.h) is
// skipped since it can contain absolutely anything.  Any objects that are
// allocated during iteration may or may not be visited by the iteration, but
// they will not be partially visited.
void StoreBuffer::FindPointersToNewSpaceOnPage(
    PagedSpace* space,
    Page* page,
    RegionCallback region_callback,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address visitable_start = page->area_start();
  Address end_of_page = page->area_end();

  Address visitable_end = visitable_start;

  Object* free_space_map = heap_->free_space_map();
  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();

  while (visitable_end < end_of_page) {
    Object* o = *reinterpret_cast<Object**>(visitable_end);
    // Skip fillers but not things that look like fillers in the special
    // garbage section which can contain anything.
    if (o == free_space_map ||
        o == two_pointer_filler_map ||
        (visitable_end == space->top() && visitable_end != space->limit())) {
      if (visitable_start != visitable_end) {
        // After calling this the special garbage section may have moved.
        (this->*region_callback)(visitable_start,
                                 visitable_end,
                                 slot_callback,
                                 clear_maps);
        if (visitable_end >= space->top() && visitable_end < space->limit()) {
          visitable_end = space->limit();
          visitable_start = visitable_end;
          continue;
        }
      }
      if (visitable_end == space->top() && visitable_end != space->limit()) {
        visitable_start = visitable_end = space->limit();
      } else {
        // At this point we are either at the start of a filler or we are at
        // the point where the space->top() used to be before the
        // visit_pointer_region call above.  Either way we can skip the
        // object at the current spot:  We don't promise to visit objects
        // allocated during heap traversal, and if space->top() moved then it
        // must be because an object was allocated at this point.
        visitable_start =
            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
        visitable_end = visitable_start;
      }
    } else {
      ASSERT(o != free_space_map);
      ASSERT(o != two_pointer_filler_map);
      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
      visitable_end += kPointerSize;
    }
  }
  ASSERT(visitable_end == end_of_page);
  if (visitable_start != visitable_end) {
    (this->*region_callback)(visitable_start,
                             visitable_end,
                             slot_callback,
                             clear_maps);
  }
}


void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = *slot;
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        // The new space object was not promoted if it still contains a map
        // pointer. Clear the map field now lazily.
        if (clear_maps) ClearDeadObject(heap_object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        if (heap_->InNewSpace(*slot)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, false);
}


void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
    ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, true);
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
                                            bool clear_maps) {
  // We do not sort or remove duplicated entries from the store buffer because
  // we expect that the callback will rebuild the store buffer, thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback, clear_maps);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer.  We must scan them now.  As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer.  If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer.  If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
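  // Each flagged page has its scan_on_scavenge flag cleared before it is
  // scanned; the scan re-enters surviving new-space pointers into the store
  // buffer via the Find* helpers below.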
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          ASSERT(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          FindPointersToNewSpaceOnPage(
              owner,
              page,
              (owner == heap_->map_space() ?
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
                 &StoreBuffer::FindPointersToNewSpaceInRegion),
              slot_callback,
              clear_maps);
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}


void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  ASSERT(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  ASSERT(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates.  In the interest of speed this is a lossy operation.  Some
  // duplicates will remain.  We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    ASSERT(!heap_->cell_space()->Contains(*current));
    ASSERT(!heap_->code_space()->Contains(*current));
    ASSERT(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries.  This will
      // cause some duplicates to remain undetected.
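      // Evict by overwriting the primary slot and vacating the secondary one.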
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    ASSERT(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}

} }  // namespace v8::internal