// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "store-buffer.h"

#include <algorithm>

#include "v8.h"
#include "store-buffer-inl.h"
#include "v8-counters.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {
}


void StoreBuffer::SetUp() {
  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // Don't know the alignment requirements of the OS, but it is certainly not
  // less than 0xfff.
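  // (0xfff masks the low 12 bits, i.e. alignment to the common 4 KB page
  // size; any coarser OS alignment passes the check below as well.)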
  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
  ASSERT(initial_length > 0);
  ASSERT(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  CHECK(old_virtual_memory_->Commit(
            reinterpret_cast<void*>(old_start_),
            (old_limit_ - old_start_) * kPointerSize,
            false));

  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  ASSERT(start_ <= vm_limit);
  ASSERT(limit_ <= vm_limit);
  USE(vm_limit);
  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}


void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}


void StoreBuffer::Uniq() {
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  ASSERT(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}


void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize,
                                      false));
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  ASSERT(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
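  // Worked example for the first row below, assuming for illustration a 1 MB
  // page and 8-byte pointers: the page covers 131072 slots, a step of 97
  // samples roughly 131072 / 97 ~= 1351 of them, so a threshold of
  // 1351 / 8 ~= 168 sampled hits estimates that more than 1 in 8 of the
  // page's slots point to new space.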
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
    { 1, 0}
  };
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store
    // buffer.
    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter == threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}


void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  std::sort(old_start_, old_top_);
  Uniq();

  old_buffer_is_sorted_ = true;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
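  // (Pointers that survive iteration are re-entered with
  // EnterDirectlyIntoStoreBuffer, which bypasses the hash sets, so the sets
  // would go stale in any case.)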
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


#ifdef DEBUG
void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes things that no longer point to new space.
  EnsureSpace(kStoreBufferSize / 2);
}


static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
#endif


void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


#ifdef VERIFY_HEAP
static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
  // Do nothing.
}


void StoreBuffer::VerifyPointers(PagedSpace* space,
                                 RegionCallback region_callback) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* page = it.next();
    FindPointersToNewSpaceOnPage(
        reinterpret_cast<PagedSpace*>(page->owner()),
        page,
        region_callback,
        &DummyScavengePointer,
        false);
  }
}


void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
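        // The result is intentionally discarded; the call is made for the
        // asserts it performs internally in debug builds.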
        heap_->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
  VerifyPointers(heap_->old_pointer_space(),
                 &StoreBuffer::FindPointersToNewSpaceInRegion);
  VerifyPointers(heap_->map_space(),
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  for (Address slot_address = start;
       slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap_->InNewSpace(*slot)) {
      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
      ASSERT(object->IsHeapObject());
      // The new space object was not promoted if it still contains a map
      // pointer. Clear the map field now lazily.
      if (clear_maps) ClearDeadObject(object);
      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
      if (heap_->InNewSpace(*slot)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}


// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
  Address page = Page::FromAddress(addr)->area_start();
  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}


// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->area_start();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}


void StoreBuffer::FindPointersToNewSpaceInMaps(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  while (map_address < end) {
    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
    ASSERT(Memory::Object_at(map_address)->IsMap());

    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

    FindPointersToNewSpaceInRegion(pointer_fields_start,
                                   pointer_fields_end,
                                   slot_callback,
                                   clear_maps);
    map_address += Map::kSize;
  }
}


void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  ASSERT(map_aligned_start == start);
  ASSERT(map_aligned_end == end);

  FindPointersToNewSpaceInMaps(map_aligned_start,
                               map_aligned_end,
                               slot_callback,
                               clear_maps);
}


// This function iterates over all the pointers in a paged space in the heap,
// looking for pointers into new space.  Within the pages there may be dead
// objects that have not been overwritten by free spaces or fillers because of
// lazy sweeping.  These dead objects may not contain pointers to new space.
// The garbage areas that have been swept properly (these will normally be the
// large ones) will be marked with free space and filler map words.  In
// addition any area that has never been used at all for object allocation must
// be marked with a free space or filler.  Because the free space and filler
// maps do not move we can always recognize these even after a compaction.
// Normal objects like FixedArrays and JSObjects should not contain references
// to these maps.  The special garbage section (see comment in spaces.h) is
// skipped since it can contain absolutely anything.  Any objects that are
// allocated during iteration may or may not be visited by the iteration, but
// they will not be partially visited.
void StoreBuffer::FindPointersToNewSpaceOnPage(
    PagedSpace* space,
    Page* page,
    RegionCallback region_callback,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address visitable_start = page->area_start();
  Address end_of_page = page->area_end();

  Address visitable_end = visitable_start;

  Object* free_space_map = heap_->free_space_map();
  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();

  while (visitable_end < end_of_page) {
    Object* o = *reinterpret_cast<Object**>(visitable_end);
    // Skip fillers but not things that look like fillers in the special
    // garbage section which can contain anything.
    if (o == free_space_map ||
        o == two_pointer_filler_map ||
        (visitable_end == space->top() && visitable_end != space->limit())) {
      if (visitable_start != visitable_end) {
        // After calling this the special garbage section may have moved.
        (this->*region_callback)(visitable_start,
                                 visitable_end,
                                 slot_callback,
                                 clear_maps);
        if (visitable_end >= space->top() && visitable_end < space->limit()) {
          visitable_end = space->limit();
          visitable_start = visitable_end;
          continue;
        }
      }
      if (visitable_end == space->top() && visitable_end != space->limit()) {
        visitable_start = visitable_end = space->limit();
      } else {
        // At this point we are either at the start of a filler or we are at
        // the point where the space->top() used to be before the
        // visit_pointer_region call above.  Either way we can skip the
        // object at the current spot:  We don't promise to visit objects
        // allocated during heap traversal, and if space->top() moved then it
        // must be because an object was allocated at this point.
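        // Reading the size via HeapObject::FromAddress() is safe here: the
        // spot holds either a filler/free-space object or a freshly
        // allocated object, both of which carry a valid map word.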
        visitable_start =
            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
        visitable_end = visitable_start;
      }
    } else {
      ASSERT(o != free_space_map);
      ASSERT(o != two_pointer_filler_map);
      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
      visitable_end += kPointerSize;
    }
  }
  ASSERT(visitable_end == end_of_page);
  if (visitable_start != visitable_end) {
    (this->*region_callback)(visitable_start,
                             visitable_end,
                             slot_callback,
                             clear_maps);
  }
}


void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = *slot;
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        // The new space object was not promoted if it still contains a map
        // pointer. Clear the map field now lazily.
        if (clear_maps) ClearDeadObject(heap_object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        if (heap_->InNewSpace(*slot)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, false);
}


void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
    ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, true);
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
                                            bool clear_maps) {
  // We do not sort or remove duplicated entries from the store buffer because
  // we expect that the callback will rebuild the store buffer thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback, clear_maps);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer.  We must scan them now.  As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer.  If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer.  If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
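  // That policy is not decided here: the scan below clears the flag and
  // re-enters surviving pointers, while the registered callback_ (notified
  // per page via the events below) may re-mark a page scan_on_scavenge and
  // drop its freshly added entries if too many accumulate.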
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          ASSERT(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          FindPointersToNewSpaceOnPage(
              owner,
              page,
              (owner == heap_->map_space() ?
                  &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
                  &StoreBuffer::FindPointersToNewSpaceInRegion),
              slot_callback,
              clear_maps);
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}


void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  ASSERT(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  ASSERT(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates.  In the interest of speed this is a lossy operation.  Some
  // duplicates will remain.  We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    ASSERT(!heap_->cell_space()->Contains(*current));
    ASSERT(!heap_->code_space()->Contains(*current));
    ASSERT(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries.  This will
      // cause some duplicates to remain undetected.
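      // Evict: take over the first-table slot with the new address and
      // vacate the second-table slot, so the most recent address is still
      // filtered at the cost of forgetting older ones.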
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    ASSERT(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}

} }  // namespace v8::internal