// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "natives.h"
#include "platform.h"
#include "runtime.h"
#include "serialize.h"
#include "stub-cache.h"
#include "v8threads.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Coding of external references.

// The encoding of an external reference. The type is in the high word.
// The id is in the low word.
static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
  return static_cast<uint32_t>(type) << 16 | id;
}
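
// A worked example of the encoding: for a type code of 2 and an id of 5 the
// result is (2 << 16) | 5 == 0x00020005, so the type can be recovered as
// code >> 16 and the id as code & 0xFFFF.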

static int* GetInternalPointer(StatsCounter* counter) {
  // All counters refer to dummy_counter if deserialization happens without
  // the counters having been set up.
  static int dummy_counter = 0;
  return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
}


ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
  ExternalReferenceTable* external_reference_table =
      isolate->external_reference_table();
  if (external_reference_table == NULL) {
    external_reference_table = new ExternalReferenceTable(isolate);
    isolate->set_external_reference_table(external_reference_table);
  }
  return external_reference_table;
}


void ExternalReferenceTable::AddFromId(TypeCode type,
                                       uint16_t id,
                                       const char* name,
                                       Isolate* isolate) {
  Address address;
  switch (type) {
    case C_BUILTIN: {
      ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate);
      address = ref.address();
      break;
    }
    case BUILTIN: {
      ExternalReference ref(static_cast<Builtins::Name>(id), isolate);
      address = ref.address();
      break;
    }
    case RUNTIME_FUNCTION: {
      ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate);
      address = ref.address();
      break;
    }
    case IC_UTILITY: {
      ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)),
                            isolate);
      address = ref.address();
      break;
    }
    default:
      UNREACHABLE();
      return;
  }
  Add(address, type, id, name);
}


void ExternalReferenceTable::Add(Address address,
                                 TypeCode type,
                                 uint16_t id,
                                 const char* name) {
  ASSERT_NE(NULL, address);
  ExternalReferenceEntry entry;
  entry.address = address;
  entry.code = EncodeExternal(type, id);
  entry.name = name;
  ASSERT_NE(0, entry.code);
  refs_.Add(entry);
  if (id > max_id_[type]) max_id_[type] = id;
}


void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
    max_id_[type_code] = 0;
  }

  // The following populates all of the different types of external references
  // into the ExternalReferenceTable.
  //
  // NOTE: This function was originally 100k of code. It has since been
  // rewritten to be mostly table driven, as the callback macro style tends to
  // very easily cause code bloat. Please be careful in the future when adding
  // new references.

  struct RefTableEntry {
    TypeCode type;
    uint16_t id;
    const char* name;
  };

  static const RefTableEntry ref_table[] = {
  // Builtins
#define DEF_ENTRY_C(name, ignored) \
  { C_BUILTIN, \
    Builtins::c_##name, \
    "Builtins::" #name },

  BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C

#define DEF_ENTRY_C(name, ignored) \
  { BUILTIN, \
    Builtins::k##name, \
    "Builtins::" #name },
#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored)

  BUILTIN_LIST_C(DEF_ENTRY_C)
  BUILTIN_LIST_A(DEF_ENTRY_A)
  BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A

  // Runtime functions
#define RUNTIME_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::k##name, \
    "Runtime::" #name },

  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY

  // IC utilities
#define IC_ENTRY(name) \
  { IC_UTILITY, \
    IC::k##name, \
    "IC::" #name },

  IC_UTIL_LIST(IC_ENTRY)
#undef IC_ENTRY
  };  // end of ref_table[].

  for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
    AddFromId(ref_table[i].type,
              ref_table[i].id,
              ref_table[i].name,
              isolate);
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Debug addresses
  Add(Debug_Address(Debug::k_after_break_target_address).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_after_break_target_address << kDebugIdShift,
      "Debug::after_break_target_address()");
  Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_debug_break_slot_address << kDebugIdShift,
      "Debug::debug_break_slot_address()");
  Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_debug_break_return_address << kDebugIdShift,
      "Debug::debug_break_return_address()");
  Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_restarter_frame_function_pointer << kDebugIdShift,
      "Debug::restarter_frame_function_pointer_address()");
#endif

  // Stat counters
  struct StatsRefTableEntry {
    StatsCounter* (Counters::*counter)();
    uint16_t id;
    const char* name;
  };

  const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
  { &Counters::name, \
    Counters::k_##name, \
    "Counters::" #name },

  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
#undef COUNTER_ENTRY
  };  // end of stats_ref_table[].

  Counters* counters = isolate->counters();
  for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
    Add(reinterpret_cast<Address>(GetInternalPointer(
            (counters->*(stats_ref_table[i].counter))())),
        STATS_COUNTER,
        stats_ref_table[i].id,
        stats_ref_table[i].name);
  }

  // Top addresses

  const char* AddressNames[] = {
#define BUILD_NAME_LITERAL(CamelName, hacker_name) \
    "Isolate::" #hacker_name "_address",
    FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL)
    NULL
#undef BUILD_NAME_LITERAL
  };

  for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) {
    Add(isolate->get_address_from_id((Isolate::AddressId)i),
        TOP_ADDRESS, i, AddressNames[i]);
  }

  // Accessors
#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
  Add((Address)&Accessors::name, \
      ACCESSOR, \
      Accessors::k##name, \
      "Accessors::" #name);

  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
#undef ACCESSOR_DESCRIPTOR_DECLARATION

  StubCache* stub_cache = isolate->stub_cache();

  // Stub cache tables
  Add(stub_cache->key_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      1,
      "StubCache::primary_->key");
  Add(stub_cache->value_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      2,
      "StubCache::primary_->value");
  Add(stub_cache->map_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      3,
      "StubCache::primary_->map");
  Add(stub_cache->key_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      4,
      "StubCache::secondary_->key");
  Add(stub_cache->value_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      5,
      "StubCache::secondary_->value");
  Add(stub_cache->map_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      6,
      "StubCache::secondary_->map");

  // Runtime entries
  Add(ExternalReference::perform_gc_function(isolate).address(),
      RUNTIME_ENTRY,
      1,
      "Runtime::PerformGC");
  Add(ExternalReference::fill_heap_number_with_random_function(
          isolate).address(),
      RUNTIME_ENTRY,
      2,
      "V8::FillHeapNumberWithRandom");
  Add(ExternalReference::random_uint32_function(isolate).address(),
      RUNTIME_ENTRY,
      3,
      "V8::Random");
  Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
      RUNTIME_ENTRY,
      4,
      "HandleScope::DeleteExtensions");
  Add(ExternalReference::
          incremental_marking_record_write_function(isolate).address(),
      RUNTIME_ENTRY,
      5,
      "IncrementalMarking::RecordWrite");
  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
      RUNTIME_ENTRY,
      6,
      "StoreBuffer::StoreBufferOverflow");
  Add(ExternalReference::
          incremental_evacuation_record_write_function(isolate).address(),
      RUNTIME_ENTRY,
      7,
      "IncrementalMarking::RecordWrite");

  // Miscellaneous
  Add(ExternalReference::roots_array_start(isolate).address(),
      UNCLASSIFIED,
      3,
      "Heap::roots_array_start()");
  Add(ExternalReference::address_of_stack_limit(isolate).address(),
      UNCLASSIFIED,
      4,
      "StackGuard::address_of_jslimit()");
  Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
      UNCLASSIFIED,
      5,
      "StackGuard::address_of_real_jslimit()");
#ifndef V8_INTERPRETED_REGEXP
  Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
      UNCLASSIFIED,
      6,
      "RegExpStack::limit_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_address(
          isolate).address(),
      UNCLASSIFIED,
      7,
      "RegExpStack::memory_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
      UNCLASSIFIED,
      8,
      "RegExpStack::memory_size()");
  Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
      UNCLASSIFIED,
      9,
      "OffsetsVector::static_offsets_vector");
#endif  // V8_INTERPRETED_REGEXP
  Add(ExternalReference::new_space_start(isolate).address(),
      UNCLASSIFIED,
      10,
      "Heap::NewSpaceStart()");
  Add(ExternalReference::new_space_mask(isolate).address(),
      UNCLASSIFIED,
      11,
      "Heap::NewSpaceMask()");
  Add(ExternalReference::heap_always_allocate_scope_depth(isolate).address(),
      UNCLASSIFIED,
      12,
      "Heap::always_allocate_scope_depth()");
  Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
      UNCLASSIFIED,
      14,
      "Heap::NewSpaceAllocationLimitAddress()");
  Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
      UNCLASSIFIED,
      15,
      "Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
  Add(ExternalReference::debug_break(isolate).address(),
      UNCLASSIFIED,
      16,
      "Debug::Break()");
  Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
      UNCLASSIFIED,
      17,
      "Debug::step_in_fp_addr()");
#endif
  Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
      UNCLASSIFIED,
      18,
      "add_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
      UNCLASSIFIED,
      19,
      "sub_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
      UNCLASSIFIED,
      20,
      "mul_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
      UNCLASSIFIED,
      21,
      "div_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
      UNCLASSIFIED,
      22,
      "mod_two_doubles");
  Add(ExternalReference::compare_doubles(isolate).address(),
      UNCLASSIFIED,
      23,
      "compare_doubles");
#ifndef V8_INTERPRETED_REGEXP
  Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
      UNCLASSIFIED,
      24,
      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
  Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
      UNCLASSIFIED,
      25,
      "RegExpMacroAssembler*::CheckStackGuardState()");
  Add(ExternalReference::re_grow_stack(isolate).address(),
      UNCLASSIFIED,
      26,
      "NativeRegExpMacroAssembler::GrowStack()");
  Add(ExternalReference::re_word_character_map().address(),
      UNCLASSIFIED,
      27,
      "NativeRegExpMacroAssembler::word_character_map");
#endif  // V8_INTERPRETED_REGEXP
  // Keyed lookup cache.
  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
      UNCLASSIFIED,
      28,
      "KeyedLookupCache::keys()");
  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
      UNCLASSIFIED,
      29,
      "KeyedLookupCache::field_offsets()");
  Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
      UNCLASSIFIED,
      30,
      "TranscendentalCache::caches()");
  Add(ExternalReference::handle_scope_next_address().address(),
      UNCLASSIFIED,
      31,
      "HandleScope::next");
  Add(ExternalReference::handle_scope_limit_address().address(),
      UNCLASSIFIED,
      32,
      "HandleScope::limit");
  Add(ExternalReference::handle_scope_level_address().address(),
      UNCLASSIFIED,
      33,
      "HandleScope::level");
  Add(ExternalReference::new_deoptimizer_function(isolate).address(),
      UNCLASSIFIED,
      34,
      "Deoptimizer::New()");
  Add(ExternalReference::compute_output_frames_function(isolate).address(),
      UNCLASSIFIED,
      35,
      "Deoptimizer::ComputeOutputFrames()");
  Add(ExternalReference::address_of_min_int().address(),
      UNCLASSIFIED,
      36,
      "LDoubleConstant::min_int");
  Add(ExternalReference::address_of_one_half().address(),
      UNCLASSIFIED,
      37,
      "LDoubleConstant::one_half");
  Add(ExternalReference::isolate_address().address(),
      UNCLASSIFIED,
      38,
      "isolate");
  Add(ExternalReference::address_of_minus_zero().address(),
      UNCLASSIFIED,
      39,
      "LDoubleConstant::minus_zero");
  Add(ExternalReference::address_of_negative_infinity().address(),
      UNCLASSIFIED,
      40,
      "LDoubleConstant::negative_infinity");
  Add(ExternalReference::power_double_double_function(isolate).address(),
      UNCLASSIFIED,
      41,
      "power_double_double_function");
  Add(ExternalReference::power_double_int_function(isolate).address(),
      UNCLASSIFIED,
      42,
      "power_double_int_function");
  Add(ExternalReference::store_buffer_top(isolate).address(),
      UNCLASSIFIED,
      43,
      "store_buffer_top");
  Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
      UNCLASSIFIED,
      44,
      "canonical_nan");
  Add(ExternalReference::address_of_the_hole_nan().address(),
      UNCLASSIFIED,
      45,
      "the_hole_nan");
  Add(ExternalReference::get_date_field_function(isolate).address(),
      UNCLASSIFIED,
      46,
      "JSDate::GetField");
  Add(ExternalReference::date_cache_stamp(isolate).address(),
      UNCLASSIFIED,
      47,
      "date_cache_stamp");
}


ExternalReferenceEncoder::ExternalReferenceEncoder()
    : encodings_(Match),
      isolate_(Isolate::Current()) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance(isolate_);
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->address(i), i);
  }
}


uint32_t ExternalReferenceEncoder::Encode(Address key) const {
  int index = IndexOf(key);
  ASSERT(key == NULL || index >= 0);
  return index >= 0 ?
      ExternalReferenceTable::instance(isolate_)->code(index) : 0;
}


const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
  int index = IndexOf(key);
  return index >= 0 ?
      ExternalReferenceTable::instance(isolate_)->name(index) : NULL;
}


int ExternalReferenceEncoder::IndexOf(Address key) const {
  if (key == NULL) return -1;
  HashMap::Entry* entry =
      const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
  return entry == NULL
      ? -1
      : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
}


void ExternalReferenceEncoder::Put(Address key, int index) {
  HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
  entry->value = reinterpret_cast<void*>(index);
}


ExternalReferenceDecoder::ExternalReferenceDecoder()
    : encodings_(NewArray<Address*>(kTypeCodeCount)),
      isolate_(Isolate::Current()) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance(isolate_);
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    int max = external_references->max_id(type) + 1;
    encodings_[type] = NewArray<Address>(max + 1);
  }
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->code(i), external_references->address(i));
  }
}


ExternalReferenceDecoder::~ExternalReferenceDecoder() {
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    DeleteArray(encodings_[type]);
  }
  DeleteArray(encodings_);
}


bool Serializer::serialization_enabled_ = false;
bool Serializer::too_late_to_enable_now_ = false;


Deserializer::Deserializer(SnapshotByteSource* source)
    : isolate_(NULL),
      source_(source),
      external_reference_decoder_(NULL) {
}


// This routine allocates a new object and also keeps track of where objects
// have been allocated, so that back references can be fixed up during
// deserialization.
Address Deserializer::Allocate(int space_index, Space* space, int size) {
  Address address;
  if (!SpaceIsLarge(space_index)) {
    ASSERT(!SpaceIsPaged(space_index) ||
           size <= Page::kPageSize - Page::kObjectStartOffset);
    MaybeObject* maybe_new_allocation;
    if (space_index == NEW_SPACE) {
      maybe_new_allocation =
          reinterpret_cast<NewSpace*>(space)->AllocateRaw(size);
    } else {
      maybe_new_allocation =
          reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
    }
    ASSERT(!maybe_new_allocation->IsFailure());
    Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
    HeapObject* new_object = HeapObject::cast(new_allocation);
    address = new_object->address();
    high_water_[space_index] = address + size;
  } else {
    ASSERT(SpaceIsLarge(space_index));
    LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
    Object* new_allocation;
    if (space_index == kLargeData || space_index == kLargeFixedArray) {
      new_allocation =
          lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
    } else {
      ASSERT_EQ(kLargeCode, space_index);
      new_allocation =
          lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
    }
    HeapObject* new_object = HeapObject::cast(new_allocation);
    // Record all large objects in the same space.
    address = new_object->address();
    pages_[LO_SPACE].Add(address);
  }
  last_object_address_ = address;
  return address;
}


// This returns the address of an object that has been described in the
// snapshot as being offset bytes back in a particular space.
HeapObject* Deserializer::GetAddressFromEnd(int space) {
  int offset = source_->GetInt();
  ASSERT(!SpaceIsLarge(space));
  offset <<= kObjectAlignmentBits;
  return HeapObject::FromAddress(high_water_[space] - offset);
}


// This returns the address of an object that has been described in the
// snapshot as being offset bytes into a particular space.
HeapObject* Deserializer::GetAddressFromStart(int space) {
  int offset = source_->GetInt();
  if (SpaceIsLarge(space)) {
    // Large spaces have one object per 'page'.
    return HeapObject::FromAddress(pages_[LO_SPACE][offset]);
  }
  offset <<= kObjectAlignmentBits;
  if (space == NEW_SPACE) {
    // New space is treated as a single page, numbered 0.
    return HeapObject::FromAddress(pages_[space][0] + offset);
  }
  ASSERT(SpaceIsPaged(space));
  int page_of_pointee = offset >> kPageSizeBits;
  Address object_address = pages_[space][page_of_pointee] +
                           (offset & Page::kPageAlignmentMask);
  return HeapObject::FromAddress(object_address);
}
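
// A worked example of the decoding above: the stream stores word offsets, so
// a value of 5 read from the snapshot becomes the byte offset
// 5 << kObjectAlignmentBits. For a paged space the high bits of that byte
// offset (offset >> kPageSizeBits) select the page and the low bits
// (offset & Page::kPageAlignmentMask) select the position within the page.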

void Deserializer::Deserialize() {
  isolate_ = Isolate::Current();
  ASSERT(isolate_ != NULL);
  // Don't GC while deserializing - just expand the heap.
  AlwaysAllocateScope always_allocate;
  // Don't use the free lists while deserializing.
  LinearAllocationScope allocate_linearly;
  // No active threads.
  ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
  // No active handles.
  ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
  // Make sure the entire partial snapshot cache is traversed, filling it with
  // valid object pointers.
  isolate_->set_serialize_partial_snapshot_cache_length(
      Isolate::kPartialSnapshotCacheCapacity);
  ASSERT_EQ(NULL, external_reference_decoder_);
  external_reference_decoder_ = new ExternalReferenceDecoder();
  isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
  isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);

  isolate_->heap()->set_global_contexts_list(
      isolate_->heap()->undefined_value());

  // Update data pointers to the external strings containing natives sources.
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
    Object* source = isolate_->heap()->natives_source_cache()->get(i);
    if (!source->IsUndefined()) {
      ExternalAsciiString::cast(source)->update_data_cache();
    }
  }
}


void Deserializer::DeserializePartial(Object** root) {
  isolate_ = Isolate::Current();
  // Don't GC while deserializing - just expand the heap.
  AlwaysAllocateScope always_allocate;
  // Don't use the free lists while deserializing.
  LinearAllocationScope allocate_linearly;
  if (external_reference_decoder_ == NULL) {
    external_reference_decoder_ = new ExternalReferenceDecoder();
  }
  VisitPointer(root);
}


Deserializer::~Deserializer() {
  ASSERT(source_->AtEOF());
  if (external_reference_decoder_) {
    delete external_reference_decoder_;
    external_reference_decoder_ = NULL;
  }
}


// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space. Any other space would cause ReadChunk to
  // try to update the remembered set using NULL as the address.
  ReadChunk(start, end, NEW_SPACE, NULL);
}


// This routine writes the new object into the pointer provided rather than
// returning it. The reason for this interface is that otherwise the object
// would be written very late, which means the FreeSpace map is not set up by
// the time we need to use it to mark the space at the end of a page as free.
void Deserializer::ReadObject(int space_number,
                              Space* space,
                              Object** write_back) {
  int size = source_->GetInt() << kObjectAlignmentBits;
  Address address = Allocate(space_number, space, size);
  *write_back = HeapObject::FromAddress(address);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);
  if (FLAG_log_snapshot_positions) {
    LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
  }
  ReadChunk(current, limit, space_number, address);
#ifdef DEBUG
  bool is_codespace = (space == HEAP->code_space()) ||
      ((space == HEAP->lo_space()) && (space_number == kLargeCode));
  ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
#endif
}


// This macro is always used with a constant argument so it should all fold
// away to almost nothing in the generated code. It might be nicer to do this
// with the ternary operator but there are type issues with that.
#define ASSIGN_DEST_SPACE(space_number) \
  Space* dest_space; \
  if (space_number == NEW_SPACE) { \
    dest_space = isolate->heap()->new_space(); \
  } else if (space_number == OLD_POINTER_SPACE) { \
    dest_space = isolate->heap()->old_pointer_space(); \
  } else if (space_number == OLD_DATA_SPACE) { \
    dest_space = isolate->heap()->old_data_space(); \
  } else if (space_number == CODE_SPACE) { \
    dest_space = isolate->heap()->code_space(); \
  } else if (space_number == MAP_SPACE) { \
    dest_space = isolate->heap()->map_space(); \
  } else if (space_number == CELL_SPACE) { \
    dest_space = isolate->heap()->cell_space(); \
  } else { \
    ASSERT(space_number >= LO_SPACE); \
    dest_space = isolate->heap()->lo_space(); \
  }
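
// Note on the byte-code dispatch in ReadChunk below: each case label is the
// arithmetic sum where + how + within + space_number, and the ASSERT lines in
// CASE_STATEMENT check that every component stays inside its own bit mask.
// This is what allows one body to be shared between several spaces and still
// recover the space number with data & kSpaceMask (the kAnyOldSpace bodies).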

static const int kUnknownOffsetFromStart = -1;


void Deserializer::ReadChunk(Object** current,
                             Object** limit,
                             int source_space,
                             Address current_object_address) {
  Isolate* const isolate = isolate_;
  bool write_barrier_needed = (current_object_address != NULL &&
                               source_space != NEW_SPACE &&
                               source_space != CELL_SPACE &&
                               source_space != CODE_SPACE &&
                               source_space != OLD_DATA_SPACE);
  while (current < limit) {
    int data = source_->Get();
    switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
      case where + how + within + space_number: \
        ASSERT((where & ~kPointedToMask) == 0); \
        ASSERT((how & ~kHowToCodeMask) == 0); \
        ASSERT((within & ~kWhereToPointMask) == 0); \
        ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \
      { \
        bool emit_write_barrier = false; \
        bool current_was_incremented = false; \
        int space_number = space_number_if_any == kAnyOldSpace ? \
                           (data & kSpaceMask) : space_number_if_any; \
        if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
          ASSIGN_DEST_SPACE(space_number) \
          ReadObject(space_number, dest_space, current); \
          emit_write_barrier = (space_number == NEW_SPACE); \
        } else { \
          Object* new_object = NULL;  /* May not be a real Object pointer. */ \
          if (where == kNewObject) { \
            ASSIGN_DEST_SPACE(space_number) \
            ReadObject(space_number, dest_space, &new_object); \
          } else if (where == kRootArray) { \
            int root_id = source_->GetInt(); \
            new_object = isolate->heap()->roots_array_start()[root_id]; \
            emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
          } else if (where == kPartialSnapshotCache) { \
            int cache_index = source_->GetInt(); \
            new_object = isolate->serialize_partial_snapshot_cache() \
                [cache_index]; \
            emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
          } else if (where == kExternalReference) { \
            int reference_id = source_->GetInt(); \
            Address address = external_reference_decoder_-> \
                Decode(reference_id); \
            new_object = reinterpret_cast<Object*>(address); \
          } else if (where == kBackref) { \
            emit_write_barrier = (space_number == NEW_SPACE); \
            new_object = GetAddressFromEnd(data & kSpaceMask); \
          } else { \
            ASSERT(where == kFromStart); \
            if (offset_from_start == kUnknownOffsetFromStart) { \
              emit_write_barrier = (space_number == NEW_SPACE); \
              new_object = GetAddressFromStart(data & kSpaceMask); \
            } else { \
              Address object_address = pages_[space_number][0] + \
                  (offset_from_start << kObjectAlignmentBits); \
              new_object = HeapObject::FromAddress(object_address); \
            } \
          } \
          if (within == kFirstInstruction) { \
            Code* new_code_object = reinterpret_cast<Code*>(new_object); \
            new_object = reinterpret_cast<Object*>( \
                new_code_object->instruction_start()); \
          } \
          if (how == kFromCode) { \
            Address location_of_branch_data = \
                reinterpret_cast<Address>(current); \
            Assembler::deserialization_set_special_target_at( \
                location_of_branch_data, \
                reinterpret_cast<Address>(new_object)); \
            location_of_branch_data += Assembler::kSpecialTargetSize; \
            current = reinterpret_cast<Object**>(location_of_branch_data); \
            current_was_incremented = true; \
          } else { \
            *current = new_object; \
          } \
        } \
        if (emit_write_barrier && write_barrier_needed) { \
          Address current_address = reinterpret_cast<Address>(current); \
          isolate->heap()->RecordWrite( \
              current_object_address, \
              static_cast<int>(current_address - current_object_address)); \
        } \
        if (!current_was_incremented) { \
          current++; \
        } \
        break; \
      }

// This generates a case and a body for each space. The large object spaces
// are very rare in snapshots so they are grouped in one body.
#define ONE_PER_SPACE(where, how, within) \
  CASE_STATEMENT(where, how, within, NEW_SPACE) \
  CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
  CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
  CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, CELL_SPACE) \
  CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, MAP_SPACE) \
  CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, kLargeData) \
  CASE_STATEMENT(where, how, within, kLargeCode) \
  CASE_STATEMENT(where, how, within, kLargeFixedArray) \
  CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with 8 fall-through
// cases and one body.
#define ALL_SPACES(where, how, within) \
  CASE_STATEMENT(where, how, within, NEW_SPACE) \
  CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_STATEMENT(where, how, within, CELL_SPACE) \
  CASE_STATEMENT(where, how, within, MAP_SPACE) \
  CASE_STATEMENT(where, how, within, kLargeData) \
  CASE_STATEMENT(where, how, within, kLargeCode) \
  CASE_STATEMENT(where, how, within, kLargeFixedArray) \
  CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)

#define ONE_PER_CODE_SPACE(where, how, within) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, kLargeCode) \
  CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)

#define FOUR_CASES(byte_code) \
  case byte_code: \
  case byte_code + 1: \
  case byte_code + 2: \
  case byte_code + 3:

#define SIXTEEN_CASES(byte_code) \
  FOUR_CASES(byte_code) \
  FOUR_CASES(byte_code + 4) \
  FOUR_CASES(byte_code + 8) \
  FOUR_CASES(byte_code + 12)

      // We generate 15 cases and bodies that process special tags that
      // combine the raw data tag and the length into one byte.
#define RAW_CASE(index, size) \
      case kRawData + index: { \
        byte* raw_data_out = reinterpret_cast<byte*>(current); \
        source_->CopyRaw(raw_data_out, size); \
        current = reinterpret_cast<Object**>(raw_data_out + size); \
        break; \
      }
      COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE

      // Deserialize a chunk of raw data that doesn't have one of the popular
      // lengths.
      case kRawData: {
        int size = source_->GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_->CopyRaw(raw_data_out, size);
        current = reinterpret_cast<Object**>(raw_data_out + size);
        break;
      }

      SIXTEEN_CASES(kRootArrayLowConstants)
      SIXTEEN_CASES(kRootArrayHighConstants) {
        int root_id = RootArrayConstantFromByteCode(data);
        Object* object = isolate->heap()->roots_array_start()[root_id];
        ASSERT(!isolate->heap()->InNewSpace(object));
        *current++ = object;
        break;
      }

      case kRepeat: {
        int repeats = source_->GetInt();
        Object* object = current[-1];
        ASSERT(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) current[i] = object;
        current += repeats;
        break;
      }

      STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
                    Heap::kOldSpaceRoots);
      STATIC_ASSERT(kMaxRepeats == 12);
      FOUR_CASES(kConstantRepeat)
      FOUR_CASES(kConstantRepeat + 4)
      FOUR_CASES(kConstantRepeat + 8) {
        int repeats = RepeatsForCode(data);
        Object* object = current[-1];
        ASSERT(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) current[i] = object;
        current += repeats;
        break;
      }

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions.
      ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
#if V8_TARGET_ARCH_MIPS
      // Deserialize a new object from a pointer found in code and write a
      // pointer to it to the current object. Required only for MIPS, and
      // omitted on the other architectures because it is fully unrolled and
      // would cause bloat.
      ONE_PER_SPACE(kNewObject, kFromCode, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to it to the current
      // object. Required only for MIPS.
      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
      // Find an already deserialized code object using its offset from
      // the start and write a pointer to it to the current object.
      // Required only for MIPS.
      ALL_SPACES(kFromStart, kFromCode, kStartOfObject)
#endif
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
      // to the current code object or the instruction pointer in a function
      // object.
      ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
      ALL_SPACES(kBackref, kPlain, kFirstInstruction)
      // Find an already deserialized object using its offset from the start
      // and write a pointer to it to the current object.
      ALL_SPACES(kFromStart, kPlain, kStartOfObject)
      ALL_SPACES(kFromStart, kPlain, kFirstInstruction)
      // Find an already deserialized code object using its offset from the
      // start and write a pointer to its first instruction to the current
      // code object.
      ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
      CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart)
      // Find an object in the partial snapshot cache and write a pointer to
      // it to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)
      // Find a code entry in the partial snapshot cache and write a pointer
      // to it to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kFirstInstruction, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kFirstInstruction,
                0,
                kUnknownOffsetFromStart)
      // Find an external reference and write a pointer to it to the current
      // object.
      CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kPlain,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)
      // Find an external reference and write a pointer to it in the current
      // code object.
      CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kFromCode,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ONE_PER_SPACE
#undef ALL_SPACES
#undef ASSIGN_DEST_SPACE

      case kNewPage: {
        int space = source_->Get();
        pages_[space].Add(last_object_address_);
        if (space == CODE_SPACE) {
          CPU::FlushICache(last_object_address_, Page::kPageSize);
        }
        break;
      }

      case kSkip: {
        current++;
        break;
      }

      case kNativesStringResource: {
        int index = source_->Get();
        Vector<const char> source_vector = Natives::GetRawScriptSource(index);
        NativesExternalStringResource* resource =
            new NativesExternalStringResource(isolate->bootstrapper(),
                                              source_vector.start(),
                                              source_vector.length());
        *current++ = reinterpret_cast<Object*>(resource);
        break;
      }

      case kSynchronize: {
        // If we get here then that indicates that you have a mismatch between
        // the number of GC roots when serializing and deserializing.
        UNREACHABLE();
      }

      default:
        UNREACHABLE();
    }
  }
  ASSERT_EQ(current, limit);
}


void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
  const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
  for (int shift = max_shift; shift > 0; shift -= 7) {
    if (integer >= static_cast<uintptr_t>(1u) << shift) {
      Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart");
    }
  }
  PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
}
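
// A worked example of the variable-length integer encoding above: values are
// emitted big-endian in 7-bit groups, with the 0x80 continuation bit set on
// every byte except the last, and leading all-zero groups skipped. PutInt(300)
// therefore emits ((300 >> 7) & 0x7f) | 0x80 == 0x82 followed by
// 300 & 0x7f == 0x2c.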

Serializer::Serializer(SnapshotByteSink* sink)
    : sink_(sink),
      current_root_index_(0),
      external_reference_encoder_(new ExternalReferenceEncoder),
      large_object_total_(0),
      root_index_wave_front_(0) {
  isolate_ = Isolate::Current();
  // The serializer is meant to be used only to generate initial heap images
  // from a context in which there is only one isolate.
  ASSERT(isolate_->IsDefaultIsolate());
  for (int i = 0; i <= LAST_SPACE; i++) {
    fullness_[i] = 0;
  }
}


Serializer::~Serializer() {
  delete external_reference_encoder_;
}


void StartupSerializer::SerializeStrongReferences() {
  Isolate* isolate = Isolate::Current();
  // No active threads.
  CHECK_EQ(NULL, Isolate::Current()->thread_manager()->FirstThreadStateInUse());
  // No active or weak handles.
  CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
  CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
  // We don't support serializing installed extensions.
  CHECK(!isolate->has_installed_extensions());

  HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}


void PartialSerializer::Serialize(Object** object) {
  this->VisitPointer(object);
  Isolate* isolate = Isolate::Current();

  // After we have done the partial serialization the partial snapshot cache
  // will contain some references needed to decode the partial snapshot. We
  // fill it up with undefineds so that it has a predictable length, and the
  // deserialization code therefore doesn't need to know it.
  for (int index = isolate->serialize_partial_snapshot_cache_length();
       index < Isolate::kPartialSnapshotCacheCapacity;
       index++) {
    isolate->serialize_partial_snapshot_cache()[index] =
        isolate->heap()->undefined_value();
    startup_serializer_->VisitPointer(
        &isolate->serialize_partial_snapshot_cache()[index]);
  }
  isolate->set_serialize_partial_snapshot_cache_length(
      Isolate::kPartialSnapshotCacheCapacity);
}


void Serializer::VisitPointers(Object** start, Object** end) {
  Isolate* isolate = Isolate::Current();

  for (Object** current = start; current < end; current++) {
    if (start == isolate->heap()->roots_array_start()) {
      root_index_wave_front_ =
          Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
    }
    if (reinterpret_cast<Address>(current) ==
        isolate->heap()->store_buffer()->TopAddress()) {
      sink_->Put(kSkip, "Skip");
    } else if ((*current)->IsSmi()) {
      sink_->Put(kRawData, "RawData");
      sink_->PutInt(kPointerSize, "length");
      for (int i = 0; i < kPointerSize; i++) {
        sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
      }
    } else {
      SerializeObject(*current, kPlain, kStartOfObject);
    }
  }
}


// This ensures that the partial snapshot cache keeps things alive during GC
// and tracks their movement. When it is called during serialization of the
// startup snapshot nothing happens, because the partial snapshot cache is
// still empty. When the partial (context) snapshot is created, this array is
// populated with the pointers that the partial snapshot will need. As that
// happens we emit serialized objects to the startup snapshot that correspond
// to the elements of this cache array. On deserialization we therefore need
// to visit the cache array. This fills it up with pointers to deserialized
// objects.
void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
  Isolate* isolate = Isolate::Current();
  visitor->VisitPointers(
      isolate->serialize_partial_snapshot_cache(),
      &isolate->serialize_partial_snapshot_cache()[
          isolate->serialize_partial_snapshot_cache_length()]);
}


// When deserializing we need to set the size of the snapshot cache. This
// means the root iteration code (above) will iterate over array elements,
// writing the references to deserialized objects in them.
void SerializerDeserializer::SetSnapshotCacheSize(int size) {
  Isolate::Current()->set_serialize_partial_snapshot_cache_length(size);
}


int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
  Isolate* isolate = Isolate::Current();

  for (int i = 0;
       i < isolate->serialize_partial_snapshot_cache_length();
       i++) {
    Object* entry = isolate->serialize_partial_snapshot_cache()[i];
    if (entry == heap_object) return i;
  }

  // We didn't find the object in the cache. So we add it to the cache and
  // then visit the pointer so that it becomes part of the startup snapshot
  // and we can refer to it from the partial snapshot.
  int length = isolate->serialize_partial_snapshot_cache_length();
  CHECK(length < Isolate::kPartialSnapshotCacheCapacity);
  isolate->serialize_partial_snapshot_cache()[length] = heap_object;
  startup_serializer_->VisitPointer(
      &isolate->serialize_partial_snapshot_cache()[length]);
  // We don't recurse from the startup snapshot generator into the partial
  // snapshot generator.
  ASSERT(length == isolate->serialize_partial_snapshot_cache_length());
  isolate->set_serialize_partial_snapshot_cache_length(length + 1);
  return length;
}


int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
  Heap* heap = HEAP;
  if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
  for (int i = 0; i < root_index_wave_front_; i++) {
    Object* root = heap->roots_array_start()[i];
    if (!root->IsSmi() && root == heap_object) {
#if V8_TARGET_ARCH_MIPS
      if (from == kFromCode) {
        // In order to avoid code bloat in the deserializer we don't have
        // support for the encoding that specifies a particular root should
        // be written into the lui/ori instructions on MIPS. Therefore we
        // should not generate such serialization data for MIPS.
        return kInvalidRootIndex;
      }
#endif
      return i;
    }
  }
  return kInvalidRootIndex;
}


// Encode the location of an already deserialized object in order to write its
// location into a later object. We can encode the location as an offset from
// the start of the deserialized objects or as an offset backwards from the
// current allocation pointer.
void Serializer::SerializeReferenceToPreviousObject(
    int space,
    int address,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  int offset = CurrentAllocationAddress(space) - address;
  bool from_start = true;
  if (SpaceIsPaged(space)) {
    // For paged spaces it is simple to encode back from the current
    // allocation if the object is on the same page as the current allocation
    // pointer.
    if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
        (address >> kPageSizeBits)) {
      from_start = false;
      address = offset;
    }
  } else if (space == NEW_SPACE) {
    // For new space it is always simple to encode back from the current
    // allocation.
    if (offset < address) {
      from_start = false;
      address = offset;
    }
  }
  // If we are actually dealing with real offsets (and not a numbering of
  // all objects) then we should shift out the bits that are always 0.
  if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
  if (from_start) {
    sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
    sink_->PutInt(address, "address");
  } else {
    sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
    sink_->PutInt(address, "address");
  }
}
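
// A worked example: if an object in a paged space was given the encoded
// address 0x40 and the current allocation address for that space is 0x100 on
// the same virtual page, the reference is emitted as a kBackref with offset
// (0x100 - 0x40) >> kObjectAlignmentBits. If the two addresses are on
// different pages, it is emitted as a kFromStart reference with the absolute
// encoded address 0x40 >> kObjectAlignmentBits instead.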

void StartupSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  int root_index;
  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
    PutRoot(root_index, heap_object, how_to_code, where_to_point);
    return;
  }

  if (address_mapper_.IsMapped(heap_object)) {
    int space = SpaceOfAlreadySerializedObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point);
  } else {
    // Object has not yet been serialized. Serialize it here.
    ObjectSerializer object_serializer(this,
                                       heap_object,
                                       sink_,
                                       how_to_code,
                                       where_to_point);
    object_serializer.Serialize();
  }
}


void StartupSerializer::SerializeWeakReferences() {
  for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length();
       i < Isolate::kPartialSnapshotCacheCapacity;
       i++) {
    sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
    sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
  }
  HEAP->IterateWeakRoots(this, VISIT_ALL);
}


void Serializer::PutRoot(int root_index,
                         HeapObject* object,
                         SerializerDeserializer::HowToCode how_to_code,
                         SerializerDeserializer::WhereToPoint where_to_point) {
  if (how_to_code == kPlain &&
      where_to_point == kStartOfObject &&
      root_index < kRootArrayNumberOfConstantEncodings &&
      !HEAP->InNewSpace(object)) {
    if (root_index < kRootArrayNumberOfLowConstantEncodings) {
      sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant");
    } else {
      sink_->Put(kRootArrayHighConstants + root_index -
                     kRootArrayNumberOfLowConstantEncodings,
                 "RootHiConstant");
    }
  } else {
    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_->PutInt(root_index, "root_index");
  }
}


void PartialSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  if (heap_object->IsMap()) {
    // The code-caches link to context-specific code objects, which the
    // startup and context serializers cannot currently handle.
    ASSERT(Map::cast(heap_object)->code_cache() ==
           heap_object->GetHeap()->raw_unchecked_empty_fixed_array());
  }

  int root_index;
  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
    PutRoot(root_index, heap_object, how_to_code, where_to_point);
    return;
  }

  if (ShouldBeInThePartialSnapshotCache(heap_object)) {
    int cache_index = PartialSnapshotCacheIndex(heap_object);
    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
               "PartialSnapshotCache");
    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
    return;
  }

  // Pointers from the partial snapshot to the objects in the startup snapshot
  // should go through the root array or through the partial snapshot cache.
  // If this is not the case you may have to add something to the root array.
  ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
  // All the symbols that the partial snapshot needs should be either in the
  // root table or in the partial snapshot cache.
  ASSERT(!heap_object->IsSymbol());

  if (address_mapper_.IsMapped(heap_object)) {
    int space = SpaceOfAlreadySerializedObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point);
  } else {
    // Object has not yet been serialized. Serialize it here.
    ObjectSerializer serializer(this,
                                heap_object,
                                sink_,
                                how_to_code,
                                where_to_point);
    serializer.Serialize();
  }
}


void Serializer::ObjectSerializer::Serialize() {
  int space = Serializer::SpaceOfObject(object_);
  int size = object_->Size();

  sink_->Put(kNewObject + reference_representation_ + space,
             "ObjectSerialization");
  sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");

  LOG(i::Isolate::Current(),
      SnapshotPositionEvent(object_->address(), sink_->Position()));

  // Mark this object as already serialized.
  bool start_new_page;
  int offset = serializer_->Allocate(space, size, &start_new_page);
  serializer_->address_mapper()->AddMapping(object_, offset);
  if (start_new_page) {
    sink_->Put(kNewPage, "NewPage");
    sink_->PutSection(space, "NewPageSpace");
  }

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;
  object_->IterateBody(object_->map()->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}


void Serializer::ObjectSerializer::VisitPointers(Object** start,
                                                 Object** end) {
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));

    while (current < end && !(*current)->IsSmi()) {
      HeapObject* current_contents = HeapObject::cast(*current);
      int root_index = serializer_->RootIndex(current_contents, kPlain);
      // Repeats are not subject to the write barrier so there are only some
      // objects that can be used in a repeat encoding. These are the early
      // ones in the root array that are never in new space.
      if (current != start &&
          root_index != kInvalidRootIndex &&
          root_index < kRootArrayNumberOfConstantEncodings &&
          current_contents == current[-1]) {
        ASSERT(!HEAP->InNewSpace(current_contents));
        int repeat_count = 1;
        while (current < end - 1 && current[repeat_count] == current_contents) {
          repeat_count++;
        }
        current += repeat_count;
        bytes_processed_so_far_ += repeat_count * kPointerSize;
        if (repeat_count > kMaxRepeats) {
          sink_->Put(kRepeat, "SerializeRepeats");
          sink_->PutInt(repeat_count, "SerializeRepeats");
        } else {
          sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
        }
      } else {
        serializer_->SerializeObject(current_contents, kPlain, kStartOfObject);
        bytes_processed_so_far_ += kPointerSize;
        current++;
      }
    }
  }
}
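
// Note on the repeat encoding above: a run of at most kMaxRepeats (12)
// identical pointers to old-space roots is folded into the single byte
// CodeForRepeats(repeat_count), which the deserializer decodes with
// RepeatsForCode. Longer runs fall back to the kRepeat tag followed by the
// count as a variable-length integer.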

void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
  Object** current = rinfo->target_object_address();

  OutputRawData(rinfo->target_address_address());
  HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  serializer_->SerializeObject(*current, representation, kStartOfObject);
  bytes_processed_so_far_ += rinfo->target_address_size();
}


void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
                                                           Address* end) {
  Address references_start = reinterpret_cast<Address>(start);
  OutputRawData(references_start);

  for (Address* current = start; current < end; current++) {
    sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
    int reference_id = serializer_->EncodeExternalReference(*current);
    sink_->PutInt(reference_id, "reference id");
  }
  bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
}


void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
  Address references_start = rinfo->target_address_address();
  OutputRawData(references_start);

  Address* current = rinfo->target_reference_address();
  int representation = rinfo->IsCodedSpecially() ?
      kFromCode + kStartOfObject : kPlain + kStartOfObject;
  sink_->Put(kExternalReference + representation, "ExternalRef");
  int reference_id = serializer_->EncodeExternalReference(*current);
  sink_->PutInt(reference_id, "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}


void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  Address target_start = rinfo->target_address_address();
  OutputRawData(target_start);
  Address target = rinfo->target_address();
  uint32_t encoding = serializer_->EncodeExternalReference(target);
  CHECK(target == NULL ? encoding == 0 : encoding != 0);
  int representation;
  // Can't use a ternary operator because of gcc.
  if (rinfo->IsCodedSpecially()) {
    representation = kStartOfObject + kFromCode;
  } else {
    representation = kStartOfObject + kPlain;
  }
  sink_->Put(kExternalReference + representation, "ExternalReference");
  sink_->PutInt(encoding, "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
  CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
  Address target_start = rinfo->target_address_address();
  OutputRawData(target_start);
  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
  bytes_processed_so_far_ += rinfo->target_address_size();
}


void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
  Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
  OutputRawData(entry_address);
  serializer_->SerializeObject(target, kPlain, kFirstInstruction);
  bytes_processed_so_far_ += kPointerSize;
}


void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
  // We shouldn't have any global property cell references in code
  // objects in the snapshot.
  UNREACHABLE();
}


void Serializer::ObjectSerializer::VisitExternalAsciiString(
    v8::String::ExternalAsciiStringResource** resource_pointer) {
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
    Object* source = HEAP->natives_source_cache()->get(i);
    if (!source->IsUndefined()) {
      ExternalAsciiString* string = ExternalAsciiString::cast(source);
      typedef v8::String::ExternalAsciiStringResource Resource;
      const Resource* resource = string->resource();
      if (resource == *resource_pointer) {
        sink_->Put(kNativesStringResource, "NativesStringResource");
        sink_->PutSection(i, "NativesStringResourceEnd");
        bytes_processed_so_far_ += sizeof(resource);
        return;
      }
    }
  }
  // One of the strings in the natives cache should match the resource. We
  // can't serialize any other kinds of external strings.
  UNREACHABLE();
}


void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
  Address object_start = object_->address();
  int up_to_offset = static_cast<int>(up_to - object_start);
  int skipped = up_to_offset - bytes_processed_so_far_;
  // This assert will fail if the reloc info gives us the
  // target_address_address locations in a non-ascending order. Luckily that
  // doesn't happen.
  ASSERT(skipped >= 0);
  if (skipped != 0) {
    Address base = object_start + bytes_processed_so_far_;
#define RAW_CASE(index, length) \
    if (skipped == length) { \
      sink_->PutSection(kRawData + index, "RawDataFixed"); \
    } else  /* NOLINT */
    COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
    {  /* NOLINT */
      sink_->Put(kRawData, "RawData");
      sink_->PutInt(skipped, "length");
    }
    for (int i = 0; i < skipped; i++) {
      unsigned int data = base[i];
      sink_->PutSection(data, "Byte");
    }
    bytes_processed_so_far_ += skipped;
  }
}


int Serializer::SpaceOfObject(HeapObject* object) {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    AllocationSpace s = static_cast<AllocationSpace>(i);
    if (HEAP->InSpace(object, s)) {
      if (i == LO_SPACE) {
        if (object->IsCode()) {
          return kLargeCode;
        } else if (object->IsFixedArray()) {
          return kLargeFixedArray;
        } else {
          return kLargeData;
        }
      }
      return i;
    }
  }
  UNREACHABLE();
  return 0;
}


int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    AllocationSpace s = static_cast<AllocationSpace>(i);
    if (HEAP->InSpace(object, s)) {
      return i;
    }
  }
  UNREACHABLE();
  return 0;
}


int Serializer::Allocate(int space, int size, bool* new_page) {
  CHECK(space >= 0 && space < kNumberOfSpaces);
  if (SpaceIsLarge(space)) {
    // In large object space we merely number the objects instead of trying to
    // determine some sort of address.
    *new_page = true;
    large_object_total_ += size;
    return fullness_[LO_SPACE]++;
  }
  *new_page = false;
  if (fullness_[space] == 0) {
    *new_page = true;
  }
  if (SpaceIsPaged(space)) {
    // Paged spaces are a little special. We encode their addresses as if the
    // pages were all contiguous and each page were filled up in the range
    // 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous
    // and allocation does not start at offset 0 in the page, but this scheme
    // means the deserializer can get the page number quickly by shifting the
    // serialized address.
    CHECK(IsPowerOf2(Page::kPageSize));
    int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
    CHECK(size <= SpaceAreaSize(space));
    if (used_in_this_page + size > SpaceAreaSize(space)) {
      *new_page = true;
      fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
    }
  }
  int allocation_address = fullness_[space];
  fullness_[space] = allocation_address + size;
  return allocation_address;
}
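
// A worked example of the page rounding above: if SpaceAreaSize(space) leaves
// room for only 0x20 more bytes on the current virtual page and an object of
// size 0x40 is allocated, fullness_[space] is first rounded up to the next
// multiple of Page::kPageSize. The object's encoded address therefore starts
// a fresh page, and the deserializer recovers the page number as
// encoded_address >> kPageSizeBits.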

int Serializer::SpaceAreaSize(int space) {
  if (space == CODE_SPACE) {
    return isolate_->memory_allocator()->CodePageAreaSize();
  } else {
    return Page::kPageSize - Page::kObjectStartOffset;
  }
}


} }  // namespace v8::internal