// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "natives.h"
#include "platform.h"
#include "runtime.h"
#include "serialize.h"
#include "stub-cache.h"
#include "v8threads.h"
#include "bootstrapper.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Coding of external references.

// The encoding of an external reference. The type is in the high 16 bits.
// The id is in the low 16 bits.
static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
  return static_cast<uint32_t>(type) << 16 | id;
}


static int* GetInternalPointer(StatsCounter* counter) {
  // If deserialization happens without the counters being set up, all
  // counters refer to dummy_counter.
  static int dummy_counter = 0;
  return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
}


// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
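// Each table entry's code is the packed value produced by EncodeExternal
// above; for instance (illustrative), a STATS_COUNTER entry with id 3 is
// encoded as (STATS_COUNTER << 16) | 3.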
class ExternalReferenceTable {
 public:
  static ExternalReferenceTable* instance(Isolate* isolate) {
    ExternalReferenceTable* external_reference_table =
        isolate->external_reference_table();
    if (external_reference_table == NULL) {
      external_reference_table = new ExternalReferenceTable(isolate);
      isolate->set_external_reference_table(external_reference_table);
    }
    return external_reference_table;
  }

  int size() const { return refs_.length(); }

  Address address(int i) { return refs_[i].address; }

  uint32_t code(int i) { return refs_[i].code; }

  const char* name(int i) { return refs_[i].name; }

  int max_id(int code) { return max_id_[code]; }

 private:
  explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
    PopulateTable(isolate);
  }
  ~ExternalReferenceTable() { }

  struct ExternalReferenceEntry {
    Address address;
    uint32_t code;
    const char* name;
  };

  void PopulateTable(Isolate* isolate);

  // For a few types of references, we can get their address from their id.
  void AddFromId(TypeCode type,
                 uint16_t id,
                 const char* name,
                 Isolate* isolate);

  // For other types of references, the caller will figure out the address.
  void Add(Address address, TypeCode type, uint16_t id, const char* name);

  List<ExternalReferenceEntry> refs_;
  int max_id_[kTypeCodeCount];
};


void ExternalReferenceTable::AddFromId(TypeCode type,
                                       uint16_t id,
                                       const char* name,
                                       Isolate* isolate) {
  Address address;
  switch (type) {
    case C_BUILTIN: {
      ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate);
      address = ref.address();
      break;
    }
    case BUILTIN: {
      ExternalReference ref(static_cast<Builtins::Name>(id), isolate);
      address = ref.address();
      break;
    }
    case RUNTIME_FUNCTION: {
      ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate);
      address = ref.address();
      break;
    }
    case IC_UTILITY: {
      ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)),
                            isolate);
      address = ref.address();
      break;
    }
    default:
      UNREACHABLE();
      return;
  }
  Add(address, type, id, name);
}


void ExternalReferenceTable::Add(Address address,
                                 TypeCode type,
                                 uint16_t id,
                                 const char* name) {
  ASSERT_NE(NULL, address);
  ExternalReferenceEntry entry;
  entry.address = address;
  entry.code = EncodeExternal(type, id);
  entry.name = name;
  ASSERT_NE(0, entry.code);
  refs_.Add(entry);
  if (id > max_id_[type]) max_id_[type] = id;
}


void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
    max_id_[type_code] = 0;
  }

  // The following populates all of the different types of external references
  // into the ExternalReferenceTable.
  //
  // NOTE: This function was originally 100k of code.  It has since been
  // rewritten to be mostly table driven, as the callback macro style tends to
  // very easily cause code bloat.  Please be careful in the future when adding
  // new references.
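  //
  // Each list macro below contributes plain initializers to a constant
  // table; for a hypothetical builtin named Foo, for instance, DEF_ENTRY_C
  // would expand to { C_BUILTIN, Builtins::c_Foo, "Builtins::Foo" }.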

  struct RefTableEntry {
    TypeCode type;
    uint16_t id;
    const char* name;
  };

  static const RefTableEntry ref_table[] = {
  // Builtins
#define DEF_ENTRY_C(name, ignored) \
  { C_BUILTIN, \
    Builtins::c_##name, \
    "Builtins::" #name },

  BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C

#define DEF_ENTRY_C(name, ignored) \
  { BUILTIN, \
    Builtins::k##name, \
    "Builtins::" #name },
#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored)

  BUILTIN_LIST_C(DEF_ENTRY_C)
  BUILTIN_LIST_A(DEF_ENTRY_A)
  BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A

  // Runtime functions
#define RUNTIME_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::k##name, \
    "Runtime::" #name },

  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY

  // IC utilities
#define IC_ENTRY(name) \
  { IC_UTILITY, \
    IC::k##name, \
    "IC::" #name },

  IC_UTIL_LIST(IC_ENTRY)
#undef IC_ENTRY
  };  // end of ref_table[].

  for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
    AddFromId(ref_table[i].type,
              ref_table[i].id,
              ref_table[i].name,
              isolate);
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Debug addresses
  Add(Debug_Address(Debug::k_after_break_target_address).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_after_break_target_address << kDebugIdShift,
      "Debug::after_break_target_address()");
  Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_debug_break_slot_address << kDebugIdShift,
      "Debug::debug_break_slot_address()");
  Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_debug_break_return_address << kDebugIdShift,
      "Debug::debug_break_return_address()");
  Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_restarter_frame_function_pointer << kDebugIdShift,
      "Debug::restarter_frame_function_pointer_address()");
#endif

  // Stat counters
  struct StatsRefTableEntry {
    StatsCounter* (Counters::*counter)();
    uint16_t id;
    const char* name;
  };

  const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
  { &Counters::name, \
    Counters::k_##name, \
    "Counters::" #name },

  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
#undef COUNTER_ENTRY
  };  // end of stats_ref_table[].
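
  // Each entry holds a pointer-to-member-function, invoked below as
  // (counters->*(stats_ref_table[i].counter))() to fetch the isolate's
  // StatsCounter; for a hypothetical counter named foo, COUNTER_ENTRY
  // expands to { &Counters::foo, Counters::k_foo, "Counters::foo" }.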

  Counters* counters = isolate->counters();
  for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
    Add(reinterpret_cast<Address>(GetInternalPointer(
            (counters->*(stats_ref_table[i].counter))())),
        STATS_COUNTER,
        stats_ref_table[i].id,
        stats_ref_table[i].name);
  }

  // Top addresses

  const char* AddressNames[] = {
#define C(name) "Isolate::" #name,
    ISOLATE_ADDRESS_LIST(C)
    ISOLATE_ADDRESS_LIST_PROF(C)
    NULL
#undef C
  };

  for (uint16_t i = 0; i < Isolate::k_isolate_address_count; ++i) {
    Add(isolate->get_address_from_id((Isolate::AddressId)i),
        TOP_ADDRESS, i, AddressNames[i]);
  }

  // Accessors
#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
  Add((Address)&Accessors::name, \
      ACCESSOR, \
      Accessors::k##name, \
      "Accessors::" #name);

  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
#undef ACCESSOR_DESCRIPTOR_DECLARATION

  StubCache* stub_cache = isolate->stub_cache();

  // Stub cache tables
  Add(stub_cache->key_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      1,
      "StubCache::primary_->key");
  Add(stub_cache->value_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      2,
      "StubCache::primary_->value");
  Add(stub_cache->key_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      3,
      "StubCache::secondary_->key");
  Add(stub_cache->value_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      4,
      "StubCache::secondary_->value");

  // Runtime entries
  Add(ExternalReference::perform_gc_function(isolate).address(),
      RUNTIME_ENTRY,
      1,
      "Runtime::PerformGC");
  Add(ExternalReference::fill_heap_number_with_random_function(
          isolate).address(),
      RUNTIME_ENTRY,
      2,
      "V8::FillHeapNumberWithRandom");
  Add(ExternalReference::random_uint32_function(isolate).address(),
      RUNTIME_ENTRY,
      3,
      "V8::Random");
  Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
      RUNTIME_ENTRY,
      4,
      "HandleScope::DeleteExtensions");

  // Miscellaneous
  Add(ExternalReference::the_hole_value_location(isolate).address(),
      UNCLASSIFIED,
      2,
      "Factory::the_hole_value().location()");
  Add(ExternalReference::roots_address(isolate).address(),
      UNCLASSIFIED,
      3,
      "Heap::roots_address()");
  Add(ExternalReference::address_of_stack_limit(isolate).address(),
      UNCLASSIFIED,
      4,
      "StackGuard::address_of_jslimit()");
  Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
      UNCLASSIFIED,
      5,
      "StackGuard::address_of_real_jslimit()");
#ifndef V8_INTERPRETED_REGEXP
  Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
      UNCLASSIFIED,
      6,
      "RegExpStack::limit_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_address(
          isolate).address(),
      UNCLASSIFIED,
      7,
      "RegExpStack::memory_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
      UNCLASSIFIED,
      8,
      "RegExpStack::memory_size()");
  Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
      UNCLASSIFIED,
      9,
      "OffsetsVector::static_offsets_vector");
#endif  // V8_INTERPRETED_REGEXP
  Add(ExternalReference::new_space_start(isolate).address(),
      UNCLASSIFIED,
      10,
      "Heap::NewSpaceStart()");
  Add(ExternalReference::new_space_mask(isolate).address(),
      UNCLASSIFIED,
      11,
389 "Heap::NewSpaceMask()"); 390 Add(ExternalReference::heap_always_allocate_scope_depth(isolate).address(), 391 UNCLASSIFIED, 392 12, 393 "Heap::always_allocate_scope_depth()"); 394 Add(ExternalReference::new_space_allocation_limit_address(isolate).address(), 395 UNCLASSIFIED, 396 13, 397 "Heap::NewSpaceAllocationLimitAddress()"); 398 Add(ExternalReference::new_space_allocation_top_address(isolate).address(), 399 UNCLASSIFIED, 400 14, 401 "Heap::NewSpaceAllocationTopAddress()"); 402 #ifdef ENABLE_DEBUGGER_SUPPORT 403 Add(ExternalReference::debug_break(isolate).address(), 404 UNCLASSIFIED, 405 15, 406 "Debug::Break()"); 407 Add(ExternalReference::debug_step_in_fp_address(isolate).address(), 408 UNCLASSIFIED, 409 16, 410 "Debug::step_in_fp_addr()"); 411 #endif 412 Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(), 413 UNCLASSIFIED, 414 17, 415 "add_two_doubles"); 416 Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(), 417 UNCLASSIFIED, 418 18, 419 "sub_two_doubles"); 420 Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(), 421 UNCLASSIFIED, 422 19, 423 "mul_two_doubles"); 424 Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(), 425 UNCLASSIFIED, 426 20, 427 "div_two_doubles"); 428 Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(), 429 UNCLASSIFIED, 430 21, 431 "mod_two_doubles"); 432 Add(ExternalReference::compare_doubles(isolate).address(), 433 UNCLASSIFIED, 434 22, 435 "compare_doubles"); 436 #ifndef V8_INTERPRETED_REGEXP 437 Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(), 438 UNCLASSIFIED, 439 23, 440 "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); 441 Add(ExternalReference::re_check_stack_guard_state(isolate).address(), 442 UNCLASSIFIED, 443 24, 444 "RegExpMacroAssembler*::CheckStackGuardState()"); 445 Add(ExternalReference::re_grow_stack(isolate).address(), 446 UNCLASSIFIED, 447 25, 448 "NativeRegExpMacroAssembler::GrowStack()"); 449 Add(ExternalReference::re_word_character_map().address(), 450 UNCLASSIFIED, 451 26, 452 "NativeRegExpMacroAssembler::word_character_map"); 453 #endif // V8_INTERPRETED_REGEXP 454 // Keyed lookup cache. 
  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
      UNCLASSIFIED,
      27,
      "KeyedLookupCache::keys()");
  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
      UNCLASSIFIED,
      28,
      "KeyedLookupCache::field_offsets()");
  Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
      UNCLASSIFIED,
      29,
      "TranscendentalCache::caches()");
  Add(ExternalReference::handle_scope_next_address().address(),
      UNCLASSIFIED,
      30,
      "HandleScope::next");
  Add(ExternalReference::handle_scope_limit_address().address(),
      UNCLASSIFIED,
      31,
      "HandleScope::limit");
  Add(ExternalReference::handle_scope_level_address().address(),
      UNCLASSIFIED,
      32,
      "HandleScope::level");
  Add(ExternalReference::new_deoptimizer_function(isolate).address(),
      UNCLASSIFIED,
      33,
      "Deoptimizer::New()");
  Add(ExternalReference::compute_output_frames_function(isolate).address(),
      UNCLASSIFIED,
      34,
      "Deoptimizer::ComputeOutputFrames()");
  Add(ExternalReference::address_of_min_int().address(),
      UNCLASSIFIED,
      35,
      "LDoubleConstant::min_int");
  Add(ExternalReference::address_of_one_half().address(),
      UNCLASSIFIED,
      36,
      "LDoubleConstant::one_half");
  Add(ExternalReference::isolate_address().address(),
      UNCLASSIFIED,
      37,
      "isolate");
  Add(ExternalReference::address_of_minus_zero().address(),
      UNCLASSIFIED,
      38,
      "LDoubleConstant::minus_zero");
  Add(ExternalReference::address_of_negative_infinity().address(),
      UNCLASSIFIED,
      39,
      "LDoubleConstant::negative_infinity");
  Add(ExternalReference::power_double_double_function(isolate).address(),
      UNCLASSIFIED,
      40,
      "power_double_double_function");
  Add(ExternalReference::power_double_int_function(isolate).address(),
      UNCLASSIFIED,
      41,
      "power_double_int_function");
  Add(ExternalReference::arguments_marker_location(isolate).address(),
      UNCLASSIFIED,
      42,
      "Factory::arguments_marker().location()");
}


ExternalReferenceEncoder::ExternalReferenceEncoder()
    : encodings_(Match),
      isolate_(Isolate::Current()) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance(isolate_);
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->address(i), i);
  }
}


uint32_t ExternalReferenceEncoder::Encode(Address key) const {
  int index = IndexOf(key);
  ASSERT(key == NULL || index >= 0);
  return index >= 0 ?
      ExternalReferenceTable::instance(isolate_)->code(index) : 0;
}


const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
  int index = IndexOf(key);
  return index >= 0 ?
      ExternalReferenceTable::instance(isolate_)->name(index) : NULL;
}


int ExternalReferenceEncoder::IndexOf(Address key) const {
  if (key == NULL) return -1;
  HashMap::Entry* entry =
      const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
  return entry == NULL
      ? -1
      : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
}


void ExternalReferenceEncoder::Put(Address key, int index) {
  HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
  entry->value = reinterpret_cast<void*>(index);
}


ExternalReferenceDecoder::ExternalReferenceDecoder()
    : encodings_(NewArray<Address*>(kTypeCodeCount)),
      isolate_(Isolate::Current()) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance(isolate_);
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    int max = external_references->max_id(type) + 1;
    encodings_[type] = NewArray<Address>(max + 1);
  }
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->code(i), external_references->address(i));
  }
}


ExternalReferenceDecoder::~ExternalReferenceDecoder() {
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    DeleteArray(encodings_[type]);
  }
  DeleteArray(encodings_);
}


bool Serializer::serialization_enabled_ = false;
bool Serializer::too_late_to_enable_now_ = false;


Deserializer::Deserializer(SnapshotByteSource* source)
    : isolate_(NULL),
      source_(source),
      external_reference_decoder_(NULL) {
}


// This routine allocates a new object and keeps track of where objects have
// been allocated, so that back references can be fixed up during
// deserialization.
Address Deserializer::Allocate(int space_index, Space* space, int size) {
  Address address;
  if (!SpaceIsLarge(space_index)) {
    ASSERT(!SpaceIsPaged(space_index) ||
           size <= Page::kPageSize - Page::kObjectStartOffset);
    MaybeObject* maybe_new_allocation;
    if (space_index == NEW_SPACE) {
      maybe_new_allocation =
          reinterpret_cast<NewSpace*>(space)->AllocateRaw(size);
    } else {
      maybe_new_allocation =
          reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
    }
    Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
    HeapObject* new_object = HeapObject::cast(new_allocation);
    address = new_object->address();
    high_water_[space_index] = address + size;
  } else {
    ASSERT(SpaceIsLarge(space_index));
    LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
    Object* new_allocation;
    if (space_index == kLargeData) {
      new_allocation = lo_space->AllocateRaw(size)->ToObjectUnchecked();
    } else if (space_index == kLargeFixedArray) {
      new_allocation =
          lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked();
    } else {
      ASSERT_EQ(kLargeCode, space_index);
      new_allocation = lo_space->AllocateRawCode(size)->ToObjectUnchecked();
    }
    HeapObject* new_object = HeapObject::cast(new_allocation);
    // Record all large objects in the same space.
    address = new_object->address();
    pages_[LO_SPACE].Add(address);
  }
  last_object_address_ = address;
  return address;
}


// This returns the address of an object that has been described in the
// snapshot as being offset bytes back in a particular space.
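// The stored offset is in object-aligned units, so it is shifted left by
// kObjectAlignmentBits to recover a byte offset before being subtracted
// from the space's allocation high-water mark.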
HeapObject* Deserializer::GetAddressFromEnd(int space) {
  int offset = source_->GetInt();
  ASSERT(!SpaceIsLarge(space));
  offset <<= kObjectAlignmentBits;
  return HeapObject::FromAddress(high_water_[space] - offset);
}


// This returns the address of an object that has been described in the
// snapshot as being offset bytes into a particular space.
HeapObject* Deserializer::GetAddressFromStart(int space) {
  int offset = source_->GetInt();
  if (SpaceIsLarge(space)) {
    // Large spaces have one object per 'page'.
    return HeapObject::FromAddress(pages_[LO_SPACE][offset]);
  }
  offset <<= kObjectAlignmentBits;
  if (space == NEW_SPACE) {
    // New space has only one 'page', numbered 0.
    return HeapObject::FromAddress(pages_[space][0] + offset);
  }
  ASSERT(SpaceIsPaged(space));
  int page_of_pointee = offset >> kPageSizeBits;
  Address object_address = pages_[space][page_of_pointee] +
                           (offset & Page::kPageAlignmentMask);
  return HeapObject::FromAddress(object_address);
}


void Deserializer::Deserialize() {
  isolate_ = Isolate::Current();
  // Don't GC while deserializing - just expand the heap.
  AlwaysAllocateScope always_allocate;
  // Don't use the free lists while deserializing.
  LinearAllocationScope allocate_linearly;
  // No active threads.
  ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
  // No active handles.
  ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
  // Make sure the entire partial snapshot cache is traversed, filling it with
  // valid object pointers.
  isolate_->set_serialize_partial_snapshot_cache_length(
      Isolate::kPartialSnapshotCacheCapacity);
  ASSERT_EQ(NULL, external_reference_decoder_);
  external_reference_decoder_ = new ExternalReferenceDecoder();
  isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
  isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);

  isolate_->heap()->set_global_contexts_list(
      isolate_->heap()->undefined_value());
}


void Deserializer::DeserializePartial(Object** root) {
  isolate_ = Isolate::Current();
  // Don't GC while deserializing - just expand the heap.
  AlwaysAllocateScope always_allocate;
  // Don't use the free lists while deserializing.
  LinearAllocationScope allocate_linearly;
  if (external_reference_decoder_ == NULL) {
    external_reference_decoder_ = new ExternalReferenceDecoder();
  }
  VisitPointer(root);
}


Deserializer::~Deserializer() {
  ASSERT(source_->AtEOF());
  if (external_reference_decoder_) {
    delete external_reference_decoder_;
    external_reference_decoder_ = NULL;
  }
}


// This is called on the roots.  It is the driver of the deserialization
// process.  It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space.  Any other space would cause ReadChunk to
  // try to update the remembered set using NULL as the address.
  ReadChunk(start, end, NEW_SPACE, NULL);
}


// This routine writes the new object into the pointer provided as soon as
// space has been allocated for it, before the object's body has been
// deserialized.
// The reason for this early write-back is that otherwise the object is
// written very late, which means the ByteArray map is not set up by the
// time we need to use it to mark the space at the end of a page free (by
// making it into a byte array).
void Deserializer::ReadObject(int space_number,
                              Space* space,
                              Object** write_back) {
  int size = source_->GetInt() << kObjectAlignmentBits;
  Address address = Allocate(space_number, space, size);
  *write_back = HeapObject::FromAddress(address);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);
  if (FLAG_log_snapshot_positions) {
    LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
  }
  ReadChunk(current, limit, space_number, address);
#ifdef DEBUG
  bool is_codespace = (space == HEAP->code_space()) ||
      ((space == HEAP->lo_space()) && (space_number == kLargeCode));
  ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
#endif
}


// This macro is always used with a constant argument so it should all fold
// away to almost nothing in the generated code.  It might be nicer to do
// this with the ternary operator but there are type issues with that.
#define ASSIGN_DEST_SPACE(space_number) \
  Space* dest_space; \
  if (space_number == NEW_SPACE) { \
    dest_space = isolate->heap()->new_space(); \
  } else if (space_number == OLD_POINTER_SPACE) { \
    dest_space = isolate->heap()->old_pointer_space(); \
  } else if (space_number == OLD_DATA_SPACE) { \
    dest_space = isolate->heap()->old_data_space(); \
  } else if (space_number == CODE_SPACE) { \
    dest_space = isolate->heap()->code_space(); \
  } else if (space_number == MAP_SPACE) { \
    dest_space = isolate->heap()->map_space(); \
  } else if (space_number == CELL_SPACE) { \
    dest_space = isolate->heap()->cell_space(); \
  } else { \
    ASSERT(space_number >= LO_SPACE); \
    dest_space = isolate->heap()->lo_space(); \
  }


static const int kUnknownOffsetFromStart = -1;


void Deserializer::ReadChunk(Object** current,
                             Object** limit,
                             int source_space,
                             Address address) {
  Isolate* const isolate = isolate_;
  while (current < limit) {
    int data = source_->Get();
    switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
      case where + how + within + space_number: \
        ASSERT((where & ~kPointedToMask) == 0); \
        ASSERT((how & ~kHowToCodeMask) == 0); \
        ASSERT((within & ~kWhereToPointMask) == 0); \
        ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \
      { \
        bool emit_write_barrier = false; \
        bool current_was_incremented = false; \
        int space_number = space_number_if_any == kAnyOldSpace ? \
                           (data & kSpaceMask) : space_number_if_any; \
        if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
          ASSIGN_DEST_SPACE(space_number) \
          ReadObject(space_number, dest_space, current); \
          emit_write_barrier = \
              (space_number == NEW_SPACE && source_space != NEW_SPACE); \
        } else { \
          Object* new_object = NULL;  /* May not be a real Object pointer. */ \
          if (where == kNewObject) { \
            ASSIGN_DEST_SPACE(space_number) \
            ReadObject(space_number, dest_space, &new_object); \
          } else if (where == kRootArray) { \
            int root_id = source_->GetInt(); \
            new_object = isolate->heap()->roots_address()[root_id]; \
          } else if (where == kPartialSnapshotCache) { \
            int cache_index = source_->GetInt(); \
            new_object = isolate->serialize_partial_snapshot_cache() \
                [cache_index]; \
          } else if (where == kExternalReference) { \
            int reference_id = source_->GetInt(); \
            Address address = external_reference_decoder_-> \
                Decode(reference_id); \
            new_object = reinterpret_cast<Object*>(address); \
          } else if (where == kBackref) { \
            emit_write_barrier = \
                (space_number == NEW_SPACE && source_space != NEW_SPACE); \
            new_object = GetAddressFromEnd(data & kSpaceMask); \
          } else { \
            ASSERT(where == kFromStart); \
            if (offset_from_start == kUnknownOffsetFromStart) { \
              emit_write_barrier = \
                  (space_number == NEW_SPACE && source_space != NEW_SPACE); \
              new_object = GetAddressFromStart(data & kSpaceMask); \
            } else { \
              Address object_address = pages_[space_number][0] + \
                  (offset_from_start << kObjectAlignmentBits); \
              new_object = HeapObject::FromAddress(object_address); \
            } \
          } \
          if (within == kFirstInstruction) { \
            Code* new_code_object = reinterpret_cast<Code*>(new_object); \
            new_object = reinterpret_cast<Object*>( \
                new_code_object->instruction_start()); \
          } \
          if (how == kFromCode) { \
            Address location_of_branch_data = \
                reinterpret_cast<Address>(current); \
            Assembler::set_target_at(location_of_branch_data, \
                                     reinterpret_cast<Address>(new_object)); \
            if (within == kFirstInstruction) { \
              location_of_branch_data += Assembler::kCallTargetSize; \
              current = reinterpret_cast<Object**>(location_of_branch_data); \
              current_was_incremented = true; \
            } \
          } else { \
            *current = new_object; \
          } \
        } \
        if (emit_write_barrier) { \
          isolate->heap()->RecordWrite(address, static_cast<int>( \
              reinterpret_cast<Address>(current) - address)); \
        } \
        if (!current_was_incremented) { \
          current++;  /* Increment current if it wasn't done above. */ \
        } \
        break; \
      } \

// This generates a case and a body for each space.  The large object spaces
// are very rare in snapshots so they are grouped in one body.
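// For example, ONE_PER_SPACE below expands to six dedicated case/body pairs
// (new, old data, old pointer, code, cell and map space) plus three
// large-object case labels that fall through to a single shared body.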
#define ONE_PER_SPACE(where, how, within) \
  CASE_STATEMENT(where, how, within, NEW_SPACE) \
  CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
  CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
  CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, CELL_SPACE) \
  CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, MAP_SPACE) \
  CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, kLargeData) \
  CASE_STATEMENT(where, how, within, kLargeCode) \
  CASE_STATEMENT(where, how, within, kLargeFixedArray) \
  CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with 8 fall-through
// cases and one body.
#define ALL_SPACES(where, how, within) \
  CASE_STATEMENT(where, how, within, NEW_SPACE) \
  CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_STATEMENT(where, how, within, CELL_SPACE) \
  CASE_STATEMENT(where, how, within, MAP_SPACE) \
  CASE_STATEMENT(where, how, within, kLargeData) \
  CASE_STATEMENT(where, how, within, kLargeCode) \
  CASE_STATEMENT(where, how, within, kLargeFixedArray) \
  CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)

#define ONE_PER_CODE_SPACE(where, how, within) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, kLargeCode) \
  CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)

#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number, \
                                       space_number, \
                                       offset_from_start) \
  CASE_STATEMENT(kFromStart, kPlain, kStartOfObject, pseudo_space_number) \
  CASE_BODY(kFromStart, kPlain, kStartOfObject, space_number, offset_from_start)

      // We generate 15 cases and bodies that process special tags that
      // combine the raw data tag and the length into one byte.
#define RAW_CASE(index, size) \
      case kRawData + index: { \
        byte* raw_data_out = reinterpret_cast<byte*>(current); \
        source_->CopyRaw(raw_data_out, size); \
        current = reinterpret_cast<Object**>(raw_data_out + size); \
        break; \
      }
      COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE

      // Deserialize a chunk of raw data that doesn't have one of the popular
      // lengths.
      case kRawData: {
        int size = source_->GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_->CopyRaw(raw_data_out, size);
        current = reinterpret_cast<Object**>(raw_data_out + size);
        break;
      }
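
      // Each case value below is assembled from four fields
      // (where + how + within + space number), so a single tag byte both
      // selects a handler and carries the space it applies to; for
      // instance, ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
      // generates one such family of tags.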

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions.
      ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
      // Find a recently deserialized object using its offset from the
      // current allocation point and write a pointer to it to the current
      // object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first
      // instruction to the current code object or the instruction pointer
      // in a function object.
      ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
      ALL_SPACES(kBackref, kPlain, kFirstInstruction)
      // Find an already deserialized object using its offset from the start
      // and write a pointer to it to the current object.
      ALL_SPACES(kFromStart, kPlain, kStartOfObject)
      ALL_SPACES(kFromStart, kPlain, kFirstInstruction)
      // Find an already deserialized code object using its offset from the
      // start and write a pointer to its first instruction to the current
      // code object.
      ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
      // Find an already deserialized object at one of the predetermined
      // popular offsets from the start and write a pointer to it in the
      // current object.
      COMMON_REFERENCE_PATTERNS(EMIT_COMMON_REFERENCE_PATTERNS)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
      CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart)
      // Find an object in the partial snapshot cache and write a pointer to
      // it to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)
      // Find a code entry in the partial snapshot cache and write a pointer
      // to it to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kFirstInstruction, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kFirstInstruction,
                0,
                kUnknownOffsetFromStart)
      // Find an external reference and write a pointer to it to the current
      // object.
      CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kPlain,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)
      // Find an external reference and write a pointer to it in the current
      // code object.
      CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kFromCode,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ONE_PER_SPACE
#undef ALL_SPACES
#undef EMIT_COMMON_REFERENCE_PATTERNS
#undef ASSIGN_DEST_SPACE

      case kNewPage: {
        int space = source_->Get();
        pages_[space].Add(last_object_address_);
        if (space == CODE_SPACE) {
          CPU::FlushICache(last_object_address_, Page::kPageSize);
        }
        break;
      }

      case kNativesStringResource: {
        int index = source_->Get();
        Vector<const char> source_vector = Natives::GetScriptSource(index);
        NativesExternalStringResource* resource =
            new NativesExternalStringResource(
                isolate->bootstrapper(), source_vector.start());
        *current++ = reinterpret_cast<Object*>(resource);
        break;
      }

      case kSynchronize: {
        // If we get here then it indicates a mismatch between the number of
        // GC roots when serializing and when deserializing.
        UNREACHABLE();
      }

      default:
        UNREACHABLE();
    }
  }
  ASSERT_EQ(current, limit);
}


void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
  const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
  for (int shift = max_shift; shift > 0; shift -= 7) {
    if (integer >= static_cast<uintptr_t>(1u) << shift) {
      Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart");
    }
  }
  PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
}

#ifdef DEBUG

void Deserializer::Synchronize(const char* tag) {
  int data = source_->Get();
  // If this assert fails then it indicates a mismatch between the number of
  // GC roots when serializing and when deserializing.
  ASSERT_EQ(kSynchronize, data);
  do {
    int character = source_->Get();
    if (character == 0) break;
    if (FLAG_debug_serialization) {
      PrintF("%c", character);
    }
  } while (true);
  if (FLAG_debug_serialization) {
    PrintF("\n");
  }
}


void Serializer::Synchronize(const char* tag) {
  sink_->Put(kSynchronize, tag);
  int character;
  do {
    character = *tag++;
    sink_->PutSection(character, "TagCharacter");
  } while (character != 0);
}

#endif

Serializer::Serializer(SnapshotByteSink* sink)
    : sink_(sink),
      current_root_index_(0),
      external_reference_encoder_(new ExternalReferenceEncoder),
      large_object_total_(0) {
  // The serializer is meant to be used only to generate initial heap images
  // from a context in which there is only one isolate.
  ASSERT(Isolate::Current()->IsDefaultIsolate());
  for (int i = 0; i <= LAST_SPACE; i++) {
    fullness_[i] = 0;
  }
}


Serializer::~Serializer() {
  delete external_reference_encoder_;
}


void StartupSerializer::SerializeStrongReferences() {
  Isolate* isolate = Isolate::Current();
  // No active threads.
  CHECK_EQ(NULL, Isolate::Current()->thread_manager()->FirstThreadStateInUse());
  // No active or weak handles.
  CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
  CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
  // We don't support serializing installed extensions.
  for (RegisteredExtension* ext = v8::RegisteredExtension::first_extension();
       ext != NULL;
       ext = ext->next()) {
    CHECK_NE(v8::INSTALLED, ext->state());
  }
  HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}


void PartialSerializer::Serialize(Object** object) {
  this->VisitPointer(object);
  Isolate* isolate = Isolate::Current();

  // After we have done the partial serialization the partial snapshot cache
  // will contain some references needed to decode the partial snapshot.  We
  // fill it up with undefineds so that it has a predictable length, and the
  // deserialization code does not need to be told how long it really is.
  for (int index = isolate->serialize_partial_snapshot_cache_length();
       index < Isolate::kPartialSnapshotCacheCapacity;
       index++) {
    isolate->serialize_partial_snapshot_cache()[index] =
        isolate->heap()->undefined_value();
    startup_serializer_->VisitPointer(
        &isolate->serialize_partial_snapshot_cache()[index]);
  }
  isolate->set_serialize_partial_snapshot_cache_length(
      Isolate::kPartialSnapshotCacheCapacity);
}


void Serializer::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    if ((*current)->IsSmi()) {
      sink_->Put(kRawData, "RawData");
      sink_->PutInt(kPointerSize, "length");
      for (int i = 0; i < kPointerSize; i++) {
        sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
      }
    } else {
      SerializeObject(*current, kPlain, kStartOfObject);
    }
  }
}


// This ensures that the partial snapshot cache keeps things alive during GC
// and tracks their movement.  When it is called during serialization of the
// startup snapshot the partial snapshot is empty, so nothing happens.  When
// the partial (context) snapshot is created, this array is populated with
// the pointers that the partial snapshot will need.  As that happens we emit
// serialized objects to the startup snapshot that correspond to the elements
// of this cache array.  On deserialization we therefore need to visit the
// cache array.  This fills it up with pointers to deserialized objects.
void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
  Isolate* isolate = Isolate::Current();
  visitor->VisitPointers(
      isolate->serialize_partial_snapshot_cache(),
      &isolate->serialize_partial_snapshot_cache()[
          isolate->serialize_partial_snapshot_cache_length()]);
}


// When deserializing we need to set the size of the snapshot cache.  This
// means the root iteration code (above) will iterate over array elements,
// writing the references to deserialized objects in them.
void SerializerDeserializer::SetSnapshotCacheSize(int size) {
  Isolate::Current()->set_serialize_partial_snapshot_cache_length(size);
}


int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
  Isolate* isolate = Isolate::Current();

  for (int i = 0;
       i < isolate->serialize_partial_snapshot_cache_length();
       i++) {
    Object* entry = isolate->serialize_partial_snapshot_cache()[i];
    if (entry == heap_object) return i;
  }

  // We didn't find the object in the cache.  So we add it to the cache and
  // then visit the pointer so that it becomes part of the startup snapshot
  // and we can refer to it from the partial snapshot.
  int length = isolate->serialize_partial_snapshot_cache_length();
  CHECK(length < Isolate::kPartialSnapshotCacheCapacity);
  isolate->serialize_partial_snapshot_cache()[length] = heap_object;
  startup_serializer_->VisitPointer(
      &isolate->serialize_partial_snapshot_cache()[length]);
  // We don't recurse from the startup snapshot generator into the partial
  // snapshot generator.
  ASSERT(length == isolate->serialize_partial_snapshot_cache_length());
  isolate->set_serialize_partial_snapshot_cache_length(length + 1);
  return length;
}


int PartialSerializer::RootIndex(HeapObject* heap_object) {
  for (int i = 0; i < Heap::kRootListLength; i++) {
    Object* root = HEAP->roots_address()[i];
    if (root == heap_object) return i;
  }
  return kInvalidRootIndex;
}


// Encode the location of an already deserialized object in order to write
// its location into a later object.  We can encode the location as an offset
// from the start of the deserialized objects or as an offset backwards from
// the current allocation pointer.
void Serializer::SerializeReferenceToPreviousObject(
    int space,
    int address,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  int offset = CurrentAllocationAddress(space) - address;
  bool from_start = true;
  if (SpaceIsPaged(space)) {
    // For paged space it is simple to encode back from the current
    // allocation if the object is on the same page as the current allocation
    // pointer.
    if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
        (address >> kPageSizeBits)) {
      from_start = false;
      address = offset;
    }
  } else if (space == NEW_SPACE) {
    // For new space it is always simple to encode back from the current
    // allocation.
    if (offset < address) {
      from_start = false;
      address = offset;
    }
  }
  // If we are actually dealing with real offsets (and not a numbering of
  // all objects) then we should shift out the bits that are always 0.
  if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
  if (from_start) {
#define COMMON_REFS_CASE(pseudo_space, actual_space, offset) \
    if (space == actual_space && address == offset && \
        how_to_code == kPlain && where_to_point == kStartOfObject) { \
      sink_->Put(kFromStart + how_to_code + where_to_point + \
                 pseudo_space, "RefSer"); \
    } else  /* NOLINT */
    COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
#undef COMMON_REFS_CASE
    {  /* NOLINT */
      sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
      sink_->PutInt(address, "address");
    }
  } else {
    sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
    sink_->PutInt(address, "address");
  }
}


void StartupSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  if (address_mapper_.IsMapped(heap_object)) {
    int space = SpaceOfAlreadySerializedObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point);
  } else {
    // Object has not yet been serialized.  Serialize it here.
    ObjectSerializer object_serializer(this,
                                       heap_object,
                                       sink_,
                                       how_to_code,
                                       where_to_point);
    object_serializer.Serialize();
  }
}


void StartupSerializer::SerializeWeakReferences() {
  for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length();
       i < Isolate::kPartialSnapshotCacheCapacity;
       i++) {
    sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
    sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
  }
  HEAP->IterateWeakRoots(this, VISIT_ALL);
}


void PartialSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  int root_index;
  if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_->PutInt(root_index, "root_index");
    return;
  }

  if (ShouldBeInThePartialSnapshotCache(heap_object)) {
    int cache_index = PartialSnapshotCacheIndex(heap_object);
    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
               "PartialSnapshotCache");
    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
    return;
  }

  // Pointers from the partial snapshot to the objects in the startup
  // snapshot should go through the root array or through the partial
  // snapshot cache.  If this is not the case you may have to add something
  // to the root array.
  ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
  // All the symbols that the partial snapshot needs should be either in the
  // root table or in the partial snapshot cache.
  ASSERT(!heap_object->IsSymbol());

  if (address_mapper_.IsMapped(heap_object)) {
    int space = SpaceOfAlreadySerializedObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point);
  } else {
    // Object has not yet been serialized.  Serialize it here.
    ObjectSerializer serializer(this,
                                heap_object,
                                sink_,
                                how_to_code,
                                where_to_point);
    serializer.Serialize();
  }
}


void Serializer::ObjectSerializer::Serialize() {
  int space = Serializer::SpaceOfObject(object_);
  int size = object_->Size();

  sink_->Put(kNewObject + reference_representation_ + space,
             "ObjectSerialization");
  sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");

  LOG(i::Isolate::Current(),
      SnapshotPositionEvent(object_->address(), sink_->Position()));

  // Mark this object as already serialized.
  bool start_new_page;
  int offset = serializer_->Allocate(space, size, &start_new_page);
  serializer_->address_mapper()->AddMapping(object_, offset);
  if (start_new_page) {
    sink_->Put(kNewPage, "NewPage");
    sink_->PutSection(space, "NewPageSpace");
  }

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject);
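
  // From here on, bytes_processed_so_far_ tracks how much of the object's
  // body has already been emitted; OutputRawData() uses it to copy any gap
  // between visited pointer fields verbatim into the snapshot.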

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;
  object_->IterateBody(object_->map()->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}


void Serializer::ObjectSerializer::VisitPointers(Object** start,
                                                 Object** end) {
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));

    while (current < end && !(*current)->IsSmi()) {
      serializer_->SerializeObject(*current, kPlain, kStartOfObject);
      bytes_processed_so_far_ += kPointerSize;
      current++;
    }
  }
}


void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
                                                           Address* end) {
  Address references_start = reinterpret_cast<Address>(start);
  OutputRawData(references_start);

  for (Address* current = start; current < end; current++) {
    sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
    int reference_id = serializer_->EncodeExternalReference(*current);
    sink_->PutInt(reference_id, "reference id");
  }
  bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
}


void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  Address target_start = rinfo->target_address_address();
  OutputRawData(target_start);
  Address target = rinfo->target_address();
  uint32_t encoding = serializer_->EncodeExternalReference(target);
  CHECK(target == NULL ? encoding == 0 : encoding != 0);
  int representation;
  // Can't use a ternary operator here because of gcc.
  if (rinfo->IsCodedSpecially()) {
    representation = kStartOfObject + kFromCode;
  } else {
    representation = kStartOfObject + kPlain;
  }
  sink_->Put(kExternalReference + representation, "ExternalReference");
  sink_->PutInt(encoding, "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}


void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
  CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
  Address target_start = rinfo->target_address_address();
  OutputRawData(target_start);
  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
  bytes_processed_so_far_ += rinfo->target_address_size();
}


void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
  Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
  OutputRawData(entry_address);
  serializer_->SerializeObject(target, kPlain, kFirstInstruction);
  bytes_processed_so_far_ += kPointerSize;
}


void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
  // We shouldn't have any global property cell references in code
  // objects in the snapshot.
  UNREACHABLE();
}


void Serializer::ObjectSerializer::VisitExternalAsciiString(
    v8::String::ExternalAsciiStringResource** resource_pointer) {
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
    Object* source = HEAP->natives_source_cache()->get(i);
    if (!source->IsUndefined()) {
      ExternalAsciiString* string = ExternalAsciiString::cast(source);
      typedef v8::String::ExternalAsciiStringResource Resource;
      Resource* resource = string->resource();
      if (resource == *resource_pointer) {
        sink_->Put(kNativesStringResource, "NativesStringResource");
        sink_->PutSection(i, "NativesStringResourceEnd");
        bytes_processed_so_far_ += sizeof(resource);
        return;
      }
    }
  }
  // One of the strings in the natives cache should match the resource.  We
  // can't serialize any other kinds of external strings.
  UNREACHABLE();
}


void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
  Address object_start = object_->address();
  int up_to_offset = static_cast<int>(up_to - object_start);
  int skipped = up_to_offset - bytes_processed_so_far_;
  // This assert will fail if the reloc info gives us the
  // target_address_address locations in a non-ascending order.  Luckily that
  // doesn't happen.
  ASSERT(skipped >= 0);
  if (skipped != 0) {
    Address base = object_start + bytes_processed_so_far_;
#define RAW_CASE(index, length) \
    if (skipped == length) { \
      sink_->PutSection(kRawData + index, "RawDataFixed"); \
    } else  /* NOLINT */
    COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
    {  /* NOLINT */
      sink_->Put(kRawData, "RawData");
      sink_->PutInt(skipped, "length");
    }
    for (int i = 0; i < skipped; i++) {
      unsigned int data = base[i];
      sink_->PutSection(data, "Byte");
    }
    bytes_processed_so_far_ += skipped;
  }
}


int Serializer::SpaceOfObject(HeapObject* object) {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    AllocationSpace s = static_cast<AllocationSpace>(i);
    if (HEAP->InSpace(object, s)) {
      if (i == LO_SPACE) {
        if (object->IsCode()) {
          return kLargeCode;
        } else if (object->IsFixedArray()) {
          return kLargeFixedArray;
        } else {
          return kLargeData;
        }
      }
      return i;
    }
  }
  UNREACHABLE();
  return 0;
}


int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    AllocationSpace s = static_cast<AllocationSpace>(i);
    if (HEAP->InSpace(object, s)) {
      return i;
    }
  }
  UNREACHABLE();
  return 0;
}


int Serializer::Allocate(int space, int size, bool* new_page) {
  CHECK(space >= 0 && space < kNumberOfSpaces);
  if (SpaceIsLarge(space)) {
    // In large object space we merely number the objects instead of trying
    // to determine some sort of address.
    *new_page = true;
    large_object_total_ += size;
    return fullness_[LO_SPACE]++;
  }
  *new_page = false;
  if (fullness_[space] == 0) {
    *new_page = true;
  }
  if (SpaceIsPaged(space)) {
    // Paged spaces are a little special.  We encode their addresses as if
    // the pages were all contiguous and each page were filled up in the
    // range [0, Page::kObjectAreaSize).
    // In practice the pages may not be contiguous and allocation does not
    // start at offset 0 in the page, but this scheme means the deserializer
    // can get the page number quickly by shifting the serialized address.
    CHECK(IsPowerOf2(Page::kPageSize));
    int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
    CHECK(size <= Page::kObjectAreaSize);
    if (used_in_this_page + size > Page::kObjectAreaSize) {
      *new_page = true;
      fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
    }
  }
  int allocation_address = fullness_[space];
  fullness_[space] = allocation_address + size;
  return allocation_address;
}


} }  // namespace v8::internal