Home | History | Annotate | Download | only in src
      1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 #include "v8.h"
     29 
     30 #include "accessors.h"
     31 #include "api.h"
     32 #include "execution.h"
     33 #include "global-handles.h"
     34 #include "ic-inl.h"
     35 #include "natives.h"
     36 #include "platform.h"
     37 #include "runtime.h"
     38 #include "serialize.h"
     39 #include "stub-cache.h"
     40 #include "v8threads.h"
     41 #include "top.h"
     42 #include "bootstrapper.h"
     43 
     44 namespace v8 {
     45 namespace internal {
     46 
     47 
     48 // -----------------------------------------------------------------------------
     49 // Coding of external references.
     50 
     51 // The encoding of an external reference. The type is in the high word.
     52 // The id is in the low word.
     53 static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
     54   return static_cast<uint32_t>(type) << 16 | id;
     55 }
     56 
     57 
     58 static int* GetInternalPointer(StatsCounter* counter) {
     59   // All counters refer to dummy_counter, if deserializing happens without
     60   // setting up counters.
     61   static int dummy_counter = 0;
     62   return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
     63 }
     64 
     65 
// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
 public:
  // Lazily-constructed process-wide singleton; never deleted.
  // NOTE(review): creation is not thread-safe — presumably only called from
  // a single thread during (de)serialization; confirm before adding callers.
  static ExternalReferenceTable* instance() {
    if (!instance_) instance_ = new ExternalReferenceTable();
    return instance_;
  }

  // Number of registered external references.
  int size() const { return refs_.length(); }

  // Runtime address of reference i.
  Address address(int i) { return refs_[i].address; }

  // Serialized encoding of reference i (see EncodeExternal: type in the
  // high word, id in the low word).
  uint32_t code(int i) { return refs_[i].code; }

  // Human-readable name of reference i.
  const char* name(int i) { return refs_[i].name; }

  // Largest id registered so far for the given type code.
  int max_id(int code) { return max_id_[code]; }

 private:
  static ExternalReferenceTable* instance_;

  // 64 is only the initial capacity hint for the refs_ list.
  ExternalReferenceTable() : refs_(64) { PopulateTable(); }
  ~ExternalReferenceTable() { }

  // One registered reference: its runtime address, its encoded (type, id)
  // code, and a debug-friendly name.
  struct ExternalReferenceEntry {
    Address address;
    uint32_t code;
    const char* name;
  };

  void PopulateTable();

  // For a few types of references, we can get their address from their id.
  void AddFromId(TypeCode type, uint16_t id, const char* name);

  // For other types of references, the caller will figure out the address.
  void Add(Address address, TypeCode type, uint16_t id, const char* name);

  List<ExternalReferenceEntry> refs_;
  int max_id_[kTypeCodeCount];
};
    109 
    110 
// Storage for the lazily-created singleton (see instance()).
ExternalReferenceTable* ExternalReferenceTable::instance_ = NULL;
    112 
    113 
    114 void ExternalReferenceTable::AddFromId(TypeCode type,
    115                                        uint16_t id,
    116                                        const char* name) {
    117   Address address;
    118   switch (type) {
    119     case C_BUILTIN: {
    120       ExternalReference ref(static_cast<Builtins::CFunctionId>(id));
    121       address = ref.address();
    122       break;
    123     }
    124     case BUILTIN: {
    125       ExternalReference ref(static_cast<Builtins::Name>(id));
    126       address = ref.address();
    127       break;
    128     }
    129     case RUNTIME_FUNCTION: {
    130       ExternalReference ref(static_cast<Runtime::FunctionId>(id));
    131       address = ref.address();
    132       break;
    133     }
    134     case IC_UTILITY: {
    135       ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)));
    136       address = ref.address();
    137       break;
    138     }
    139     default:
    140       UNREACHABLE();
    141       return;
    142   }
    143   Add(address, type, id, name);
    144 }
    145 
    146 
    147 void ExternalReferenceTable::Add(Address address,
    148                                  TypeCode type,
    149                                  uint16_t id,
    150                                  const char* name) {
    151   ASSERT_NE(NULL, address);
    152   ExternalReferenceEntry entry;
    153   entry.address = address;
    154   entry.code = EncodeExternal(type, id);
    155   entry.name = name;
    156   ASSERT_NE(0, entry.code);
    157   refs_.Add(entry);
    158   if (id > max_id_[type]) max_id_[type] = id;
    159 }
    160 
    161 
// Fills the table with every external reference the serializer must be able
// to encode/decode.  Ids within each TypeCode must be unique; the
// UNCLASSIFIED / RUNTIME_ENTRY / STUB_CACHE_TABLE ids below are assigned
// manually, so keep them stable when editing.
void ExternalReferenceTable::PopulateTable() {
  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
    max_id_[type_code] = 0;
  }

  // The following populates all of the different type of external references
  // into the ExternalReferenceTable.
  //
  // NOTE: This function was originally 100k of code.  It has since been
  // rewritten to be mostly table driven, as the callback macro style tends to
  // very easily cause code bloat.  Please be careful in the future when adding
  // new references.

  struct RefTableEntry {
    TypeCode type;
    uint16_t id;
    const char* name;
  };

  static const RefTableEntry ref_table[] = {
  // Builtins
#define DEF_ENTRY_C(name, ignored) \
  { C_BUILTIN, \
    Builtins::c_##name, \
    "Builtins::" #name },

  BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C

#define DEF_ENTRY_C(name, ignored) \
  { BUILTIN, \
    Builtins::name, \
    "Builtins::" #name },
#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name, ignored)

  BUILTIN_LIST_C(DEF_ENTRY_C)
  BUILTIN_LIST_A(DEF_ENTRY_A)
  BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A

  // Runtime functions
#define RUNTIME_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::k##name, \
    "Runtime::" #name },

  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY

  // IC utilities
#define IC_ENTRY(name) \
  { IC_UTILITY, \
    IC::k##name, \
    "IC::" #name },

  IC_UTIL_LIST(IC_ENTRY)
#undef IC_ENTRY
  };  // end of ref_table[].

  // These entries can resolve their own address from (type, id).
  for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
    AddFromId(ref_table[i].type, ref_table[i].id, ref_table[i].name);
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Debug addresses
  Add(Debug_Address(Debug::k_after_break_target_address).address(),
      DEBUG_ADDRESS,
      Debug::k_after_break_target_address << kDebugIdShift,
      "Debug::after_break_target_address()");
  Add(Debug_Address(Debug::k_debug_break_return_address).address(),
      DEBUG_ADDRESS,
      Debug::k_debug_break_return_address << kDebugIdShift,
      "Debug::debug_break_return_address()");
  const char* debug_register_format = "Debug::register_address(%i)";
  int dr_format_length = StrLength(debug_register_format);
  for (int i = 0; i < kNumJSCallerSaved; ++i) {
    // The name buffer is intentionally never freed: the table keeps a
    // pointer to it for the lifetime of the process.
    Vector<char> name = Vector<char>::New(dr_format_length + 1);
    OS::SNPrintF(name, debug_register_format, i);
    Add(Debug_Address(Debug::k_register_address, i).address(),
        DEBUG_ADDRESS,
        Debug::k_register_address << kDebugIdShift | i,
        name.start());
  }
#endif

  // Stat counters
  struct StatsRefTableEntry {
    StatsCounter* counter;
    uint16_t id;
    const char* name;
  };

  static const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
  { &Counters::name, \
    Counters::k_##name, \
    "Counters::" #name },

  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
#undef COUNTER_ENTRY
  };  // end of stats_ref_table[].

  // Disabled counters all map to the shared dummy slot (GetInternalPointer).
  for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
    Add(reinterpret_cast<Address>(
            GetInternalPointer(stats_ref_table[i].counter)),
        STATS_COUNTER,
        stats_ref_table[i].id,
        stats_ref_table[i].name);
  }

  // Top addresses
  const char* top_address_format = "Top::%s";

  const char* AddressNames[] = {
#define C(name) #name,
    TOP_ADDRESS_LIST(C)
    TOP_ADDRESS_LIST_PROF(C)
    NULL
#undef C
  };

  // "- 2" drops the "%s" placeholder from the format's length.
  int top_format_length = StrLength(top_address_format) - 2;
  for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
    const char* address_name = AddressNames[i];
    // As above, this buffer is deliberately leaked into the table.
    Vector<char> name =
        Vector<char>::New(top_format_length + StrLength(address_name) + 1);
    const char* chars = name.start();
    OS::SNPrintF(name, top_address_format, address_name);
    Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
  }

  // Extensions
  Add(FUNCTION_ADDR(GCExtension::GC), EXTENSION, 1,
      "GCExtension::GC");

  // Accessors
#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
  Add((Address)&Accessors::name, \
      ACCESSOR, \
      Accessors::k##name, \
      "Accessors::" #name);

  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
#undef ACCESSOR_DESCRIPTOR_DECLARATION

  // Stub cache tables
  Add(SCTableReference::keyReference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      1,
      "StubCache::primary_->key");
  Add(SCTableReference::valueReference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      2,
      "StubCache::primary_->value");
  Add(SCTableReference::keyReference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      3,
      "StubCache::secondary_->key");
  Add(SCTableReference::valueReference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      4,
      "StubCache::secondary_->value");

  // Runtime entries
  Add(ExternalReference::perform_gc_function().address(),
      RUNTIME_ENTRY,
      1,
      "Runtime::PerformGC");
  Add(ExternalReference::random_positive_smi_function().address(),
      RUNTIME_ENTRY,
      2,
      "V8::RandomPositiveSmi");

  // Miscellaneous
  // NOTE(review): UNCLASSIFIED ids start at 2 here, not 1 — presumably id 1
  // was retired; keep the gap when adding entries.
  Add(ExternalReference::the_hole_value_location().address(),
      UNCLASSIFIED,
      2,
      "Factory::the_hole_value().location()");
  Add(ExternalReference::roots_address().address(),
      UNCLASSIFIED,
      3,
      "Heap::roots_address()");
  Add(ExternalReference::address_of_stack_limit().address(),
      UNCLASSIFIED,
      4,
      "StackGuard::address_of_jslimit()");
  Add(ExternalReference::address_of_real_stack_limit().address(),
      UNCLASSIFIED,
      5,
      "StackGuard::address_of_real_jslimit()");
  Add(ExternalReference::address_of_regexp_stack_limit().address(),
      UNCLASSIFIED,
      6,
      "RegExpStack::limit_address()");
  Add(ExternalReference::new_space_start().address(),
      UNCLASSIFIED,
      7,
      "Heap::NewSpaceStart()");
  Add(ExternalReference::new_space_mask().address(),
      UNCLASSIFIED,
      8,
      "Heap::NewSpaceMask()");
  Add(ExternalReference::heap_always_allocate_scope_depth().address(),
      UNCLASSIFIED,
      9,
      "Heap::always_allocate_scope_depth()");
  Add(ExternalReference::new_space_allocation_limit_address().address(),
      UNCLASSIFIED,
      10,
      "Heap::NewSpaceAllocationLimitAddress()");
  Add(ExternalReference::new_space_allocation_top_address().address(),
      UNCLASSIFIED,
      11,
      "Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
  Add(ExternalReference::debug_break().address(),
      UNCLASSIFIED,
      12,
      "Debug::Break()");
  Add(ExternalReference::debug_step_in_fp_address().address(),
      UNCLASSIFIED,
      13,
      "Debug::step_in_fp_addr()");
#endif
  Add(ExternalReference::double_fp_operation(Token::ADD).address(),
      UNCLASSIFIED,
      14,
      "add_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::SUB).address(),
      UNCLASSIFIED,
      15,
      "sub_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::MUL).address(),
      UNCLASSIFIED,
      16,
      "mul_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::DIV).address(),
      UNCLASSIFIED,
      17,
      "div_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::MOD).address(),
      UNCLASSIFIED,
      18,
      "mod_two_doubles");
  Add(ExternalReference::compare_doubles().address(),
      UNCLASSIFIED,
      19,
      "compare_doubles");
#ifdef V8_NATIVE_REGEXP
  Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
      UNCLASSIFIED,
      20,
      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
  Add(ExternalReference::re_check_stack_guard_state().address(),
      UNCLASSIFIED,
      21,
      "RegExpMacroAssembler*::CheckStackGuardState()");
  Add(ExternalReference::re_grow_stack().address(),
      UNCLASSIFIED,
      22,
      "NativeRegExpMacroAssembler::GrowStack()");
  Add(ExternalReference::re_word_character_map().address(),
      UNCLASSIFIED,
      23,
      "NativeRegExpMacroAssembler::word_character_map");
#endif
  // Keyed lookup cache.
  Add(ExternalReference::keyed_lookup_cache_keys().address(),
      UNCLASSIFIED,
      24,
      "KeyedLookupCache::keys()");
  Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
      UNCLASSIFIED,
      25,
      "KeyedLookupCache::field_offsets()");
  Add(ExternalReference::transcendental_cache_array_address().address(),
      UNCLASSIFIED,
      26,
      "TranscendentalCache::caches()");
}
    444 
    445 
    446 ExternalReferenceEncoder::ExternalReferenceEncoder()
    447     : encodings_(Match) {
    448   ExternalReferenceTable* external_references =
    449       ExternalReferenceTable::instance();
    450   for (int i = 0; i < external_references->size(); ++i) {
    451     Put(external_references->address(i), i);
    452   }
    453 }
    454 
    455 
    456 uint32_t ExternalReferenceEncoder::Encode(Address key) const {
    457   int index = IndexOf(key);
    458   return index >=0 ? ExternalReferenceTable::instance()->code(index) : 0;
    459 }
    460 
    461 
    462 const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
    463   int index = IndexOf(key);
    464   return index >=0 ? ExternalReferenceTable::instance()->name(index) : NULL;
    465 }
    466 
    467 
    468 int ExternalReferenceEncoder::IndexOf(Address key) const {
    469   if (key == NULL) return -1;
    470   HashMap::Entry* entry =
    471       const_cast<HashMap &>(encodings_).Lookup(key, Hash(key), false);
    472   return entry == NULL
    473       ? -1
    474       : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
    475 }
    476 
    477 
    478 void ExternalReferenceEncoder::Put(Address key, int index) {
    479   HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
    480   entry->value = reinterpret_cast<void *>(index);
    481 }
    482 
    483 
// Builds the inverse mapping of the encoder: a per-type array indexed by id,
// yielding the runtime address for each encoded reference.
ExternalReferenceDecoder::ExternalReferenceDecoder()
  : encodings_(NewArray<Address*>(kTypeCodeCount)) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance();
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    int max = external_references->max_id(type) + 1;
    // NOTE(review): this allocates max_id + 2 slots — one more than the
    // largest id requires.  Presumably harmless slack; confirm it is not
    // relied upon before tightening.
    encodings_[type] = NewArray<Address>(max + 1);
  }
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->code(i), external_references->address(i));
  }
}
    496 
    497 
    498 ExternalReferenceDecoder::~ExternalReferenceDecoder() {
    499   for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    500     DeleteArray(encodings_[type]);
    501   }
    502   DeleteArray(encodings_);
    503 }
    504 
    505 
// Static (de)serializer state shared across all instances.
bool Serializer::serialization_enabled_ = false;
bool Serializer::too_late_to_enable_now_ = false;
// Created lazily on first deserialization; shared by all Deserializers.
ExternalReferenceDecoder* Deserializer::external_reference_decoder_ = NULL;
    509 
    510 
// A Deserializer reads heap objects back out of the given snapshot stream.
Deserializer::Deserializer(SnapshotByteSource* source) : source_(source) {
}
    513 
    514 
// This routine both allocates a new object, and also keeps
// track of where objects have been allocated so that we can
// fix back references when deserializing.
Address Deserializer::Allocate(int space_index, Space* space, int size) {
  Address address;
  if (!SpaceIsLarge(space_index)) {
    // Regular allocation: objects in a paged space must fit in one page.
    ASSERT(!SpaceIsPaged(space_index) ||
           size <= Page::kPageSize - Page::kObjectStartOffset);
    Object* new_allocation;
    if (space_index == NEW_SPACE) {
      new_allocation = reinterpret_cast<NewSpace*>(space)->AllocateRaw(size);
    } else {
      new_allocation = reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
    }
    HeapObject* new_object = HeapObject::cast(new_allocation);
    // Failure is not expected: deserialization runs inside an
    // AlwaysAllocateScope (see Deserialize/DeserializePartial).
    ASSERT(!new_object->IsFailure());
    address = new_object->address();
    // high_water_ is the per-space allocation frontier; back references are
    // decoded relative to it in GetAddressFromEnd.
    high_water_[space_index] = address + size;
  } else {
    ASSERT(SpaceIsLarge(space_index));
    ASSERT(size > Page::kPageSize - Page::kObjectStartOffset);
    LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
    Object* new_allocation;
    // The three large-space indices select the raw allocation flavor.
    if (space_index == kLargeData) {
      new_allocation = lo_space->AllocateRaw(size);
    } else if (space_index == kLargeFixedArray) {
      new_allocation = lo_space->AllocateRawFixedArray(size);
    } else {
      ASSERT_EQ(kLargeCode, space_index);
      new_allocation = lo_space->AllocateRawCode(size);
    }
    ASSERT(!new_allocation->IsFailure());
    HeapObject* new_object = HeapObject::cast(new_allocation);
    // Record all large objects in the same space.
    address = new_object->address();
    pages_[LO_SPACE].Add(address);
  }
  last_object_address_ = address;
  return address;
}
    555 
    556 
    557 // This returns the address of an object that has been described in the
    558 // snapshot as being offset bytes back in a particular space.
    559 HeapObject* Deserializer::GetAddressFromEnd(int space) {
    560   int offset = source_->GetInt();
    561   ASSERT(!SpaceIsLarge(space));
    562   offset <<= kObjectAlignmentBits;
    563   return HeapObject::FromAddress(high_water_[space] - offset);
    564 }
    565 
    566 
    567 // This returns the address of an object that has been described in the
    568 // snapshot as being offset bytes into a particular space.
    569 HeapObject* Deserializer::GetAddressFromStart(int space) {
    570   int offset = source_->GetInt();
    571   if (SpaceIsLarge(space)) {
    572     // Large spaces have one object per 'page'.
    573     return HeapObject::FromAddress(pages_[LO_SPACE][offset]);
    574   }
    575   offset <<= kObjectAlignmentBits;
    576   if (space == NEW_SPACE) {
    577     // New space has only one space - numbered 0.
    578     return HeapObject::FromAddress(pages_[space][0] + offset);
    579   }
    580   ASSERT(SpaceIsPaged(space));
    581   int page_of_pointee = offset >> kPageSizeBits;
    582   Address object_address = pages_[space][page_of_pointee] +
    583                            (offset & Page::kPageAlignmentMask);
    584   return HeapObject::FromAddress(object_address);
    585 }
    586 
    587 
// Deserializes a complete snapshot into the heap by visiting all roots.
// Statement order below matters: the scopes and the decoder must exist
// before any root is visited.
void Deserializer::Deserialize() {
  // Don't GC while deserializing - just expand the heap.
  AlwaysAllocateScope always_allocate;
  // Don't use the free lists while deserializing.
  LinearAllocationScope allocate_linearly;
  // No active threads.
  ASSERT_EQ(NULL, ThreadState::FirstInUse());
  // No active handles.
  ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
  // Make sure the entire partial snapshot cache is traversed, filling it with
  // valid object pointers.
  partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
  // A full deserialization must be the first one; the decoder is created
  // here and reused by later partial deserializations.
  ASSERT_EQ(NULL, external_reference_decoder_);
  external_reference_decoder_ = new ExternalReferenceDecoder();
  Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
  Heap::IterateWeakRoots(this, VISIT_ALL);
}
    605 
    606 
    607 void Deserializer::DeserializePartial(Object** root) {
    608   // Don't GC while deserializing - just expand the heap.
    609   AlwaysAllocateScope always_allocate;
    610   // Don't use the free lists while deserializing.
    611   LinearAllocationScope allocate_linearly;
    612   if (external_reference_decoder_ == NULL) {
    613     external_reference_decoder_ = new ExternalReferenceDecoder();
    614   }
    615   VisitPointer(root);
    616 }
    617 
    618 
    619 Deserializer::~Deserializer() {
    620   ASSERT(source_->AtEOF());
    621   if (external_reference_decoder_ != NULL) {
    622     delete external_reference_decoder_;
    623     external_reference_decoder_ = NULL;
    624   }
    625 }
    626 
    627 
// This is called on the roots.  It is the driver of the deserialization
// process.  It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space.  Any other space would cause ReadChunk to
  // try to update the remembered set using NULL as the address.
  ReadChunk(start, end, NEW_SPACE, NULL);
}
    635 
    636 
// This routine writes the new object into the pointer provided and then
// returns true if the new object was in young space and false otherwise.
// The reason for this strange interface is that otherwise the object is
// written very late, which means the ByteArray map is not set up by the
// time we need to use it to mark the space at the end of a page free (by
// making it into a byte array).
// NOTE(review): the comment above appears stale — this function returns
// void, not bool; confirm against the header before relying on it.
void Deserializer::ReadObject(int space_number,
                              Space* space,
                              Object** write_back) {
  // Object sizes are serialized in words; convert back to bytes.
  int size = source_->GetInt() << kObjectAlignmentBits;
  Address address = Allocate(space_number, space, size);
  // Hand the new object to the caller before its body is filled in.
  *write_back = HeapObject::FromAddress(address);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);
  if (FLAG_log_snapshot_positions) {
    LOG(SnapshotPositionEvent(address, source_->position()));
  }
  // Recursively decode the object's body from the stream.
  ReadChunk(current, limit, space_number, address);
}
    656 
    657 
    658 #define ONE_CASE_PER_SPACE(base_tag)   \
    659   case (base_tag) + NEW_SPACE:         /* NOLINT */ \
    660   case (base_tag) + OLD_POINTER_SPACE: /* NOLINT */ \
    661   case (base_tag) + OLD_DATA_SPACE:    /* NOLINT */ \
    662   case (base_tag) + CODE_SPACE:        /* NOLINT */ \
    663   case (base_tag) + MAP_SPACE:         /* NOLINT */ \
    664   case (base_tag) + CELL_SPACE:        /* NOLINT */ \
    665   case (base_tag) + kLargeData:        /* NOLINT */ \
    666   case (base_tag) + kLargeCode:        /* NOLINT */ \
    667   case (base_tag) + kLargeFixedArray:  /* NOLINT */
    668 
    669 
    670 void Deserializer::ReadChunk(Object** current,
    671                              Object** limit,
    672                              int space,
    673                              Address address) {
    674   while (current < limit) {
    675     int data = source_->Get();
    676     switch (data) {
    677 #define RAW_CASE(index, size)                                      \
    678       case RAW_DATA_SERIALIZATION + index: {                       \
    679         byte* raw_data_out = reinterpret_cast<byte*>(current);     \
    680         source_->CopyRaw(raw_data_out, size);                      \
    681         current = reinterpret_cast<Object**>(raw_data_out + size); \
    682         break;                                                     \
    683       }
    684       COMMON_RAW_LENGTHS(RAW_CASE)
    685 #undef RAW_CASE
    686       case RAW_DATA_SERIALIZATION: {
    687         int size = source_->GetInt();
    688         byte* raw_data_out = reinterpret_cast<byte*>(current);
    689         source_->CopyRaw(raw_data_out, size);
    690         current = reinterpret_cast<Object**>(raw_data_out + size);
    691         break;
    692       }
    693       case OBJECT_SERIALIZATION + NEW_SPACE: {
    694         ReadObject(NEW_SPACE, Heap::new_space(), current);
    695         if (space != NEW_SPACE) {
    696           Heap::RecordWrite(address, static_cast<int>(
    697               reinterpret_cast<Address>(current) - address));
    698         }
    699         current++;
    700         break;
    701       }
    702       case OBJECT_SERIALIZATION + OLD_DATA_SPACE:
    703         ReadObject(OLD_DATA_SPACE, Heap::old_data_space(), current++);
    704         break;
    705       case OBJECT_SERIALIZATION + OLD_POINTER_SPACE:
    706         ReadObject(OLD_POINTER_SPACE, Heap::old_pointer_space(), current++);
    707         break;
    708       case OBJECT_SERIALIZATION + MAP_SPACE:
    709         ReadObject(MAP_SPACE, Heap::map_space(), current++);
    710         break;
    711       case OBJECT_SERIALIZATION + CODE_SPACE:
    712         ReadObject(CODE_SPACE, Heap::code_space(), current++);
    713         break;
    714       case OBJECT_SERIALIZATION + CELL_SPACE:
    715         ReadObject(CELL_SPACE, Heap::cell_space(), current++);
    716         break;
    717       case OBJECT_SERIALIZATION + kLargeData:
    718         ReadObject(kLargeData, Heap::lo_space(), current++);
    719         break;
    720       case OBJECT_SERIALIZATION + kLargeCode:
    721         ReadObject(kLargeCode, Heap::lo_space(), current++);
    722         break;
    723       case OBJECT_SERIALIZATION + kLargeFixedArray:
    724         ReadObject(kLargeFixedArray, Heap::lo_space(), current++);
    725         break;
    726       case CODE_OBJECT_SERIALIZATION + kLargeCode: {
    727         Object* new_code_object = NULL;
    728         ReadObject(kLargeCode, Heap::lo_space(), &new_code_object);
    729         Code* code_object = reinterpret_cast<Code*>(new_code_object);
    730         // Setting a branch/call to another code object from code.
    731         Address location_of_branch_data = reinterpret_cast<Address>(current);
    732         Assembler::set_target_at(location_of_branch_data,
    733                                  code_object->instruction_start());
    734         location_of_branch_data += Assembler::kCallTargetSize;
    735         current = reinterpret_cast<Object**>(location_of_branch_data);
    736         break;
    737       }
    738       case CODE_OBJECT_SERIALIZATION + CODE_SPACE: {
    739         Object* new_code_object = NULL;
    740         ReadObject(CODE_SPACE, Heap::code_space(), &new_code_object);
    741         Code* code_object = reinterpret_cast<Code*>(new_code_object);
    742         // Setting a branch/call to another code object from code.
    743         Address location_of_branch_data = reinterpret_cast<Address>(current);
    744         Assembler::set_target_at(location_of_branch_data,
    745                                  code_object->instruction_start());
    746         location_of_branch_data += Assembler::kCallTargetSize;
    747         current = reinterpret_cast<Object**>(location_of_branch_data);
    748         break;
    749       }
    750       ONE_CASE_PER_SPACE(BACKREF_SERIALIZATION) {
    751         // Write a backreference to an object we unpacked earlier.
    752         int backref_space = (data & kSpaceMask);
    753         if (backref_space == NEW_SPACE && space != NEW_SPACE) {
    754           Heap::RecordWrite(address, static_cast<int>(
    755               reinterpret_cast<Address>(current) - address));
    756         }
    757         *current++ = GetAddressFromEnd(backref_space);
    758         break;
    759       }
    760       ONE_CASE_PER_SPACE(REFERENCE_SERIALIZATION) {
    761         // Write a reference to an object we unpacked earlier.
    762         int reference_space = (data & kSpaceMask);
    763         if (reference_space == NEW_SPACE && space != NEW_SPACE) {
    764           Heap::RecordWrite(address, static_cast<int>(
    765               reinterpret_cast<Address>(current) - address));
    766         }
    767         *current++ = GetAddressFromStart(reference_space);
    768         break;
    769       }
    770 #define COMMON_REFS_CASE(index, reference_space, address)                      \
    771       case REFERENCE_SERIALIZATION + index: {                                  \
    772         ASSERT(SpaceIsPaged(reference_space));                                 \
    773         Address object_address =                                               \
    774             pages_[reference_space][0] + (address << kObjectAlignmentBits);    \
    775         *current++ = HeapObject::FromAddress(object_address);                  \
    776         break;                                                                 \
    777       }
    778       COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
    779 #undef COMMON_REFS_CASE
    780       ONE_CASE_PER_SPACE(CODE_BACKREF_SERIALIZATION) {
    781         int backref_space = (data & kSpaceMask);
    782         // Can't use Code::cast because heap is not set up yet and assertions
    783         // will fail.
    784         Code* code_object =
    785             reinterpret_cast<Code*>(GetAddressFromEnd(backref_space));
    786         // Setting a branch/call to previously decoded code object from code.
    787         Address location_of_branch_data = reinterpret_cast<Address>(current);
    788         Assembler::set_target_at(location_of_branch_data,
    789                                  code_object->instruction_start());
    790         location_of_branch_data += Assembler::kCallTargetSize;
    791         current = reinterpret_cast<Object**>(location_of_branch_data);
    792         break;
    793       }
    794       ONE_CASE_PER_SPACE(CODE_REFERENCE_SERIALIZATION) {
    795         int backref_space = (data & kSpaceMask);
    796         // Can't use Code::cast because heap is not set up yet and assertions
    797         // will fail.
    798         Code* code_object =
    799             reinterpret_cast<Code*>(GetAddressFromStart(backref_space));
    800         // Setting a branch/call to previously decoded code object from code.
    801         Address location_of_branch_data = reinterpret_cast<Address>(current);
    802         Assembler::set_target_at(location_of_branch_data,
    803                                  code_object->instruction_start());
    804         location_of_branch_data += Assembler::kCallTargetSize;
    805         current = reinterpret_cast<Object**>(location_of_branch_data);
    806         break;
    807       }
    808       case EXTERNAL_REFERENCE_SERIALIZATION: {
    809         int reference_id = source_->GetInt();
    810         Address address = external_reference_decoder_->Decode(reference_id);
    811         *current++ = reinterpret_cast<Object*>(address);
    812         break;
    813       }
    814       case EXTERNAL_BRANCH_TARGET_SERIALIZATION: {
    815         int reference_id = source_->GetInt();
    816         Address address = external_reference_decoder_->Decode(reference_id);
    817         Address location_of_branch_data = reinterpret_cast<Address>(current);
    818         Assembler::set_external_target_at(location_of_branch_data, address);
    819         location_of_branch_data += Assembler::kExternalTargetSize;
    820         current = reinterpret_cast<Object**>(location_of_branch_data);
    821         break;
    822       }
    823       case START_NEW_PAGE_SERIALIZATION: {
    824         int space = source_->Get();
    825         pages_[space].Add(last_object_address_);
    826         break;
    827       }
    828       case NATIVES_STRING_RESOURCE: {
    829         int index = source_->Get();
    830         Vector<const char> source_vector = Natives::GetScriptSource(index);
    831         NativesExternalStringResource* resource =
    832             new NativesExternalStringResource(source_vector.start());
    833         *current++ = reinterpret_cast<Object*>(resource);
    834         break;
    835       }
    836       case ROOT_SERIALIZATION: {
    837         int root_id = source_->GetInt();
    838         *current++ = Heap::roots_address()[root_id];
    839         break;
    840       }
    841       case PARTIAL_SNAPSHOT_CACHE_ENTRY: {
    842         int cache_index = source_->GetInt();
    843         *current++ = partial_snapshot_cache_[cache_index];
    844         break;
    845       }
    846       case SYNCHRONIZE: {
    847         // If we get here then that indicates that you have a mismatch between
    848         // the number of GC roots when serializing and deserializing.
    849         UNREACHABLE();
    850       }
    851       default:
    852         UNREACHABLE();
    853     }
    854   }
    855   ASSERT_EQ(current, limit);
    856 }
    857 
    858 
    859 void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
    860   const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
    861   for (int shift = max_shift; shift > 0; shift -= 7) {
    862     if (integer >= static_cast<uintptr_t>(1u) << shift) {
    863       Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart");
    864     }
    865   }
    866   PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
    867 }
    868 
    869 #ifdef DEBUG
    870 
    871 void Deserializer::Synchronize(const char* tag) {
    872   int data = source_->Get();
    873   // If this assert fails then that indicates that you have a mismatch between
    874   // the number of GC roots when serializing and deserializing.
    875   ASSERT_EQ(SYNCHRONIZE, data);
    876   do {
    877     int character = source_->Get();
    878     if (character == 0) break;
    879     if (FLAG_debug_serialization) {
    880       PrintF("%c", character);
    881     }
    882   } while (true);
    883   if (FLAG_debug_serialization) {
    884     PrintF("\n");
    885   }
    886 }
    887 
    888 
    889 void Serializer::Synchronize(const char* tag) {
    890   sink_->Put(SYNCHRONIZE, tag);
    891   int character;
    892   do {
    893     character = *tag++;
    894     sink_->PutSection(character, "TagCharacter");
    895   } while (character != 0);
    896 }
    897 
    898 #endif
    899 
    900 Serializer::Serializer(SnapshotByteSink* sink)
    901     : sink_(sink),
    902       current_root_index_(0),
    903       external_reference_encoder_(new ExternalReferenceEncoder),
    904       large_object_total_(0) {
    905   for (int i = 0; i <= LAST_SPACE; i++) {
    906     fullness_[i] = 0;
    907   }
    908 }
    909 
    910 
// Release the encoder allocated in the constructor.
Serializer::~Serializer() {
  delete external_reference_encoder_;
}
    914 
    915 
    916 void StartupSerializer::SerializeStrongReferences() {
    917   // No active threads.
    918   CHECK_EQ(NULL, ThreadState::FirstInUse());
    919   // No active or weak handles.
    920   CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
    921   CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
    922   // We don't support serializing installed extensions.
    923   for (RegisteredExtension* ext = RegisteredExtension::first_extension();
    924        ext != NULL;
    925        ext = ext->next()) {
    926     CHECK_NE(v8::INSTALLED, ext->state());
    927   }
    928   Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
    929 }
    930 
    931 
    932 void PartialSerializer::Serialize(Object** object) {
    933   this->VisitPointer(object);
    934 
    935   // After we have done the partial serialization the partial snapshot cache
    936   // will contain some references needed to decode the partial snapshot.  We
    937   // fill it up with undefineds so it has a predictable length so the
    938   // deserialization code doesn't need to know the length.
    939   for (int index = partial_snapshot_cache_length_;
    940        index < kPartialSnapshotCacheCapacity;
    941        index++) {
    942     partial_snapshot_cache_[index] = Heap::undefined_value();
    943     startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
    944   }
    945   partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
    946 }
    947 
    948 
    949 void Serializer::VisitPointers(Object** start, Object** end) {
    950   for (Object** current = start; current < end; current++) {
    951     if ((*current)->IsSmi()) {
    952       sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
    953       sink_->PutInt(kPointerSize, "length");
    954       for (int i = 0; i < kPointerSize; i++) {
    955         sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
    956       }
    957     } else {
    958       SerializeObject(*current, TAGGED_REPRESENTATION);
    959     }
    960   }
    961 }
    962 
    963 
// Backing storage for the partial snapshot cache and its current length
// (see the comment on Iterate below for the cache's role).
Object* SerializerDeserializer::partial_snapshot_cache_[
    kPartialSnapshotCacheCapacity];
int SerializerDeserializer::partial_snapshot_cache_length_ = 0;
    967 
    968 
    969 // This ensures that the partial snapshot cache keeps things alive during GC and
    970 // tracks their movement.  When it is called during serialization of the startup
    971 // snapshot the partial snapshot is empty, so nothing happens.  When the partial
    972 // (context) snapshot is created, this array is populated with the pointers that
    973 // the partial snapshot will need. As that happens we emit serialized objects to
    974 // the startup snapshot that correspond to the elements of this cache array.  On
    975 // deserialization we therefore need to visit the cache array.  This fills it up
    976 // with pointers to deserialized objects.
    977 void SerializerDeserializer::Iterate(ObjectVisitor *visitor) {
    978   visitor->VisitPointers(
    979       &partial_snapshot_cache_[0],
    980       &partial_snapshot_cache_[partial_snapshot_cache_length_]);
    981 }
    982 
    983 
    984 // When deserializing we need to set the size of the snapshot cache.  This means
    985 // the root iteration code (above) will iterate over array elements, writing the
    986 // references to deserialized objects in them.
void SerializerDeserializer::SetSnapshotCacheSize(int size) {
  // Sets the length so that Iterate (above) visits exactly the entries the
  // partial snapshot will fill in during deserialization.
  partial_snapshot_cache_length_ = size;
}
    990 
    991 
    992 int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
    993   for (int i = 0; i < partial_snapshot_cache_length_; i++) {
    994     Object* entry = partial_snapshot_cache_[i];
    995     if (entry == heap_object) return i;
    996   }
    997 
    998   // We didn't find the object in the cache.  So we add it to the cache and
    999   // then visit the pointer so that it becomes part of the startup snapshot
   1000   // and we can refer to it from the partial snapshot.
   1001   int length = partial_snapshot_cache_length_;
   1002   CHECK(length < kPartialSnapshotCacheCapacity);
   1003   partial_snapshot_cache_[length] = heap_object;
   1004   startup_serializer_->VisitPointer(&partial_snapshot_cache_[length]);
   1005   // We don't recurse from the startup snapshot generator into the partial
   1006   // snapshot generator.
   1007   ASSERT(length == partial_snapshot_cache_length_);
   1008   return partial_snapshot_cache_length_++;
   1009 }
   1010 
   1011 
   1012 int PartialSerializer::RootIndex(HeapObject* heap_object) {
   1013   for (int i = 0; i < Heap::kRootListLength; i++) {
   1014     Object* root = Heap::roots_address()[i];
   1015     if (root == heap_object) return i;
   1016   }
   1017   return kInvalidRootIndex;
   1018 }
   1019 
   1020 
   1021 // Encode the location of an already deserialized object in order to write its
   1022 // location into a later object.  We can encode the location as an offset from
   1023 // the start of the deserialized objects or as an offset backwards from the
   1024 // current allocation pointer.
void Serializer::SerializeReferenceToPreviousObject(
    int space,
    int address,
    ReferenceRepresentation reference_representation) {
  // Distance from the object back to the current allocation point; used
  // for the more compact back-reference encoding when applicable.
  int offset = CurrentAllocationAddress(space) - address;
  bool from_start = true;
  if (SpaceIsPaged(space)) {
    // For paged space it is simple to encode back from current allocation if
    // the object is on the same page as the current allocation pointer.
    if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
        (address >> kPageSizeBits)) {
      from_start = false;
      address = offset;
    }
  } else if (space == NEW_SPACE) {
    // For new space it is always simple to encode back from current allocation.
    if (offset < address) {
      from_start = false;
      address = offset;
    }
  }
  // If we are actually dealing with real offsets (and not a numbering of
  // all objects) then we should shift out the bits that are always 0.
  if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
  // On some architectures references between code objects are encoded
  // specially (as relative offsets).  Such references have their own
  // special tags to simplify the deserializer.
  if (reference_representation == CODE_TARGET_REPRESENTATION) {
    if (from_start) {
      sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
      sink_->PutInt(address, "address");
    } else {
      sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
      sink_->PutInt(address, "address");
    }
  } else {
    // Regular absolute references.
    CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
    if (from_start) {
      // There are some common offsets that have their own specialized encoding.
      // The macro expands into an if/else-if chain; the braced block below
      // the #undef is the generic fallback at the end of that chain.
#define COMMON_REFS_CASE(tag, common_space, common_offset)               \
      if (space == common_space && address == common_offset) {           \
        sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer");      \
      } else  /* NOLINT */
      COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
#undef COMMON_REFS_CASE
      {  /* NOLINT */
        sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
        sink_->PutInt(address, "address");
      }
    } else {
      sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
      sink_->PutInt(address, "address");
    }
  }
}
   1081 
   1082 
   1083 void StartupSerializer::SerializeObject(
   1084     Object* o,
   1085     ReferenceRepresentation reference_representation) {
   1086   CHECK(o->IsHeapObject());
   1087   HeapObject* heap_object = HeapObject::cast(o);
   1088 
   1089   if (address_mapper_.IsMapped(heap_object)) {
   1090     int space = SpaceOfAlreadySerializedObject(heap_object);
   1091     int address = address_mapper_.MappedTo(heap_object);
   1092     SerializeReferenceToPreviousObject(space,
   1093                                        address,
   1094                                        reference_representation);
   1095   } else {
   1096     // Object has not yet been serialized.  Serialize it here.
   1097     ObjectSerializer object_serializer(this,
   1098                                        heap_object,
   1099                                        sink_,
   1100                                        reference_representation);
   1101     object_serializer.Serialize();
   1102   }
   1103 }
   1104 
   1105 
   1106 void StartupSerializer::SerializeWeakReferences() {
   1107   for (int i = partial_snapshot_cache_length_;
   1108        i < kPartialSnapshotCacheCapacity;
   1109        i++) {
   1110     sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
   1111     sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
   1112   }
   1113   Heap::IterateWeakRoots(this, VISIT_ALL);
   1114 }
   1115 
   1116 
// Serialize an object for the partial (context) snapshot.  Objects that
// belong to the startup snapshot are not serialized here; they are
// referenced via the root array or the partial snapshot cache instead.
void PartialSerializer::SerializeObject(
    Object* o,
    ReferenceRepresentation reference_representation) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  // Roots are encoded as a root-list index rather than by contents.
  int root_index;
  if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
    sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
    sink_->PutInt(root_index, "root_index");
    return;
  }

  // Objects shared with the startup snapshot go through the cache.
  if (ShouldBeInThePartialSnapshotCache(heap_object)) {
    int cache_index = PartialSnapshotCacheIndex(heap_object);
    sink_->Put(PARTIAL_SNAPSHOT_CACHE_ENTRY, "PartialSnapshotCache");
    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
    return;
  }

  // Pointers from the partial snapshot to the objects in the startup snapshot
  // should go through the root array or through the partial snapshot cache.
  // If this is not the case you may have to add something to the root array.
  ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
  // All the symbols that the partial snapshot needs should be either in the
  // root table or in the partial snapshot cache.
  ASSERT(!heap_object->IsSymbol());

  if (address_mapper_.IsMapped(heap_object)) {
    // Already serialized in this partial snapshot: emit a reference to the
    // earlier copy.
    int space = SpaceOfAlreadySerializedObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       reference_representation);
  } else {
    // Object has not yet been serialized.  Serialize it here.
    ObjectSerializer serializer(this,
                                heap_object,
                                sink_,
                                reference_representation);
    serializer.Serialize();
  }
}
   1160 
   1161 
// Emit the tag, size, allocation bookkeeping, map and body of object_.
// The object is registered in the address mapper before its map and body
// are serialized, so any references back to it seen during that recursion
// resolve through the mapping instead of re-serializing the object.
void Serializer::ObjectSerializer::Serialize() {
  int space = Serializer::SpaceOfObject(object_);
  int size = object_->Size();

  // The tag encodes both "new object follows" and the target space.
  if (reference_representation_ == TAGGED_REPRESENTATION) {
    sink_->Put(OBJECT_SERIALIZATION + space, "ObjectSerialization");
  } else {
    CHECK_EQ(CODE_TARGET_REPRESENTATION, reference_representation_);
    sink_->Put(CODE_OBJECT_SERIALIZATION + space, "ObjectSerialization");
  }
  sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");

  LOG(SnapshotPositionEvent(object_->address(), sink_->Position()));

  // Mark this object as already serialized.
  bool start_new_page;
  int offset = serializer_->Allocate(space, size, &start_new_page);
  serializer_->address_mapper()->AddMapping(object_, offset);
  if (start_new_page) {
    sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage");
    sink_->PutSection(space, "NewPageSpace");
  }

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(object_->map(), TAGGED_REPRESENTATION);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;
  object_->IterateBody(object_->map()->instance_type(), size, this);
  // Flush any trailing raw data after the last pointer/reference.
  OutputRawData(object_->address() + size);
}
   1194 
   1195 
   1196 void Serializer::ObjectSerializer::VisitPointers(Object** start,
   1197                                                  Object** end) {
   1198   Object** current = start;
   1199   while (current < end) {
   1200     while (current < end && (*current)->IsSmi()) current++;
   1201     if (current < end) OutputRawData(reinterpret_cast<Address>(current));
   1202 
   1203     while (current < end && !(*current)->IsSmi()) {
   1204       serializer_->SerializeObject(*current, TAGGED_REPRESENTATION);
   1205       bytes_processed_so_far_ += kPointerSize;
   1206       current++;
   1207     }
   1208   }
   1209 }
   1210 
   1211 
   1212 void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
   1213                                                            Address* end) {
   1214   Address references_start = reinterpret_cast<Address>(start);
   1215   OutputRawData(references_start);
   1216 
   1217   for (Address* current = start; current < end; current++) {
   1218     sink_->Put(EXTERNAL_REFERENCE_SERIALIZATION, "ExternalReference");
   1219     int reference_id = serializer_->EncodeExternalReference(*current);
   1220     sink_->PutInt(reference_id, "reference id");
   1221   }
   1222   bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
   1223 }
   1224 
   1225 
// Serialize a runtime-entry branch target embedded in code, encoded via
// the external reference encoder.
void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  Address target_start = rinfo->target_address_address();
  // Flush raw bytes up to the location of the target address.
  OutputRawData(target_start);
  Address target = rinfo->target_address();
  uint32_t encoding = serializer_->EncodeExternalReference(target);
  // Only a NULL target may encode to 0.
  CHECK(target == NULL ? encoding == 0 : encoding != 0);
  sink_->Put(EXTERNAL_BRANCH_TARGET_SERIALIZATION, "ExternalReference");
  sink_->PutInt(encoding, "reference id");
  bytes_processed_so_far_ += Assembler::kExternalTargetSize;
}
   1236 
   1237 
// Serialize a code-to-code call/branch target using the special code
// target representation.
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
  CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
  Address target_start = rinfo->target_address_address();
  // Flush raw bytes up to the location of the target address.
  OutputRawData(target_start);
  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(target, CODE_TARGET_REPRESENTATION);
  bytes_processed_so_far_ += Assembler::kCallTargetSize;
}
   1246 
   1247 
   1248 void Serializer::ObjectSerializer::VisitExternalAsciiString(
   1249     v8::String::ExternalAsciiStringResource** resource_pointer) {
   1250   Address references_start = reinterpret_cast<Address>(resource_pointer);
   1251   OutputRawData(references_start);
   1252   for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
   1253     Object* source = Heap::natives_source_cache()->get(i);
   1254     if (!source->IsUndefined()) {
   1255       ExternalAsciiString* string = ExternalAsciiString::cast(source);
   1256       typedef v8::String::ExternalAsciiStringResource Resource;
   1257       Resource* resource = string->resource();
   1258       if (resource == *resource_pointer) {
   1259         sink_->Put(NATIVES_STRING_RESOURCE, "NativesStringResource");
   1260         sink_->PutSection(i, "NativesStringResourceEnd");
   1261         bytes_processed_so_far_ += sizeof(resource);
   1262         return;
   1263       }
   1264     }
   1265   }
   1266   // One of the strings in the natives cache should match the resource.  We
   1267   // can't serialize any other kinds of external strings.
   1268   UNREACHABLE();
   1269 }
   1270 
   1271 
// Emit, as raw bytes, the part of object_ between bytes_processed_so_far_
// and |up_to| that has not already been covered by the pointer/reference
// visitors.
void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
  Address object_start = object_->address();
  int up_to_offset = static_cast<int>(up_to - object_start);
  int skipped = up_to_offset - bytes_processed_so_far_;
  // This assert will fail if the reloc info gives us the target_address_address
  // locations in a non-ascending order.  Luckily that doesn't happen.
  ASSERT(skipped >= 0);
  if (skipped != 0) {
    Address base = object_start + bytes_processed_so_far_;
    // Common lengths get a compact single-byte tag via the macro-generated
    // if/else chain; any other length follows a generic tag explicitly.
#define RAW_CASE(index, length)                                                \
    if (skipped == length) {                                                   \
      sink_->PutSection(RAW_DATA_SERIALIZATION + index, "RawDataFixed");       \
    } else  /* NOLINT */
    COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
    {  /* NOLINT */
      sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
      sink_->PutInt(skipped, "length");
    }
    for (int i = 0; i < skipped; i++) {
      unsigned int data = base[i];
      sink_->PutSection(data, "Byte");
    }
    bytes_processed_so_far_ += skipped;
  }
}
   1298 
   1299 
   1300 int Serializer::SpaceOfObject(HeapObject* object) {
   1301   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
   1302     AllocationSpace s = static_cast<AllocationSpace>(i);
   1303     if (Heap::InSpace(object, s)) {
   1304       if (i == LO_SPACE) {
   1305         if (object->IsCode()) {
   1306           return kLargeCode;
   1307         } else if (object->IsFixedArray()) {
   1308           return kLargeFixedArray;
   1309         } else {
   1310           return kLargeData;
   1311         }
   1312       }
   1313       return i;
   1314     }
   1315   }
   1316   UNREACHABLE();
   1317   return 0;
   1318 }
   1319 
   1320 
   1321 int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
   1322   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
   1323     AllocationSpace s = static_cast<AllocationSpace>(i);
   1324     if (Heap::InSpace(object, s)) {
   1325       return i;
   1326     }
   1327   }
   1328   UNREACHABLE();
   1329   return 0;
   1330 }
   1331 
   1332 
// Reserve |size| bytes of simulated allocation in |space| and return the
// resulting offset (for large object space, a running object number
// instead).  *new_page is set when the allocation begins a new page so the
// caller can emit a START_NEW_PAGE_SERIALIZATION tag.
int Serializer::Allocate(int space, int size, bool* new_page) {
  CHECK(space >= 0 && space < kNumberOfSpaces);
  if (SpaceIsLarge(space)) {
    // In large object space we merely number the objects instead of trying to
    // determine some sort of address.
    *new_page = true;
    large_object_total_ += size;
    return fullness_[LO_SPACE]++;
  }
  *new_page = false;
  if (fullness_[space] == 0) {
    // The very first allocation in a space always starts a page.
    *new_page = true;
  }
  if (SpaceIsPaged(space)) {
    // Paged spaces are a little special.  We encode their addresses as if the
    // pages were all contiguous and each page were filled up in the range
    // 0 - Page::kObjectAreaSize.  In practice the pages may not be contiguous
    // and allocation does not start at offset 0 in the page, but this scheme
    // means the deserializer can get the page number quickly by shifting the
    // serialized address.
    CHECK(IsPowerOf2(Page::kPageSize));
    int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
    CHECK(size <= Page::kObjectAreaSize);
    if (used_in_this_page + size > Page::kObjectAreaSize) {
      // Object doesn't fit on the current page: advance to the next page.
      *new_page = true;
      fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
    }
  }
  int allocation_address = fullness_[space];
  fullness_[space] = allocation_address + size;
  return allocation_address;
}
   1365 
   1366 
   1367 } }  // namespace v8::internal
   1368