// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/deserializer.h"

#include "src/bootstrapper.h"
#include "src/external-reference-table.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/snapshot/natives.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

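// The reservation data arrives as a flat stream: chunks for a given space
// appear in order, and the last chunk of each space has is_last() set. For
// example (sizes illustrative, not from a real snapshot), the stream
//   {8K} {4K, last} {32K, last} ...
// describes two chunks for NEW_SPACE followed by one chunk for OLD_SPACE.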
void Deserializer::DecodeReservation(
    Vector<const SerializedData::Reservation> res) {
  DCHECK_EQ(0, reservations_[NEW_SPACE].length());
  STATIC_ASSERT(NEW_SPACE == 0);
  int current_space = NEW_SPACE;
  for (auto& r : res) {
    reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}

void Deserializer::FlushICacheForNewIsolate() {
  DCHECK(!deserializing_user_code_);
  // The entire isolate is newly deserialized. Simply flush all code pages.
  for (Page* p : *isolate_->heap()->code_space()) {
    Assembler::FlushICache(isolate_, p->area_start(),
                           p->area_end() - p->area_start());
  }
}

void Deserializer::FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects() {
  DCHECK(deserializing_user_code_);
  for (Code* code : new_code_objects_) {
    // Record all references to embedded objects in the new code object.
    isolate_->heap()->RecordWritesIntoCode(code);

    if (FLAG_serialize_age_code) code->PreAge(isolate_);
    Assembler::FlushICache(isolate_, code->instruction_start(),
                           code->instruction_size());
  }
}

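// Reserve space in the heap for all decoded reservations. On success,
// high_water_[space] becomes a bump pointer into the first reserved chunk
// of each preallocated space: Allocate() advances it, and a kNextChunk
// opcode in the stream moves it to the start of the next reserved chunk.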
bool Deserializer::ReserveSpace() {
#ifdef DEBUG
  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
    CHECK(reservations_[i].length() > 0);
  }
#endif  // DEBUG
  DCHECK(allocated_maps_.is_empty());
  if (!isolate_->heap()->ReserveSpace(reservations_, &allocated_maps_))
    return false;
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

void Deserializer::Initialize(Isolate* isolate) {
  DCHECK_NULL(isolate_);
  DCHECK_NOT_NULL(isolate);
  isolate_ = isolate;
  DCHECK_NULL(external_reference_table_);
  external_reference_table_ = ExternalReferenceTable::instance(isolate);
  CHECK_EQ(magic_number_,
           SerializedData::ComputeMagicNumber(external_reference_table_));
}

void Deserializer::Deserialize(Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
  // No active threads.
  DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
  // No active handles.
  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
  // Partial snapshot cache is not yet populated.
  DCHECK(isolate_->partial_snapshot_cache()->is_empty());

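  // Deserialize within a single no-GC scope. The order below matters: free
  // lists can only be repaired once the strong roots (which include the
  // filler maps) have been deserialized, and the instruction cache is
  // flushed only after deferred object bodies have been filled in.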
  {
    DisallowHeapAllocation no_gc;
    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
    isolate_->heap()->IterateSmiRoots(this);
    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
    isolate_->heap()->RepairFreeListsAfterDeserialization();
    isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
    DeserializeDeferredObjects();
    FlushICacheForNewIsolate();
  }

  isolate_->heap()->set_native_contexts_list(
      isolate_->heap()->undefined_value());
  // The allocation site list is built during root iteration, but if no sites
  // were encountered then it needs to be initialized to undefined.
  if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
    isolate_->heap()->set_allocation_sites_list(
        isolate_->heap()->undefined_value());
  }

  // Issue code events for newly deserialized code objects.
  LOG_CODE_EVENT(isolate_, LogCodeObjects());
  LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
}

MaybeHandle<Object> Deserializer::DeserializePartial(
    Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    V8::FatalProcessOutOfMemory("deserialize context");
    return MaybeHandle<Object>();
  }

  AddAttachedObject(global_proxy);

  DisallowHeapAllocation no_gc;
  // Keep track of the code space top so we can check below that no new code
  // objects were deserialized.
  OldSpace* code_space = isolate_->heap()->code_space();
  Address start_address = code_space->top();
  Object* root;
  VisitPointer(&root);
  DeserializeDeferredObjects();
  DeserializeInternalFields();

  isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);

  // No code objects are deserialized here. If this check fires, that has
  // changed and logging must be added to notify the profiler et al. of the
  // new code, which also has to be flushed from the instruction cache.
  CHECK_EQ(start_address, code_space->top());
  return Handle<Object>(root, isolate);
}

MaybeHandle<HeapObject> Deserializer::DeserializeObject(Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    return MaybeHandle<HeapObject>();
  } else {
    deserializing_user_code_ = true;
    HandleScope scope(isolate);
    Handle<HeapObject> result;
    {
      DisallowHeapAllocation no_gc;
      Object* root;
      VisitPointer(&root);
      DeserializeDeferredObjects();
      FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
      result = Handle<HeapObject>(HeapObject::cast(root));
      isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
    }
    CommitPostProcessedObjects(isolate);
    return scope.CloseAndEscape(result);
  }
}

Deserializer::~Deserializer() {
  // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
  // DCHECK(source_.AtEOF());
#ifdef DEBUG
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    int chunk_index = current_chunk_[space];
    CHECK_EQ(reservations_[space].length(), chunk_index + 1);
    CHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
  }
  CHECK_EQ(allocated_maps_.length(), next_map_index_);
#endif  // DEBUG
}

// This is called on the roots.  It is the driver of the deserialization
// process.  It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space.  Any other space would cause ReadData to
  // try to update the remembered set using NULL as the address.
  ReadData(start, end, NEW_SPACE, NULL);
}

void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
  static const byte expected = kSynchronize;
  CHECK_EQ(expected, source_.Get());
}

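// The deferred-objects section of the stream is a sequence of entries
// terminated by kSynchronize. Each entry consists of an optional alignment
// prefix, a kNewObject+space opcode, a back reference to the already
// allocated object, its size in words, and the serialized object body. The
// map word was written when the object was allocated, so filling starts one
// word past the object's address.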
void Deserializer::DeserializeDeferredObjects() {
  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
    switch (code) {
      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(code);
        break;
      default: {
        int space = code & kSpaceMask;
        DCHECK(space <= kNumberOfSpaces);
        DCHECK(code - space == kNewObject);
        HeapObject* object = GetBackReferencedObject(space);
        int size = source_.GetInt() << kPointerSizeLog2;
        Address obj_address = object->address();
        Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
        Object** end = reinterpret_cast<Object**>(obj_address + size);
        bool filled = ReadData(start, end, space, obj_address);
        CHECK(filled);
        DCHECK(CanBeDeferred(object));
        PostProcessNewObject(object, space);
      }
    }
  }
}

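// The internal-fields section is optional; if present, it starts with the
// kInternalFieldsData sentinel. Each entry consists of a kNewObject+space
// opcode, a back reference to the owning JSObject, the internal field
// index, the payload size, and the raw payload bytes, which the
// embedder-provided callback is responsible for interpreting. The section
// is terminated by kSynchronize.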
void Deserializer::DeserializeInternalFields() {
  if (!source_.HasMore() || source_.Get() != kInternalFieldsData) return;
  DisallowHeapAllocation no_gc;
  DisallowJavascriptExecution no_js(isolate_);
  DisallowCompilation no_compile(isolate_);
  v8::DeserializeInternalFieldsCallback callback =
      isolate_->deserialize_internal_fields_callback();
  DCHECK_NOT_NULL(callback);
  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
    HandleScope scope(isolate_);
    int space = code & kSpaceMask;
    DCHECK(space <= kNumberOfSpaces);
    DCHECK(code - space == kNewObject);
    Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
                         isolate_);
    int index = source_.GetInt();
    int size = source_.GetInt();
    byte* data = new byte[size];
    source_.CopyRaw(data, size);
    callback(v8::Utils::ToLocal(obj), index,
             {reinterpret_cast<char*>(data), size});
    delete[] data;
  }
}

// Used to insert a deserialized internalized string into the string table.
class StringTableInsertionKey : public HashTableKey {
 public:
  explicit StringTableInsertionKey(String* string)
      : string_(string), hash_(HashForObject(string)) {
    DCHECK(string->IsInternalizedString());
  }

  bool IsMatch(Object* string) override {
    // We know that all entries in a hash table had their hash keys computed.
    // Use that knowledge for a fast failure path.
    if (hash_ != HashForObject(string)) return false;
    // We want to compare the content of two internalized strings here.
    return string_->SlowEquals(String::cast(string));
  }

  uint32_t Hash() override { return hash_; }

  uint32_t HashForObject(Object* key) override {
    return String::cast(key)->Hash();
  }

  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
    return handle(string_, isolate);
  }

 private:
  String* string_;
  uint32_t hash_;
  DisallowHeapAllocation no_gc;
};
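
// Example use (see PostProcessNewObject below): a freshly deserialized
// internalized string is looked up in the string table with this key. If a
// canonical copy already exists, the new string is turned into a forwarder
// to it; otherwise it is queued for insertion into the table in
// CommitPostProcessedObjects().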

HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
  if (deserializing_user_code()) {
    if (obj->IsString()) {
      String* string = String::cast(obj);
      // Uninitialize hash field as the hash seed may have changed.
      string->set_hash_field(String::kEmptyHashField);
      if (string->IsInternalizedString()) {
        // Canonicalize the internalized string. If it already exists in the
        // string table, set it to forward to the existing one.
        StringTableInsertionKey key(string);
        String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
        if (canonical == NULL) {
          new_internalized_strings_.Add(handle(string));
          return string;
        } else {
          string->SetForwardedInternalizedString(canonical);
          return canonical;
        }
      }
    } else if (obj->IsScript()) {
      new_scripts_.Add(handle(Script::cast(obj)));
    } else {
      DCHECK(CanBeDeferred(obj));
    }
  }
  if (obj->IsAllocationSite()) {
    // Allocation sites are present in the snapshot, and must be linked into
    // a list at deserialization time.
    AllocationSite* site = AllocationSite::cast(obj);
    // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
    // as a (weak) root. If this root is relocated correctly, this becomes
    // unnecessary.
    if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
      site->set_weak_next(isolate_->heap()->undefined_value());
    } else {
      site->set_weak_next(isolate_->heap()->allocation_sites_list());
    }
    isolate_->heap()->set_allocation_sites_list(site);
  } else if (obj->IsCode()) {
    // We flush all code pages after deserializing the startup snapshot. In that
    // case, we only need to remember code objects in the large object space.
    // When deserializing user code, remember each individual code object.
    if (deserializing_user_code() || space == LO_SPACE) {
      new_code_objects_.Add(Code::cast(obj));
    }
  }
  // Check alignment.
  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
  return obj;
}
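
// Note that the string forwarding installed above is transparent to the
// rest of deserialization: GetBackReferencedObject() resolves forwarded
// internalized strings, so later back references observe the canonical
// string rather than the discarded duplicate.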

void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
  StringTable::EnsureCapacityForDeserialization(
      isolate, new_internalized_strings_.length());
  for (Handle<String> string : new_internalized_strings_) {
    StringTableInsertionKey key(*string);
    DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
    StringTable::LookupKey(isolate, &key);
  }

  Heap* heap = isolate->heap();
  Factory* factory = isolate->factory();
  for (Handle<Script> script : new_scripts_) {
    // Assign a new script id to avoid collision.
    script->set_id(isolate_->heap()->NextScriptId());
    // Add script to list.
    Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
    heap->SetRootScriptList(*list);
  }
}

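// Back references are encoded as a single bitfield whose interpretation
// depends on the space the referenced object lives in: an index into
// deserialized_large_objects_ for LO_SPACE, an index into allocated_maps_
// for MAP_SPACE, and a (chunk index, chunk offset) pair into the reserved
// chunks for the remaining, bump-pointer-allocated spaces.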
HeapObject* Deserializer::GetBackReferencedObject(int space) {
  HeapObject* obj;
  SerializerReference back_reference =
      SerializerReference::FromBitfield(source_.GetInt());
  if (space == LO_SPACE) {
    uint32_t index = back_reference.large_object_index();
    obj = deserialized_large_objects_[index];
  } else if (space == MAP_SPACE) {
    int index = back_reference.map_index();
    DCHECK(index < next_map_index_);
    obj = HeapObject::FromAddress(allocated_maps_[index]);
  } else {
    DCHECK(space < kNumberOfPreallocatedSpaces);
    uint32_t chunk_index = back_reference.chunk_index();
    DCHECK_LE(chunk_index, current_chunk_[space]);
    uint32_t chunk_offset = back_reference.chunk_offset();
    Address address = reservations_[space][chunk_index].start + chunk_offset;
    if (next_alignment_ != kWordAligned) {
      int padding = Heap::GetFillToAlign(address, next_alignment_);
      next_alignment_ = kWordAligned;
      DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
      address += padding;
    }
    obj = HeapObject::FromAddress(address);
  }
  if (deserializing_user_code() && obj->IsInternalizedString()) {
    obj = String::cast(obj)->GetForwardedInternalizedString();
  }
  hot_objects_.Add(obj);
  return obj;
}

// This routine writes the new object into the pointer provided. The reason
// for this interface is that otherwise the object would be written very
// late, which means the FreeSpace map might not be set up by the time we
// need to use it to mark the space at the end of a page free.
void Deserializer::ReadObject(int space_number, Object** write_back) {
  Address address;
  HeapObject* obj;
  int size = source_.GetInt() << kObjectAlignmentBits;

  if (next_alignment_ != kWordAligned) {
    int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = Allocate(space_number, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
    Heap* heap = isolate_->heap();
    DCHECK(heap->free_space_map()->IsMap());
    DCHECK(heap->one_pointer_filler_map()->IsMap());
    DCHECK(heap->two_pointer_filler_map()->IsMap());
    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
    address = obj->address();
    next_alignment_ = kWordAligned;
  } else {
    address = Allocate(space_number, size);
    obj = HeapObject::FromAddress(address);
  }

  isolate_->heap()->OnAllocationEvent(obj, size);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);

  if (ReadData(current, limit, space_number, address)) {
    // Only post process if object content has not been deferred.
    obj = PostProcessNewObject(obj, space_number);
  }

  Object* write_back_obj = obj;
  UnalignedCopy(write_back, &write_back_obj);
#ifdef DEBUG
  if (obj->IsCode()) {
    DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
  } else {
    DCHECK(space_number != CODE_SPACE);
  }
#endif  // DEBUG
}

// We know the space requirements before deserialization and can
// pre-allocate the reserved space. During deserialization, all we need to
// do is bump up the allocation pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks because a
// single reservation would not fit onto one page. We do not have to keep
// track of when to move to the next chunk; an opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offsets for back references, we
// reference large objects by index.
Address Deserializer::Allocate(int space_index, int size) {
  if (space_index == LO_SPACE) {
    AlwaysAllocateScope scope(isolate_);
    LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
    Executability exec = static_cast<Executability>(source_.Get());
    AllocationResult result = lo_space->AllocateRaw(size, exec);
    HeapObject* obj = result.ToObjectChecked();
    deserialized_large_objects_.Add(obj);
    return obj->address();
  } else if (space_index == MAP_SPACE) {
    DCHECK_EQ(Map::kSize, size);
    return allocated_maps_[next_map_index_++];
  } else {
    DCHECK(space_index < kNumberOfPreallocatedSpaces);
    Address address = high_water_[space_index];
    DCHECK_NOT_NULL(address);
    high_water_[space_index] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space_index];
    int chunk_index = current_chunk_[space_index];
    CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
#endif
    if (space_index == CODE_SPACE) SkipList::Update(address, size);
    return address;
  }
}
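
// Illustrative trace of the bump-pointer path above (addresses made up):
// with an 8K chunk reserved for OLD_SPACE starting at 0x1000, the calls
// Allocate(OLD_SPACE, 32), Allocate(OLD_SPACE, 24), Allocate(OLD_SPACE, 16)
// return 0x1000, 0x1020 and 0x1038 respectively, each simply advancing
// high_water_[OLD_SPACE]; no actual heap allocation happens until a
// kNextChunk opcode moves on to the next reserved chunk.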

Object** Deserializer::CopyInNativesSource(Vector<const char> source_vector,
                                           Object** current) {
  DCHECK(!isolate_->heap()->deserialization_complete());
  NativesExternalStringResource* resource = new NativesExternalStringResource(
      source_vector.start(), source_vector.length());
  Object* resource_obj = reinterpret_cast<Object*>(resource);
  UnalignedCopy(current++, &resource_obj);
  return current;
}

bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
                            Address current_object_address) {
  Isolate* const isolate = isolate_;
  // Write barrier support costs around 1% in startup time.  In fact there
  // are no new space objects in current boot snapshots, so it's not needed,
  // but that may change.
  bool write_barrier_needed =
      (current_object_address != NULL && source_space != NEW_SPACE &&
       source_space != CODE_SPACE);
  while (current < limit) {
    byte data = source_.Get();
    switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
  case where + how + within + space_number:              \
    STATIC_ASSERT((where & ~kWhereMask) == 0);           \
    STATIC_ASSERT((how & ~kHowToCodeMask) == 0);         \
    STATIC_ASSERT((within & ~kWhereToPointMask) == 0);   \
    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(where, how, within, space_number_if_any)                     \
  {                                                                            \
    bool emit_write_barrier = false;                                           \
    bool current_was_incremented = false;                                      \
    int space_number = space_number_if_any == kAnyOldSpace                     \
                           ? (data & kSpaceMask)                               \
                           : space_number_if_any;                              \
    if (where == kNewObject && how == kPlain && within == kStartOfObject) {    \
      ReadObject(space_number, current);                                       \
      emit_write_barrier = (space_number == NEW_SPACE);                        \
    } else {                                                                   \
      Object* new_object = NULL; /* May not be a real Object pointer. */       \
      if (where == kNewObject) {                                               \
        ReadObject(space_number, &new_object);                                 \
      } else if (where == kBackref) {                                          \
        emit_write_barrier = (space_number == NEW_SPACE);                      \
        new_object = GetBackReferencedObject(data & kSpaceMask);               \
      } else if (where == kBackrefWithSkip) {                                  \
        int skip = source_.GetInt();                                           \
        current = reinterpret_cast<Object**>(                                  \
            reinterpret_cast<Address>(current) + skip);                        \
        emit_write_barrier = (space_number == NEW_SPACE);                      \
        new_object = GetBackReferencedObject(data & kSpaceMask);               \
      } else if (where == kRootArray) {                                        \
        int id = source_.GetInt();                                             \
        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
        new_object = isolate->heap()->root(root_index);                        \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
        hot_objects_.Add(HeapObject::cast(new_object));                        \
      } else if (where == kPartialSnapshotCache) {                             \
        int cache_index = source_.GetInt();                                    \
        new_object = isolate->partial_snapshot_cache()->at(cache_index);       \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else if (where == kExternalReference) {                                \
        int skip = source_.GetInt();                                           \
        current = reinterpret_cast<Object**>(                                  \
            reinterpret_cast<Address>(current) + skip);                        \
        uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());       \
        Address address = external_reference_table_->address(reference_id);    \
        new_object = reinterpret_cast<Object*>(address);                       \
      } else if (where == kAttachedReference) {                                \
        int index = source_.GetInt();                                          \
        new_object = *attached_objects_[index];                                \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else {                                                                 \
        DCHECK(where == kBuiltin);                                             \
        DCHECK(deserializing_user_code());                                     \
        int builtin_id = source_.GetInt();                                     \
        DCHECK_LE(0, builtin_id);                                              \
        DCHECK_LT(builtin_id, Builtins::builtin_count);                        \
        Builtins::Name name = static_cast<Builtins::Name>(builtin_id);         \
        new_object = isolate->builtins()->builtin(name);                       \
        emit_write_barrier = false;                                            \
      }                                                                        \
      if (within == kInnerPointer) {                                           \
        if (new_object->IsCode()) {                                            \
          Code* new_code_object = Code::cast(new_object);                      \
          new_object =                                                         \
              reinterpret_cast<Object*>(new_code_object->instruction_start()); \
        } else {                                                               \
          Cell* cell = Cell::cast(new_object);                                 \
          new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
        }                                                                      \
      }                                                                        \
      if (how == kFromCode) {                                                  \
        Address location_of_branch_data = reinterpret_cast<Address>(current);  \
        Assembler::deserialization_set_special_target_at(                      \
            isolate, location_of_branch_data,                                  \
            Code::cast(HeapObject::FromAddress(current_object_address)),       \
            reinterpret_cast<Address>(new_object));                            \
        location_of_branch_data += Assembler::kSpecialTargetSize;              \
        current = reinterpret_cast<Object**>(location_of_branch_data);         \
        current_was_incremented = true;                                        \
      } else {                                                                 \
        UnalignedCopy(current, &new_object);                                   \
      }                                                                        \
    }                                                                          \
    if (emit_write_barrier && write_barrier_needed) {                          \
      Address current_address = reinterpret_cast<Address>(current);            \
      SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));      \
      isolate->heap()->RecordWrite(                                            \
          HeapObject::FromAddress(current_object_address),                     \
          static_cast<int>(current_address - current_object_address),          \
          *reinterpret_cast<Object**>(current_address));                       \
    }                                                                          \
    if (!current_was_incremented) {                                            \
      current++;                                                               \
    }                                                                          \
    break;                                                                     \
  }
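
// Each opcode handled below packs four dimensions into a single byte: where
// the referenced object comes from (kNewObject, kBackref, kRootArray, ...),
// how the pointer is encoded (kPlain or kFromCode), what it points to
// (kStartOfObject or kInnerPointer), and a space number. For example,
// CASE_STATEMENT(kNewObject, kPlain, kStartOfObject, OLD_SPACE) expands to
// the case label kNewObject + kPlain + kStartOfObject + OLD_SPACE.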

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with fall-through cases
// and one body.
#define ALL_SPACES(where, how, within)           \
  CASE_STATEMENT(where, how, within, NEW_SPACE)  \
  CASE_BODY(where, how, within, NEW_SPACE)       \
  CASE_STATEMENT(where, how, within, OLD_SPACE)  \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_STATEMENT(where, how, within, MAP_SPACE)  \
  CASE_STATEMENT(where, how, within, LO_SPACE)   \
  CASE_BODY(where, how, within, kAnyOldSpace)

#define FOUR_CASES(byte_code) \
  case byte_code:             \
  case byte_code + 1:         \
  case byte_code + 2:         \
  case byte_code + 3:

#define SIXTEEN_CASES(byte_code) \
  FOUR_CASES(byte_code)          \
  FOUR_CASES(byte_code + 4)      \
  FOUR_CASES(byte_code + 8)      \
  FOUR_CASES(byte_code + 12)

#define SINGLE_CASE(where, how, within, space) \
  CASE_STATEMENT(where, how, within, space)    \
  CASE_BODY(where, how, within, space)

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ALL_SPACES(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions.  It's an inner
      // pointer because it points at the entry point, not at the start of the
      // code object.
      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
      // Support for pointers into a cell. It's an inner pointer because it
      // points directly at the value field, not the start of the cell object.
      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, OLD_SPACE)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if V8_CODE_EMBEDS_OBJECT_POINTER
      // Deserialize a new object from a pointer found in code and write
      // a pointer to it to the current object. Required only for MIPS, PPC, ARM
      // or S390 with embedded constant pool, and omitted on the other
      // architectures because it is fully unrolled and would cause bloat.
      ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to it to the current
      // object. Required only for MIPS, PPC, ARM or S390 with embedded
      // constant pool.
      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
      // to the current code object or the instruction pointer in a function
      // object.
      ALL_SPACES(kBackref, kFromCode, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
      // Support for direct instruction pointers in functions.
      SINGLE_CASE(kBackref, kPlain, kInnerPointer, CODE_SPACE)
      SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, CODE_SPACE)
      // Support for pointers into a cell.
      SINGLE_CASE(kBackref, kPlain, kInnerPointer, OLD_SPACE)
      SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, OLD_SPACE)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
#if V8_CODE_EMBEDS_OBJECT_POINTER
      // Find an object in the roots array and write a pointer to it in code.
      SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
#endif
      // Find an object in the partial snapshots cache and write a pointer to it
      // to the current object.
      SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      // Find a code entry in the partial snapshots cache and write a pointer
      // to it to the current object.
      SINGLE_CASE(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
      // Find an external reference and write a pointer to it to the current
      // object.
      SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
      // Find an external reference and write a pointer to it in the current
      // code object.
      SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
      // Find an object in the attached references and write a pointer to it to
      // the current object.
      SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
      SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
      SINGLE_CASE(kAttachedReference, kFromCode, kStartOfObject, 0)
      SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
      // Find a builtin and write a pointer to it to the current object.
      SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
      SINGLE_CASE(kBuiltin, kPlain, kInnerPointer, 0)
      SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ALL_SPACES

      case kSkip: {
        int size = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + size);
        break;
      }

      case kInternalReferenceEncoded:
      case kInternalReference: {
        // Internal reference targets are not encoded via skip, but as offsets
        // from the code entry.
        int pc_offset = source_.GetInt();
        int target_offset = source_.GetInt();
        Code* code =
            Code::cast(HeapObject::FromAddress(current_object_address));
        DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
        DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
        Address pc = code->entry() + pc_offset;
        Address target = code->entry() + target_offset;
        Assembler::deserialization_set_target_internal_reference_at(
            isolate, pc, target, data == kInternalReference
                                     ? RelocInfo::INTERNAL_REFERENCE
                                     : RelocInfo::INTERNAL_REFERENCE_ENCODED);
        break;
      }

      case kNop:
        break;

      case kNextChunk: {
        int space = source_.Get();
        DCHECK(space < kNumberOfPreallocatedSpaces);
        int chunk_index = current_chunk_[space];
        const Heap::Reservation& reservation = reservations_[space];
        // Make sure the current chunk is indeed exhausted.
        CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
        // Move to next reserved chunk.
        chunk_index = ++current_chunk_[space];
        CHECK_LT(chunk_index, reservation.length());
        high_water_[space] = reservation[chunk_index].start;
        break;
      }

      case kDeferred: {
        // A deferred object body can only occur right after the heap object
        // header.
        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
                                                     kPointerSize));
        HeapObject* obj = HeapObject::FromAddress(current_object_address);
        // If the deferred object is a map, its instance type may be used
        // during deserialization. Initialize it with a temporary value.
        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
        current = limit;
        return false;
      }

      case kSynchronize:
        // Getting here indicates a mismatch between the number of GC roots
        // when serializing and when deserializing.
        CHECK(false);
        break;

      case kNativesStringResource:
        current = CopyInNativesSource(Natives::GetScriptSource(source_.Get()),
                                      current);
        break;

      case kExtraNativesStringResource:
        current = CopyInNativesSource(
            ExtraNatives::GetScriptSource(source_.Get()), current);
        break;

      // Deserialize raw data of variable length.
      case kVariableRawData: {
        int size_in_bytes = source_.GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_.CopyRaw(raw_data_out, size_in_bytes);
        break;
      }

      case kVariableRepeat: {
        int repeats = source_.GetInt();
        Object* object = current[-1];
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(data);
        break;

      STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
      STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
      SIXTEEN_CASES(kRootArrayConstantsWithSkip)
      SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + skip);
        // Fall through.
      }

      SIXTEEN_CASES(kRootArrayConstants)
      SIXTEEN_CASES(kRootArrayConstants + 16) {
        int id = data & kRootArrayConstantsMask;
        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
        Object* object = isolate->heap()->root(root_index);
        DCHECK(!isolate->heap()->InNewSpace(object));
        UnalignedCopy(current++, &object);
        break;
      }

      STATIC_ASSERT(kNumberOfHotObjects == 8);
      FOUR_CASES(kHotObjectWithSkip)
      FOUR_CASES(kHotObjectWithSkip + 4) {
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<Address>(current) + skip);
        // Fall through.
      }

      FOUR_CASES(kHotObject)
      FOUR_CASES(kHotObject + 4) {
        int index = data & kHotObjectMask;
        Object* hot_object = hot_objects_.Get(index);
        UnalignedCopy(current, &hot_object);
        if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
          Address current_address = reinterpret_cast<Address>(current);
          isolate->heap()->RecordWrite(
              HeapObject::FromAddress(current_object_address),
              static_cast<int>(current_address - current_object_address),
              hot_object);
        }
        current++;
        break;
      }

      // Deserialize raw data of fixed length from 1 to 32 words.
      STATIC_ASSERT(kNumberOfFixedRawData == 32);
      SIXTEEN_CASES(kFixedRawData)
      SIXTEEN_CASES(kFixedRawData + 16) {
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
        source_.CopyRaw(raw_data_out, size_in_bytes);
        current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
        break;
      }

      STATIC_ASSERT(kNumberOfFixedRepeat == 16);
      SIXTEEN_CASES(kFixedRepeat) {
        int repeats = data - kFixedRepeatStart;
        Object* object;
        UnalignedCopy(&object, current - 1);
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

#undef SIXTEEN_CASES
#undef FOUR_CASES
#undef SINGLE_CASE

      default:
        CHECK(false);
    }
  }
  CHECK_EQ(limit, current);
  return true;
}

}  // namespace internal
}  // namespace v8