// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/deserializer.h"

#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/bootstrapper.h"
#include "src/external-reference-table.h"
#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/snapshot/natives.h"
#include "src/v8.h"
#include "src/v8threads.h"

namespace v8 {
namespace internal {

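// The reservations arrive as a flat list of chunk sizes; the is_last bit on
// a chunk closes out one space's chunks and moves decoding on to the next
// space, in space-enumeration order starting at NEW_SPACE.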
void Deserializer::DecodeReservation(
    Vector<const SerializedData::Reservation> res) {
  DCHECK_EQ(0, reservations_[NEW_SPACE].length());
  STATIC_ASSERT(NEW_SPACE == 0);
  int current_space = NEW_SPACE;
  for (auto& r : res) {
    reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}

void Deserializer::FlushICacheForNewIsolate() {
  DCHECK(!deserializing_user_code_);
  // The entire isolate is newly deserialized. Simply flush all code pages.
  for (Page* p : *isolate_->heap()->code_space()) {
    Assembler::FlushICache(isolate_, p->area_start(),
                           p->area_end() - p->area_start());
  }
}

void Deserializer::FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects() {
  DCHECK(deserializing_user_code_);
  for (Code* code : new_code_objects_) {
    // Record all references to embedded objects in the new code object.
    isolate_->heap()->RecordWritesIntoCode(code);

    if (FLAG_serialize_age_code) code->PreAge(isolate_);
    Assembler::FlushICache(isolate_, code->instruction_start(),
                           code->instruction_size());
  }
}

bool Deserializer::ReserveSpace() {
#ifdef DEBUG
  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
    CHECK(reservations_[i].length() > 0);
  }
#endif  // DEBUG
  DCHECK(allocated_maps_.is_empty());
  if (!isolate_->heap()->ReserveSpace(reservations_, &allocated_maps_))
    return false;
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

void Deserializer::Initialize(Isolate* isolate) {
  DCHECK_NULL(isolate_);
  DCHECK_NOT_NULL(isolate);
  isolate_ = isolate;
  DCHECK_NULL(external_reference_table_);
  external_reference_table_ = ExternalReferenceTable::instance(isolate);
  CHECK_EQ(magic_number_,
           SerializedData::ComputeMagicNumber(external_reference_table_));
}

void Deserializer::Deserialize(Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
  // No active threads.
  DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
  // No active handles.
  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
  // Partial snapshot cache is not yet populated.
  DCHECK(isolate_->partial_snapshot_cache()->is_empty());

  {
    DisallowHeapAllocation no_gc;
    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
    isolate_->heap()->IterateSmiRoots(this);
    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
    isolate_->heap()->RepairFreeListsAfterDeserialization();
    isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
    DeserializeDeferredObjects();
    FlushICacheForNewIsolate();
    RestoreExternalReferenceRedirectors(&accessor_infos_);
  }

  isolate_->heap()->set_native_contexts_list(
      isolate_->heap()->undefined_value());
  // The allocation site list is built during root iteration, but if no sites
  // were encountered then it needs to be initialized to undefined.
  if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
    isolate_->heap()->set_allocation_sites_list(
        isolate_->heap()->undefined_value());
  }

  // Issue code events for newly deserialized code objects.
  LOG_CODE_EVENT(isolate_, LogCodeObjects());
  LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
}

MaybeHandle<Object> Deserializer::DeserializePartial(
    Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
    v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    V8::FatalProcessOutOfMemory("deserialize context");
    return MaybeHandle<Object>();
  }

  AddAttachedObject(global_proxy);

  DisallowHeapAllocation no_gc;
  // Record the code space top so we can verify below that no new code
  // objects were deserialized.
  OldSpace* code_space = isolate_->heap()->code_space();
  Address start_address = code_space->top();
  Object* root;
  VisitPointer(&root);
  DeserializeDeferredObjects();
  DeserializeInternalFields(internal_fields_deserializer);

  isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);

  // No code is deserialized here. If this check fires, that has changed, and
  // logging should be added to notify the profiler et al. of the new code,
  // which must also be flushed from the instruction cache.
  CHECK_EQ(start_address, code_space->top());
  return Handle<Object>(root, isolate);
}

MaybeHandle<HeapObject> Deserializer::DeserializeObject(Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    return MaybeHandle<HeapObject>();
  } else {
    deserializing_user_code_ = true;
    HandleScope scope(isolate);
    Handle<HeapObject> result;
    {
      DisallowHeapAllocation no_gc;
      Object* root;
      VisitPointer(&root);
      DeserializeDeferredObjects();
      FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
      result = Handle<HeapObject>(HeapObject::cast(root));
      isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
    }
    CommitPostProcessedObjects(isolate);
    return scope.CloseAndEscape(result);
  }
}

Deserializer::~Deserializer() {
  // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
  // DCHECK(source_.AtEOF());
#ifdef DEBUG
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    int chunk_index = current_chunk_[space];
    CHECK_EQ(reservations_[space].length(), chunk_index + 1);
    CHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
  }
  CHECK_EQ(allocated_maps_.length(), next_map_index_);
#endif  // DEBUG
}

// This is called on the roots.  It is the driver of the deserialization
// process.  It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space.  Any other space would cause ReadData to
  // try to update the remembered set using NULL as the address.
  ReadData(start, end, NEW_SPACE, NULL);
}

void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
  static const byte expected = kSynchronize;
  CHECK_EQ(expected, source_.Get());
}

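// Deferred object bodies are streamed after the main content. Each entry is
// either an alignment prefix or a back reference to the object whose body
// follows; the sequence is terminated by kSynchronize.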
void Deserializer::DeserializeDeferredObjects() {
  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
    switch (code) {
      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(code);
        break;
      default: {
        int space = code & kSpaceMask;
        DCHECK(space <= kNumberOfSpaces);
        DCHECK(code - space == kNewObject);
        HeapObject* object = GetBackReferencedObject(space);
        int size = source_.GetInt() << kPointerSizeLog2;
        Address obj_address = object->address();
        Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
        Object** end = reinterpret_cast<Object**>(obj_address + size);
        bool filled = ReadData(start, end, space, obj_address);
        CHECK(filled);
        DCHECK(CanBeDeferred(object));
        PostProcessNewObject(object, space);
      }
    }
  }
}

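// Reads the internal fields data section, if present, and replays each
// (object, field index, payload) entry through the embedder-provided
// callback. The payload is copied out of the stream into a temporary buffer
// that is freed after the call, so the callback must not retain the pointer.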
void Deserializer::DeserializeInternalFields(
    v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
  if (!source_.HasMore() || source_.Get() != kInternalFieldsData) return;
  DisallowHeapAllocation no_gc;
  DisallowJavascriptExecution no_js(isolate_);
  DisallowCompilation no_compile(isolate_);
  DCHECK_NOT_NULL(internal_fields_deserializer.callback);
  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
    HandleScope scope(isolate_);
    int space = code & kSpaceMask;
    DCHECK(space <= kNumberOfSpaces);
    DCHECK(code - space == kNewObject);
    Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
                         isolate_);
    int index = source_.GetInt();
    int size = source_.GetInt();
    byte* data = new byte[size];
    source_.CopyRaw(data, size);
    internal_fields_deserializer.callback(v8::Utils::ToLocal(obj), index,
                                          {reinterpret_cast<char*>(data), size},
                                          internal_fields_deserializer.data);
    delete[] data;
  }
}

// Used to insert a deserialized internalized string into the string table.
class StringTableInsertionKey : public HashTableKey {
 public:
  explicit StringTableInsertionKey(String* string)
      : string_(string), hash_(HashForObject(string)) {
    DCHECK(string->IsInternalizedString());
  }

  bool IsMatch(Object* string) override {
    // All entries in the string table have had their hash computed, so
    // compare hashes first to fail fast on a mismatch.
    if (hash_ != HashForObject(string)) return false;
    // We want to compare the content of two internalized strings here.
    return string_->SlowEquals(String::cast(string));
  }

  uint32_t Hash() override { return hash_; }

  uint32_t HashForObject(Object* key) override {
    return String::cast(key)->Hash();
  }

  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
    return handle(string_, isolate);
  }

 private:
  String* string_;
  uint32_t hash_;
  DisallowHeapAllocation no_gc;
};

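// Fixes up a freshly deserialized object that depends on isolate state:
// strings are rehashed and re-canonicalized against the string table,
// allocation sites are linked into the heap's list, and new code objects
// and accessor infos are recorded for later processing.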
HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
  if (deserializing_user_code()) {
    if (obj->IsString()) {
      String* string = String::cast(obj);
      // Uninitialize hash field as the hash seed may have changed.
      string->set_hash_field(String::kEmptyHashField);
      if (string->IsInternalizedString()) {
        // Canonicalize the internalized string. If it already exists in the
        // string table, set it to forward to the existing one.
        StringTableInsertionKey key(string);
        String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
        if (canonical == NULL) {
          new_internalized_strings_.Add(handle(string));
          return string;
        } else {
          string->SetForwardedInternalizedString(canonical);
          return canonical;
        }
      }
    } else if (obj->IsScript()) {
      new_scripts_.Add(handle(Script::cast(obj)));
    } else {
      DCHECK(CanBeDeferred(obj));
    }
  }
  if (obj->IsAllocationSite()) {
    // Allocation sites are present in the snapshot, and must be linked into
    // a list at deserialization time.
    AllocationSite* site = AllocationSite::cast(obj);
    // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
    // as a (weak) root. If this root is relocated correctly, this becomes
    // unnecessary.
    if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
      site->set_weak_next(isolate_->heap()->undefined_value());
    } else {
      site->set_weak_next(isolate_->heap()->allocation_sites_list());
    }
    isolate_->heap()->set_allocation_sites_list(site);
  } else if (obj->IsCode()) {
    // We flush all code pages after deserializing the startup snapshot. In that
    // case, we only need to remember code objects in the large object space.
    // When deserializing user code, remember each individual code object.
    if (deserializing_user_code() || space == LO_SPACE) {
      new_code_objects_.Add(Code::cast(obj));
    }
  } else if (obj->IsAccessorInfo()) {
    if (isolate_->external_reference_redirector()) {
      accessor_infos_.Add(AccessorInfo::cast(obj));
    }
  }
  // Check alignment.
  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
  return obj;
}

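// Performs the allocating half of post-processing once heap allocation is
// allowed again: new internalized strings are inserted into the string
// table, and new scripts are given fresh ids and added to the script list.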
void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
  StringTable::EnsureCapacityForDeserialization(
      isolate, new_internalized_strings_.length());
  for (Handle<String> string : new_internalized_strings_) {
    StringTableInsertionKey key(*string);
    DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
    StringTable::LookupKey(isolate, &key);
  }

  Heap* heap = isolate->heap();
  Factory* factory = isolate->factory();
  for (Handle<Script> script : new_scripts_) {
    // Assign a new script id to avoid collision.
    script->set_id(isolate_->heap()->NextScriptId());
    // Add script to list.
    Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
    heap->SetRootScriptList(*list);
  }
}

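// Resolves a back reference from the byte stream to an already-deserialized
// object: large objects and maps are referenced by index, objects in the
// preallocated spaces by chunk index and offset.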
HeapObject* Deserializer::GetBackReferencedObject(int space) {
  HeapObject* obj;
  SerializerReference back_reference =
      SerializerReference::FromBitfield(source_.GetInt());
  if (space == LO_SPACE) {
    uint32_t index = back_reference.large_object_index();
    obj = deserialized_large_objects_[index];
  } else if (space == MAP_SPACE) {
    int index = back_reference.map_index();
    DCHECK(index < next_map_index_);
    obj = HeapObject::FromAddress(allocated_maps_[index]);
  } else {
    DCHECK(space < kNumberOfPreallocatedSpaces);
    uint32_t chunk_index = back_reference.chunk_index();
    DCHECK_LE(chunk_index, current_chunk_[space]);
    uint32_t chunk_offset = back_reference.chunk_offset();
    Address address = reservations_[space][chunk_index].start + chunk_offset;
    if (next_alignment_ != kWordAligned) {
      int padding = Heap::GetFillToAlign(address, next_alignment_);
      next_alignment_ = kWordAligned;
      DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
      address += padding;
    }
    obj = HeapObject::FromAddress(address);
  }
  if (deserializing_user_code() && obj->IsInternalizedString()) {
    obj = String::cast(obj)->GetForwardedInternalizedString();
  }
  hot_objects_.Add(obj);
  return obj;
}

// This routine allocates a new object, fills it in by reading the payload
// from the source, and writes a pointer to it into the slot provided by the
// caller. Unless its body has been deferred, the object is post-processed
// before the pointer is written back.
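// Object sizes are streamed at word granularity; on a 64-bit target
// (kObjectAlignmentBits == 3, for instance) an encoded size of 5 denotes a
// 40-byte object.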
void Deserializer::ReadObject(int space_number, Object** write_back) {
  Address address;
  HeapObject* obj;
  int size = source_.GetInt() << kObjectAlignmentBits;

  if (next_alignment_ != kWordAligned) {
    int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = Allocate(space_number, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
    Heap* heap = isolate_->heap();
    DCHECK(heap->free_space_map()->IsMap());
    DCHECK(heap->one_pointer_filler_map()->IsMap());
    DCHECK(heap->two_pointer_filler_map()->IsMap());
    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
    address = obj->address();
    next_alignment_ = kWordAligned;
  } else {
    address = Allocate(space_number, size);
    obj = HeapObject::FromAddress(address);
  }

  isolate_->heap()->OnAllocationEvent(obj, size);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);

  if (ReadData(current, limit, space_number, address)) {
    // Only post process if object content has not been deferred.
    obj = PostProcessNewObject(obj, space_number);
  }

  Object* write_back_obj = obj;
  UnalignedCopy(write_back, &write_back_obj);
#ifdef DEBUG
  if (obj->IsCode()) {
    DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
  } else {
    DCHECK(space_number != CODE_SPACE);
  }
#endif  // DEBUG
}

// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offsets for back references, we
// reference large objects by index.
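// For example, a back reference into old space resolves to
// reservations_[OLD_SPACE][chunk_index].start + chunk_offset (see
// GetBackReferencedObject above), whereas a large object back reference is
// simply an index into deserialized_large_objects_.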
Address Deserializer::Allocate(int space_index, int size) {
  if (space_index == LO_SPACE) {
    AlwaysAllocateScope scope(isolate_);
    LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
    Executability exec = static_cast<Executability>(source_.Get());
    AllocationResult result = lo_space->AllocateRaw(size, exec);
    HeapObject* obj = result.ToObjectChecked();
    deserialized_large_objects_.Add(obj);
    return obj->address();
  } else if (space_index == MAP_SPACE) {
    DCHECK_EQ(Map::kSize, size);
    return allocated_maps_[next_map_index_++];
  } else {
    DCHECK(space_index < kNumberOfPreallocatedSpaces);
    Address address = high_water_[space_index];
    DCHECK_NOT_NULL(address);
    high_water_[space_index] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space_index];
    int chunk_index = current_chunk_[space_index];
    CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
#endif
    if (space_index == CODE_SPACE) SkipList::Update(address, size);
    return address;
  }
}

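// Wraps a natives script source in a NativesExternalStringResource and
// writes the resource pointer, which is not a real heap object pointer,
// into the current slot.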
Object** Deserializer::CopyInNativesSource(Vector<const char> source_vector,
                                           Object** current) {
  DCHECK(!isolate_->heap()->deserialization_complete());
  NativesExternalStringResource* resource = new NativesExternalStringResource(
      source_vector.start(), source_vector.length());
  Object* resource_obj = reinterpret_cast<Object*>(resource);
  UnalignedCopy(current++, &resource_obj);
  return current;
}

bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
                            Address current_object_address) {
  Isolate* const isolate = isolate_;
  // Write barrier support costs around 1% in startup time.  In fact there
  // are no new space objects in current boot snapshots, so it's not needed,
  // but that may change.
  bool write_barrier_needed =
      (current_object_address != NULL && source_space != NEW_SPACE &&
       source_space != CODE_SPACE);
  while (current < limit) {
    byte data = source_.Get();
    switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
  case where + how + within + space_number:              \
    STATIC_ASSERT((where & ~kWhereMask) == 0);           \
    STATIC_ASSERT((how & ~kHowToCodeMask) == 0);         \
    STATIC_ASSERT((within & ~kWhereToPointMask) == 0);   \
    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(where, how, within, space_number_if_any)                     \
  {                                                                            \
    bool emit_write_barrier = false;                                           \
    bool current_was_incremented = false;                                      \
    int space_number = space_number_if_any == kAnyOldSpace                     \
                           ? (data & kSpaceMask)                               \
                           : space_number_if_any;                              \
    if (where == kNewObject && how == kPlain && within == kStartOfObject) {    \
      ReadObject(space_number, current);                                       \
      emit_write_barrier = (space_number == NEW_SPACE);                        \
    } else {                                                                   \
      Object* new_object = NULL; /* May not be a real Object pointer. */       \
      if (where == kNewObject) {                                               \
        ReadObject(space_number, &new_object);                                 \
      } else if (where == kBackref) {                                          \
        emit_write_barrier = (space_number == NEW_SPACE);                      \
        new_object = GetBackReferencedObject(data & kSpaceMask);               \
      } else if (where == kBackrefWithSkip) {                                  \
        int skip = source_.GetInt();                                           \
        current = reinterpret_cast<Object**>(                                  \
            reinterpret_cast<Address>(current) + skip);                        \
        emit_write_barrier = (space_number == NEW_SPACE);                      \
        new_object = GetBackReferencedObject(data & kSpaceMask);               \
      } else if (where == kRootArray) {                                        \
        int id = source_.GetInt();                                             \
        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
        new_object = isolate->heap()->root(root_index);                        \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
        hot_objects_.Add(HeapObject::cast(new_object));                        \
      } else if (where == kPartialSnapshotCache) {                             \
        int cache_index = source_.GetInt();                                    \
        new_object = isolate->partial_snapshot_cache()->at(cache_index);       \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else if (where == kExternalReference) {                                \
        int skip = source_.GetInt();                                           \
        current = reinterpret_cast<Object**>(                                  \
            reinterpret_cast<Address>(current) + skip);                        \
        uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());       \
        Address address = external_reference_table_->address(reference_id);    \
        new_object = reinterpret_cast<Object*>(address);                       \
      } else if (where == kAttachedReference) {                                \
        int index = source_.GetInt();                                          \
        new_object = *attached_objects_[index];                                \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else {                                                                 \
        DCHECK(where == kBuiltin);                                             \
        DCHECK(deserializing_user_code());                                     \
        int builtin_id = source_.GetInt();                                     \
        DCHECK_LE(0, builtin_id);                                              \
        DCHECK_LT(builtin_id, Builtins::builtin_count);                        \
        Builtins::Name name = static_cast<Builtins::Name>(builtin_id);         \
        new_object = isolate->builtins()->builtin(name);                       \
        emit_write_barrier = false;                                            \
      }                                                                        \
      if (within == kInnerPointer) {                                           \
        if (new_object->IsCode()) {                                            \
          Code* new_code_object = Code::cast(new_object);                      \
          new_object =                                                         \
              reinterpret_cast<Object*>(new_code_object->instruction_start()); \
        } else {                                                               \
          Cell* cell = Cell::cast(new_object);                                 \
          new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
        }                                                                      \
      }                                                                        \
      if (how == kFromCode) {                                                  \
        Address location_of_branch_data = reinterpret_cast<Address>(current);  \
        Assembler::deserialization_set_special_target_at(                      \
            isolate, location_of_branch_data,                                  \
            Code::cast(HeapObject::FromAddress(current_object_address)),       \
            reinterpret_cast<Address>(new_object));                            \
        location_of_branch_data += Assembler::kSpecialTargetSize;              \
        current = reinterpret_cast<Object**>(location_of_branch_data);         \
        current_was_incremented = true;                                        \
      } else {                                                                 \
        UnalignedCopy(current, &new_object);                                   \
      }                                                                        \
    }                                                                          \
    if (emit_write_barrier && write_barrier_needed) {                          \
      Address current_address = reinterpret_cast<Address>(current);            \
      SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));      \
      isolate->heap()->RecordWrite(                                            \
          HeapObject::FromAddress(current_object_address),                     \
          static_cast<int>(current_address - current_object_address),          \
          *reinterpret_cast<Object**>(current_address));                       \
    }                                                                          \
    if (!current_was_incremented) {                                            \
      current++;                                                               \
    }                                                                          \
    break;                                                                     \
  }

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with fall-through cases
// and one body.
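// For instance, ALL_SPACES(kNewObject, kPlain, kStartOfObject) expands to
// five case labels sharing two bodies: a NEW_SPACE body that may emit write
// barriers, and a kAnyOldSpace body that decodes the space number from the
// bytecode itself.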
#define ALL_SPACES(where, how, within)           \
  CASE_STATEMENT(where, how, within, NEW_SPACE)  \
  CASE_BODY(where, how, within, NEW_SPACE)       \
  CASE_STATEMENT(where, how, within, OLD_SPACE)  \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_STATEMENT(where, how, within, MAP_SPACE)  \
  CASE_STATEMENT(where, how, within, LO_SPACE)   \
  CASE_BODY(where, how, within, kAnyOldSpace)

#define FOUR_CASES(byte_code) \
  case byte_code:             \
  case byte_code + 1:         \
  case byte_code + 2:         \
  case byte_code + 3:

#define SIXTEEN_CASES(byte_code) \
  FOUR_CASES(byte_code)          \
  FOUR_CASES(byte_code + 4)      \
  FOUR_CASES(byte_code + 8)      \
  FOUR_CASES(byte_code + 12)

#define SINGLE_CASE(where, how, within, space) \
  CASE_STATEMENT(where, how, within, space)    \
  CASE_BODY(where, how, within, space)

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ALL_SPACES(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions.  It's an inner
      // pointer because it points at the entry point, not at the start of the
      // code object.
      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
      // Support for pointers into a cell. It's an inner pointer because it
      // points directly at the value field, not the start of the cell object.
      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, OLD_SPACE)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if V8_CODE_EMBEDS_OBJECT_POINTER
      // Deserialize a new object from a pointer found in code and write a
      // pointer to it to the current object. Required only for MIPS, PPC, ARM
      // or S390 with embedded constant pool, and omitted on the other
      // architectures because it is fully unrolled and would cause bloat.
      ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to it to the current
      // object. Required only for MIPS, PPC, ARM or S390 with embedded
      // constant pool.
      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
      // to the current code object or the instruction pointer in a function
      // object.
      ALL_SPACES(kBackref, kFromCode, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
      // Support for direct instruction pointers in functions.
      SINGLE_CASE(kBackref, kPlain, kInnerPointer, CODE_SPACE)
      SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, CODE_SPACE)
      // Support for pointers into a cell.
      SINGLE_CASE(kBackref, kPlain, kInnerPointer, OLD_SPACE)
      SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, OLD_SPACE)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
#if V8_CODE_EMBEDS_OBJECT_POINTER
      // Find an object in the roots array and write a pointer to it in code.
      SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
#endif
      // Find an object in the partial snapshot cache and write a pointer to
      // it to the current object.
      SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      // Find a code entry in the partial snapshot cache and write a pointer
      // to it to the current object.
      SINGLE_CASE(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
      // Find an external reference and write a pointer to it to the current
      // object.
      SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
      // Find an external reference and write a pointer to it in the current
      // code object.
      SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
      // Find an object in the attached references and write a pointer to it to
      // the current object.
      SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
      SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
      SINGLE_CASE(kAttachedReference, kFromCode, kStartOfObject, 0)
      SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
      // Find a builtin and write a pointer to it to the current object.
      SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
      SINGLE_CASE(kBuiltin, kPlain, kInnerPointer, 0)
      SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ALL_SPACES

      case kSkip: {
        int size = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + size);
        break;
      }

      case kInternalReferenceEncoded:
      case kInternalReference: {
        // An internal reference is not encoded via a skip, but as offsets of
        // the pc and the target from the code entry.
        int pc_offset = source_.GetInt();
        int target_offset = source_.GetInt();
        Code* code =
            Code::cast(HeapObject::FromAddress(current_object_address));
        DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
        DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
        Address pc = code->entry() + pc_offset;
        Address target = code->entry() + target_offset;
        Assembler::deserialization_set_target_internal_reference_at(
            isolate, pc, target, data == kInternalReference
                                     ? RelocInfo::INTERNAL_REFERENCE
                                     : RelocInfo::INTERNAL_REFERENCE_ENCODED);
        break;
      }

      case kNop:
        break;

      case kNextChunk: {
        int space = source_.Get();
        DCHECK(space < kNumberOfPreallocatedSpaces);
        int chunk_index = current_chunk_[space];
        const Heap::Reservation& reservation = reservations_[space];
        // Make sure the current chunk is indeed exhausted.
        CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
        // Move to next reserved chunk.
        chunk_index = ++current_chunk_[space];
        CHECK_LT(chunk_index, reservation.length());
        high_water_[space] = reservation[chunk_index].start;
        break;
      }

      case kDeferred: {
        // Deferred can only occur right after the heap object header.
        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
                                                     kPointerSize));
        HeapObject* obj = HeapObject::FromAddress(current_object_address);
        // If the deferred object is a map, its instance type may be used
        // during deserialization. Initialize it with a temporary value.
        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
        current = limit;
        return false;
      }

      case kSynchronize:
        // Getting here indicates a mismatch between the number of GC roots
        // serialized and deserialized.
        CHECK(false);
        break;

      case kNativesStringResource:
        current = CopyInNativesSource(Natives::GetScriptSource(source_.Get()),
                                      current);
        break;

      case kExtraNativesStringResource:
        current = CopyInNativesSource(
            ExtraNatives::GetScriptSource(source_.Get()), current);
        break;

      // Deserialize raw data of variable length.
      case kVariableRawData: {
        int size_in_bytes = source_.GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_.CopyRaw(raw_data_out, size_in_bytes);
        break;
      }

      case kVariableRepeat: {
        int repeats = source_.GetInt();
        Object* object = current[-1];
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(data);
        break;

      STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
      STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
      SIXTEEN_CASES(kRootArrayConstantsWithSkip)
      SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + skip);
        // Fall through.
      }

      SIXTEEN_CASES(kRootArrayConstants)
      SIXTEEN_CASES(kRootArrayConstants + 16) {
        int id = data & kRootArrayConstantsMask;
        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
        Object* object = isolate->heap()->root(root_index);
        DCHECK(!isolate->heap()->InNewSpace(object));
        UnalignedCopy(current++, &object);
        break;
      }

      STATIC_ASSERT(kNumberOfHotObjects == 8);
      FOUR_CASES(kHotObjectWithSkip)
      FOUR_CASES(kHotObjectWithSkip + 4) {
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<Address>(current) + skip);
        // Fall through.
      }

      FOUR_CASES(kHotObject)
      FOUR_CASES(kHotObject + 4) {
        int index = data & kHotObjectMask;
        Object* hot_object = hot_objects_.Get(index);
        UnalignedCopy(current, &hot_object);
        if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
          Address current_address = reinterpret_cast<Address>(current);
          isolate->heap()->RecordWrite(
              HeapObject::FromAddress(current_object_address),
              static_cast<int>(current_address - current_object_address),
              hot_object);
        }
        current++;
        break;
      }

      // Deserialize raw data of fixed length from 1 to 32 words.
      STATIC_ASSERT(kNumberOfFixedRawData == 32);
      SIXTEEN_CASES(kFixedRawData)
      SIXTEEN_CASES(kFixedRawData + 16) {
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
        source_.CopyRaw(raw_data_out, size_in_bytes);
        current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
        break;
      }

      STATIC_ASSERT(kNumberOfFixedRepeat == 16);
      SIXTEEN_CASES(kFixedRepeat) {
        int repeats = data - kFixedRepeatStart;
        Object* object;
        UnalignedCopy(&object, current - 1);
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

#undef SIXTEEN_CASES
#undef FOUR_CASES
#undef SINGLE_CASE

      default:
        CHECK(false);
    }
  }
  CHECK_EQ(limit, current);
  return true;
}
}  // namespace internal
}  // namespace v8