// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/serializer.h"

#include "src/macro-assembler.h"
#include "src/snapshot/natives.h"

namespace v8 {
namespace internal {

Serializer::Serializer(Isolate* isolate)
    : isolate_(isolate),
      external_reference_encoder_(isolate),
      root_index_map_(isolate),
      recursion_depth_(0),
      code_address_map_(NULL),
      num_maps_(0),
      large_objects_total_size_(0),
      seen_large_objects_index_(0) {
  // The serializer is meant to be used only to generate initial heap images
  // from a context in which there is only one isolate.
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    pending_chunk_[i] = 0;
    max_chunk_size_[i] = static_cast<uint32_t>(
        MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
  }

#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    instance_type_count_ = NewArray<int>(kInstanceTypes);
    instance_type_size_ = NewArray<size_t>(kInstanceTypes);
    for (int i = 0; i < kInstanceTypes; i++) {
      instance_type_count_[i] = 0;
      instance_type_size_[i] = 0;
    }
  } else {
    instance_type_count_ = NULL;
    instance_type_size_ = NULL;
  }
#endif  // OBJECT_PRINT
}

Serializer::~Serializer() {
  if (code_address_map_ != NULL) delete code_address_map_;
#ifdef OBJECT_PRINT
  if (instance_type_count_ != NULL) {
    DeleteArray(instance_type_count_);
    DeleteArray(instance_type_size_);
  }
#endif  // OBJECT_PRINT
}

#ifdef OBJECT_PRINT
void Serializer::CountInstanceType(Map* map, int size) {
  int instance_type = map->instance_type();
  instance_type_count_[instance_type]++;
  instance_type_size_[instance_type] += size;
}
#endif  // OBJECT_PRINT

void Serializer::OutputStatistics(const char* name) {
  if (!FLAG_serialization_statistics) return;
  PrintF("%s:\n", name);
  PrintF("  Spaces (bytes):\n");
  for (int space = 0; space < kNumberOfSpaces; space++) {
    PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
  }
  PrintF("\n");
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    size_t s = pending_chunk_[space];
    for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
    PrintF("%16" PRIuS, s);
  }
  PrintF("%16d\n", large_objects_total_size_);
#ifdef OBJECT_PRINT
  PrintF("  Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name)                                 \
  if (instance_type_count_[Name]) {                               \
    PrintF("%10d %10" PRIuS "  %s\n", instance_type_count_[Name], \
           instance_type_size_[Name], #Name);                     \
  }
  INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE
  PrintF("\n");
#endif  // OBJECT_PRINT
}

void Serializer::SerializeDeferredObjects() {
  while (deferred_objects_.length() > 0) {
    HeapObject* obj = deferred_objects_.RemoveLast();
    ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject);
    obj_serializer.SerializeDeferred();
  }
  sink_.Put(kSynchronize, "Finished with deferred objects");
}

void Serializer::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    if ((*current)->IsSmi()) {
      PutSmi(Smi::cast(*current));
    } else {
      SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
    }
  }
}

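// Flattens the per-space allocation bookkeeping into the list the
// deserializer uses to reserve memory up front: for each preallocated space,
// one entry per completed chunk, then the still-pending chunk (or a
// zero-sized entry if the space was never used), with the final entry for
// each space flagged via mark_as_last(). Map space and large object space
// follow as single entries, since those objects are allocated one by one.
// For example, a space with completed chunks of 16384 and 8192 bytes and a
// pending chunk of 1024 bytes would contribute the entries
// [16384, 8192, 1024*], where * marks the last reservation for that space.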
void Serializer::EncodeReservations(
    List<SerializedData::Reservation>* out) const {
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    for (int j = 0; j < completed_chunks_[i].length(); j++) {
      out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
    }

    if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
      out->Add(SerializedData::Reservation(pending_chunk_[i]));
    }
    out->last().mark_as_last();
  }
  out->Add(SerializedData::Reservation(num_maps_ * Map::kSize));
  out->last().mark_as_last();
  out->Add(SerializedData::Reservation(large_objects_total_size_));
  out->last().mark_as_last();
}

#ifdef DEBUG
bool Serializer::BackReferenceIsAlreadyAllocated(
    SerializerReference reference) {
  DCHECK(reference.is_back_reference());
  AllocationSpace space = reference.space();
  if (space == LO_SPACE) {
    return reference.large_object_index() < seen_large_objects_index_;
  } else if (space == MAP_SPACE) {
    return reference.map_index() < num_maps_;
  } else {
    int chunk_index = reference.chunk_index();
    if (chunk_index == completed_chunks_[space].length()) {
      return reference.chunk_offset() < pending_chunk_[space];
    } else {
      return chunk_index < completed_chunks_[space].length() &&
             reference.chunk_offset() < completed_chunks_[space][chunk_index];
    }
  }
}
#endif  // DEBUG

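// Hot objects are a small working set of recently serialized objects (see
// HotObjectsList). If |obj| is among them, it can be encoded as a single
// opcode byte (kHotObject + index), or as kHotObjectWithSkip + index
// followed by the skip distance, instead of a full back reference.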
bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
                                    WhereToPoint where_to_point, int skip) {
  if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
  // Encode a reference to a hot object by its index in the working set.
  int index = hot_objects_.Find(obj);
  if (index == HotObjectsList::kNotFound) return false;
  DCHECK(index >= 0 && index < kNumberOfHotObjects);
  if (FLAG_trace_serializer) {
    PrintF(" Encoding hot object %d:", index);
    obj->ShortPrint();
    PrintF("\n");
  }
  if (skip != 0) {
    sink_.Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
    sink_.PutInt(skip, "HotObjectSkipDistance");
  } else {
    sink_.Put(kHotObject + index, "HotObject");
  }
  return true;
}

bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
                                        WhereToPoint where_to_point, int skip) {
  SerializerReference reference = reference_map_.Lookup(obj);
  if (!reference.is_valid()) return false;
  // Encode the location of an already deserialized object in order to write
  // its location into a later object.  We can encode the location as an
  // offset from the start of the deserialized objects or as an offset
  // backwards from the current allocation pointer.
  if (reference.is_attached_reference()) {
    FlushSkip(skip);
    if (FLAG_trace_serializer) {
      PrintF(" Encoding attached reference %d\n",
             reference.attached_reference_index());
    }
    PutAttachedReference(reference, how_to_code, where_to_point);
  } else {
    DCHECK(reference.is_back_reference());
    if (FLAG_trace_serializer) {
      PrintF(" Encoding back reference to: ");
      obj->ShortPrint();
      PrintF("\n");
    }

    PutAlignmentPrefix(obj);
    AllocationSpace space = reference.space();
    if (skip == 0) {
      sink_.Put(kBackref + how_to_code + where_to_point + space, "BackRef");
    } else {
      sink_.Put(kBackrefWithSkip + how_to_code + where_to_point + space,
                "BackRefWithSkip");
      sink_.PutInt(skip, "BackRefSkipDistance");
    }
    PutBackReference(obj, reference);
  }
  return true;
}

void Serializer::PutRoot(int root_index, HeapObject* object,
                         SerializerDeserializer::HowToCode how_to_code,
                         SerializerDeserializer::WhereToPoint where_to_point,
                         int skip) {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding root %d:", root_index);
    object->ShortPrint();
    PrintF("\n");
  }

  // Assert that the first 32 root array items are a conscious choice. They are
  // chosen so that the most common ones can be encoded more efficiently.
  STATIC_ASSERT(Heap::kEmptyDescriptorArrayRootIndex ==
                kNumberOfRootArrayConstants - 1);

  if (how_to_code == kPlain && where_to_point == kStartOfObject &&
      root_index < kNumberOfRootArrayConstants &&
      !isolate()->heap()->InNewSpace(object)) {
    if (skip == 0) {
      sink_.Put(kRootArrayConstants + root_index, "RootConstant");
    } else {
      sink_.Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
      sink_.PutInt(skip, "SkipInPutRoot");
    }
  } else {
    FlushSkip(skip);
    sink_.Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_.PutInt(root_index, "root_index");
    hot_objects_.Add(object);
  }
}

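// A Smi is an immediate: the tagged word itself encodes the integer, so
// there is no heap object to serialize. We emit the raw tagged word byte
// by byte after a one-pointer raw data opcode. (Note that |smi| is the
// tagged value; &smi points at the local copy of that word.)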
void Serializer::PutSmi(Smi* smi) {
  sink_.Put(kOnePointerRawData, "Smi");
  byte* bytes = reinterpret_cast<byte*>(&smi);
  for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
}

void Serializer::PutBackReference(HeapObject* object,
                                  SerializerReference reference) {
  DCHECK(BackReferenceIsAlreadyAllocated(reference));
  sink_.PutInt(reference.back_reference(), "BackRefValue");
  hot_objects_.Add(object);
}

void Serializer::PutAttachedReference(SerializerReference reference,
                                      HowToCode how_to_code,
                                      WhereToPoint where_to_point) {
  DCHECK(reference.is_attached_reference());
  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
         (how_to_code == kFromCode && where_to_point == kStartOfObject) ||
         (how_to_code == kFromCode && where_to_point == kInnerPointer));
  sink_.Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
  sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}

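// If the object requires one of the three non-word alignments (cf. the
// DCHECK below), emit an alignment prefix opcode and return the maximum
// number of filler bytes the deserializer may need to insert; callers
// account for that slack when reserving space. Returns 0 for word-aligned
// objects.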
int Serializer::PutAlignmentPrefix(HeapObject* object) {
  AllocationAlignment alignment = object->RequiredAlignment();
  if (alignment != kWordAligned) {
    DCHECK(1 <= alignment && alignment <= 3);
    byte prefix = (kAlignmentPrefix - 1) + alignment;
    sink_.Put(prefix, "Alignment");
    return Heap::GetMaximumFillToAlign(alignment);
  }
  return 0;
}

SerializerReference Serializer::AllocateLargeObject(int size) {
  // Large objects are allocated one-by-one when deserializing. We do not
  // have to keep track of multiple chunks.
  large_objects_total_size_ += size;
  return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
}

SerializerReference Serializer::AllocateMap() {
  // Maps are allocated one-by-one when deserializing.
  return SerializerReference::MapReference(num_maps_++);
}

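// Simulates the deserializer's bump-pointer allocation for |space|. Chunks
// mirror page-sized areas (max_chunk_size is derived from PageAreaSize in
// the constructor): when |size| does not fit into the current pending
// chunk, the chunk is closed with a kNextChunk bytecode and a new one is
// started. The returned back reference is the triple
// (space, chunk index, offset within chunk).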
SerializerReference Serializer::Allocate(AllocationSpace space, int size) {
  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
  DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
  uint32_t new_chunk_size = pending_chunk_[space] + size;
  if (new_chunk_size > max_chunk_size(space)) {
    // The new chunk size would not fit onto a single page. Complete the
    // current chunk and start a new one.
    sink_.Put(kNextChunk, "NextChunk");
    sink_.Put(space, "NextChunkSpace");
    completed_chunks_[space].Add(pending_chunk_[space]);
    pending_chunk_[space] = 0;
    new_chunk_size = size;
  }
  uint32_t offset = pending_chunk_[space];
  pending_chunk_[space] = new_chunk_size;
  return SerializerReference::BackReference(
      space, completed_chunks_[space].length(), offset);
}

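// Worked example (assuming a 64-bit target): three kNop bytes cover
// GetInt's possible over-read, then further kNops are emitted until
// sink_.Position() is pointer-aligned so the checksum is computed over
// whole words.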
void Serializer::Pad() {
  // The non-branching GetInt will read up to 3 bytes too far, so we need
  // to pad the snapshot to make sure we don't read over the end.
  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
    sink_.Put(kNop, "Padding");
  }
  // Pad up to pointer size for checksum.
  while (!IsAligned(sink_.Position(), kPointerAlignment)) {
    sink_.Put(kNop, "Padding");
  }
}

void Serializer::InitializeCodeAddressMap() {
  isolate_->InitializeLoggingAndCounters();
  code_address_map_ = new CodeAddressMap(isolate_);
}

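// Copies |code| into a reusable scratch buffer and reinterprets the buffer
// as a Code object. Rewind(0) resets the length while keeping the backing
// store, so repeated copies avoid reallocation. The copy is used only
// while serializing (see PrepareCode); it is never executed.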
Code* Serializer::CopyCode(Code* code) {
  code_buffer_.Rewind(0);  // Clear buffer without deleting backing store.
  int size = code->CodeSize();
  code_buffer_.AddAll(Vector<byte>(code->address(), size));
  return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
}

bool Serializer::HasNotExceededFirstPageOfEachSpace() {
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    if (!completed_chunks_[i].is_empty()) return false;
  }
  return true;
}

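// The prologue announces a new object to the deserializer: a kNewObject
// bytecode (parameterized by reference representation and space), the
// object size in words, and then the serialized map. Large objects
// additionally record executability; map-space objects have a fixed size,
// but the size is still written for uniformity.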
void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
                                                     int size, Map* map) {
  if (serializer_->code_address_map_) {
    const char* code_name =
        serializer_->code_address_map_->Lookup(object_->address());
    LOG(serializer_->isolate_,
        CodeNameEvent(object_->address(), sink_->Position(), code_name));
  }

  SerializerReference back_reference;
  if (space == LO_SPACE) {
    sink_->Put(kNewObject + reference_representation_ + space,
               "NewLargeObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
    if (object_->IsCode()) {
      sink_->Put(EXECUTABLE, "executable large object");
    } else {
      sink_->Put(NOT_EXECUTABLE, "not executable large object");
    }
    back_reference = serializer_->AllocateLargeObject(size);
  } else if (space == MAP_SPACE) {
    DCHECK_EQ(Map::kSize, size);
    back_reference = serializer_->AllocateMap();
    sink_->Put(kNewObject + reference_representation_ + space, "NewMap");
    // This is redundant, but we include it anyway.
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
  } else {
    int fill = serializer_->PutAlignmentPrefix(object_);
    back_reference = serializer_->Allocate(space, size + fill);
    sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
  }

#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    serializer_->CountInstanceType(map, size);
  }
#endif  // OBJECT_PRINT

  // Mark this object as already serialized.
  serializer_->reference_map()->Add(object_, back_reference);

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
}

void Serializer::ObjectSerializer::SerializeExternalString() {
  // Instead of serializing this as an external string, we serialize
  // an imaginary sequential string with the same content.
  Isolate* isolate = serializer_->isolate();
  DCHECK(object_->IsExternalString());
  DCHECK(object_->map() != isolate->heap()->native_source_string_map());
  ExternalString* string = ExternalString::cast(object_);
  int length = string->length();
  Map* map;
  int content_size;
  int allocation_size;
  const byte* resource;
  // Find the map and size for the imaginary sequential string.
  bool internalized = object_->IsInternalizedString();
  if (object_->IsExternalOneByteString()) {
    map = internalized ? isolate->heap()->one_byte_internalized_string_map()
                       : isolate->heap()->one_byte_string_map();
    allocation_size = SeqOneByteString::SizeFor(length);
    content_size = length * kCharSize;
    resource = reinterpret_cast<const byte*>(
        ExternalOneByteString::cast(string)->resource()->data());
  } else {
    map = internalized ? isolate->heap()->internalized_string_map()
                       : isolate->heap()->string_map();
    allocation_size = SeqTwoByteString::SizeFor(length);
    content_size = length * kShortSize;
    resource = reinterpret_cast<const byte*>(
        ExternalTwoByteString::cast(string)->resource()->data());
  }

  AllocationSpace space =
      (allocation_size > kMaxRegularHeapObjectSize) ? LO_SPACE : OLD_SPACE;
  SerializePrologue(space, allocation_size, map);

  // Output the rest of the imaginary string.
  int bytes_to_output = allocation_size - HeapObject::kHeaderSize;

  // Output raw data header. Do not bother with common raw length cases here.
  sink_->Put(kVariableRawData, "RawDataForString");
  sink_->PutInt(bytes_to_output, "length");

  // Serialize string header (except for map).
  Address string_start = string->address();
  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
    sink_->PutSection(string_start[i], "StringHeader");
  }

  // Serialize string content.
  sink_->PutRaw(resource, content_size, "StringContent");

  // Since the allocation size is rounded up to object alignment, there
  // may be left-over bytes that need to be padded.
  int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
  DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
  for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");

  sink_->Put(kSkip, "SkipAfterString");
  sink_->PutInt(bytes_to_output, "SkipDistance");
}

// Clear and later restore the next link in the weak cell or allocation site.
// TODO(all): replace this with proper iteration of weak slots in serializer.
class UnlinkWeakNextScope {
 public:
  explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
    if (object->IsWeakCell()) {
      object_ = object;
      next_ = WeakCell::cast(object)->next();
      WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
    } else if (object->IsAllocationSite()) {
      object_ = object;
      next_ = AllocationSite::cast(object)->weak_next();
      AllocationSite::cast(object)->set_weak_next(
          object->GetHeap()->undefined_value());
    }
  }

  ~UnlinkWeakNextScope() {
    if (object_ != nullptr) {
      if (object_->IsWeakCell()) {
        WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
      } else {
        AllocationSite::cast(object_)->set_weak_next(next_,
                                                     UPDATE_WEAK_WRITE_BARRIER);
      }
    }
  }

 private:
  HeapObject* object_;
  Object* next_;
  DisallowHeapAllocation no_gc_;
};

void Serializer::ObjectSerializer::Serialize() {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  // We cannot serialize typed array objects correctly.
  DCHECK(!object_->IsJSTypedArray());

  // We don't expect fillers.
  DCHECK(!object_->IsFiller());

  if (object_->IsScript()) {
    // Clear cached line ends.
    Object* undefined = serializer_->isolate()->heap()->undefined_value();
    Script::cast(object_)->set_line_ends(undefined);
  }

  if (object_->IsExternalString()) {
    Heap* heap = serializer_->isolate()->heap();
    if (object_->map() != heap->native_source_string_map()) {
      // Usually we cannot recreate resources for external strings. To work
      // around this, external strings are serialized to look like ordinary
      // sequential strings.
      // The exception is native source code strings, since we can recreate
      // their resources. In that case we fall through and leave it to
      // VisitExternalOneByteString further down.
      SerializeExternalString();
      return;
    }
  }

  int size = object_->Size();
  Map* map = object_->map();
  AllocationSpace space =
      MemoryChunk::FromAddress(object_->address())->owner()->identity();
  SerializePrologue(space, size, map);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;

  RecursionScope recursion(serializer_);
  // Objects that are immediately post-processed during deserialization
  // cannot be deferred, since post-processing requires the object content.
  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
    serializer_->QueueDeferredObject(object_);
    sink_->Put(kDeferred, "Deferring object content");
    return;
  }

  UnlinkWeakNextScope unlink_weak_next(object_);

  object_->IterateBody(map->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}

void Serializer::ObjectSerializer::SerializeDeferred() {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding deferred heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  int size = object_->Size();
  Map* map = object_->map();
  SerializerReference back_reference =
      serializer_->reference_map()->Lookup(object_);
  DCHECK(back_reference.is_back_reference());

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;

  serializer_->PutAlignmentPrefix(object_);
  sink_->Put(kNewObject + back_reference.space(), "deferred object");
  serializer_->PutBackReference(object_, back_reference);
  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");

  UnlinkWeakNextScope unlink_weak_next(object_);

  object_->IterateBody(map->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}

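// Pointer slots are serialized in runs: consecutive Smi-valued slots are
// flushed as raw data, and consecutive repeats of the same immortal
// immovable root are collapsed into a kFixedRepeat or kVariableRepeat
// bytecode. Everything else recurses through SerializeObject.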
void Serializer::ObjectSerializer::VisitPointers(Object** start, Object** end) {
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));

    while (current < end && !(*current)->IsSmi()) {
      HeapObject* current_contents = HeapObject::cast(*current);
      int root_index = serializer_->root_index_map()->Lookup(current_contents);
      // Repeats are not subject to the write barrier, so we can only use
      // immortal immovable root members. They are never in new space.
      if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
          Heap::RootIsImmortalImmovable(root_index) &&
          current_contents == current[-1]) {
        DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
        int repeat_count = 1;
        while (&current[repeat_count] < end - 1 &&
               current[repeat_count] == current_contents) {
          repeat_count++;
        }
        current += repeat_count;
        bytes_processed_so_far_ += repeat_count * kPointerSize;
        if (repeat_count > kNumberOfFixedRepeat) {
          sink_->Put(kVariableRepeat, "VariableRepeat");
          sink_->PutInt(repeat_count, "repeat count");
        } else {
          sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
        }
      } else {
        serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
                                     0);
        bytes_processed_so_far_ += kPointerSize;
        current++;
      }
    }
  }
}

void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  Object* object = rinfo->target_object();
  serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
                               kStartOfObject, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
  int skip = OutputRawData(reinterpret_cast<Address>(p),
                           kCanReturnSkipInsteadOfSkipping);
  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = *p;
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += kPointerSize;
}

void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_external_reference();
  DCHECK_NOT_NULL(target);  // Code does not reference null.
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
  // We can only reference internal references of code that has been output.
  DCHECK(object_->IsCode() && code_has_been_output_);
  // We do not use skip from last patched pc to find the pc to patch, since
  // target_address_address may not return addresses in ascending order when
  // used for internal references. External references may be stored at the
  // end of the code in the constant pool, whereas internal references are
  // inline. That would cause the skip to be negative. Instead, we store the
  // offset from code entry.
  Address entry = Code::cast(object_)->entry();
  intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
  intptr_t target_offset = rinfo->target_internal_reference() - entry;
  DCHECK(0 <= pc_offset &&
         pc_offset <= Code::cast(object_)->instruction_size());
  DCHECK(0 <= target_offset &&
         target_offset <= Code::cast(object_)->instruction_size());
  sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
                 ? kInternalReference
                 : kInternalReferenceEncoded,
             "InternalRef");
  sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
  sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
}

void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_address();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
  int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
  bytes_processed_so_far_ += kPointerSize;
}

void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
  Cell* object = Cell::cast(rinfo->target_cell());
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
  bytes_processed_so_far_ += kPointerSize;
}

bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
    int builtin_count,
    v8::String::ExternalOneByteStringResource** resource_pointer,
    FixedArray* source_cache, int resource_index) {
  Isolate* isolate = serializer_->isolate();
  for (int i = 0; i < builtin_count; i++) {
    Object* source = source_cache->get(i);
    if (!source->IsUndefined(isolate)) {
      ExternalOneByteString* string = ExternalOneByteString::cast(source);
      typedef v8::String::ExternalOneByteStringResource Resource;
      const Resource* resource = string->resource();
      if (resource == *resource_pointer) {
        sink_->Put(resource_index, "NativesStringResource");
        sink_->PutSection(i, "NativesStringResourceEnd");
        bytes_processed_so_far_ += sizeof(resource);
        return true;
      }
    }
  }
  return false;
}

void Serializer::ObjectSerializer::VisitExternalOneByteString(
    v8::String::ExternalOneByteStringResource** resource_pointer) {
  DCHECK_EQ(serializer_->isolate()->heap()->native_source_string_map(),
            object_->map());
  DCHECK(ExternalOneByteString::cast(object_)->is_short());
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  if (SerializeExternalNativeSourceString(
          Natives::GetBuiltinsCount(), resource_pointer,
          Natives::GetSourceCache(serializer_->isolate()->heap()),
          kNativesStringResource)) {
    return;
  }
  if (SerializeExternalNativeSourceString(
          ExtraNatives::GetBuiltinsCount(), resource_pointer,
          ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
          kExtraNativesStringResource)) {
    return;
  }
  // One of the strings in the natives cache should match the resource.  We
  // don't expect any other kinds of external strings here.
  UNREACHABLE();
}

Address Serializer::ObjectSerializer::PrepareCode() {
  Code* code = Code::cast(object_);
  if (FLAG_predictable) {
    // To make snapshots reproducible, we make a copy of the code object
    // and wipe all pointers in the copy, which we then serialize.
    code = serializer_->CopyCode(code);
    int mode_mask = RelocInfo::kCodeTargetMask |
                    RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                    RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                    RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
                    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
                    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
    for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
      RelocInfo* rinfo = it.rinfo();
      rinfo->WipeOut();
    }
    // We need to wipe out the header fields *after* wiping out the
    // relocations, because some of these fields are needed for the latter.
    code->WipeOutHeader();
  }
  // Code age headers are not serializable.
  code->MakeYoung(serializer_->isolate());
  return code->address();
}

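// Emits the raw (non-pointer) bytes of the object between the last output
// position and |up_to|. Short pointer-aligned runs use a one-byte
// kFixedRawData opcode that also encodes the length; longer runs use
// kVariableRawData plus an explicit length. Code objects are emitted in
// their entirety the first time and patched by the later visits. If
// |return_skip| permits, the skip distance is returned so the caller can
// fold it into its own bytecode instead of a separate kSkip.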
int Serializer::ObjectSerializer::OutputRawData(
    Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
  Address object_start = object_->address();
  int base = bytes_processed_so_far_;
  int up_to_offset = static_cast<int>(up_to - object_start);
  int to_skip = up_to_offset - bytes_processed_so_far_;
  int bytes_to_output = to_skip;
  bytes_processed_so_far_ += to_skip;
  // This assert will fail if the reloc info gives us the target_address_address
  // locations in a non-ascending order.  Luckily that doesn't happen.
  DCHECK(to_skip >= 0);
  bool outputting_code = false;
  bool is_code_object = object_->IsCode();
  if (to_skip != 0 && is_code_object && !code_has_been_output_) {
    // Output the code all at once and fix later.
    bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
    outputting_code = true;
    code_has_been_output_ = true;
  }
  if (bytes_to_output != 0 && (!is_code_object || outputting_code)) {
    if (!outputting_code && bytes_to_output == to_skip &&
        IsAligned(bytes_to_output, kPointerAlignment) &&
        bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
      int size_in_words = bytes_to_output >> kPointerSizeLog2;
      sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
      to_skip = 0;  // This instruction includes skip.
    } else {
      // We always end up here if we are outputting the code of a code object.
      sink_->Put(kVariableRawData, "VariableRawData");
      sink_->PutInt(bytes_to_output, "length");
    }

    if (is_code_object) object_start = PrepareCode();

    const char* description = is_code_object ? "Code" : "Byte";
    sink_->PutRaw(object_start + base, bytes_to_output, description);
  }
  if (to_skip != 0 && return_skip == kIgnoringReturn) {
    sink_->Put(kSkip, "Skip");
    sink_->PutInt(to_skip, "SkipDistance");
    to_skip = 0;
  }
  return to_skip;
}

}  // namespace internal
}  // namespace v8