// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/layout-descriptor.h"

#include <sstream>

#include "src/base/bits.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"

using v8::base::bits::CountTrailingZeros32;

namespace v8 {
namespace internal {

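// Builds a layout descriptor for |map| from the first |num_descriptors|
// entries of |descriptors|: in-object double fields are marked as unboxed
// while everything else stays tagged. Returns the fast pointer layout when
// double field unboxing is disabled or no double fields are present.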
Handle<LayoutDescriptor> LayoutDescriptor::New(
    Handle<Map> map, Handle<DescriptorArray> descriptors, int num_descriptors) {
  Isolate* isolate = descriptors->GetIsolate();
  if (!FLAG_unbox_double_fields) return handle(FastPointerLayout(), isolate);

  int layout_descriptor_length =
      CalculateCapacity(*map, *descriptors, num_descriptors);

  if (layout_descriptor_length == 0) {
    // No double fields were found, use fast pointer layout.
    return handle(FastPointerLayout(), isolate);
  }

  // Initially, the layout descriptor corresponds to an object with all
  // fields tagged.
  Handle<LayoutDescriptor> layout_descriptor_handle =
      LayoutDescriptor::New(isolate, layout_descriptor_length);

  LayoutDescriptor* layout_descriptor = Initialize(
      *layout_descriptor_handle, *map, *descriptors, num_descriptors);

  return handle(layout_descriptor, isolate);
}


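// Appends a newly added property to the layout descriptor shared by |map| and
// its descriptor array. Only in-object unboxed double fields modify the
// descriptor; any needed growth happens in EnsureCapacity, so the raw-data
// updates below do not allocate.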
Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
    Handle<Map> map, PropertyDetails details) {
  DCHECK(map->owns_descriptors());
  Isolate* isolate = map->GetIsolate();
  Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
                                             isolate);

  if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
    DCHECK(details.location() != kField ||
           layout_descriptor->IsTagged(details.field_index()));
    return layout_descriptor;
  }
  int field_index = details.field_index();
  layout_descriptor = LayoutDescriptor::EnsureCapacity(
      isolate, layout_descriptor, field_index + details.field_width_in_words());

  DisallowHeapAllocation no_allocation;
  LayoutDescriptor* layout_desc = *layout_descriptor;
  layout_desc = layout_desc->SetRawData(field_index);
  if (details.field_width_in_words() > 1) {
    layout_desc = layout_desc->SetRawData(field_index + 1);
  }
  return handle(layout_desc, isolate);
}


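// Non-allocating variant of the append above: tries to record the new
// property in the map's existing fast-mode layout descriptor and falls back
// to the precomputed |full_layout_descriptor| when the descriptor is already
// in slow mode or lacks capacity for the new field.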
Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
    Handle<Map> map, PropertyDetails details,
    Handle<LayoutDescriptor> full_layout_descriptor) {
  DisallowHeapAllocation no_allocation;
  LayoutDescriptor* layout_descriptor = map->layout_descriptor();
  if (layout_descriptor->IsSlowLayout()) {
    return full_layout_descriptor;
  }
  if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
    DCHECK(details.location() != kField ||
           layout_descriptor->IsTagged(details.field_index()));
    return handle(layout_descriptor, map->GetIsolate());
  }
  int field_index = details.field_index();
  int new_capacity = field_index + details.field_width_in_words();
  if (new_capacity > layout_descriptor->capacity()) {
    // The current map's layout descriptor has run out of space, so use the
    // full layout descriptor.
    return full_layout_descriptor;
  }

  layout_descriptor = layout_descriptor->SetRawData(field_index);
  if (details.field_width_in_words() > 1) {
    layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
  }
  return handle(layout_descriptor, map->GetIsolate());
}


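// Returns a layout descriptor that can describe at least |new_capacity|
// fields. The existing descriptor is reused if it is large enough; otherwise
// a new slow-mode (array-backed) descriptor is allocated and the existing
// layout bits are copied into it.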
Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
    Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
    int new_capacity) {
  int old_capacity = layout_descriptor->capacity();
  if (new_capacity <= old_capacity) {
    return layout_descriptor;
  }
  Handle<LayoutDescriptor> new_layout_descriptor =
      LayoutDescriptor::New(isolate, new_capacity);
  DCHECK(new_layout_descriptor->IsSlowLayout());

  if (layout_descriptor->IsSlowLayout()) {
    memcpy(new_layout_descriptor->DataPtr(), layout_descriptor->DataPtr(),
           layout_descriptor->DataSize());
    return new_layout_descriptor;
  } else {
    // Fast layout.
    uint32_t value =
        static_cast<uint32_t>(Smi::cast(*layout_descriptor)->value());
    new_layout_descriptor->set(0, value);
    return new_layout_descriptor;
  }
}


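// Reports whether the field at |field_index| is tagged and stores in
// |*out_sequence_length| how many consecutive fields starting there are of
// the same kind, capped at |max_sequence_length|. Out-of-bounds indices and
// the fast pointer layout are treated as all-tagged.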
bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
                                int* out_sequence_length) {
  DCHECK(max_sequence_length > 0);
  if (IsFastPointerLayout()) {
    *out_sequence_length = max_sequence_length;
    return true;
  }

  int layout_word_index;
  int layout_bit_index;

  if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
    // Out of bounds queries are considered tagged.
    *out_sequence_length = max_sequence_length;
    return true;
  }
  uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;

  uint32_t value = IsSlowLayout()
                       ? get_scalar(layout_word_index)
                       : static_cast<uint32_t>(Smi::cast(this)->value());

  bool is_tagged = (value & layout_mask) == 0;
  if (!is_tagged) value = ~value;  // Count set bits instead of cleared bits.
  value = value & ~(layout_mask - 1);  // Clear bits we are not interested in.
  int sequence_length = CountTrailingZeros32(value) - layout_bit_index;

  if (layout_bit_index + sequence_length == kNumberOfBits) {
    // The sequence is contiguous up to the end of the current word; continue
    // counting in the subsequent words.
    if (IsSlowLayout()) {
      int len = length();
      ++layout_word_index;
      for (; layout_word_index < len; layout_word_index++) {
        value = get_scalar(layout_word_index);
        bool cur_is_tagged = (value & 1) == 0;
        if (cur_is_tagged != is_tagged) break;
        if (!is_tagged) value = ~value;  // Count set bits instead.
        int cur_sequence_length = CountTrailingZeros32(value);
        sequence_length += cur_sequence_length;
        if (sequence_length >= max_sequence_length) break;
        if (cur_sequence_length != kNumberOfBits) break;
      }
    }
    if (is_tagged && (field_index + sequence_length == capacity())) {
      // The contiguous sequence of tagged fields extends to the end of the
      // layout descriptor, which means that all fields starting from
      // field_index are tagged.
      sequence_length = std::numeric_limits<int>::max();
    }
  }
  *out_sequence_length = Min(sequence_length, max_sequence_length);
  return is_tagged;
}


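// Test-only wrappers around New() and SetTagged().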
Handle<LayoutDescriptor> LayoutDescriptor::NewForTesting(Isolate* isolate,
                                                         int length) {
  return New(isolate, length);
}


LayoutDescriptor* LayoutDescriptor::SetTaggedForTesting(int field_index,
                                                        bool tagged) {
  return SetTagged(field_index, tagged);
}


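// Byte-offset counterpart of LayoutDescriptor::IsTagged(): for the pointer-
// aligned range [offset_in_bytes, end_offset) it returns whether the word at
// |offset_in_bytes| is tagged and sets |*out_end_of_contiguous_region_offset|
// to the end of the contiguous run of same-kind words. Offsets within the
// object header are always reported as tagged.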
bool LayoutDescriptorHelper::IsTagged(
    int offset_in_bytes, int end_offset,
    int* out_end_of_contiguous_region_offset) {
  DCHECK(IsAligned(offset_in_bytes, kPointerSize));
  DCHECK(IsAligned(end_offset, kPointerSize));
  DCHECK(offset_in_bytes < end_offset);
  if (all_fields_tagged_) {
    *out_end_of_contiguous_region_offset = end_offset;
    DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
    return true;
  }
  int max_sequence_length = (end_offset - offset_in_bytes) / kPointerSize;
  int field_index = Max(0, (offset_in_bytes - header_size_) / kPointerSize);
  int sequence_length;
  bool tagged = layout_descriptor_->IsTagged(field_index, max_sequence_length,
                                             &sequence_length);
  DCHECK(sequence_length > 0);
  if (offset_in_bytes < header_size_) {
    // Object headers do not contain non-tagged fields. Check if the contiguous
    // region continues after the header.
    if (tagged) {
      // The first field is tagged; calculate the end offset from there.
      *out_end_of_contiguous_region_offset =
          header_size_ + sequence_length * kPointerSize;

    } else {
      *out_end_of_contiguous_region_offset = header_size_;
    }
    DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
    return true;
  }
  *out_end_of_contiguous_region_offset =
      offset_in_bytes + sequence_length * kPointerSize;
  DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
  return tagged;
}


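// Shrinks this slow-mode layout descriptor to the capacity required by the
// first |num_descriptors| entries of |descriptors| and reinitializes its
// contents from them. Fast-mode descriptors are returned unchanged.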
LayoutDescriptor* LayoutDescriptor::Trim(Heap* heap, Map* map,
                                         DescriptorArray* descriptors,
                                         int num_descriptors) {
  DisallowHeapAllocation no_allocation;
  // Fast mode descriptors are never shared and therefore always fully
  // correspond to their map.
  if (!IsSlowLayout()) return this;

  int layout_descriptor_length =
      CalculateCapacity(map, descriptors, num_descriptors);
  // It must not become a fast-mode descriptor here: if it could, it would
  // already have to be a fast pointer layout descriptor, yet it is in slow
  // mode now.
  DCHECK_LT(kSmiValueSize, layout_descriptor_length);

  // Trim, clean and reinitialize this slow-mode layout descriptor.
  int array_length = GetSlowModeBackingStoreLength(layout_descriptor_length);
  int current_length = length();
  if (current_length != array_length) {
    DCHECK_LT(array_length, current_length);
    int delta = current_length - array_length;
    heap->RightTrimFixedArray(this, delta);
  }
  memset(DataPtr(), 0, DataSize());
  LayoutDescriptor* layout_descriptor =
      Initialize(this, map, descriptors, num_descriptors);
  DCHECK_EQ(this, layout_descriptor);
  return layout_descriptor;
}


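// Verification helper: checks that every own field of |map| has the
// tagged/unboxed bit this descriptor would predict from its property details,
// and, if |check_tail| is set, that all bits past the last field are tagged.
// Trivially true when double field unboxing is disabled.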
bool LayoutDescriptor::IsConsistentWithMap(Map* map, bool check_tail) {
  if (FLAG_unbox_double_fields) {
    DescriptorArray* descriptors = map->instance_descriptors();
    int nof_descriptors = map->NumberOfOwnDescriptors();
    int last_field_index = 0;
    for (int i = 0; i < nof_descriptors; i++) {
      PropertyDetails details = descriptors->GetDetails(i);
      if (details.location() != kField) continue;
      FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
      bool tagged_expected =
          !field_index.is_inobject() || !details.representation().IsDouble();
      for (int bit = 0; bit < details.field_width_in_words(); bit++) {
        bool tagged_actual = IsTagged(details.field_index() + bit);
        DCHECK_EQ(tagged_expected, tagged_actual);
        if (tagged_actual != tagged_expected) return false;
      }
      last_field_index =
          Max(last_field_index,
              details.field_index() + details.field_width_in_words());
    }
    if (check_tail) {
      int n = capacity();
      for (int i = last_field_index; i < n; i++) {
        DCHECK(IsTagged(i));
      }
    }
  }
  return true;
}
}  // namespace internal
}  // namespace v8