// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SLOT_SET_H
#define V8_SLOT_SET_H

#include <map>
#include <stack>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
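//
// Sample usage (a minimal sketch; page_start is the base address of the page
// whose slots are tracked):
//   SlotSet* slot_set = new SlotSet();
//   slot_set->SetPageStart(page_start);
//   slot_set->Insert(3 * kPointerSize);  // Slot at page_start + 3 words.
//   DCHECK(slot_set->Lookup(3 * kPointerSize));
//   slot_set->Remove(3 * kPointerSize);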
class SlotSet : public Malloced {
 public:
  enum EmptyBucketMode {
    FREE_EMPTY_BUCKETS,     // An empty bucket will be deallocated immediately.
    PREFREE_EMPTY_BUCKETS,  // An empty bucket will be unlinked from the slot
                            // set, but deallocated on demand by a sweeper
                            // thread.
    KEEP_EMPTY_BUCKETS      // An empty bucket will be kept.
  };

  SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      bucket[i].SetValue(nullptr);
    }
  }

  ~SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      ReleaseBucket(i);
    }
    FreeToBeFreedBuckets();
  }

  void SetPageStart(Address page_start) { page_start_ = page_start; }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // This method should only be called on the main thread because concurrent
  // allocation of the bucket is not thread-safe.
  void Insert(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
    if (current_bucket == nullptr) {
      current_bucket = AllocateBucket();
      bucket[bucket_index].SetValue(current_bucket);
    }
    if (!(current_bucket[cell_index].Value() & (1u << bit_index))) {
      current_bucket[cell_index].SetBit(bit_index);
    }
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // Returns true if the set contains the slot.
  bool Contains(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
    if (current_bucket == nullptr) {
      return false;
    }
    return (current_bucket[cell_index].Value() & (1u << bit_index)) != 0;
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
    if (current_bucket != nullptr) {
      uint32_t cell = current_bucket[cell_index].Value();
      if (cell) {
        uint32_t bit_mask = 1u << bit_index;
        if (cell & bit_mask) {
          current_bucket[cell_index].ClearBit(bit_index);
        }
      }
    }
  }

  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
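  //
  // Sample usage (a sketch; drops all slots inside a dead object of
  // object_size bytes starting at page_start_ + object_offset):
  //   slot_set->RemoveRange(object_offset, object_offset + object_size,
  //                         SlotSet::FREE_EMPTY_BUCKETS);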
  void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
    CHECK_LE(end_offset, 1 << kPageSizeBits);
    DCHECK_LE(start_offset, end_offset);
    int start_bucket, start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    int end_bucket, end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    if (start_bucket == end_bucket && start_cell == end_cell) {
      ClearCell(start_bucket, start_cell, ~(start_mask | end_mask));
      return;
    }
    int current_bucket = start_bucket;
    int current_cell = start_cell;
    ClearCell(current_bucket, current_cell, ~start_mask);
    current_cell++;
    base::AtomicValue<uint32_t>* bucket_ptr = bucket[current_bucket].Value();
    if (current_bucket < end_bucket) {
      if (bucket_ptr != nullptr) {
        ClearBucket(bucket_ptr, current_cell, kCellsPerBucket);
      }
      // The rest of the current bucket is cleared.
      // Move on to the next bucket.
      current_bucket++;
      current_cell = 0;
    }
    DCHECK(current_bucket == end_bucket ||
           (current_bucket < end_bucket && current_cell == 0));
    while (current_bucket < end_bucket) {
      if (mode == PREFREE_EMPTY_BUCKETS) {
        PreFreeEmptyBucket(current_bucket);
      } else if (mode == FREE_EMPTY_BUCKETS) {
        ReleaseBucket(current_bucket);
      } else {
        DCHECK(mode == KEEP_EMPTY_BUCKETS);
        bucket_ptr = bucket[current_bucket].Value();
        if (bucket_ptr) {
          ClearBucket(bucket_ptr, 0, kCellsPerBucket);
        }
      }
      current_bucket++;
    }
    // All buckets between start_bucket and end_bucket are cleared.
    DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
    // Check the bucket index before indexing into the bucket array: when
    // end_offset equals the page size, end_bucket == kBuckets, which is one
    // past the last valid index.
    if (current_bucket == kBuckets) {
      return;
    }
    bucket_ptr = bucket[current_bucket].Value();
    if (bucket_ptr == nullptr) {
      return;
    }
    while (current_cell < end_cell) {
      bucket_ptr[current_cell].SetValue(0);
      current_cell++;
    }
    // All cells between start_cell and end_cell are cleared.
    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
    ClearCell(end_bucket, end_cell, ~end_mask);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    if (bucket[bucket_index].Value() != nullptr) {
      uint32_t cell = bucket[bucket_index].Value()[cell_index].Value();
      return (cell & (1u << bit_index)) != 0;
    }
    return false;
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  // This method should only be called on the main thread.
  //
  // Sample usage:
  // Iterate([](Address slot_address) {
  //    if (good(slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  template <typename Callback>
  int Iterate(Callback callback, EmptyBucketMode mode) {
    int new_count = 0;
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      base::AtomicValue<uint32_t>* current_bucket =
          bucket[bucket_index].Value();
      if (current_bucket != nullptr) {
        int in_bucket_count = 0;
        int cell_offset = bucket_index * kBitsPerBucket;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          if (current_bucket[i].Value()) {
            uint32_t cell = current_bucket[i].Value();
            uint32_t old_cell = cell;
            uint32_t mask = 0;
            while (cell) {
              int bit_offset = base::bits::CountTrailingZeros32(cell);
              uint32_t bit_mask = 1u << bit_offset;
              uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
              if (callback(page_start_ + slot) == KEEP_SLOT) {
                ++in_bucket_count;
              } else {
                mask |= bit_mask;
              }
              cell ^= bit_mask;
            }
            uint32_t new_cell = old_cell & ~mask;
            if (old_cell != new_cell) {
              while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
                // If TrySetValue fails, the cell must have changed. We just
                // have to read the current value of the cell, clear the
                // computed mask bits from it, and retry. We can do this
                // because this method is only called on the main thread and
                // filtering threads only remove slots.
                old_cell = current_bucket[i].Value();
                new_cell = old_cell & ~mask;
              }
            }
          }
        }
        if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
          PreFreeEmptyBucket(bucket_index);
        }
        new_count += in_bucket_count;
      }
    }
    return new_count;
  }

  void FreeToBeFreedBuckets() {
    base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
    while (!to_be_freed_buckets_.empty()) {
      base::AtomicValue<uint32_t>* top = to_be_freed_buckets_.top();
      to_be_freed_buckets_.pop();
      DeleteArray<base::AtomicValue<uint32_t>>(top);
    }
  }

 private:
  static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
  static const int kCellsPerBucket = 32;
  static const int kCellsPerBucketLog2 = 5;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
  static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
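  // For example, with kPageSizeBits == 19 and 8-byte pointers (both vary by
  // build configuration), kMaxSlots == 65536 and the set consists of
  // kBuckets == 64 buckets of 32 cells of 32 bits each: one bit per slot.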

  base::AtomicValue<uint32_t>* AllocateBucket() {
    base::AtomicValue<uint32_t>* result =
        NewArray<base::AtomicValue<uint32_t>>(kCellsPerBucket);
    for (int i = 0; i < kCellsPerBucket; i++) {
      result[i].SetValue(0);
    }
    return result;
  }

  void ClearBucket(base::AtomicValue<uint32_t>* bucket, int start_cell,
                   int end_cell) {
    DCHECK_GE(start_cell, 0);
    DCHECK_LE(end_cell, kCellsPerBucket);
    int current_cell = start_cell;
    while (current_cell < end_cell) {
      bucket[current_cell].SetValue(0);
      current_cell++;
    }
  }

  void PreFreeEmptyBucket(int bucket_index) {
    base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
    if (bucket_ptr != nullptr) {
      base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
      to_be_freed_buckets_.push(bucket_ptr);
      bucket[bucket_index].SetValue(nullptr);
    }
  }

  void ReleaseBucket(int bucket_index) {
    DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
    bucket[bucket_index].SetValue(nullptr);
  }

  void ClearCell(int bucket_index, int cell_index, uint32_t mask) {
    if (bucket_index < kBuckets) {
      base::AtomicValue<uint32_t>* cells = bucket[bucket_index].Value();
      if (cells != nullptr) {
        uint32_t cell = cells[cell_index].Value();
        if (cell) cells[cell_index].SetBits(0, mask);
      }
    } else {
      // GCC bug 59124: Emits wrong warnings
      // "array subscript is above array bounds"
      UNREACHABLE();
    }
  }

  // Converts the slot offset into bucket/cell/bit index.
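  // For example (assuming 8-byte pointers), slot_offset == 17184 gives
  // slot == 2148 == 2 * 1024 + 3 * 32 + 4, so bucket_index == 2,
  // cell_index == 3, and bit_index == 4.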
  void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
                     int* bit_index) {
    DCHECK_EQ(slot_offset % kPointerSize, 0);
    int slot = slot_offset >> kPointerSizeLog2;
    DCHECK(slot >= 0 && slot <= kMaxSlots);
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
    *bit_index = slot & (kBitsPerCell - 1);
  }

  base::AtomicValue<base::AtomicValue<uint32_t>*> bucket[kBuckets];
  Address page_start_;
  base::Mutex to_be_freed_buckets_mutex_;
  std::stack<base::AtomicValue<uint32_t>*> to_be_freed_buckets_;
};

enum SlotType {
  EMBEDDED_OBJECT_SLOT,
  OBJECT_SLOT,
  CELL_TARGET_SLOT,
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  DEBUG_TARGET_SLOT,
  CLEARED_SLOT
};

// Data structure for maintaining a multiset of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection, and we do not expect many duplicates
// because typed slots contain V8-internal pointers that are not directly
// exposed to JS.
class TypedSlotSet {
 public:
  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  typedef std::pair<SlotType, uint32_t> TypeAndOffset;

  struct TypedSlot {
    TypedSlot() {
      type_and_offset_.SetValue(0);
      host_offset_.SetValue(0);
    }

    TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset) {
      type_and_offset_.SetValue(TypeField::encode(type) |
                                OffsetField::encode(offset));
      host_offset_.SetValue(host_offset);
    }

    bool operator==(const TypedSlot other) {
      return type_and_offset_.Value() == other.type_and_offset_.Value() &&
             host_offset_.Value() == other.host_offset_.Value();
    }

    bool operator!=(const TypedSlot other) { return !(*this == other); }

    SlotType type() { return TypeField::decode(type_and_offset_.Value()); }

    uint32_t offset() { return OffsetField::decode(type_and_offset_.Value()); }

    TypeAndOffset GetTypeAndOffset() {
      uint32_t type_and_offset = type_and_offset_.Value();
      return std::make_pair(TypeField::decode(type_and_offset),
                            OffsetField::decode(type_and_offset));
    }

    uint32_t host_offset() { return host_offset_.Value(); }

    void Set(TypedSlot slot) {
      type_and_offset_.SetValue(slot.type_and_offset_.Value());
      host_offset_.SetValue(slot.host_offset_.Value());
    }

    void Clear() {
      type_and_offset_.SetValue(TypeField::encode(CLEARED_SLOT) |
                                OffsetField::encode(0));
      host_offset_.SetValue(0);
    }

    base::AtomicValue<uint32_t> type_and_offset_;
    base::AtomicValue<uint32_t> host_offset_;
  };
  static const int kMaxOffset = 1 << 29;
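  // type_and_offset_ packs both values into one 32-bit word via TypeField and
  // OffsetField (defined below): the low 29 bits hold the offset and the high
  // 3 bits the type. For example, a CODE_TARGET_SLOT at offset 0x40 is
  // encoded as (3u << 29) | 0x40.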

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
    chunk_.SetValue(new Chunk(nullptr, kInitialBufferSize));
  }

  ~TypedSlotSet() {
    Chunk* chunk = chunk_.Value();
    while (chunk != nullptr) {
      Chunk* next = chunk->next.Value();
      delete chunk;
      chunk = next;
    }
    FreeToBeFreedChunks();
  }

  // The slot offset specifies a slot at address page_start_ + offset.
  // This method can only be called on the main thread.
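  //
  // Sample usage (a sketch; records a code-target slot at page_start_ + 0x40
  // whose host object starts at page_start_ + 0x20):
  //   typed_slots->Insert(CODE_TARGET_SLOT, 0x20, 0x40);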
  void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
    TypedSlot slot(type, host_offset, offset);
    Chunk* top_chunk = chunk_.Value();
    if (!top_chunk) {
      top_chunk = new Chunk(nullptr, kInitialBufferSize);
      chunk_.SetValue(top_chunk);
    }
    if (!top_chunk->AddSlot(slot)) {
      Chunk* new_top_chunk =
          new Chunk(top_chunk, NextCapacity(top_chunk->capacity.Value()));
      bool added = new_top_chunk->AddSlot(slot);
      chunk_.SetValue(new_top_chunk);
      DCHECK(added);
      USE(added);
    }
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address host_addr, Address slot_address) {
  //    if (good(slot_type, slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    STATIC_ASSERT(CLEARED_SLOT < 8);
    Chunk* chunk = chunk_.Value();
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      TypedSlot* buffer = chunk->buffer.Value();
      int count = chunk->count.Value();
      bool empty = true;
      for (int i = 0; i < count; i++) {
        // Order is important here. We have to read out the slot type last to
        // observe the concurrent removal case consistently.
        Address host_addr = page_start_ + buffer[i].host_offset();
        TypeAndOffset type_and_offset = buffer[i].GetTypeAndOffset();
        SlotType type = type_and_offset.first;
        if (type != CLEARED_SLOT) {
          Address addr = page_start_ + type_and_offset.second;
          if (callback(type, host_addr, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            buffer[i].Clear();
          }
        }
      }

      Chunk* next = chunk->next.Value();
      if (mode == PREFREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point to its
        // next chunk to allow concurrent iteration.
        if (previous) {
          previous->next.SetValue(next);
        } else {
          chunk_.SetValue(next);
        }
        base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
        to_be_freed_chunks_.push(chunk);
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

  void FreeToBeFreedChunks() {
    base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
    while (!to_be_freed_chunks_.empty()) {
      Chunk* top = to_be_freed_chunks_.top();
      to_be_freed_chunks_.pop();
      delete top;
    }
  }

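  // Clears every slot whose host offset lies in one of the given invalid
  // ranges. Each map entry encodes a half-open range
  // [entry.first, entry.second) of host offsets.
  //
  // Sample usage (a sketch; invalidates all slots hosted in
  // [page_start_ + 0x100, page_start_ + 0x180)):
  //   std::map<uint32_t, uint32_t> invalid_ranges = {{0x100, 0x180}};
  //   typed_slots->RemoveInvalidSlots(invalid_ranges);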
  void RemoveInvalidSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
    Chunk* chunk = chunk_.Value();
    while (chunk != nullptr) {
      TypedSlot* buffer = chunk->buffer.Value();
      int count = chunk->count.Value();
      for (int i = 0; i < count; i++) {
        uint32_t host_offset = buffer[i].host_offset();
        std::map<uint32_t, uint32_t>::iterator upper_bound =
            invalid_ranges.upper_bound(host_offset);
        if (upper_bound == invalid_ranges.begin()) continue;
        // upper_bound points to the invalid range after the given slot. Hence,
        // we have to go to the previous element.
        upper_bound--;
        DCHECK_LE(upper_bound->first, host_offset);
        if (upper_bound->second > host_offset) {
          buffer[i].Clear();
        }
      }
      chunk = chunk->next.Value();
    }
  }

 private:
  static const int kInitialBufferSize = 100;
  static const int kMaxBufferSize = 16 * KB;

  static int NextCapacity(int capacity) {
    return Min(kMaxBufferSize, capacity * 2);
  }

  class OffsetField : public BitField<int, 0, 29> {};
  class TypeField : public BitField<SlotType, 29, 3> {};

  struct Chunk : Malloced {
    explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
      count.SetValue(0);
      capacity.SetValue(chunk_capacity);
      buffer.SetValue(NewArray<TypedSlot>(chunk_capacity));
      next.SetValue(next_chunk);
    }
    bool AddSlot(TypedSlot slot) {
      int current_count = count.Value();
      if (current_count == capacity.Value()) return false;
      TypedSlot* current_buffer = buffer.Value();
      // Order is important here. We have to write the slot before increasing
      // the counter to guarantee that concurrent threads observe a consistent
      // state.
      current_buffer[current_count].Set(slot);
      count.SetValue(current_count + 1);
      return true;
    }
    ~Chunk() { DeleteArray(buffer.Value()); }
    base::AtomicValue<Chunk*> next;
    base::AtomicValue<int> count;
    base::AtomicValue<int> capacity;
    base::AtomicValue<TypedSlot*> buffer;
  };

  Address page_start_;
  base::AtomicValue<Chunk*> chunk_;
  base::Mutex to_be_freed_chunks_mutex_;
  std::stack<Chunk*> to_be_freed_chunks_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_SLOT_SET_H