// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STORE_BUFFER_H_
#define V8_STORE_BUFFER_H_

#include "src/allocation.h"
#include "src/checks.h"
#include "src/globals.h"
#include "src/platform.h"

namespace v8 {
namespace internal {

class Page;
class PagedSpace;
class StoreBuffer;

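// Invoked for each slot (|from|) that holds a pointer to a new-space object
// (|to|); the callback may update the slot, e.g. when |to| has been moved.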
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

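// Member callback used to scan one region [start, end) of a page for
// pointers into new space.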
typedef void (StoreBuffer::*RegionCallback)(Address start,
                                            Address end,
                                            ObjectSlotCallback slot_callback,
                                            bool clear_maps);

// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
 public:
  explicit StoreBuffer(Heap* heap);

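  // Called when the store buffer overflows (e.g. from generated write-barrier
  // code); compacts the new part of the buffer into the old part.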
  static void StoreBufferOverflow(Isolate* isolate);

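  // Address of the cell that holds the store buffer top pointer, so the
  // mutator can append entries without going through this class.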
  inline Address TopAddress();

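  // Allocates (SetUp) and releases (TearDown) the virtual memory backing the
  // new and old parts of the buffer.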
  void SetUp();
  void TearDown();

  // This is used by the mutator to enter addresses into the store buffer.
  inline void Mark(Address addr);
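  // The fast path is roughly the following (a sketch, assuming the usual
  // implementation in store-buffer-inl.h; not the definitive code):
  //
  //   Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  //   *top++ = addr;
  //   heap_->public_set_store_buffer_top(top);
  //   if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
  //     Compact();  // Top crossed into the overflow half; drain the buffer.
  //   }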

  // This is used by the heap traversal to enter the addresses into the store
  // buffer that should still be in the store buffer after GC.  It enters
  // addresses directly into the old buffer because the GC starts by wiping the
  // old buffer and thereafter only visits each cell once, so there is no need
  // to attempt to remove any duplicates.  During the first part of a GC we
  // are using the store buffer to access the old spaces and at the same time
  // we are rebuilding the store buffer using this function.  There is,
  // however, no issue of overwriting the buffer we are iterating over,
  // because this stage of the scavenge can only reduce the number of
  // addresses in the store buffer (some objects are promoted, so pointers to
  // them do not need to be in the store buffer).  The later parts of the GC
  // scan the pages that are exempt from the store buffer and process the
  // promotion queue.  These steps can overflow this buffer.  We check for
  // this, and on overflow we call the callback set up with the
  // StoreBufferRebuildScope object.
  inline void EnterDirectlyIntoStoreBuffer(Address addr);
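  // A sketch of the overflow handling described above (assuming the usual
  // implementation; illustrative only):
  //
  //   *old_top_++ = addr;
  //   old_buffer_is_sorted_ = false;
  //   old_buffer_is_filtered_ = false;
  //   if (old_top_ >= old_limit_) {
  //     // Invoke the callback installed by StoreBufferRebuildScope.
  //     (*callback_)(heap_, MemoryChunk::FromAnyPointerAddress(heap_, addr),
  //                  kStoreBufferFullEvent);
  //   }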

  // Iterates over all pointers that go from old space to new space.  It will
  // empty the store buffer as it starts, so the callback should re-enter
  // surviving old-to-new pointers into the store buffer to rebuild it.
  void IteratePointersToNewSpace(ObjectSlotCallback callback);

  // Same as IteratePointersToNewSpace but additionally clears maps in objects
  // referenced from the store buffer that do not contain a forwarding pointer.
  void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);

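  // Sizing: the new buffer holds kStoreBufferLength addresses (kStoreBufferSize
  // bytes) and the old buffer 16 times as many.  Reusing the buffer size as
  // kStoreBufferOverflowBit lets overflow be detected by testing a single bit
  // of the top pointer, provided the buffer is suitably aligned.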
  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
  static const int kStoreBufferSize = kStoreBufferOverflowBit;
  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
  static const int kHashSetLengthLog2 = 12;
  static const int kHashSetLength = 1 << kHashSetLengthLog2;

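  // Moves the entries accumulated in the new part of the buffer into the old
  // part, using the filtering hash sets to skip duplicates.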
  void Compact();

  void GCPrologue();
  void GCEpilogue();

  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
  void SetTop(Object*** top) {
    ASSERT(top >= Start());
    ASSERT(top <= Limit());
    old_top_ = reinterpret_cast<Address*>(top);
  }

  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }

  // Goes through the store buffer removing pointers to things that have
  // been promoted.  Rebuilds the store buffer completely if it overflowed.
  void SortUniq();

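  // Makes sure the old buffer can absorb at least |space_needed| more bytes,
  // compacting, filtering, or exempting popular pages as required.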
  void EnsureSpace(intptr_t space_needed);
  void Verify();

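  // Readies the buffer for iteration; returns whether any pages have been
  // exempted from the buffer and therefore must be scanned in their entirety.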
  bool PrepareForIteration();

#ifdef DEBUG
  void Clean();
  // Slow, for asserts only.
  bool CellIsInStoreBuffer(Address cell);
#endif

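  // Removes entries that point into pages which have the given memory chunk
  // |flag| set.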
  void Filter(int flag);

 private:
  Heap* heap_;

  // The store buffer is divided up into a new buffer that is constantly being
  // filled by mutator activity and an old buffer that is filled with the data
  // from the new buffer after compaction.
  Address* start_;
  Address* limit_;

  Address* old_start_;
  Address* old_limit_;
  Address* old_top_;
  Address* old_reserved_limit_;
  VirtualMemory* old_virtual_memory_;

  bool old_buffer_is_sorted_;
  bool old_buffer_is_filtered_;
  bool during_gc_;
  // The garbage collector iterates over many pointers to new space that are
  // not handled by the store buffer.  This flag indicates whether the
  // pointers found by the callbacks should be added to the store buffer or
  // not.
  bool store_buffer_rebuilding_enabled_;
  StoreBufferCallback callback_;
  bool may_move_store_buffer_entries_;

  VirtualMemory* virtual_memory_;

  // Two hash sets used for filtering.
  // If an address is in a hash set then it is guaranteed to be in the
  // old part of the store buffer.
  uintptr_t* hash_set_1_;
  uintptr_t* hash_set_2_;
  bool hash_sets_are_empty_;

  void ClearFilteringHashSets();

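  // Whether the old buffer has room for |space_needed| more bytes.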
  bool SpaceAvailable(intptr_t space_needed);
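  // Removes duplicate entries from the (sorted) old buffer.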
  void Uniq();
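  // Samples the old buffer every |prime_sample_step| entries; pages hit more
  // than |threshold| times are taken out of the buffer and marked for full
  // scanning instead.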
  void ExemptPopularPages(int prime_sample_step, int threshold);

  // Set the map field of the object to NULL if it contains a map.
  inline void ClearDeadObject(HeapObject* object);

  void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);

  void FindPointersToNewSpaceInRegion(Address start,
                                      Address end,
                                      ObjectSlotCallback slot_callback,
                                      bool clear_maps);

  // For each region of pointers on a page in use from an old space, call the
  // region_callback, which in turn invokes the slot_callback for each slot
  // that holds a pointer into new space.
  void IteratePointersOnPage(
      PagedSpace* space,
      Page* page,
      RegionCallback region_callback,
      ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceInMaps(
      Address start,
      Address end,
      ObjectSlotCallback slot_callback,
      bool clear_maps);

  void FindPointersToNewSpaceInMapsRegion(
      Address start,
      Address end,
      ObjectSlotCallback slot_callback,
      bool clear_maps);

  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                    bool clear_maps);

#ifdef VERIFY_HEAP
  void VerifyPointers(LargeObjectSpace* space);
#endif

  friend class StoreBufferRebuildScope;
  friend class DontMoveStoreBufferEntriesScope;
};


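// RAII scope that turns on store buffer rebuilding for the duration of a GC
// phase and installs |callback| to be run when the buffer fills up.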
class StoreBufferRebuildScope {
 public:
  explicit StoreBufferRebuildScope(Heap* heap,
                                   StoreBuffer* store_buffer,
                                   StoreBufferCallback callback)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
        stored_callback_(store_buffer->callback_) {
    store_buffer_->store_buffer_rebuilding_enabled_ = true;
    store_buffer_->callback_ = callback;
    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
  }

  ~StoreBufferRebuildScope() {
    store_buffer_->callback_ = stored_callback_;
    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
  StoreBufferCallback stored_callback_;
};
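
// Illustrative use (the callback name and scanning call are assumptions, not
// code from this file):
//
//   {
//     StoreBufferRebuildScope scope(heap, heap->store_buffer(),
//                                   &ScavengeStoreBufferCallback);
//     heap->store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
//   }  // Previous callback and rebuilding flag are restored here.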


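// RAII scope that keeps store buffer entries from being moved or compacted
// while they are being examined, restoring the previous setting on exit.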
class DontMoveStoreBufferEntriesScope {
 public:
  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->may_move_store_buffer_entries_) {
    store_buffer_->may_move_store_buffer_entries_ = false;
  }

  ~DontMoveStoreBufferEntriesScope() {
    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
};

} }  // namespace v8::internal

#endif  // V8_STORE_BUFFER_H_