// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_INCREMENTAL_MARKING_H_
#define V8_INCREMENTAL_MARKING_H_


#include "src/execution.h"
#include "src/mark-compact.h"
#include "src/objects.h"

namespace v8 {
namespace internal {


class IncrementalMarking {
 public:
  enum State {
    STOPPED,
    SWEEPING,
    MARKING,
    COMPLETE
  };

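  // Whether reaching the COMPLETE state should request a full collection
  // through the stack guard; inferred from the enumerator names and from
  // MarkingComplete(CompletionAction) below.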
  enum CompletionAction {
    GC_VIA_STACK_GUARD,
    NO_GC_VIA_STACK_GUARD
  };

  explicit IncrementalMarking(Heap* heap);

  static void Initialize();

  void TearDown();

  State state() {
    ASSERT(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool should_hurry() { return should_hurry_; }
  void set_should_hurry(bool val) { should_hurry_ = val; }

  inline bool IsStopped() { return state() == STOPPED; }

  INLINE(bool IsMarking()) { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() { return state() == MARKING; }

  inline bool IsComplete() { return state() == COMPLETE; }

  bool WorthActivating();

  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };

  void Start(CompactionFlag flag = ALLOW_COMPACTION);

  void Stop();

  void PrepareForScavenge();

  void UpdateMarkingDequeAfterScavenge();

  void Hurry();

  void Finalize();

  void Abort();

  void MarkingComplete(CompletionAction action);
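
  // A minimal sketch of how an external driver might exercise the lifecycle
  // above (hypothetical call site; the real drivers live in the heap and
  // stack-guard code, and bytes_allocated is a made-up variable):
  //
  //   IncrementalMarking* marking = heap->incremental_marking();
  //   if (marking->IsStopped() && marking->WorthActivating()) {
  //     marking->Start();
  //   } else if (marking->IsMarkingIncomplete()) {
  //     marking->Step(bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
  //   } else if (marking->IsComplete()) {
  //     marking->Finalize();
  //   }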

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it.  We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or that
  // many heavy (color-checking) write barriers have been invoked.
  static const intptr_t kAllocatedThreshold = 65536;
  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
  // Start off by marking this many times more memory than has been allocated.
  static const intptr_t kInitialMarkingSpeed = 1;
  // But if we are promoting a lot of data we need to mark faster to keep up
  // with the data that is entering the old space through promotion.
  static const intptr_t kFastMarking = 3;
  // After this many steps we increase the marking/allocating factor.
  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
  // This is how much we increase the marking/allocating factor by.
  static const intptr_t kMarkingSpeedAccelleration = 2;
  static const intptr_t kMaxMarkingSpeed = 1000;
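
  // A hedged sketch (the real logic lives in incremental-marking.cc) of how
  // these constants are meant to combine: each step processes roughly
  // marking_speed_ bytes per byte allocated, and the speed ratchets up
  // periodically until it reaches the cap:
  //
  //   intptr_t bytes_to_process = allocated * marking_speed_;
  //   if (steps_count_ % kMarkingSpeedAccellerationInterval == 0) {
  //     marking_speed_ = static_cast<int>(
  //         Min(kMaxMarkingSpeed,
  //             static_cast<intptr_t>(marking_speed_) +
  //                 kMarkingSpeedAccelleration));
  //   }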

  void OldSpaceStep(intptr_t allocated);

  void Step(intptr_t allocated, CompletionAction action);

  inline void RestartIfNotMarking() {
    if (state_ == COMPLETE) {
      state_ = MARKING;
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
      }
    }
  }

  static void RecordWriteFromCode(HeapObject* obj,
                                  Object** slot,
                                  Isolate* isolate);

  // Record a slot for compaction.  Returns false for objects that are
  // guaranteed to be rescanned or not guaranteed to survive.
  //
  // No slots in white objects should be recorded, as some slots are typed and
  // cannot be interpreted correctly if the underlying object does not survive
  // the incremental cycle (stays white).
  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWriteIntoCode(HeapObject* obj,
                                  RelocInfo* rinfo,
                                  Object* value));
  INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
                                     Object** slot,
                                     Code* value));
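
  // Hedged sketch of a typical caller (kFieldOffset is hypothetical; real
  // call sites are generated by write-barrier code elsewhere in V8):
  //
  //   obj->set_field(value);  // the store itself
  //   heap->incremental_marking()->RecordWrite(
  //       obj, HeapObject::RawField(obj, kFieldOffset), value);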

  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
  void RecordWriteIntoCodeSlow(HeapObject* obj,
                               RelocInfo* rinfo,
                               Object* value);
  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
  void RecordCodeTargetPatch(Address pc, HeapObject* value);

  inline void RecordWrites(HeapObject* obj);

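  // In the usual tri-color marking terms (standard terminology, not defined
  // in this header): white objects are unmarked, grey objects are marked but
  // still on the marking deque awaiting a scan, and black objects are marked
  // and fully scanned.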
  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);

  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);

  inline int steps_count() {
    return steps_count_;
  }

  inline double steps_took() {
    return steps_took_;
  }

  inline double longest_step() {
    return longest_step_;
  }

  inline int steps_count_since_last_gc() {
    return steps_count_since_last_gc_;
  }

  inline double steps_took_since_last_gc() {
    return steps_took_since_last_gc_;
  }

  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
  }

  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
    SetNewSpacePageFlags(chunk, IsMarking());
  }

  MarkingDeque* marking_deque() { return &marking_deque_; }

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ActivateGeneratedStub(Code* stub);

  void NotifyOfHighPromotionRate() {
    if (IsMarking()) {
      if (marking_speed_ < kFastMarking) {
        if (FLAG_trace_gc) {
          PrintPID("Increasing marking speed to %d "
                   "due to high promotion rate\n",
                   static_cast<int>(kFastMarking));
        }
        marking_speed_ = kFastMarking;
      }
    }
  }

  void EnterNoMarkingScope() {
    no_marking_scope_depth_++;
  }

  void LeaveNoMarkingScope() {
    no_marking_scope_depth_--;
  }
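
  // Expected pairing at a call site (a hedged sketch; callers and any RAII
  // wrapper live outside this header):
  //
  //   marking->EnterNoMarkingScope();
  //   ...  // work during which marking steps must not make progress
  //   marking->LeaveNoMarkingScope();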

  void UncommitMarkingDeque();

  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }
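
  // Hedged sketch of the intended protocol (VisitLargeObjectPartially is a
  // hypothetical visitor-side helper, not part of this header):
  //
  //   int scanned = VisitLargeObjectPartially(obj, step_budget);
  //   marking->NotifyIncompleteScanOfObject(obj->Size() - scanned);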

 private:
  int64_t SpaceLeftInOldSpace();

  void ResetStepCounters();

  void StartMarking(CompactionFlag flag);

  void ActivateIncrementalWriteBarrier(PagedSpace* space);
  static void ActivateIncrementalWriteBarrier(NewSpace* space);
  void ActivateIncrementalWriteBarrier();

  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
  void DeactivateIncrementalWriteBarrier();

  static void SetOldSpacePageFlags(MemoryChunk* chunk,
                                   bool is_marking,
                                   bool is_compacting);

  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);

  void EnsureMarkingDequeIsCommitted();

  INLINE(void ProcessMarkingDeque());

  INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process));

  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));

  Heap* heap_;

  State state_;
  bool is_compacting_;

  VirtualMemory* marking_deque_memory_;
  bool marking_deque_memory_committed_;
  MarkingDeque marking_deque_;

  int steps_count_;
  double steps_took_;
  double longest_step_;
  int64_t old_generation_space_available_at_start_of_incremental_;
  int64_t old_generation_space_used_at_start_of_incremental_;
  int steps_count_since_last_gc_;
  double steps_took_since_last_gc_;
  int64_t bytes_rescanned_;
  bool should_hurry_;
  int marking_speed_;
  intptr_t bytes_scanned_;
  intptr_t allocated_;
  intptr_t write_barriers_invoked_since_last_step_;

  int no_marking_scope_depth_;

  int unscanned_bytes_of_large_object_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};

} }  // namespace v8::internal

#endif  // V8_INCREMENTAL_MARKING_H_