// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_INCREMENTAL_MARKING_H_
#define V8_INCREMENTAL_MARKING_H_


#include "execution.h"
#include "mark-compact.h"
#include "objects.h"

namespace v8 {
namespace internal {


class IncrementalMarking {
 public:
  enum State {
    STOPPED,
    SWEEPING,
    MARKING,
    COMPLETE
  };

  enum CompletionAction {
    GC_VIA_STACK_GUARD,
    NO_GC_VIA_STACK_GUARD
  };

  explicit IncrementalMarking(Heap* heap);

  static void Initialize();

  void TearDown();

  State state() {
    ASSERT(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool should_hurry() { return should_hurry_; }
  void set_should_hurry(bool val) { should_hurry_ = val; }

  inline bool IsStopped() { return state() == STOPPED; }

  INLINE(bool IsMarking()) { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() { return state() == MARKING; }

  inline bool IsComplete() { return state() == COMPLETE; }

  bool WorthActivating();

  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };

  void Start(CompactionFlag flag = ALLOW_COMPACTION);

  void Stop();

  void PrepareForScavenge();

  void UpdateMarkingDequeAfterScavenge();

  void Hurry();

  void Finalize();

  void Abort();

  void MarkingComplete(CompletionAction action);

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it.  We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated, or this
  // many heavy (color-checking) write barriers have been invoked (see the
  // illustrative sketch below the constants).
  static const intptr_t kAllocatedThreshold = 65536;
  static const intptr_t kWriteBarriersInvokedThreshold = 65536;
  // Start off by marking this many times more memory than has been allocated.
  static const intptr_t kInitialMarkingSpeed = 1;
  // But if we are promoting a lot of data we need to mark faster to keep up
  // with the data that is entering the old space through promotion.
  static const intptr_t kFastMarking = 3;
  // After this many steps we increase the marking/allocating factor.
  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
  // This is how much we increase the marking/allocating factor by.
  static const intptr_t kMarkingSpeedAccelleration = 2;
  static const intptr_t kMaxMarkingSpeed = 1000;
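  // A back-of-the-envelope sketch of how these constants pace the marker.
  // Illustrative only: the exact step-size computation lives in
  // incremental-marking.cc and may combine the inputs differently.
  //
  //   // kAllocatedThreshold (64 KB) of allocation has accumulated since
  //   // the last step, so a step is triggered.
  //   intptr_t allocated = 65536;
  //   intptr_t speed = 1;                           // kInitialMarkingSpeed
  //   intptr_t bytes_to_mark = allocated * speed;   // mark ~64 KB now
  //   // Every kMarkingSpeedAccellerationInterval (1024) steps the factor
  //   // is bumped by kMarkingSpeedAccelleration (2), capped at
  //   // kMaxMarkingSpeed (1000), so a later step at speed 9 would mark
  //   // roughly 9 * 64 KB = 576 KB, letting the marker catch up with the
  //   // mutator until marking reaches COMPLETE.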

  void OldSpaceStep(intptr_t allocated);

  void Step(intptr_t allocated, CompletionAction action);

  inline void RestartIfNotMarking() {
    if (state_ == COMPLETE) {
      state_ = MARKING;
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
      }
    }
  }

  static void RecordWriteFromCode(HeapObject* obj,
                                  Object** slot,
                                  Isolate* isolate);

  static void RecordWriteForEvacuationFromCode(HeapObject* obj,
                                               Object** slot,
                                               Isolate* isolate);

  // Record a slot for compaction.  Returns false for objects that are
  // guaranteed to be rescanned or not guaranteed to survive.
  //
  // No slots in white objects should be recorded, as some slots are typed and
  // cannot be interpreted correctly if the underlying object does not survive
  // the incremental cycle (stays white).  A usage sketch follows the
  // declarations below.
  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWriteIntoCode(HeapObject* obj,
                                  RelocInfo* rinfo,
                                  Object* value));
  INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
                                     Object** slot,
                                     Code* value));
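  // Illustrative (not authoritative) sketch of a store site, assuming a
  // |heap| pointer is in scope; real call sites are generated by the
  // write-barrier helpers elsewhere in the codebase and may differ:
  //
  //   *slot = value;                                      // the actual store
  //   heap->incremental_marking()->RecordWrite(obj, slot, value);
  //
  // The inline fast path is expected to do nothing when marking is off and
  // to fall through to RecordWriteSlow below otherwise.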


  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
  void RecordWriteIntoCodeSlow(HeapObject* obj,
                               RelocInfo* rinfo,
                               Object* value);
  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
  void RecordCodeTargetPatch(Address pc, HeapObject* value);

  inline void RecordWrites(HeapObject* obj);

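  // Tri-color transitions used by the barriers and the marker:
  // WhiteToGreyAndPush marks a white (unvisited) object grey and pushes it
  // onto the marking deque; BlackToGreyAndUnshift re-greys an already-black
  // object and puts it back on the deque so that it gets rescanned.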
  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);

  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);

  inline int steps_count() {
    return steps_count_;
  }

  inline double steps_took() {
    return steps_took_;
  }

  inline double longest_step() {
    return longest_step_;
  }

  inline int steps_count_since_last_gc() {
    return steps_count_since_last_gc_;
  }

  inline double steps_took_since_last_gc() {
    return steps_took_since_last_gc_;
  }

  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
  }

  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
    SetNewSpacePageFlags(chunk, IsMarking());
  }

  MarkingDeque* marking_deque() { return &marking_deque_; }

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ActivateGeneratedStub(Code* stub);

  void NotifyOfHighPromotionRate() {
    if (IsMarking()) {
      if (marking_speed_ < kFastMarking) {
        if (FLAG_trace_gc) {
          PrintPID("Increasing marking speed to %d "
                   "due to high promotion rate\n",
                   static_cast<int>(kFastMarking));
        }
        marking_speed_ = kFastMarking;
      }
    }
  }

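  // While the scope depth below is non-zero, marking steps are expected to
  // be skipped (the check itself lives in the implementation file); calls
  // to Enter/LeaveNoMarkingScope must be balanced.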
  void EnterNoMarkingScope() {
    no_marking_scope_depth_++;
  }

  void LeaveNoMarkingScope() {
    no_marking_scope_depth_--;
  }

  void UncommitMarkingDeque();

  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }

 private:
  int64_t SpaceLeftInOldSpace();

  void ResetStepCounters();

  void StartMarking(CompactionFlag flag);

  void ActivateIncrementalWriteBarrier(PagedSpace* space);
  static void ActivateIncrementalWriteBarrier(NewSpace* space);
  void ActivateIncrementalWriteBarrier();

  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
  void DeactivateIncrementalWriteBarrier();

  static void SetOldSpacePageFlags(MemoryChunk* chunk,
                                   bool is_marking,
                                   bool is_compacting);

  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);

  void EnsureMarkingDequeIsCommitted();

  INLINE(void ProcessMarkingDeque());

  INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process));

  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));

  Heap* heap_;

  State state_;
  bool is_compacting_;

  VirtualMemory* marking_deque_memory_;
  bool marking_deque_memory_committed_;
  MarkingDeque marking_deque_;

  int steps_count_;
  double steps_took_;
  double longest_step_;
  int64_t old_generation_space_available_at_start_of_incremental_;
  int64_t old_generation_space_used_at_start_of_incremental_;
  int steps_count_since_last_gc_;
  double steps_took_since_last_gc_;
  int64_t bytes_rescanned_;
  bool should_hurry_;
  int marking_speed_;
  intptr_t bytes_scanned_;
  intptr_t allocated_;
  intptr_t write_barriers_invoked_since_last_step_;

  int no_marking_scope_depth_;

  int unscanned_bytes_of_large_object_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};

} }  // namespace v8::internal

#endif  // V8_INCREMENTAL_MARKING_H_