// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-reducer.h"

#include "src/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/utils.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

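// Tuning parameters for the memory reducer heuristic (the full state machine
// is specified in the comment on the MemoryReducer class in
// memory-reducer.h). The delays control how long the reducer waits between
// garbage collections, kMaxNumberOfGCs bounds one reduction cycle, and the
// committed-memory factor and delta gate re-activation after a mark-compact
// while the reducer is in the DONE state.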
const int MemoryReducer::kLongDelayMs = 8000;
const int MemoryReducer::kShortDelayMs = 500;
const int MemoryReducer::kWatchdogDelayMs = 100000;
const int MemoryReducer::kMaxNumberOfGCs = 3;
const double MemoryReducer::kCommittedMemoryFactor = 1.1;
const size_t MemoryReducer::kCommittedMemoryDelta = 10 * MB;

MemoryReducer::MemoryReducer(Heap* heap)
    : heap_(heap),
      taskrunner_(V8::GetCurrentPlatform()->GetForegroundTaskRunner(
          reinterpret_cast<v8::Isolate*>(heap->isolate()))),
      state_(kDone, 0, 0.0, 0.0, 0),
      js_calls_counter_(0),
      js_calls_sample_time_ms_(0.0) {}

MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
    : CancelableTask(memory_reducer->heap()->isolate()),
      memory_reducer_(memory_reducer) {}


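// Timer callback: samples the current allocation rate and forwards a kTimer
// event to the state machine via NotifyTimer().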
void MemoryReducer::TimerTask::RunInternal() {
  Heap* heap = memory_reducer_->heap();
  Event event;
  double time_ms = heap->MonotonicallyIncreasingTimeInMs();
  heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
                                   heap->OldGenerationAllocationCounter());
  bool low_allocation_rate = heap->HasLowAllocationRate();
  bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
  if (FLAG_trace_gc_verbose) {
    heap->isolate()->PrintWithTimestamp(
        "Memory reducer: %s, %s\n",
        low_allocation_rate ? "low alloc" : "high alloc",
        optimize_for_memory ? "background" : "foreground");
  }
  event.type = kTimer;
  event.time_ms = time_ms;
  // The memory reducer will start incremental marking if
  // 1) mutator is likely idle: js call rate is low and allocation rate is low.
  // 2) mutator is in background: optimize for memory flag is set.
  event.should_start_incremental_gc =
      low_allocation_rate || optimize_for_memory;
  event.can_start_incremental_gc =
      heap->incremental_marking()->IsStopped() &&
      (heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
  event.committed_memory = heap->CommittedOldGenerationMemory();
  memory_reducer_->NotifyTimer(event);
}


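// Handles a kTimer event: either starts idle incremental marking (kRun), or
// keeps waiting and re-arms the timer (kWait). While waiting, pending
// incremental marking is advanced if memory usage has priority over latency.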
void MemoryReducer::NotifyTimer(const Event& event) {
  DCHECK_EQ(kTimer, event.type);
  DCHECK_EQ(kWait, state_.action);
  state_ = Step(state_, event);
  if (state_.action == kRun) {
    DCHECK(heap()->incremental_marking()->IsStopped());
    DCHECK(FLAG_incremental_marking);
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n",
                                            state_.started_gcs);
    }
    heap()->StartIdleIncrementalMarking(
        GarbageCollectionReason::kMemoryReducer,
        kGCCallbackFlagCollectAllExternalMemory);
  } else if (state_.action == kWait) {
    if (!heap()->incremental_marking()->IsStopped() &&
        heap()->ShouldOptimizeForMemoryUsage()) {
      // Make progress with pending incremental marking if memory usage has
      // higher priority than latency. This is important for background tabs
      // that do not send idle notifications.
      const int kIncrementalMarkingDelayMs = 500;
      double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
                        kIncrementalMarkingDelayMs;
      heap()->incremental_marking()->AdvanceIncrementalMarking(
          deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
          StepOrigin::kTask);
      heap()->FinalizeIncrementalMarkingIfComplete(
          GarbageCollectionReason::kFinalizeMarkingViaTask);
    }
    // Re-schedule the timer.
    ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp(
          "Memory reducer: waiting for %.f ms\n",
          state_.next_gc_start_ms - event.time_ms);
    }
  }
}


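// Handles a completed mark-compact GC: updates the state machine and starts
// the timer if the reducer transitions into the WAIT state.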
void MemoryReducer::NotifyMarkCompact(const Event& event) {
  DCHECK_EQ(kMarkCompact, event.type);
  Action old_action = state_.action;
  state_ = Step(state_, event);
  if (old_action != kWait && state_.action == kWait) {
    // If we are transitioning to the WAIT state, start the timer.
    ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
  }
  if (old_action == kRun) {
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp(
          "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs,
          state_.action == kWait ? "will do more" : "done");
    }
  }
}

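// Handles a notification that garbage is likely present: may arm the timer
// by transitioning the state machine into the WAIT state.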
void MemoryReducer::NotifyPossibleGarbage(const Event& event) {
  DCHECK_EQ(kPossibleGarbage, event.type);
  Action old_action = state_.action;
  state_ = Step(state_, event);
  if (old_action != kWait && state_.action == kWait) {
    // If we are transitioning to the WAIT state, start the timer.
    ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
  }
}


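// Returns true if the last GC was long enough ago (kWatchdogDelayMs) that the
// timer handler may start a GC even if the mutator does not look idle.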
bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
  return state.last_gc_time_ms != 0 &&
         event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs;
}


// For specification of this function see the comment for MemoryReducer class.
MemoryReducer::State MemoryReducer::Step(const State& state,
                                         const Event& event) {
  if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
    return State(kDone, 0, 0, state.last_gc_time_ms, 0);
  }
  switch (state.action) {
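    // DONE: ignore stray timer events. A mark-compact re-arms the reducer
    // only if committed memory grew noticeably since the last run; a
    // possible-garbage notification always re-arms it.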
    case kDone:
      if (event.type == kTimer) {
        return state;
      } else if (event.type == kMarkCompact) {
        if (event.committed_memory <
            Max(static_cast<size_t>(state.committed_memory_at_last_run *
                                    kCommittedMemoryFactor),
                state.committed_memory_at_last_run + kCommittedMemoryDelta)) {
          return state;
        } else {
          return State(kWait, 0, event.time_ms + kLongDelayMs,
                       event.type == kMarkCompact ? event.time_ms
                                                  : state.last_gc_time_ms,
                       0);
        }
      } else {
        DCHECK_EQ(kPossibleGarbage, event.type);
        return State(
            kWait, 0, event.time_ms + kLongDelayMs,
            event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms,
            0);
      }
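    // WAIT: on a timer event, give up after kMaxNumberOfGCs, start the next
    // memory-reducing GC once its scheduled start time has passed, or keep
    // waiting. A mark-compact performed elsewhere restarts the long delay.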
    case kWait:
      switch (event.type) {
        case kPossibleGarbage:
          return state;
        case kTimer:
          if (state.started_gcs >= kMaxNumberOfGCs) {
            return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms,
                         event.committed_memory);
          } else if (event.can_start_incremental_gc &&
                     (event.should_start_incremental_gc ||
                      WatchdogGC(state, event))) {
            if (state.next_gc_start_ms <= event.time_ms) {
              return State(kRun, state.started_gcs + 1, 0.0,
                           state.last_gc_time_ms, 0);
            } else {
              return state;
            }
          } else {
            return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                         state.last_gc_time_ms, 0);
          }
        case kMarkCompact:
          return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                       event.time_ms, 0);
      }
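    // RUN: wait for the started GC to finish. Queue another round after a
    // short delay if it is likely to collect more memory (or it was the first
    // GC of the cycle), otherwise end the cycle.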
    case kRun:
      if (event.type != kMarkCompact) {
        return state;
      } else {
        if (state.started_gcs < kMaxNumberOfGCs &&
            (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
          return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
                       event.time_ms, 0);
        } else {
          return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms,
                       event.committed_memory);
        }
      }
  }
  UNREACHABLE();
}

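// Posts a TimerTask to the isolate's foreground task runner, to run after
// delay_ms plus some slack.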
void MemoryReducer::ScheduleTimer(double delay_ms) {
  DCHECK_LT(0, delay_ms);
  if (heap()->IsTearingDown()) return;
  // Leave some room for precision error in the task scheduler.
  const double kSlackMs = 100;
  taskrunner_->PostDelayedTask(
      base::make_unique<MemoryReducer::TimerTask>(this),
      (delay_ms + kSlackMs) / 1000.0);
}

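// Resets the state machine to its initial DONE state.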
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); }

}  // namespace internal
}  // namespace v8