// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_scheduler.h"

#include <algorithm>
#include <limits>

#include "base/process/process_metrics.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "build/build_config.h"

namespace base {
namespace trace_event {

namespace {
// Threshold for the increase in memory since the last dump beyond which a new
// dump is triggered.
int64_t kDefaultMemoryIncreaseThreshold = 50 * 1024 * 1024;  // 50MiB
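// Interval between two consecutive memory polls, in milliseconds.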
const uint32_t kMemoryTotalsPollingInterval = 25;
uint32_t g_polling_interval_ms_for_testing = 0;
}  // namespace

// static
MemoryDumpScheduler* MemoryDumpScheduler::GetInstance() {
  static MemoryDumpScheduler* instance = new MemoryDumpScheduler();
  return instance;
}

MemoryDumpScheduler::MemoryDumpScheduler() : mdm_(nullptr), is_setup_(false) {}
MemoryDumpScheduler::~MemoryDumpScheduler() {}

void MemoryDumpScheduler::Setup(
    MemoryDumpManager* mdm,
    scoped_refptr<SingleThreadTaskRunner> polling_task_runner) {
  mdm_ = mdm;
  polling_task_runner_ = polling_task_runner;
  periodic_state_.reset(new PeriodicTriggerState);
  polling_state_.reset(new PollingTriggerState);
  is_setup_ = true;
}

void MemoryDumpScheduler::AddTrigger(MemoryDumpType trigger_type,
                                     MemoryDumpLevelOfDetail level_of_detail,
                                     uint32_t min_time_between_dumps_ms) {
  DCHECK(is_setup_);
  if (trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
    DCHECK(!periodic_state_->is_configured);
    DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_->current_state);
    DCHECK_NE(0u, min_time_between_dumps_ms);

    polling_state_->level_of_detail = level_of_detail;
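    // Round up so that at least |min_time_between_dumps_ms| elapses between
    // two consecutive peak dumps.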
    polling_state_->min_polls_between_dumps =
        (min_time_between_dumps_ms + polling_state_->polling_interval_ms - 1) /
        polling_state_->polling_interval_ms;
    polling_state_->current_state = PollingTriggerState::CONFIGURED;
  } else if (trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
    DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_->current_state);
    periodic_state_->is_configured = true;
    DCHECK_NE(0u, min_time_between_dumps_ms);
    switch (level_of_detail) {
      case MemoryDumpLevelOfDetail::BACKGROUND:
        break;
      case MemoryDumpLevelOfDetail::LIGHT:
        DCHECK_EQ(0u, periodic_state_->light_dump_period_ms);
        periodic_state_->light_dump_period_ms = min_time_between_dumps_ms;
        break;
      case MemoryDumpLevelOfDetail::DETAILED:
        DCHECK_EQ(0u, periodic_state_->heavy_dump_period_ms);
        periodic_state_->heavy_dump_period_ms = min_time_between_dumps_ms;
        break;
    }

    periodic_state_->min_timer_period_ms = std::min(
        periodic_state_->min_timer_period_ms, min_time_between_dumps_ms);
    DCHECK_EQ(0u, periodic_state_->light_dump_period_ms %
                      periodic_state_->min_timer_period_ms);
    DCHECK_EQ(0u, periodic_state_->heavy_dump_period_ms %
                      periodic_state_->min_timer_period_ms);
  }
}

void MemoryDumpScheduler::EnablePeriodicTriggerIfNeeded() {
  DCHECK(is_setup_);
  if (!periodic_state_->is_configured || periodic_state_->timer.IsRunning())
    return;
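  // Express the light and heavy dump periods as multiples of the timer period,
  // i.e. how many timer ticks must elapse between two dumps of that level.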
  periodic_state_->light_dumps_rate = periodic_state_->light_dump_period_ms /
                                      periodic_state_->min_timer_period_ms;
  periodic_state_->heavy_dumps_rate = periodic_state_->heavy_dump_period_ms /
                                      periodic_state_->min_timer_period_ms;

  periodic_state_->dump_count = 0;
  periodic_state_->timer.Start(
      FROM_HERE,
      TimeDelta::FromMilliseconds(periodic_state_->min_timer_period_ms),
      Bind(&MemoryDumpScheduler::RequestPeriodicGlobalDump, Unretained(this)));
}

void MemoryDumpScheduler::EnablePollingIfNeeded() {
  DCHECK(is_setup_);
  if (polling_state_->current_state != PollingTriggerState::CONFIGURED)
    return;

  polling_state_->current_state = PollingTriggerState::ENABLED;
  polling_state_->ResetTotals();

  polling_task_runner_->PostTask(
      FROM_HERE,
      Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)));
}

void MemoryDumpScheduler::NotifyDumpTriggered() {
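  // If called on a thread other than the polling thread, re-post this
  // notification there so that the totals below are reset on the same thread
  // that polls memory.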
  if (polling_task_runner_ &&
      !polling_task_runner_->RunsTasksOnCurrentThread()) {
    polling_task_runner_->PostTask(
        FROM_HERE,
        Bind(&MemoryDumpScheduler::NotifyDumpTriggered, Unretained(this)));
    return;
  }

  if (!polling_state_ ||
      polling_state_->current_state != PollingTriggerState::ENABLED) {
    return;
  }

  polling_state_->ResetTotals();
}

void MemoryDumpScheduler::DisableAllTriggers() {
  if (periodic_state_) {
    if (periodic_state_->timer.IsRunning())
      periodic_state_->timer.Stop();
    periodic_state_.reset();
  }

  if (polling_task_runner_) {
    DCHECK(polling_state_);
    polling_task_runner_->PostTask(
        FROM_HERE, Bind(&MemoryDumpScheduler::DisablePollingOnPollingThread,
                        Unretained(this)));
    polling_task_runner_ = nullptr;
  }
  is_setup_ = false;
}

void MemoryDumpScheduler::DisablePollingOnPollingThread() {
  polling_state_->current_state = PollingTriggerState::DISABLED;
  polling_state_.reset();
}

// static
void MemoryDumpScheduler::SetPollingIntervalForTesting(uint32_t interval) {
  g_polling_interval_ms_for_testing = interval;
}

bool MemoryDumpScheduler::IsPeriodicTimerRunningForTesting() {
  return periodic_state_->timer.IsRunning();
}

void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
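  // Default to a BACKGROUND dump; upgrade to LIGHT or DETAILED whenever the
  // dump count reaches a multiple of the corresponding rate. DETAILED wins if
  // both rates match on the same tick.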
  MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
  if (periodic_state_->light_dumps_rate > 0 &&
      periodic_state_->dump_count % periodic_state_->light_dumps_rate == 0)
    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
  if (periodic_state_->heavy_dumps_rate > 0 &&
      periodic_state_->dump_count % periodic_state_->heavy_dumps_rate == 0)
    level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
  ++periodic_state_->dump_count;

  mdm_->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
}

void MemoryDumpScheduler::PollMemoryOnPollingThread() {
  if (!polling_state_)
    return;

  DCHECK_EQ(PollingTriggerState::ENABLED, polling_state_->current_state);

  uint64_t polled_memory = 0;
  bool res = mdm_->PollFastMemoryTotal(&polled_memory);
  DCHECK(res);
  if (polling_state_->level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
    TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
                   polled_memory / 1024 / 1024);
  }

  if (ShouldTriggerDump(polled_memory)) {
    TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
                         "Peak memory dump Triggered",
                         TRACE_EVENT_SCOPE_PROCESS, "total_usage_MB",
                         polled_memory / 1024 / 1024);

    mdm_->RequestGlobalDump(MemoryDumpType::PEAK_MEMORY_USAGE,
                            polling_state_->level_of_detail);
  }

  // TODO(ssid): Use RequestSchedulerCallback, crbug.com/607533.
  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE,
      Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)),
      TimeDelta::FromMilliseconds(polling_state_->polling_interval_ms));
}

bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
  // This function tries to detect peak memory usage as discussed in
  // https://goo.gl/0kOU4A.

  if (current_memory_total == 0)
    return false;

  bool should_dump = false;
  ++polling_state_->num_polls_from_last_dump;
  if (polling_state_->last_dump_memory_total == 0) {
    // If this is the first sample, trigger a dump.
    should_dump = true;
  } else if (polling_state_->min_polls_between_dumps >
             polling_state_->num_polls_from_last_dump) {
    return false;
  }

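  // Trigger if memory usage grew by more than the configured threshold since
  // the last dump, or if the current sample looks like a statistical peak.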
  int64_t increase_from_last_dump =
      current_memory_total - polling_state_->last_dump_memory_total;
  should_dump |=
      increase_from_last_dump > polling_state_->memory_increase_threshold;
  should_dump |= IsCurrentSamplePeak(current_memory_total);
  if (should_dump)
    polling_state_->ResetTotals();
  return should_dump;
}

bool MemoryDumpScheduler::IsCurrentSamplePeak(
    uint64_t current_memory_total_bytes) {
  uint64_t current_memory_total_kb = current_memory_total_bytes / 1024;
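  // Advance the index into the circular buffer of the most recent samples;
  // the current sample is written into this slot further below.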
  polling_state_->last_memory_totals_kb_index =
      (polling_state_->last_memory_totals_kb_index + 1) %
      PollingTriggerState::kMaxNumMemorySamples;
  uint64_t mean = 0;
  for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
    if (polling_state_->last_memory_totals_kb[i] == 0) {
      // Not enough samples to detect peaks.
      polling_state_
          ->last_memory_totals_kb[polling_state_->last_memory_totals_kb_index] =
          current_memory_total_kb;
      return false;
    }
    mean += polling_state_->last_memory_totals_kb[i];
  }
  mean = mean / PollingTriggerState::kMaxNumMemorySamples;
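  // Compute the population variance of the recent samples around the mean.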
  uint64_t variance = 0;
  for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
    variance += (polling_state_->last_memory_totals_kb[i] - mean) *
                (polling_state_->last_memory_totals_kb[i] - mean);
  }
  variance = variance / PollingTriggerState::kMaxNumMemorySamples;

  polling_state_
      ->last_memory_totals_kb[polling_state_->last_memory_totals_kb_index] =
      current_memory_total_kb;

  // If the stddev is less than 0.2% of the mean, consider the process inactive.
  bool is_stddev_low = variance < mean / 500 * mean / 500;
  if (is_stddev_low)
    return false;

  // (mean + 3.69 * stddev) is higher than the current sample with 99.99%
  // probability (assuming a normal distribution), so a sample above that
  // bound is treated as a peak.
  return (current_memory_total_kb - mean) * (current_memory_total_kb - mean) >
         (3.69 * 3.69 * variance);
}

MemoryDumpScheduler::PeriodicTriggerState::PeriodicTriggerState()
    : is_configured(false),
      dump_count(0),
      min_timer_period_ms(std::numeric_limits<uint32_t>::max()),
      light_dumps_rate(0),
      heavy_dumps_rate(0),
      light_dump_period_ms(0),
      heavy_dump_period_ms(0) {}

MemoryDumpScheduler::PeriodicTriggerState::~PeriodicTriggerState() {
  DCHECK(!timer.IsRunning());
}

MemoryDumpScheduler::PollingTriggerState::PollingTriggerState()
    : current_state(DISABLED),
      level_of_detail(MemoryDumpLevelOfDetail::FIRST),
      polling_interval_ms(g_polling_interval_ms_for_testing
                              ? g_polling_interval_ms_for_testing
                              : kMemoryTotalsPollingInterval),
      min_polls_between_dumps(0),
      num_polls_from_last_dump(-1),
      last_dump_memory_total(0),
      memory_increase_threshold(0),
      last_memory_totals_kb_index(0) {}

MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {}

void MemoryDumpScheduler::PollingTriggerState::ResetTotals() {
  if (!memory_increase_threshold) {
    memory_increase_threshold = kDefaultMemoryIncreaseThreshold;
#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
    defined(OS_ANDROID)
    // Set threshold to 1% of total system memory.
    SystemMemoryInfoKB meminfo;
    bool res = GetSystemMemoryInfo(&meminfo);
    if (res) {
      memory_increase_threshold =
          (static_cast<int64_t>(meminfo.total) / 100) * 1024;
    }
    DCHECK_GT(memory_increase_threshold, 0u);
#endif
  }

  // If this is not the first poll, update |last_dump_memory_total| from the
  // most recently recorded sample.
  if (num_polls_from_last_dump >= 0 &&
      last_memory_totals_kb[last_memory_totals_kb_index]) {
    last_dump_memory_total =
        last_memory_totals_kb[last_memory_totals_kb_index] * 1024;
  }
  num_polls_from_last_dump = 0;
  for (uint32_t i = 0; i < kMaxNumMemorySamples; ++i)
    last_memory_totals_kb[i] = 0;
  last_memory_totals_kb_index = 0;
}

}  // namespace trace_event
}  // namespace base