// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/renderer_host/web_cache_manager.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/prefs/pref_registry_simple.h"
#include "base/prefs/pref_service.h"
#include "base/sys_info.h"
#include "base/time/time.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/chrome_notification_types.h"
#include "chrome/common/chrome_constants.h"
#include "chrome/common/pref_names.h"
#include "chrome/common/render_messages.h"
#include "content/public/browser/notification_service.h"
#include "content/public/browser/render_process_host.h"

#if defined(OS_ANDROID)
#include "base/android/sys_utils.h"
#endif

using base::Time;
using base::TimeDelta;
using blink::WebCache;

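// How long to wait before recomputing the cache allocation after a renderer
// is added, removed, or becomes active again.  Deferring the work keeps the
// notification handlers cheap.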
static const int kReviseAllocationDelayMS = 200;

// The default size limit of the in-memory cache is 8 MB.
static const int kDefaultMemoryCacheSize = 8 * 1024 * 1024;

namespace {

int GetDefaultCacheSize() {
  // Start off with a modest default.
  int default_cache_size = kDefaultMemoryCacheSize;

  // Check how much physical memory the OS has.
  int mem_size_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
  if (mem_size_mb >= 1000)  // If we have a GB of memory, set a larger default.
    default_cache_size *= 4;
  else if (mem_size_mb >= 512)  // With 512 MB, set a slightly larger default.
    default_cache_size *= 2;
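  // The default therefore ends up at 8 MB, 16 MB, or 32 MB of cache,
  // depending on how much physical memory is available.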

  UMA_HISTOGRAM_MEMORY_MB("Cache.MaxCacheSizeMB",
                          default_cache_size / 1024 / 1024);

  return default_cache_size;
}

}  // anonymous namespace

// static
void WebCacheManager::RegisterPrefs(PrefRegistrySimple* registry) {
  registry->RegisterIntegerPref(prefs::kMemoryCacheSize, GetDefaultCacheSize());
}

// static
WebCacheManager* WebCacheManager::GetInstance() {
  return Singleton<WebCacheManager>::get();
}

WebCacheManager::WebCacheManager()
    : global_size_limit_(GetDefaultGlobalSizeLimit()),
      weak_factory_(this) {
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_CREATED,
                 content::NotificationService::AllBrowserContextsAndSources());
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_TERMINATED,
                 content::NotificationService::AllBrowserContextsAndSources());
}

WebCacheManager::~WebCacheManager() {
}

void WebCacheManager::Add(int renderer_id) {
  DCHECK(inactive_renderers_.count(renderer_id) == 0);

  // It is tempting to make the following DCHECK here, but it fails when a new
  // tab is created as we observe activity from that tab because the
  // RenderProcessHost is recreated and adds itself.
  //
  //   DCHECK(active_renderers_.count(renderer_id) == 0);
  //
  // However, there doesn't seem to be much harm in receiving the calls in this
  // order.

  active_renderers_.insert(renderer_id);

  RendererInfo* stats = &(stats_[renderer_id]);
  memset(stats, 0, sizeof(*stats));
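  // The usage numbers stay zeroed until the renderer reports real statistics
  // via ObserveStats(); for now just record when we first saw this renderer.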
  stats->access = Time::Now();

  // Revise our allocation strategy to account for this new renderer.
  ReviseAllocationStrategyLater();
}

void WebCacheManager::Remove(int renderer_id) {
  // Erase all knowledge of this renderer.
  active_renderers_.erase(renderer_id);
  inactive_renderers_.erase(renderer_id);
  stats_.erase(renderer_id);

  // Reallocate the resources used by this renderer.
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ObserveActivity(int renderer_id) {
  StatsMap::iterator item = stats_.find(renderer_id);
  if (item == stats_.end())
    return;  // We might see activity from a renderer that has been destroyed.

  // Record activity.
  active_renderers_.insert(renderer_id);
  item->second.access = Time::Now();

  std::set<int>::iterator elmt = inactive_renderers_.find(renderer_id);
  if (elmt != inactive_renderers_.end()) {
    inactive_renderers_.erase(elmt);

    // A renderer that was inactive just became active.  We should make sure
    // it is given a fair cache allocation, but we defer this for a bit in
    // order to make this function call cheap.
    ReviseAllocationStrategyLater();
  }
}

void WebCacheManager::ObserveStats(int renderer_id,
                                   const WebCache::UsageStats& stats) {
  StatsMap::iterator entry = stats_.find(renderer_id);
  if (entry == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record the updated stats.
  entry->second.capacity = stats.capacity;
  entry->second.deadSize = stats.deadSize;
  entry->second.liveSize = stats.liveSize;
  entry->second.maxDeadCapacity = stats.maxDeadCapacity;
  entry->second.minDeadCapacity = stats.minDeadCapacity;
}

void WebCacheManager::SetGlobalSizeLimit(size_t bytes) {
  global_size_limit_ = bytes;
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ClearCache() {
  // Tell each renderer process to clear the cache.
  ClearRendererCache(active_renderers_, INSTANTLY);
  ClearRendererCache(inactive_renderers_, INSTANTLY);
}

void WebCacheManager::ClearCacheOnNavigation() {
  // Tell each renderer process to clear the cache when a tab is reloaded or
  // the user navigates to a new website.
  ClearRendererCache(active_renderers_, ON_NAVIGATION);
  ClearRendererCache(inactive_renderers_, ON_NAVIGATION);
}

void WebCacheManager::Observe(int type,
                              const content::NotificationSource& source,
                              const content::NotificationDetails& details) {
  switch (type) {
    case content::NOTIFICATION_RENDERER_PROCESS_CREATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Add(process->GetID());
      break;
    }
    case content::NOTIFICATION_RENDERER_PROCESS_TERMINATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Remove(process->GetID());
      break;
    }
    default:
      NOTREACHED();
      break;
  }
}

// static
size_t WebCacheManager::GetDefaultGlobalSizeLimit() {
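  // Prefer the value stored in local state; if local state is not available
  // yet, fall back to the platform-dependent default computed above.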
  PrefService* pref_service = g_browser_process->local_state();
  if (pref_service)
    return pref_service->GetInteger(prefs::kMemoryCacheSize);

  return GetDefaultCacheSize();
}

void WebCacheManager::GatherStats(const std::set<int>& renderers,
                                  WebCache::UsageStats* stats) {
  DCHECK(stats);

  memset(stats, 0, sizeof(WebCache::UsageStats));

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end()) {
      stats->minDeadCapacity += elmt->second.minDeadCapacity;
      stats->maxDeadCapacity += elmt->second.maxDeadCapacity;
      stats->capacity += elmt->second.capacity;
      stats->liveSize += elmt->second.liveSize;
      stats->deadSize += elmt->second.deadSize;
    }
    ++iter;
  }
}

// static
size_t WebCacheManager::GetSize(AllocationTactic tactic,
                                const WebCache::UsageStats& stats) {
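  // Worked example: with liveSize = 4 MB and deadSize = 2 MB, KEEP_LIVE
  // returns 4 MB, KEEP_CURRENT returns 6 MB, and the *_WITH_HEADROOM variants
  // return 1.5x those figures.  DIVIDE_EVENLY reserves nothing for existing
  // objects.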
  switch (tactic) {
  case DIVIDE_EVENLY:
    // We aren't going to reserve any space for existing objects.
    return 0;
  case KEEP_CURRENT_WITH_HEADROOM:
    // We need enough space for our current objects, plus some headroom.
    return 3 * GetSize(KEEP_CURRENT, stats) / 2;
  case KEEP_CURRENT:
    // We need enough space to keep our current objects.
    return stats.liveSize + stats.deadSize;
  case KEEP_LIVE_WITH_HEADROOM:
    // We need enough space to keep our live resources, plus some headroom.
    return 3 * GetSize(KEEP_LIVE, stats) / 2;
  case KEEP_LIVE:
    // We need enough space to keep our live resources.
    return stats.liveSize;
  default:
    NOTREACHED() << "Unknown cache allocation tactic";
    return 0;
  }
}

bool WebCacheManager::AttemptTactic(
    AllocationTactic active_tactic,
    const WebCache::UsageStats& active_stats,
    AllocationTactic inactive_tactic,
    const WebCache::UsageStats& inactive_stats,
    AllocationStrategy* strategy) {
  DCHECK(strategy);

  size_t active_size = GetSize(active_tactic, active_stats);
  size_t inactive_size = GetSize(inactive_tactic, inactive_stats);

  // Give up if we don't have enough space to use this tactic.
  if (global_size_limit_ < active_size + inactive_size)
    return false;

  // Compute the unreserved space available.
  size_t total_extra = global_size_limit_ - (active_size + inactive_size);

  // The plan for the extra space is to divide it evenly among the active
  // renderers.
  size_t shares = active_renderers_.size();

  // The inactive renderers get one share of the extra memory to be divided
  // among themselves.
  size_t inactive_extra = 0;
  if (!inactive_renderers_.empty()) {
    ++shares;
    inactive_extra = total_extra / shares;
  }

  // The remaining memory is allocated to the active renderers.
  size_t active_extra = total_extra - inactive_extra;
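  // Example: with three active renderers, two inactive renderers, and 12 MB
  // of unreserved space there are four shares, so the inactive set splits
  // 3 MB among themselves and the active set splits the remaining 9 MB.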

  // Actually compute the allocations for each renderer.
  AddToStrategy(active_renderers_, active_tactic, active_extra, strategy);
  AddToStrategy(inactive_renderers_, inactive_tactic, inactive_extra, strategy);

  // We succeeded in computing an allocation strategy.
  return true;
}

void WebCacheManager::AddToStrategy(const std::set<int>& renderers,
                                    AllocationTactic tactic,
                                    size_t extra_bytes_to_allocate,
                                    AllocationStrategy* strategy) {
  DCHECK(strategy);

  // Nothing to do if there are no renderers.  It is common for there to be no
  // inactive renderers if there is a single active tab.
  if (renderers.empty())
    return;

  // Divide the extra memory evenly among the renderers.
  size_t extra_each = extra_bytes_to_allocate / renderers.size();

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    size_t cache_size = extra_each;

    // Add in the space required to implement |tactic|.
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end())
      cache_size += GetSize(tactic, elmt->second);

    // Record the allocation in our strategy.
    strategy->push_back(Allocation(*iter, cache_size));
    ++iter;
  }
}

void WebCacheManager::EnactStrategy(const AllocationStrategy& strategy) {
  // Inform each render process of its cache allocation.
  AllocationStrategy::const_iterator allocation = strategy.begin();
  while (allocation != strategy.end()) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(allocation->first);
    if (host) {
      // This is the capacity this renderer has been allocated.
      size_t capacity = allocation->second;

      // We don't reserve any space for dead objects in the cache. Instead, we
      // prefer to keep live objects around. There is probably some performance
      // tuning to be done here.
      size_t min_dead_capacity = 0;

      // We allow the dead objects to consume up to half of the cache capacity.
      size_t max_dead_capacity = capacity / 2;
#if defined(OS_ANDROID)
      if (base::android::SysUtils::IsLowEndDevice())
        max_dead_capacity = std::min(static_cast<size_t>(512 * 1024),
                                     max_dead_capacity);
#endif
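      // For example, a 10 MB allocation becomes capacity = 10 MB,
      // min_dead_capacity = 0 and max_dead_capacity = 5 MB (further capped
      // at 512 KB on low-end Android devices).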

      host->Send(new ChromeViewMsg_SetCacheCapacities(min_dead_capacity,
                                                      max_dead_capacity,
                                                      capacity));
    }
    ++allocation;
  }
}

void WebCacheManager::ClearRendererCache(
    const std::set<int>& renderers,
    WebCacheManager::ClearCacheOccasion occasion) {
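  // The boolean sent to each renderer indicates whether the clear should wait
  // for the next navigation (ON_NAVIGATION) rather than happen immediately
  // (INSTANTLY).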
  std::set<int>::const_iterator iter = renderers.begin();
  for (; iter != renderers.end(); ++iter) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(*iter);
    if (host)
      host->Send(new ChromeViewMsg_ClearCache(occasion == ON_NAVIGATION));
  }
}

void WebCacheManager::ReviseAllocationStrategy() {
  DCHECK(stats_.size() <=
      active_renderers_.size() + inactive_renderers_.size());

  // Check if renderers have gone inactive.
  FindInactiveRenderers();

  // Gather statistics.
  WebCache::UsageStats active;
  WebCache::UsageStats inactive;
  GatherStats(active_renderers_, &active);
  GatherStats(inactive_renderers_, &inactive);

  UMA_HISTOGRAM_COUNTS_100("Cache.ActiveTabs", active_renderers_.size());
  UMA_HISTOGRAM_COUNTS_100("Cache.InactiveTabs", inactive_renderers_.size());
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveCapacityMB",
                          active.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveDeadSizeMB",
                          active.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveLiveSizeMB",
                          active.liveSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveCapacityMB",
                          inactive.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveDeadSizeMB",
                          inactive.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveLiveSizeMB",
                          inactive.liveSize / 1024 / 1024);

  // Compute an allocation strategy.
  //
  // We attempt various tactics in order of preference.  Our first preference
  // is not to evict any objects.  If we don't have enough resources, we'll
  // first try to evict dead data only.  If that fails, we'll just divide the
  // resources we have evenly.
  //
  // We always try to give the active renderers some head room in their
  // allocations so they can take memory away from an inactive renderer with
  // a large cache allocation.
  //
  // Notice the early exit will prevent attempting less desirable tactics once
  // we've found a workable strategy.
  AllocationStrategy strategy;
  if (  // Ideally, we'd like to give the active renderers some headroom and
        // keep all our current objects.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_CURRENT, inactive, &strategy) ||
      // If we can't have that, then we first try to evict the dead objects in
      // the caches of inactive renderers.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_LIVE, inactive, &strategy) ||
      // Next, we try to keep the live objects in the active renderers (with
      // some room for new objects) and give whatever is left to the inactive
      // renderers.
      AttemptTactic(KEEP_LIVE_WITH_HEADROOM, active,
                    DIVIDE_EVENLY, inactive, &strategy) ||
      // If we've gotten this far, then we are very tight on memory.  Let's try
      // to at least keep around the live objects for the active renderers.
      AttemptTactic(KEEP_LIVE, active, DIVIDE_EVENLY, inactive, &strategy) ||
      // We're basically out of memory.  The best we can do is just divide up
      // what we have and soldier on.
      AttemptTactic(DIVIDE_EVENLY, active, DIVIDE_EVENLY, inactive,
                    &strategy)) {
    // Having found a workable strategy, we enact it.
    EnactStrategy(strategy);
  } else {
    // DIVIDE_EVENLY / DIVIDE_EVENLY should always succeed.
    NOTREACHED() << "Unable to find a cache allocation";
  }
}

void WebCacheManager::ReviseAllocationStrategyLater() {
  // Ask to be called back in a few milliseconds to actually recompute our
  // allocation.
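  // The weak pointer means the callback is simply dropped if this manager is
  // destroyed before the delayed task runs.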
  base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
      base::Bind(
          &WebCacheManager::ReviseAllocationStrategy,
          weak_factory_.GetWeakPtr()),
      base::TimeDelta::FromMilliseconds(kReviseAllocationDelayMS));
}

void WebCacheManager::FindInactiveRenderers() {
  std::set<int>::const_iterator iter = active_renderers_.begin();
  while (iter != active_renderers_.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    DCHECK(elmt != stats_.end());
    TimeDelta idle = Time::Now() - elmt->second.access;
    if (idle >= TimeDelta::FromMinutes(kRendererInactiveThresholdMinutes)) {
      // Moved to inactive status.  This invalidates our iterator.
      inactive_renderers_.insert(*iter);
      active_renderers_.erase(*iter);
      iter = active_renderers_.begin();
      continue;
    }
    ++iter;
  }
}