// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/renderer_host/web_cache_manager.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/prefs/pref_registry_simple.h"
#include "base/prefs/pref_service.h"
#include "base/sys_info.h"
#include "base/time/time.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/chrome_notification_types.h"
#include "chrome/common/chrome_constants.h"
#include "chrome/common/pref_names.h"
#include "chrome/common/render_messages.h"
#include "content/public/browser/notification_service.h"
#include "content/public/browser/render_process_host.h"

using base::Time;
using base::TimeDelta;
using WebKit::WebCache;

static const int kReviseAllocationDelayMS = 200;

// The default size limit of the in-memory cache is 8 MB
static const int kDefaultMemoryCacheSize = 8 * 1024 * 1024;

namespace {

int GetDefaultCacheSize() {
  // Start off with a modest default
  int default_cache_size = kDefaultMemoryCacheSize;

  // Check how much physical memory the OS has
  int mem_size_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
  if (mem_size_mb >= 1000)  // If we have a GB of memory, set a larger default.
    default_cache_size *= 4;
  else if (mem_size_mb >= 512)  // With 512 MB, set a slightly larger default.
    default_cache_size *= 2;
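  // For example, under these rules a machine with 2 GB of RAM gets a 32 MB
  // default, a machine with 768 MB gets 16 MB, and anything below 512 MB
  // keeps the 8 MB baseline.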

  UMA_HISTOGRAM_MEMORY_MB("Cache.MaxCacheSizeMB",
                          default_cache_size / 1024 / 1024);

  return default_cache_size;
}

}  // anonymous namespace

// static
void WebCacheManager::RegisterPrefs(PrefRegistrySimple* registry) {
  registry->RegisterIntegerPref(prefs::kMemoryCacheSize, GetDefaultCacheSize());
}

// static
WebCacheManager* WebCacheManager::GetInstance() {
  return Singleton<WebCacheManager>::get();
}

WebCacheManager::WebCacheManager()
    : global_size_limit_(GetDefaultGlobalSizeLimit()),
      weak_factory_(this) {
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_CREATED,
                 content::NotificationService::AllBrowserContextsAndSources());
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_TERMINATED,
                 content::NotificationService::AllBrowserContextsAndSources());
}

WebCacheManager::~WebCacheManager() {
}

void WebCacheManager::Add(int renderer_id) {
  DCHECK(inactive_renderers_.count(renderer_id) == 0);

  // It is tempting to make the following DCHECK here, but it fails when a new
  // tab is created as we observe activity from that tab because the
  // RenderProcessHost is recreated and adds itself.
  //
  //   DCHECK(active_renderers_.count(renderer_id) == 0);
  //
  // However, there doesn't seem to be much harm in receiving the calls in this
  // order.

  active_renderers_.insert(renderer_id);

  RendererInfo* stats = &(stats_[renderer_id]);
  memset(stats, 0, sizeof(*stats));
  stats->access = Time::Now();

  // Revise our allocation strategy to account for this new renderer.
  ReviseAllocationStrategyLater();
}

void WebCacheManager::Remove(int renderer_id) {
  // Erase all knowledge of this renderer
  active_renderers_.erase(renderer_id);
  inactive_renderers_.erase(renderer_id);
  stats_.erase(renderer_id);

  // Reallocate the resources used by this renderer
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ObserveActivity(int renderer_id) {
  StatsMap::iterator item = stats_.find(renderer_id);
  if (item == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record activity.
  active_renderers_.insert(renderer_id);
  item->second.access = Time::Now();

  std::set<int>::iterator elmt = inactive_renderers_.find(renderer_id);
  if (elmt != inactive_renderers_.end()) {
    inactive_renderers_.erase(elmt);

    // A renderer that was inactive just became active.  We should make sure
    // it is given a fair cache allocation, but we defer this for a bit in
    // order to make this function call cheap.
    ReviseAllocationStrategyLater();
  }
}

void WebCacheManager::ObserveStats(int renderer_id,
                                   const WebCache::UsageStats& stats) {
  StatsMap::iterator entry = stats_.find(renderer_id);
  if (entry == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record the updated stats.
  entry->second.capacity = stats.capacity;
  entry->second.deadSize = stats.deadSize;
  entry->second.liveSize = stats.liveSize;
  entry->second.maxDeadCapacity = stats.maxDeadCapacity;
  entry->second.minDeadCapacity = stats.minDeadCapacity;
}

void WebCacheManager::SetGlobalSizeLimit(size_t bytes) {
  global_size_limit_ = bytes;
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ClearCache() {
  // Tell each renderer process to clear the cache.
  ClearRendererCache(active_renderers_, INSTANTLY);
  ClearRendererCache(inactive_renderers_, INSTANTLY);
}

void WebCacheManager::ClearCacheOnNavigation() {
  // Tell each renderer process to clear the cache when a tab is reloaded or
  // the user navigates to a new website.
  ClearRendererCache(active_renderers_, ON_NAVIGATION);
  ClearRendererCache(inactive_renderers_, ON_NAVIGATION);
}

void WebCacheManager::Observe(int type,
                              const content::NotificationSource& source,
                              const content::NotificationDetails& details) {
  switch (type) {
    case content::NOTIFICATION_RENDERER_PROCESS_CREATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Add(process->GetID());
      break;
    }
    case content::NOTIFICATION_RENDERER_PROCESS_TERMINATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Remove(process->GetID());
      break;
    }
    default:
      NOTREACHED();
      break;
  }
}

// static
size_t WebCacheManager::GetDefaultGlobalSizeLimit() {
  PrefService* pref_service = g_browser_process->local_state();
  if (pref_service)
    return pref_service->GetInteger(prefs::kMemoryCacheSize);

  return GetDefaultCacheSize();
}

void WebCacheManager::GatherStats(const std::set<int>& renderers,
                                  WebCache::UsageStats* stats) {
  DCHECK(stats);

  memset(stats, 0, sizeof(WebCache::UsageStats));

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end()) {
      stats->minDeadCapacity += elmt->second.minDeadCapacity;
      stats->maxDeadCapacity += elmt->second.maxDeadCapacity;
      stats->capacity += elmt->second.capacity;
      stats->liveSize += elmt->second.liveSize;
      stats->deadSize += elmt->second.deadSize;
    }
    ++iter;
  }
}

// static
size_t WebCacheManager::GetSize(AllocationTactic tactic,
                                const WebCache::UsageStats& stats) {
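  // As a worked example: a renderer reporting liveSize = 4 MB and
  // deadSize = 2 MB needs 6 MB under KEEP_CURRENT, 9 MB under
  // KEEP_CURRENT_WITH_HEADROOM, 4 MB under KEEP_LIVE, 6 MB under
  // KEEP_LIVE_WITH_HEADROOM, and nothing under DIVIDE_EVENLY.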
  switch (tactic) {
  case DIVIDE_EVENLY:
    // We aren't going to reserve any space for existing objects.
    return 0;
  case KEEP_CURRENT_WITH_HEADROOM:
    // We need enough space for our current objects, plus 50% headroom.
    return 3 * GetSize(KEEP_CURRENT, stats) / 2;
  case KEEP_CURRENT:
    // We need enough space to keep our current objects.
    return stats.liveSize + stats.deadSize;
  case KEEP_LIVE_WITH_HEADROOM:
    // We need enough space to keep our live resources, plus 50% headroom.
    return 3 * GetSize(KEEP_LIVE, stats) / 2;
  case KEEP_LIVE:
    // We need enough space to keep our live resources.
    return stats.liveSize;
  default:
    NOTREACHED() << "Unknown cache allocation tactic";
    return 0;
  }
}

bool WebCacheManager::AttemptTactic(
    AllocationTactic active_tactic,
    const WebCache::UsageStats& active_stats,
    AllocationTactic inactive_tactic,
    const WebCache::UsageStats& inactive_stats,
    AllocationStrategy* strategy) {
  DCHECK(strategy);

  size_t active_size = GetSize(active_tactic, active_stats);
  size_t inactive_size = GetSize(inactive_tactic, inactive_stats);

  // Give up if we don't have enough space to use this tactic.
  if (global_size_limit_ < active_size + inactive_size)
    return false;

  // Compute the unreserved space available.
  size_t total_extra = global_size_limit_ - (active_size + inactive_size);

  // The plan for the extra space is to divide it evenly among the active
  // renderers.
  size_t shares = active_renderers_.size();

  // The inactive renderers get one share of the extra memory to be divided
  // among themselves.
  size_t inactive_extra = 0;
  if (!inactive_renderers_.empty()) {
    ++shares;
    inactive_extra = total_extra / shares;
  }

  // The remaining memory is allocated to the active renderers.
  size_t active_extra = total_extra - inactive_extra;
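  // As a hypothetical example: with three active renderers, at least one
  // inactive renderer, and 16 MB of unreserved space, there are four shares.
  // The inactive renderers collectively receive 4 MB, and the remaining 12 MB
  // is spread across the active renderers on top of their tactic-based
  // reservations.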

  // Actually compute the allocations for each renderer.
  AddToStrategy(active_renderers_, active_tactic, active_extra, strategy);
  AddToStrategy(inactive_renderers_, inactive_tactic, inactive_extra, strategy);

  // We succeeded in computing an allocation strategy.
  return true;
}

void WebCacheManager::AddToStrategy(const std::set<int>& renderers,
                                    AllocationTactic tactic,
                                    size_t extra_bytes_to_allocate,
                                    AllocationStrategy* strategy) {
  DCHECK(strategy);

  // Nothing to do if there are no renderers.  It is common for there to be no
  // inactive renderers if there is a single active tab.
  if (renderers.empty())
    return;

  // Divide the extra memory evenly among the renderers.
  size_t extra_each = extra_bytes_to_allocate / renderers.size();
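  // Note that this integer division can leave a few remainder bytes
  // unassigned; at cache-size scale the shortfall is negligible.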

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    size_t cache_size = extra_each;

    // Add in the space required to implement |tactic|.
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end())
      cache_size += GetSize(tactic, elmt->second);

    // Record the allocation in our strategy.
    strategy->push_back(Allocation(*iter, cache_size));
    ++iter;
  }
}

void WebCacheManager::EnactStrategy(const AllocationStrategy& strategy) {
  // Inform each render process of its cache allocation.
  AllocationStrategy::const_iterator allocation = strategy.begin();
  while (allocation != strategy.end()) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(allocation->first);
    if (host) {
      // This is the capacity this renderer has been allocated.
      size_t capacity = allocation->second;

      // We don't reserve any space for dead objects in the cache. Instead, we
      // prefer to keep live objects around. There is probably some performance
      // tuning to be done here.
      size_t min_dead_capacity = 0;

      // We allow the dead objects to consume up to half of the cache capacity.
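      // For example, a renderer allocated an 8 MB capacity may devote at most
      // 4 MB of it to dead objects.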
      size_t max_dead_capacity = capacity / 2;

      host->Send(new ChromeViewMsg_SetCacheCapacities(min_dead_capacity,
                                                      max_dead_capacity,
                                                      capacity));
    }
    ++allocation;
  }
}

void WebCacheManager::ClearRendererCache(
    const std::set<int>& renderers,
    WebCacheManager::ClearCacheOccasion occasion) {
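  // The ON_NAVIGATION occasion asks each renderer (via the boolean argument
  // of ChromeViewMsg_ClearCache below) to clear its cache on its next
  // navigation, whereas INSTANTLY clears it right away.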
  std::set<int>::const_iterator iter = renderers.begin();
  for (; iter != renderers.end(); ++iter) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(*iter);
    if (host)
      host->Send(new ChromeViewMsg_ClearCache(occasion == ON_NAVIGATION));
  }
}

void WebCacheManager::ReviseAllocationStrategy() {
  DCHECK(stats_.size() <=
      active_renderers_.size() + inactive_renderers_.size());

  // Check if renderers have gone inactive.
  FindInactiveRenderers();

  // Gather statistics
  WebCache::UsageStats active;
  WebCache::UsageStats inactive;
  GatherStats(active_renderers_, &active);
  GatherStats(inactive_renderers_, &inactive);

  UMA_HISTOGRAM_COUNTS_100("Cache.ActiveTabs", active_renderers_.size());
  UMA_HISTOGRAM_COUNTS_100("Cache.InactiveTabs", inactive_renderers_.size());
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveCapacityMB",
                          active.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveDeadSizeMB",
                          active.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveLiveSizeMB",
                          active.liveSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveCapacityMB",
                          inactive.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveDeadSizeMB",
                          inactive.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveLiveSizeMB",
                          inactive.liveSize / 1024 / 1024);

  // Compute an allocation strategy.
  //
  // We attempt various tactics in order of preference.  Our first preference
  // is not to evict any objects.  If we don't have enough resources, we'll
  // first try to evict dead data only.  If that fails, we'll just divide the
  // resources we have evenly.
  //
  // We always try to give the active renderers some head room in their
  // allocations so they can take memory away from an inactive renderer with
  // a large cache allocation.
  //
  // Notice the early exit will prevent attempting less desirable tactics once
  // we've found a workable strategy.
  AllocationStrategy strategy;
  if (  // Ideally, we'd like to give the active renderers some headroom and
        // keep all our current objects.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_CURRENT, inactive, &strategy) ||
      // If we can't have that, then we first try to evict the dead objects in
      // the caches of inactive renderers.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_LIVE, inactive, &strategy) ||
      // Next, we try to keep the live objects in the active renderers (with
      // some room for new objects) and give whatever is left to the inactive
      // renderers.
      AttemptTactic(KEEP_LIVE_WITH_HEADROOM, active,
                    DIVIDE_EVENLY, inactive, &strategy) ||
      // If we've gotten this far, then we are very tight on memory.  Let's try
      // to at least keep around the live objects for the active renderers.
      AttemptTactic(KEEP_LIVE, active, DIVIDE_EVENLY, inactive, &strategy) ||
      // We're basically out of memory.  The best we can do is just divide up
      // what we have and soldier on.
      AttemptTactic(DIVIDE_EVENLY, active, DIVIDE_EVENLY, inactive,
                    &strategy)) {
    // Having found a workable strategy, we enact it.
    EnactStrategy(strategy);
  } else {
    // DIVIDE_EVENLY / DIVIDE_EVENLY should always succeed.
    NOTREACHED() << "Unable to find a cache allocation";
  }
}

void WebCacheManager::ReviseAllocationStrategyLater() {
  // Ask to be called back in a few milliseconds to actually recompute our
  // allocation.
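  // Because the callback is bound through |weak_factory_|, the deferred call
  // becomes a no-op if this manager is destroyed before it runs.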
  base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
      base::Bind(
          &WebCacheManager::ReviseAllocationStrategy,
          weak_factory_.GetWeakPtr()),
      base::TimeDelta::FromMilliseconds(kReviseAllocationDelayMS));
}

void WebCacheManager::FindInactiveRenderers() {
  std::set<int>::const_iterator iter = active_renderers_.begin();
  while (iter != active_renderers_.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    DCHECK(elmt != stats_.end());
    TimeDelta idle = Time::Now() - elmt->second.access;
    if (idle >= TimeDelta::FromMinutes(kRendererInactiveThresholdMinutes)) {
      // Moved to inactive status.  This invalidates our iterator.
      inactive_renderers_.insert(*iter);
      active_renderers_.erase(*iter);
      iter = active_renderers_.begin();
      continue;
    }
    ++iter;
  }
}