// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/renderer_host/web_cache_manager.h"

#include <algorithm>

#include "base/compiler_specific.h"
#include "base/memory/singleton.h"
#include "base/metrics/histogram.h"
#include "base/sys_info.h"
#include "base/time.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/prefs/pref_service.h"
#include "chrome/common/chrome_constants.h"
#include "chrome/common/pref_names.h"
#include "chrome/common/render_messages.h"
#include "content/browser/renderer_host/render_process_host.h"
#include "content/common/notification_service.h"

using base::Time;
using base::TimeDelta;
using WebKit::WebCache;

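// How long ReviseAllocationStrategyLater() waits before actually recomputing
// the cache allocation.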
static const unsigned int kReviseAllocationDelayMS = 200 /* milliseconds */;

// The default size limit of the in-memory cache is 8 MB
static const int kDefaultMemoryCacheSize = 8 * 1024 * 1024;

namespace {

int GetDefaultCacheSize() {
  // Start off with a modest default
  int default_cache_size = kDefaultMemoryCacheSize;

  // Check how much physical memory the OS has
  int mem_size_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
  if (mem_size_mb >= 1000)  // If we have a GB of memory, set a larger default.
    default_cache_size *= 4;
  else if (mem_size_mb >= 512)  // With 512 MB, set a slightly larger default.
    default_cache_size *= 2;

  UMA_HISTOGRAM_MEMORY_MB("Cache.MaxCacheSizeMB",
                          default_cache_size / 1024 / 1024);

  return default_cache_size;
}

}  // anonymous namespace

// static
void WebCacheManager::RegisterPrefs(PrefService* prefs) {
  prefs->RegisterIntegerPref(prefs::kMemoryCacheSize, GetDefaultCacheSize());
}

// static
WebCacheManager* WebCacheManager::GetInstance() {
  return Singleton<WebCacheManager>::get();
}

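// ALLOW_THIS_IN_INITIALIZER_LIST silences the compiler warning about using
// |this| in a member initializer list; the factory only runs methods on
// |this| from tasks posted later, so binding it here is safe.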
WebCacheManager::WebCacheManager()
    : global_size_limit_(GetDefaultGlobalSizeLimit()),
      ALLOW_THIS_IN_INITIALIZER_LIST(revise_allocation_factory_(this)) {
}

WebCacheManager::~WebCacheManager() {
}

void WebCacheManager::Add(int renderer_id) {
  DCHECK(inactive_renderers_.count(renderer_id) == 0);

  // It is tempting to make the following DCHECK here, but it fails when a new
  // tab is created as we observe activity from that tab because the
  // RenderProcessHost is recreated and adds itself.
  //
  //   DCHECK(active_renderers_.count(renderer_id) == 0);
  //
  // However, there doesn't seem to be much harm in receiving the calls in this
  // order.

  active_renderers_.insert(renderer_id);

  RendererInfo* stats = &(stats_[renderer_id]);
  memset(stats, 0, sizeof(*stats));
  stats->access = Time::Now();

  // Revise our allocation strategy to account for this new renderer.
  ReviseAllocationStrategyLater();
}

void WebCacheManager::Remove(int renderer_id) {
  // Erase all knowledge of this renderer
  active_renderers_.erase(renderer_id);
  inactive_renderers_.erase(renderer_id);
  stats_.erase(renderer_id);

  // Reallocate the resources used by this renderer
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ObserveActivity(int renderer_id) {
  StatsMap::iterator item = stats_.find(renderer_id);
  if (item == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record activity.
  active_renderers_.insert(renderer_id);
  item->second.access = Time::Now();

  std::set<int>::iterator elmt = inactive_renderers_.find(renderer_id);
  if (elmt != inactive_renderers_.end()) {
    inactive_renderers_.erase(elmt);

    // A renderer that was inactive just became active.  We should make sure
    // it is given a fair cache allocation, but we defer this for a bit in
    // order to make this function call cheap.
    ReviseAllocationStrategyLater();
  }
}

void WebCacheManager::ObserveStats(int renderer_id,
                                   const WebCache::UsageStats& stats) {
  StatsMap::iterator entry = stats_.find(renderer_id);
  if (entry == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record the updated stats.
  entry->second.capacity = stats.capacity;
  entry->second.deadSize = stats.deadSize;
  entry->second.liveSize = stats.liveSize;
  entry->second.maxDeadCapacity = stats.maxDeadCapacity;
  entry->second.minDeadCapacity = stats.minDeadCapacity;

  // Trigger the notification.
  WebCache::UsageStats stats_details(stats);
  // &stats_details is only valid during the notification.
  // See notification_types.h.
  NotificationService::current()->Notify(
      NotificationType::WEB_CACHE_STATS_OBSERVED,
      Source<RenderProcessHost>(RenderProcessHost::FromID(renderer_id)),
      Details<WebCache::UsageStats>(&stats_details));
}

void WebCacheManager::SetGlobalSizeLimit(size_t bytes) {
  global_size_limit_ = bytes;
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ClearCache() {
  // Tell each renderer process to clear the cache.
  ClearRendederCache(active_renderers_);
  ClearRendederCache(inactive_renderers_);
}

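// The global limit comes from the kMemoryCacheSize pref when local state is
// available; otherwise it falls back to the memory-based default.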
// static
size_t WebCacheManager::GetDefaultGlobalSizeLimit() {
  PrefService* pref_service = g_browser_process->local_state();
  if (pref_service)
    return pref_service->GetInteger(prefs::kMemoryCacheSize);

  return GetDefaultCacheSize();
}

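// Sums the most recently reported usage stats of every renderer in
// |renderers| into |stats|.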
void WebCacheManager::GatherStats(const std::set<int>& renderers,
                                  WebCache::UsageStats* stats) {
  DCHECK(stats);

  memset(stats, 0, sizeof(WebCache::UsageStats));

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end()) {
      stats->minDeadCapacity += elmt->second.minDeadCapacity;
      stats->maxDeadCapacity += elmt->second.maxDeadCapacity;
      stats->capacity += elmt->second.capacity;
      stats->liveSize += elmt->second.liveSize;
      stats->deadSize += elmt->second.deadSize;
    }
    ++iter;
  }
}

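// Computes how many bytes |tactic| would reserve for the objects described
// by |stats|.  The *_WITH_HEADROOM tactics add 50% on top of the base size:
// for example, with liveSize = 4 MB and deadSize = 2 MB, KEEP_CURRENT yields
// 6 MB and KEEP_CURRENT_WITH_HEADROOM yields 9 MB.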
// static
size_t WebCacheManager::GetSize(AllocationTactic tactic,
                                const WebCache::UsageStats& stats) {
  switch (tactic) {
  case DIVIDE_EVENLY:
    // We aren't going to reserve any space for existing objects.
    return 0;
  case KEEP_CURRENT_WITH_HEADROOM:
    // We need enough space for our current objects, plus some headroom.
    return 3 * GetSize(KEEP_CURRENT, stats) / 2;
  case KEEP_CURRENT:
    // We need enough space to keep our current objects.
    return stats.liveSize + stats.deadSize;
  case KEEP_LIVE_WITH_HEADROOM:
    // We need enough space to keep our live resources, plus some headroom.
    return 3 * GetSize(KEEP_LIVE, stats) / 2;
  case KEEP_LIVE:
    // We need enough space to keep our live resources.
    return stats.liveSize;
  default:
    NOTREACHED() << "Unknown cache allocation tactic";
    return 0;
  }
}

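// Returns true and appends allocations to |strategy| if |global_size_limit_|
// is large enough to satisfy both tactics; otherwise returns false so the
// caller can fall back to a less generous pair of tactics.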
bool WebCacheManager::AttemptTactic(
    AllocationTactic active_tactic,
    const WebCache::UsageStats& active_stats,
    AllocationTactic inactive_tactic,
    const WebCache::UsageStats& inactive_stats,
    AllocationStrategy* strategy) {
  DCHECK(strategy);

  size_t active_size = GetSize(active_tactic, active_stats);
  size_t inactive_size = GetSize(inactive_tactic, inactive_stats);

  // Give up if we don't have enough space to use this tactic.
  if (global_size_limit_ < active_size + inactive_size)
    return false;

  // Compute the unreserved space available.
  size_t total_extra = global_size_limit_ - (active_size + inactive_size);

  // The plan for the extra space is to divide it evenly among the active
  // renderers.
  size_t shares = active_renderers_.size();

  // The inactive renderers get one share of the extra memory to be divided
  // among themselves.
  size_t inactive_extra = 0;
  if (!inactive_renderers_.empty()) {
    ++shares;
    inactive_extra = total_extra / shares;
  }

  // The remaining memory is allocated to the active renderers.
  size_t active_extra = total_extra - inactive_extra;

  // Actually compute the allocations for each renderer.
  AddToStrategy(active_renderers_, active_tactic, active_extra, strategy);
  AddToStrategy(inactive_renderers_, inactive_tactic, inactive_extra, strategy);

  // We succeeded in computing an allocation strategy.
  return true;
}

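// Appends one allocation per renderer in |renderers|: the space required by
// |tactic| plus an even share of |extra_bytes_to_allocate|.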
void WebCacheManager::AddToStrategy(const std::set<int>& renderers,
                                    AllocationTactic tactic,
                                    size_t extra_bytes_to_allocate,
                                    AllocationStrategy* strategy) {
  DCHECK(strategy);

  // Nothing to do if there are no renderers.  It is common for there to be no
  // inactive renderers if there is a single active tab.
  if (renderers.empty())
    return;

  // Divide the extra memory evenly among the renderers.
  size_t extra_each = extra_bytes_to_allocate / renderers.size();

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    size_t cache_size = extra_each;

    // Add in the space required to implement |tactic|.
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end())
      cache_size += GetSize(tactic, elmt->second);

    // Record the allocation in our strategy.
    strategy->push_back(Allocation(*iter, cache_size));
    ++iter;
  }
}

void WebCacheManager::EnactStrategy(const AllocationStrategy& strategy) {
  // Inform each render process of its cache allocation.
  AllocationStrategy::const_iterator allocation = strategy.begin();
  while (allocation != strategy.end()) {
    RenderProcessHost* host = RenderProcessHost::FromID(allocation->first);
    if (host) {
      // This is the capacity this renderer has been allocated.
      size_t capacity = allocation->second;

      // We don't reserve any space for dead objects in the cache.  Instead, we
      // prefer to keep live objects around.  There is probably some performance
      // tuning to be done here.
      size_t min_dead_capacity = 0;

      // We allow the dead objects to consume all of the cache, if the renderer
      // so desires.  If we wanted this memory, we would have set the total
      // capacity lower.
      size_t max_dead_capacity = capacity;

      host->Send(new ViewMsg_SetCacheCapacities(min_dead_capacity,
                                                max_dead_capacity,
                                                capacity));
    }
    ++allocation;
  }
}

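// Asks each renderer process in |renderers| to clear its cache.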
void WebCacheManager::ClearRendederCache(const std::set<int>& renderers) {
  std::set<int>::const_iterator iter = renderers.begin();
  for (; iter != renderers.end(); ++iter) {
    RenderProcessHost* host = RenderProcessHost::FromID(*iter);
    if (host)
      host->Send(new ViewMsg_ClearCache());
  }
}

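// Recomputes the cache allocation for all known renderers and sends the new
// capacities to their processes.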
void WebCacheManager::ReviseAllocationStrategy() {
  DCHECK(stats_.size() <=
      active_renderers_.size() + inactive_renderers_.size());

  // Check if renderers have gone inactive.
  FindInactiveRenderers();

  // Gather statistics
  WebCache::UsageStats active;
  WebCache::UsageStats inactive;
  GatherStats(active_renderers_, &active);
  GatherStats(inactive_renderers_, &inactive);

  UMA_HISTOGRAM_COUNTS_100("Cache.ActiveTabs", active_renderers_.size());
  UMA_HISTOGRAM_COUNTS_100("Cache.InactiveTabs", inactive_renderers_.size());
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveCapacityMB",
                          active.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveDeadSizeMB",
                          active.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveLiveSizeMB",
                          active.liveSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveCapacityMB",
                          inactive.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveDeadSizeMB",
                          inactive.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveLiveSizeMB",
                          inactive.liveSize / 1024 / 1024);

  // Compute an allocation strategy.
  //
  // We attempt various tactics in order of preference.  Our first preference
  // is not to evict any objects.  If we don't have enough resources, we'll
  // first try to evict dead data only.  If that fails, we'll just divide the
  // resources we have evenly.
  //
  // We always try to give the active renderers some head room in their
  // allocations so they can take memory away from an inactive renderer with
  // a large cache allocation.
  //
  // Notice the early exit will prevent attempting less desirable tactics once
  // we've found a workable strategy.
  AllocationStrategy strategy;
  if (  // Ideally, we'd like to give the active renderers some headroom and
        // keep all our current objects.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_CURRENT, inactive, &strategy) ||
      // If we can't have that, then we first try to evict the dead objects in
      // the caches of inactive renderers.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_LIVE, inactive, &strategy) ||
      // Next, we try to keep the live objects in the active renderers (with
      // some room for new objects) and give whatever is left to the inactive
      // renderers.
      AttemptTactic(KEEP_LIVE_WITH_HEADROOM, active,
                    DIVIDE_EVENLY, inactive, &strategy) ||
      // If we've gotten this far, then we are very tight on memory.  Let's try
      // to at least keep around the live objects for the active renderers.
      AttemptTactic(KEEP_LIVE, active, DIVIDE_EVENLY, inactive, &strategy) ||
      // We're basically out of memory.  The best we can do is just divide up
      // what we have and soldier on.
      AttemptTactic(DIVIDE_EVENLY, active, DIVIDE_EVENLY, inactive,
                    &strategy)) {
    // Having found a workable strategy, we enact it.
    EnactStrategy(strategy);
  } else {
    // DIVIDE_EVENLY / DIVIDE_EVENLY should always succeed.
    NOTREACHED() << "Unable to find a cache allocation";
  }
}

void WebCacheManager::ReviseAllocationStrategyLater() {
  // Ask to be called back in a few milliseconds to actually recompute our
  // allocation.
  MessageLoop::current()->PostDelayedTask(FROM_HERE,
      revise_allocation_factory_.NewRunnableMethod(
          &WebCacheManager::ReviseAllocationStrategy),
      kReviseAllocationDelayMS);
}

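// Moves any renderer that has shown no activity for
// kRendererInactiveThresholdMinutes from the active set to the inactive set.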
void WebCacheManager::FindInactiveRenderers() {
  std::set<int>::const_iterator iter = active_renderers_.begin();
  while (iter != active_renderers_.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    DCHECK(elmt != stats_.end());
    TimeDelta idle = Time::Now() - elmt->second.access;
    if (idle >= TimeDelta::FromMinutes(kRendererInactiveThresholdMinutes)) {
      // Moved to inactive status.  This invalidates our iterator.
      inactive_renderers_.insert(*iter);
      active_renderers_.erase(*iter);
      iter = active_renderers_.begin();
      continue;
    }
    ++iter;
  }
}