Home | History | Annotate | Download | only in memory
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "net/disk_cache/memory/mem_backend_impl.h"
      6 
      7 #include "base/logging.h"
      8 #include "base/sys_info.h"
      9 #include "net/base/net_errors.h"
     10 #include "net/disk_cache/cache_util.h"
     11 #include "net/disk_cache/memory/mem_entry_impl.h"
     12 
     13 using base::Time;
     14 
     15 namespace {
     16 
     17 const int kDefaultInMemoryCacheSize = 10 * 1024 * 1024;
     18 const int kCleanUpMargin = 1024 * 1024;
     19 
     20 int LowWaterAdjust(int high_water) {
     21   if (high_water < kCleanUpMargin)
     22     return 0;
     23 
     24   return high_water - kCleanUpMargin;
     25 }
     26 
     27 }  // namespace
     28 
     29 namespace disk_cache {
     30 
// Starts with no capacity (max_size_ == 0); callers must follow up with
// SetMaxSize() and/or Init() to establish the real limit before use.
MemBackendImpl::MemBackendImpl(net::NetLog* net_log)
    : max_size_(0), current_size_(0), net_log_(net_log), weak_factory_(this) {
}
     34 
     35 MemBackendImpl::~MemBackendImpl() {
     36   EntryMap::iterator it = entries_.begin();
     37   while (it != entries_.end()) {
     38     it->second->Doom();
     39     it = entries_.begin();
     40   }
     41   DCHECK(!current_size_);
     42 }
     43 
     44 // Static.
     45 scoped_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes,
     46                                                   net::NetLog* net_log) {
     47   scoped_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log));
     48   cache->SetMaxSize(max_bytes);
     49   if (cache->Init())
     50     return cache.PassAs<Backend>();
     51 
     52   LOG(ERROR) << "Unable to create cache";
     53   return scoped_ptr<Backend>();
     54 }
     55 
// Picks a cache capacity when the caller did not set one explicitly.
// Always succeeds (returns true).
bool MemBackendImpl::Init() {
  // A non-zero |max_size_| means SetMaxSize() already configured us.
  if (max_size_)
    return true;

  int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();

  // Fall back to the fixed default if physical memory can't be queried.
  if (total_memory <= 0) {
    max_size_ = kDefaultInMemoryCacheSize;
    return true;
  }

  // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
  // reached on systems with more than 2.5 GB of RAM.
  total_memory = total_memory * 2 / 100;
  if (total_memory > kDefaultInMemoryCacheSize * 5)
    max_size_ = kDefaultInMemoryCacheSize * 5;
  else
    max_size_ = static_cast<int32>(total_memory);

  return true;
}
     77 
     78 bool MemBackendImpl::SetMaxSize(int max_bytes) {
     79   COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
     80   if (max_bytes < 0)
     81     return false;
     82 
     83   // Zero size means use the default.
     84   if (!max_bytes)
     85     return true;
     86 
     87   max_size_ = max_bytes;
     88   return true;
     89 }
     90 
     91 void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
     92   // Only parent entries can be passed into this method.
     93   DCHECK(entry->type() == MemEntryImpl::kParentEntry);
     94 
     95   rankings_.Remove(entry);
     96   EntryMap::iterator it = entries_.find(entry->GetKey());
     97   if (it != entries_.end())
     98     entries_.erase(it);
     99   else
    100     NOTREACHED();
    101 
    102   entry->InternalDoom();
    103 }
    104 
// Moves |node| to the most-recently-used position of the eviction list.
void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
  rankings_.UpdateRank(node);
}
    108 
    109 void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
    110   if (old_size >= new_size)
    111     SubstractStorageSize(old_size - new_size);
    112   else
    113     AddStorageSize(new_size - old_size);
    114 }
    115 
// Largest size allowed for a single entry: 1/8 of the total cache capacity.
int MemBackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}
    119 
// Adds |entry| to the eviction (LRU) list.
void MemBackendImpl::InsertIntoRankingList(MemEntryImpl* entry) {
  rankings_.Insert(entry);
}
    123 
// Removes |entry| from the eviction (LRU) list without dooming it.
void MemBackendImpl::RemoveFromRankingList(MemEntryImpl* entry) {
  rankings_.Remove(entry);
}
    127 
// Backend interface: this backend is always an in-memory cache.
net::CacheType MemBackendImpl::GetCacheType() const {
  return net::MEMORY_CACHE;
}
    131 
// Number of entries currently stored (parents only live in |entries_|).
int32 MemBackendImpl::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}
    135 
    136 int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry,
    137                               const CompletionCallback& callback) {
    138   if (OpenEntry(key, entry))
    139     return net::OK;
    140 
    141   return net::ERR_FAILED;
    142 }
    143 
    144 int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry,
    145                                 const CompletionCallback& callback) {
    146   if (CreateEntry(key, entry))
    147     return net::OK;
    148 
    149   return net::ERR_FAILED;
    150 }
    151 
    152 int MemBackendImpl::DoomEntry(const std::string& key,
    153                               const CompletionCallback& callback) {
    154   if (DoomEntry(key))
    155     return net::OK;
    156 
    157   return net::ERR_FAILED;
    158 }
    159 
    160 int MemBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
    161   if (DoomAllEntries())
    162     return net::OK;
    163 
    164   return net::ERR_FAILED;
    165 }
    166 
    167 int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time,
    168                                        const base::Time end_time,
    169                                        const CompletionCallback& callback) {
    170   if (DoomEntriesBetween(initial_time, end_time))
    171     return net::OK;
    172 
    173   return net::ERR_FAILED;
    174 }
    175 
    176 int MemBackendImpl::DoomEntriesSince(const base::Time initial_time,
    177                                      const CompletionCallback& callback) {
    178   if (DoomEntriesSince(initial_time))
    179     return net::OK;
    180 
    181   return net::ERR_FAILED;
    182 }
    183 
    184 class MemBackendImpl::MemIterator : public Backend::Iterator {
    185  public:
    186   explicit MemIterator(base::WeakPtr<MemBackendImpl> backend)
    187       : backend_(backend), current_(NULL) {
    188   }
    189 
    190   virtual int OpenNextEntry(Entry** next_entry,
    191                             const CompletionCallback& callback) OVERRIDE {
    192     if (!backend_)
    193       return net::ERR_FAILED;
    194 
    195     MemEntryImpl* node = backend_->rankings_.GetNext(current_);
    196     // We should never return a child entry so iterate until we hit a parent
    197     // entry.
    198     while (node && node->type() != MemEntryImpl::kParentEntry)
    199       node = backend_->rankings_.GetNext(node);
    200     *next_entry = node;
    201     current_ = node;
    202 
    203     if (node) {
    204       node->Open();
    205       return net::OK;
    206     }
    207     return net::ERR_FAILED;
    208   }
    209 
    210  private:
    211   base::WeakPtr<MemBackendImpl> backend_;
    212   MemEntryImpl* current_;
    213 };
    214 
// Backend interface: hands out an iterator bound to this backend via a
// WeakPtr, so it is safe even if the backend is destroyed mid-iteration.
scoped_ptr<Backend::Iterator> MemBackendImpl::CreateIterator() {
  return scoped_ptr<Backend::Iterator>(
      new MemIterator(weak_factory_.GetWeakPtr()));
}
    219 
    220 void MemBackendImpl::OnExternalCacheHit(const std::string& key) {
    221   EntryMap::iterator it = entries_.find(key);
    222   if (it != entries_.end()) {
    223     UpdateRank(it->second);
    224   }
    225 }
    226 
    227 bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
    228   EntryMap::iterator it = entries_.find(key);
    229   if (it == entries_.end())
    230     return false;
    231 
    232   it->second->Open();
    233 
    234   *entry = it->second;
    235   return true;
    236 }
    237 
    238 bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
    239   EntryMap::iterator it = entries_.find(key);
    240   if (it != entries_.end())
    241     return false;
    242 
    243   MemEntryImpl* cache_entry = new MemEntryImpl(this);
    244   if (!cache_entry->CreateEntry(key, net_log_)) {
    245     delete entry;
    246     return false;
    247   }
    248 
    249   rankings_.Insert(cache_entry);
    250   entries_[key] = cache_entry;
    251 
    252   *entry = cache_entry;
    253   return true;
    254 }
    255 
    256 bool MemBackendImpl::DoomEntry(const std::string& key) {
    257   Entry* entry;
    258   if (!OpenEntry(key, &entry))
    259     return false;
    260 
    261   entry->Doom();
    262   entry->Close();
    263   return true;
    264 }
    265 
// Synchronous doom-all: trims with |empty| == true, which evicts every
// entry regardless of the size target or in-use state.
bool MemBackendImpl::DoomAllEntries() {
  TrimCache(true);
  return true;
}
    270 
// Dooms every entry last used in [initial_time, end_time). A null
// |end_time| means "until now" and is delegated to DoomEntriesSince().
bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
                                        const Time end_time) {
  if (end_time.is_null())
    return DoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  MemEntryImpl* node = rankings_.GetNext(NULL);
  // Last valid entry before |node|. Entries after |node| may become invalid
  // while |node| is doomed, if they are its child entries. A parent is
  // guaranteed to precede its children in the ranking list (see
  // InternalReadSparseData and InternalWriteSparseData), so resuming the
  // walk from |last_valid| is safe.
  MemEntryImpl* last_valid = NULL;

  // rankings_ is ordered by last used, this will descend through the cache
  // and start dooming items before the end_time, and will stop once it reaches
  // an item used before the initial time.
  while (node) {
    if (node->GetLastUsed() < initial_time)
      break;

    if (node->GetLastUsed() < end_time)
      node->Doom();
    else
      last_valid = node;
    node = rankings_.GetNext(last_valid);
  }

  return true;
}
    302 
    303 bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
    304   for (;;) {
    305     // Get the entry in the front.
    306     Entry* entry = rankings_.GetNext(NULL);
    307 
    308     // Break the loop when there are no more entries or the entry is too old.
    309     if (!entry || entry->GetLastUsed() < initial_time)
    310       return true;
    311     entry->Doom();
    312   }
    313 }
    314 
// Evicts least-recently-used entries until |current_size_| drops to the
// target: zero when |empty| is true, otherwise a low-water mark slightly
// below |max_size_|. In-use entries are skipped unless |empty| is set.
void MemBackendImpl::TrimCache(bool empty) {
  MemEntryImpl* next = rankings_.GetPrev(NULL);  // Tail == oldest entry.
  if (!next)
    return;

  int target_size = empty ? 0 : LowWaterAdjust(max_size_);
  while (current_size_ > target_size && next) {
    // Capture the predecessor before dooming: Doom() unlinks |node| from
    // the ranking list, which would otherwise break the walk.
    MemEntryImpl* node = next;
    next = rankings_.GetPrev(next);
    if (!node->InUse() || empty) {
      node->Doom();
    }
  }

  return;
}
    331 
// Grows the bookkeeping total by |bytes|; trims (non-empty mode) if the
// cache is now over budget.
void MemBackendImpl::AddStorageSize(int32 bytes) {
  current_size_ += bytes;
  DCHECK_GE(current_size_, 0);

  if (current_size_ > max_size_)
    TrimCache(false);
}
    339 
// Shrinks the bookkeeping total by |bytes|. NOTE(review): the name keeps
// the historical "Substract" misspelling; renaming would break callers and
// the header declaration, so it is documented rather than changed.
void MemBackendImpl::SubstractStorageSize(int32 bytes) {
  current_size_ -= bytes;
  DCHECK_GE(current_size_, 0);
}
    344 
    345 }  // namespace disk_cache
    346