// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/backend_impl.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/stats_counters.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_format.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/errors.h"
#include "net/disk_cache/experiments.h"
#include "net/disk_cache/file.h"

// This has to be defined before including histogram_macros.h from this file.
#define NET_DISK_CACHE_BACKEND_IMPL_CC_
#include "net/disk_cache/histogram_macros.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const char* kIndexName = "index";

// It seems that ~240 MB corresponds to fewer than 50k entries for 99% of
// users. Note that the actual target is to keep the index table load factor
// under 55% for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
const int kDefaultCacheSize = 80 * 1024 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}
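// For example, for a 500 MB cache DesiredIndexTableLen lands in the third
// tier above (more than 2x but at most 4x k64kEntriesStore), yielding a
// table of kBaseTableLen * 4 = 256k buckets.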

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}
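// As a reference point: disk_cache::CacheAddr is a 32-bit value, so the base
// 64k-bucket table adds 64 * 1024 * 4 bytes (256 KB) on top of the header.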

// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
          "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    }
    return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}

// A callback to perform final cleanup on the background thread.
void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
  backend->CleanupCache();
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

// Returns the preferred maximum number of bytes for the cache given the
// number of available bytes.
int PreferedCacheSize(int64 available) {
  // Return 80% of the available space if there is not enough space to use
  // kDefaultCacheSize.
  if (available < kDefaultCacheSize * 10 / 8)
    return static_cast<int32>(available * 8 / 10);

  // Return kDefaultCacheSize if it would use between 10% and 80% of the
  // available space.
  if (available < kDefaultCacheSize * 10)
    return kDefaultCacheSize;

  // Return 10% of the available space if the target size
  // (2.5 * kDefaultCacheSize) is more than 10%.
  if (available < static_cast<int64>(kDefaultCacheSize) * 25)
    return static_cast<int32>(available / 10);

  // Return the target size (2.5 * kDefaultCacheSize) if it uses 10% to 1%
  // of the available space.
  if (available < static_cast<int64>(kDefaultCacheSize) * 250)
    return kDefaultCacheSize * 5 / 2;

  // Return 1% of the available space if it does not exceed kint32max.
  if (available < static_cast<int64>(kint32max) * 100)
    return static_cast<int32>(available / 100);

  return kint32max;
}
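// A worked example of the heuristic above: with 10 GB available, we are
// between 25 and 250 times kDefaultCacheSize, so the preferred size is the
// fixed target of 2.5 * kDefaultCacheSize (200 MB).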

// ------------------------------------------------------------------------

BackendImpl::BackendImpl(const base::FilePath& path,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::BackendImpl(const base::FilePath& path,
                         uint32 mask,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(mask),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(kMask),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

int BackendImpl::Init(const CompletionCallback& callback) {
  background_queue_.Init(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurring 30 second timer (1 second when running unit tests).
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us, so we had better be
  // enabled by this point.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

// ------------------------------------------------------------------------

int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenPrevEntry(iter, prev_entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = OpenEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = CreateEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  entry->Release();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* node;
  void* iter = NULL;
  EntryImpl* next = OpenNextEntryImpl(&iter);
  if (!next)
    return net::OK;

  while (next) {
    node = next;
    next = OpenNextEntryImpl(&iter);

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Release();
      next = NULL;
      SyncEndEnumeration(iter);
    }

    node->Release();
  }

  return net::OK;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    void* iter = NULL;
    EntryImpl* entry = OpenNextEntryImpl(&iter);
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry->Release();
      SyncEndEnumeration(iter);
      return net::OK;
    }

    entry->DoomImpl();
    entry->Release();
    SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
  }
}

int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
  *next_entry = OpenNextEntryImpl(iter);
  return (*next_entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
  *prev_entry = OpenPrevEntryImpl(iter);
  return (*prev_entry) ? net::OK : net::ERR_FAILED;
}

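// Deletes the iterator allocated by OpenFollowingEntry: transferring the raw
// pointer to a scoped_ptr releases it as soon as this method returns.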
void BackendImpl::SyncEndEnumeration(void* iter) {
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(iter));
}

void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
  if (disabled_)
    return;

  uint32 hash = base::Hash(key);
  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry) {
    if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
      UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    }
    cache_entry->Release();
  }
}

EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
  if (disabled_)
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry->Release();
    cache_entry = NULL;
  }

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  int64 use_hours = total_hours - no_use_hours;

  if (!cache_entry) {
    CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
    CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  eviction_.OnOpenEntry(cache_entry);
  entry_count_++;

  Trace("Open hash 0x%x end: 0x%x", hash,
        cache_entry->entry()->address().value());
  CACHE_UMA(AGE_MS, "OpenTime", 0, start);
  CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
  CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
  CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
  stats_.OnEvent(Stats::OPEN_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.hit");
  return cache_entry;
}

EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
  if (disabled_ || key.empty())
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or just
    // a hash conflict.
    bool error;
    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(old_entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (parent_entry) {
      parent.swap(&parent_entry);
    } else if (data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return NULL;
    }
  }

  // The general flow is to allocate disk space and initialize the entry data,
  // followed by saving that to disk, then linking the entry through the index
  // and finally through the lists. If there is a crash in this process, we may
  // end up with:
  // a. Used, unreferenced empty blocks on disk (basically just garbage).
  // b. Used, unreferenced but meaningful data on disk (more garbage).
  // c. A fully formed entry, reachable only through the index.
  // d. A fully formed entry, also reachable through the lists, but still dirty.
  //
  // Anything after (b) can be automatically cleaned up. We may consider saving
  // the current operation (as we do while manipulating the lists) so that we
  // can detect and clean up (a) and (b).

  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, entry_address, false));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry.get();

  // Save the entry.
  cache_entry->entry()->Store();
  cache_entry->rankings()->Store();
  IncreaseNumEntries();
  entry_count_++;

  // Link this entry through the index.
  if (parent.get()) {
    parent->SetNextAddress(entry_address);
  } else {
    data_->table[hash & mask_] = entry_address.value();
  }

  // Link this entry through the lists.
  eviction_.OnCreateEntry(cache_entry.get());

  CACHE_UMA(AGE_MS, "CreateTime", 0, start);
  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  FlushIndex();
  cache_entry->AddRef();
  return cache_entry.get();
}

EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
  return OpenFollowingEntry(true, iter);
}

EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
  return OpenFollowingEntry(false, iter);
}

bool BackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}

void BackendImpl::SetType(net::CacheType type) {
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}

base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return NULL;
  return block_files_.GetFile(address);
}

base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
  return background_queue_.GetWeakPtr();
}

bool BackendImpl::CreateExternalFile(Addr* address) {
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    base::FilePath name = GetFileName(file_address);
    int flags = base::PLATFORM_FILE_READ |
                base::PLATFORM_FILE_WRITE |
                base::PLATFORM_FILE_CREATE |
                base::PLATFORM_FILE_EXCLUSIVE_WRITE;
    base::PlatformFileError error;
    scoped_refptr<disk_cache::File> file(new disk_cache::File(
        base::CreatePlatformFile(name, flags, NULL, &error)));
    if (!file->IsValid()) {
      if (error != base::PLATFORM_FILE_ERROR_EXISTS) {
        LOG(ERROR) << "Unable to create file: " << error;
        return false;
      }
      continue;
    }

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                             Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
    return;
  eviction_.UpdateRank(entry, modified);
}

void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  EntryImpl* cache_entry = NULL;
  if (NewEntry(address, &cache_entry)) {
    STRESS_NOTREACHED();
    return;
  }

  uint32 hash = cache_entry->GetHash();
  cache_entry->Release();

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
  FlushIndex();
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->entry()->address();
  bool error;
  EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  if (!entry->doomed()) {
    // We may have doomed this entry from within MatchEntry.
    eviction_.OnDoomEntry(entry);
    entry->InternalDoom();
    if (!new_eviction_) {
      DecreaseNumEntries();
    }
    stats_.OnEvent(Stats::DOOM_ENTRY);
  }

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else if (!error) {
    data_->table[hash & mask_] = child;
  }

  FlushIndex();
}

#if defined(NET_BUILD_STRESS_CACHE)

CacheAddr BackendImpl::GetNextAddr(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    EntryImpl* this_entry = it->second;
    return this_entry->GetNextAddress();
  }
  DCHECK(block_files_.IsValid(address));
  DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);

  CacheEntryBlock entry(File(address), address);
  CHECK(entry.Load());
  return entry.Data()->next;
}

void BackendImpl::NotLinked(EntryImpl* entry) {
  Addr entry_addr = entry->entry()->address();
  uint32 i = entry->GetHash() & mask_;
  Addr address(data_->table[i]);
  if (!address.is_initialized())
    return;

  for (;;) {
    DCHECK(entry_addr.value() != address.value());
    address.set_value(GetNextAddr(address));
    if (!address.is_initialized())
      break;
  }
}
#endif  // NET_BUILD_STRESS_CACHE

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
#if defined(NET_BUILD_STRESS_CACHE)
  NotLinked(entry);
#endif
  if (!new_eviction_)
    return;

  DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);

  Trace("Remove entry 0x%p", entry);
  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (data_->header.num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    eviction_.TrimCache(false);
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

int32 BackendImpl::GetCurrentEntryId() const {
  return data_->header.this_id;
}

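// Roughly speaking, an individual entry's data is limited to 1/8 of the total
// cache size; with the default 80 MB cache, for example, that is 10 MB.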
int BackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  FlushIndex();

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(size, 0);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return (num_pending_io_ > 5 || user_load_);
}

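// For example, for the main disk cache (cache_type 0) this produces names
// like "DiskCache.0.OpenTime", or "DiskCache.0.OpenTime_3" when reporting
// for experiment 3.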
std::string BackendImpl::HistogramName(const char* name, int experiment) const {
  if (!experiment)
    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
                            name, experiment);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// We want to remove biases from some histograms so we only send data once per
// week.
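// uma_report_ acts as a tri-state: 0 means we have not decided yet, 1 means
// we checked and should not report this session, and 2 means we checked and
// should report.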
bool BackendImpl::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }
  return false;
}

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.

  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(data_->header.num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((data_->header.num_bytes / 1024) / use_time));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  STRESS_NOTREACHED();
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  stats_.OnEvent(Stats::FATAL_ERROR);
  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImpl::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    StoreStats();
}

void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}

void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}

void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}

int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
  background_queue_.FlushQueue(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::RunTaskForTest(const base::Closure& task,
                                const CompletionCallback& callback) {
  background_queue_.RunTask(task, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimCache(empty);
}

void BackendImpl::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}

int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }

  return CheckAllEntries();
}

void BackendImpl::FlushIndex() {
  if (index_.get() && !disabled_)
    index_->Flush();
}

// ------------------------------------------------------------------------

net::CacheType BackendImpl::GetCacheType() const {
  return cache_type_;
}

int32 BackendImpl::GetEntryCount() const {
  if (!index_.get() || disabled_)
    return 0;
  // num_entries includes entries already evicted.
  int32 not_deleted = data_->header.num_entries -
                      data_->header.lru.sizes[Rankings::DELETED];

  if (not_deleted < 0) {
    NOTREACHED();
    not_deleted = 0;
  }

  return not_deleted;
}

int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.CreateEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntry(const std::string& key,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntry(key, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomAllEntries(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                    const base::Time end_time,
                                    const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesSince(const base::Time initial_time,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesSince(initial_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenNextEntry(iter, next_entry, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::EndEnumeration(void** iter) {
  background_queue_.EndEnumeration(*iter);
  *iter = NULL;
}

void BackendImpl::GetStats(StatsItems* stats) {
  if (disabled_)
    return;

  std::pair<std::string, std::string> item;

  item.first = "Entries";
  item.second = base::StringPrintf("%d", data_->header.num_entries);
  stats->push_back(item);

  item.first = "Pending IO";
  item.second = base::StringPrintf("%d", num_pending_io_);
  stats->push_back(item);

  item.first = "Max size";
  item.second = base::StringPrintf("%d", max_size_);
  stats->push_back(item);

  item.first = "Current size";
  item.second = base::StringPrintf("%d", data_->header.num_bytes);
  stats->push_back(item);

  item.first = "Cache type";
  item.second = "Blockfile Cache";
  stats->push_back(item);

  stats_.GetItems(stats);
}

void BackendImpl::OnExternalCacheHit(const std::string& key) {
  background_queue_.OnExternalCacheHit(key);
}

// ------------------------------------------------------------------------

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!file_util::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created, NULL)));

  if (!file->IsValid())
    return false;

  bool ret = true;
  if (*file_created)
    ret = CreateBackingStore(file.get());

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}

// The maximum cache size will be either set explicitly by the caller, or
// calculated by this code.
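// A sketch of the math below: with 100 GB free, PreferedCacheSize suggests
// 1% of the disk (1 GB), which the temporary 4 * kDefaultCacheSize clamp
// reduces to 320 MB.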
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // If table_len is provided, the index file exists.
  DCHECK(!table_len || data_->header.magic);

  // The user is not setting the size, let's figure it out.
  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
  if (available < 0) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  if (table_len)
    available += data_->header.num_bytes;

  max_size_ = PreferedCacheSize(available);

  // Let's not use more than the default size while we tune up the performance
  // of bigger caches. TODO(rvargas): remove this limit.
  if (max_size_ > kDefaultCacheSize * 4)
    max_size_ = kDefaultCacheSize * 4;

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_ = current_max_size;
}

bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;

    data_->header.stats = address.value();
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

void BackendImpl::StoreStats() {
  int size = stats_.StorageSize();
  scoped_ptr<char[]> data(new char[size]);
  Addr address;
  size = stats_.SerializeStats(data.get(), size, &address);
  DCHECK(size);
  if (!address.is_initialized())
    return;

  MappedFile* file = File(address);
  if (!file)
    return;

  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  file->Write(data.get(), size, offset);  // ignore result.
}

void BackendImpl::RestartCache(bool failure) {
  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
  int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
  int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);

  PrepareForRestart();
  if (failure) {
    DCHECK(!num_refs_);
    DCHECK(!open_entries_.size());
    DelayedCacheCleanup(path_);
  } else {
    DeleteCache(path_, false);
  }

  // Don't call Init() if directed by the unit test: we are simulating a failure
  // trying to re-enable the cache.
  if (unit_test_)
    init_ = true;  // Let the destructor do proper cleanup.
  else if (SyncInit() == net::OK) {
    stats_.SetCounter(Stats::FATAL_ERROR, errors);
    stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
    stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
    stats_.SetCounter(Stats::LAST_REPORT, last_report);
  }
}

void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}

int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // Easy job. This entry is already in memory.
    EntryImpl* this_entry = it->second;
    this_entry->AddRef();
    *entry = this_entry;
    return 0;
  }

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntryV2()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, address, read_only_));
  IncreaseNumRefs();
  *entry = NULL;

  TimeTicks start = TimeTicks::Now();
  if (!cache_entry->entry()->Load())
    return ERR_READ_FAILURE;

  if (IsLoaded()) {
    CACHE_UMA(AGE_MS, "LoadTime", 0, start);
  }

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
                    Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->LoadNodeAddress())
    return ERR_READ_FAILURE;

  if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    // Don't remove this from the list (it is not linked properly). Instead,
    // break the link back to the entry because it is going away, and leave the
    // rankings node to be deleted if we find it through a list.
    rankings_.SetContents(cache_entry->rankings(), 0);
  } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    rankings_.SetContents(cache_entry->rankings(), address.value());
  }

  if (!cache_entry->DataSanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->SetDirtyFlag(0);
    cache_entry->FixForDelete();
  }

  // Prevent overwriting the dirty flag on the destructor.
  cache_entry->SetDirtyFlag(GetCurrentEntryId());

  if (cache_entry->dirty()) {
    Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
          address.value());
  }

  open_entries_[address.value()] = cache_entry.get();

  cache_entry->BeginLogging(net_log_, false);
  cache_entry.swap(entry);
  return 0;
}

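// Walks the collision chain for |hash| looking for |key|. A sketch of the
// contract: returns the matching entry or, when |find_parent| is true, the
// entry that links to it; dirty or unreadable nodes found along the way are
// unlinked from the chain and destroyed.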
   1540 EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
   1541                                    bool find_parent, Addr entry_addr,
   1542                                    bool* match_error) {
   1543   Addr address(data_->table[hash & mask_]);
   1544   scoped_refptr<EntryImpl> cache_entry, parent_entry;
   1545   EntryImpl* tmp = NULL;
   1546   bool found = false;
   1547   std::set<CacheAddr> visited;
   1548   *match_error = false;
   1549 
   1550   for (;;) {
   1551     if (disabled_)
   1552       break;
   1553 
   1554     if (visited.find(address.value()) != visited.end()) {
   1555       // It's possible for a buggy version of the code to write a loop. Just
   1556       // break it.
   1557       Trace("Hash collision loop 0x%x", address.value());
   1558       address.set_value(0);
   1559       parent_entry->SetNextAddress(address);
   1560     }
   1561     visited.insert(address.value());
   1562 
   1563     if (!address.is_initialized()) {
   1564       if (find_parent)
   1565         found = true;
   1566       break;
   1567     }
   1568 
   1569     int error = NewEntry(address, &tmp);
   1570     cache_entry.swap(&tmp);
   1571 
   1572     if (error || cache_entry->dirty()) {
   1573       // This entry is dirty on disk (it was not properly closed): we cannot
   1574       // trust it.
   1575       Addr child(0);
   1576       if (!error)
   1577         child.set_value(cache_entry->GetNextAddress());
   1578 
   1579       if (parent_entry.get()) {
   1580         parent_entry->SetNextAddress(child);
   1581         parent_entry = NULL;
   1582       } else {
   1583         data_->table[hash & mask_] = child.value();
   1584       }
   1585 
   1586       Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
   1587             address.value());
   1588 
   1589       if (!error) {
   1590         // It is important to call DestroyInvalidEntry after removing this
   1591         // entry from the table.
   1592         DestroyInvalidEntry(cache_entry.get());
   1593         cache_entry = NULL;
   1594       } else {
   1595         Trace("NewEntry failed on MatchEntry 0x%x", address.value());
   1596       }
   1597 
   1598       // Restart the search.
   1599       address.set_value(data_->table[hash & mask_]);
   1600       visited.clear();
   1601       continue;
   1602     }
   1603 
   1604     DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
   1605     if (cache_entry->IsSameEntry(key, hash)) {
   1606       if (!cache_entry->Update())
   1607         cache_entry = NULL;
   1608       found = true;
   1609       if (find_parent && entry_addr.value() != address.value()) {
   1610         Trace("Entry not on the index 0x%x", address.value());
   1611         *match_error = true;
   1612         parent_entry = NULL;
   1613       }
   1614       break;
   1615     }
   1616     if (!cache_entry->Update())
   1617       cache_entry = NULL;
   1618     parent_entry = cache_entry;
   1619     cache_entry = NULL;
   1620     if (!parent_entry.get())
   1621       break;
   1622 
   1623     address.set_value(parent_entry->GetNextAddress());
   1624   }
   1625 
   1626   if (parent_entry.get() && (!find_parent || !found))
   1627     parent_entry = NULL;
   1628 
   1629   if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
   1630     *match_error = true;
   1631     parent_entry = NULL;
   1632   }
   1633 
   1634   if (cache_entry.get() && (find_parent || !found))
   1635     cache_entry = NULL;
   1636 
   1637   find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
   1638   FlushIndex();
   1639   return tmp;
   1640 }
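
        // A minimal sketch of what MatchEntry() walks: each index slot heads a
        // singly linked chain of entries whose hashes share the low mask_
        // bits, linked through each entry's next address:
        //
        //   data_->table[hash & mask_] -> entry A -> entry B -> 0
        //
        // Dirty or unreadable links are spliced out of the chain as they are
        // found, and the search then restarts from the table slot.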
   1641 
   1642 // This is the actual implementation for OpenNextEntry and OpenPrevEntry.
   1643 EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
   1644   if (disabled_)
   1645     return NULL;
   1646 
   1647   DCHECK(iter);
   1648 
   1649   const int kListsToSearch = 3;
   1650   scoped_refptr<EntryImpl> entries[kListsToSearch];
   1651   scoped_ptr<Rankings::Iterator> iterator(
   1652       reinterpret_cast<Rankings::Iterator*>(*iter));
   1653   *iter = NULL;
   1654 
   1655   if (!iterator.get()) {
   1656     iterator.reset(new Rankings::Iterator(&rankings_));
   1657     bool ret = false;
   1658 
   1659     // Get an entry from each list.
   1660     for (int i = 0; i < kListsToSearch; i++) {
   1661       EntryImpl* temp = NULL;
   1662       ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
   1663                                         &iterator->nodes[i], &temp);
   1664       entries[i].swap(&temp);  // The entry was already addref'd.
   1665     }
   1666     if (!ret)
   1667       return NULL;
   1668   } else {
   1669     // Get the next entry from the last list, and the actual entries for the
   1670     // elements on the other lists.
   1671     for (int i = 0; i < kListsToSearch; i++) {
   1672       EntryImpl* temp = NULL;
   1673       if (iterator->list == i) {
   1674         OpenFollowingEntryFromList(forward, iterator->list,
   1675                                    &iterator->nodes[i], &temp);
   1676       } else {
   1677         temp = GetEnumeratedEntry(iterator->nodes[i],
   1678                                   static_cast<Rankings::List>(i));
   1679       }
   1680 
   1681       entries[i].swap(&temp);  // The entry was already addref'd.
   1682     }
   1683   }
   1684 
   1685   int newest = -1;
   1686   int oldest = -1;
   1687   Time access_times[kListsToSearch];
   1688   for (int i = 0; i < kListsToSearch; i++) {
   1689     if (entries[i].get()) {
   1690       access_times[i] = entries[i]->GetLastUsed();
   1691       if (newest < 0) {
   1692         DCHECK_LT(oldest, 0);
   1693         newest = oldest = i;
   1694         continue;
   1695       }
   1696       if (access_times[i] > access_times[newest])
   1697         newest = i;
   1698       if (access_times[i] < access_times[oldest])
   1699         oldest = i;
   1700     }
   1701   }
   1702 
   1703   if (newest < 0 || oldest < 0)
   1704     return NULL;
   1705 
   1706   EntryImpl* next_entry;
   1707   if (forward) {
   1708     next_entry = entries[newest].get();
   1709     iterator->list = static_cast<Rankings::List>(newest);
   1710   } else {
   1711     next_entry = entries[oldest].get();
   1712     iterator->list = static_cast<Rankings::List>(oldest);
   1713   }
   1714 
   1715   *iter = iterator.release();
   1716   next_entry->AddRef();
   1717   return next_entry;
   1718 }
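
        // Worked example for OpenFollowingEntry() (hypothetical timestamps):
        // if the three lists yield entries last used at 09:50, 10:05 and
        // 10:00, a forward enumeration returns the 10:05 entry and records its
        // list in the iterator, so the next call advances only that list; a
        // backward enumeration returns the 09:50 entry instead.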
   1719 
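        // Moves one step along |list|. On success, *from_entry is replaced by
        // the next rankings block (ownership passes to the caller) and
        // *next_entry receives an already-AddRef'd entry; on failure,
        // *from_entry is left NULL and the previous block has been freed.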
   1720 bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
   1721                                              CacheRankingsBlock** from_entry,
   1722                                              EntryImpl** next_entry) {
   1723   if (disabled_)
   1724     return false;
   1725 
   1726   if (!new_eviction_ && Rankings::NO_USE != list)
   1727     return false;
   1728 
   1729   Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
   1730   CacheRankingsBlock* next_block = forward ?
   1731       rankings_.GetNext(rankings.get(), list) :
   1732       rankings_.GetPrev(rankings.get(), list);
   1733   Rankings::ScopedRankingsBlock next(&rankings_, next_block);
   1734   *from_entry = NULL;
   1735 
   1736   *next_entry = GetEnumeratedEntry(next.get(), list);
   1737   if (!*next_entry)
   1738     return false;
   1739 
   1740   *from_entry = next.release();
   1741   return true;
   1742 }
   1743 
   1744 EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
   1745                                            Rankings::List list) {
   1746   if (!next || disabled_)
   1747     return NULL;
   1748 
   1749   EntryImpl* entry;
   1750   int rv = NewEntry(Addr(next->Data()->contents), &entry);
   1751   if (rv) {
   1752     STRESS_NOTREACHED();
   1753     rankings_.Remove(next, list, false);
   1754     if (rv == ERR_INVALID_ADDRESS) {
   1755       // There is nothing linked from the index. Delete the rankings node.
   1756       DeleteBlock(next->address(), true);
   1757     }
   1758     return NULL;
   1759   }
   1760 
   1761   if (entry->dirty()) {
   1762     // We cannot trust this entry.
   1763     InternalDoomEntry(entry);
   1764     entry->Release();
   1765     return NULL;
   1766   }
   1767 
   1768   if (!entry->Update()) {
   1769     STRESS_NOTREACHED();
   1770     entry->Release();
   1771     return NULL;
   1772   }
   1773 
   1774   // Note that it is unfortunate (but possible) for this entry to be clean, but
   1775   // not actually the real entry. In other words, we could have lost this entry
   1776   // from the index, and it could have been replaced with a newer one. It's not
   1777   // worth checking that this entry is "the real one", so we just return it and
   1778   // let the enumeration continue; this entry will be evicted at some point, and
   1779   // the regular path will work with the real entry. With time, this problem
   1780   // will disappear because this scenario is just a bug.
   1781 
   1782   // Make sure that we save the key for later.
   1783   entry->GetKey();
   1784 
   1785   return entry;
   1786 }
   1787 
   1788 EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
   1789   if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
   1790     deleted_entry->Release();
   1791     stats_.OnEvent(Stats::CREATE_MISS);
   1792     Trace("create entry miss ");
   1793     return NULL;
   1794   }
   1795 
   1796   // We are attempting to create an entry and found out that the entry was
   1797   // previously deleted.
   1798 
   1799   eviction_.OnCreateEntry(deleted_entry);
   1800   entry_count_++;
   1801 
   1802   stats_.OnEvent(Stats::RESURRECT_HIT);
   1803   Trace("Resurrect entry hit ");
   1804   return deleted_entry;
   1805 }
   1806 
   1807 void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
   1808   LOG(WARNING) << "Destroying invalid entry.";
   1809   Trace("Destroying invalid entry 0x%p", entry);
   1810 
   1811   entry->SetPointerForInvalidEntry(GetCurrentEntryId());
   1812 
   1813   eviction_.OnDoomEntry(entry);
   1814   entry->InternalDoom();
   1815 
   1816   if (!new_eviction_)
   1817     DecreaseNumEntries();
   1818   stats_.OnEvent(Stats::INVALID_ENTRY);
   1819 }
   1820 
   1821 void BackendImpl::AddStorageSize(int32 bytes) {
   1822   data_->header.num_bytes += bytes;
   1823   DCHECK_GE(data_->header.num_bytes, 0);
   1824 }
   1825 
   1826 void BackendImpl::SubstractStorageSize(int32 bytes) {
   1827   data_->header.num_bytes -= bytes;
   1828   DCHECK_GE(data_->header.num_bytes, 0);
   1829 }
   1830 
   1831 void BackendImpl::IncreaseNumRefs() {
   1832   num_refs_++;
   1833   if (max_refs_ < num_refs_)
   1834     max_refs_ = num_refs_;
   1835 }
   1836 
   1837 void BackendImpl::DecreaseNumRefs() {
   1838   DCHECK(num_refs_);
   1839   num_refs_--;
   1840 
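          // A disabled backend cannot be restarted while entries are still
          // referenced, so the last reference to go away schedules the
          // restart.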
   1841   if (!num_refs_ && disabled_)
   1842     base::MessageLoop::current()->PostTask(
   1843         FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
   1844 }
   1845 
   1846 void BackendImpl::IncreaseNumEntries() {
   1847   data_->header.num_entries++;
   1848   DCHECK_GT(data_->header.num_entries, 0);
   1849 }
   1850 
   1851 void BackendImpl::DecreaseNumEntries() {
   1852   data_->header.num_entries--;
   1853   if (data_->header.num_entries < 0) {
   1854     NOTREACHED();
   1855     data_->header.num_entries = 0;
   1856   }
   1857 }
   1858 
   1859 void BackendImpl::LogStats() {
   1860   StatsItems stats;
   1861   GetStats(&stats);
   1862 
   1863   for (size_t index = 0; index < stats.size(); index++)
   1864     VLOG(1) << stats[index].first << ": " << stats[index].second;
   1865 }
   1866 
   1867 void BackendImpl::ReportStats() {
   1868   CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);
   1869 
   1870   int current_size = data_->header.num_bytes / (1024 * 1024);
   1871   int max_size = max_size_ / (1024 * 1024);
   1872   int hit_ratio_as_percentage = stats_.GetHitRatio();
   1873 
   1874   CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
   1875   // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the
   1876   // ratio of that bin's total count to the count in the same bin in the Size2
   1877   // histogram.
   1878   if (base::RandInt(0, 99) < hit_ratio_as_percentage)
   1879     CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
   1880   CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
   1881   if (!max_size)
   1882     max_size++;
   1883   CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);
   1884 
   1885   CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
   1886             static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
   1887   CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
   1888             static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
   1889   stats_.SetCounter(Stats::MAX_ENTRIES, 0);
   1890 
   1891   CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
   1892             static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
   1893   CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
   1894             static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
   1895   CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
   1896             static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
   1897   stats_.SetCounter(Stats::FATAL_ERROR, 0);
   1898   stats_.SetCounter(Stats::DOOM_CACHE, 0);
   1899   stats_.SetCounter(Stats::DOOM_RECENT, 0);
   1900 
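          // Stats::TIMER counts timer ticks; dividing by 120 assumes a
          // 30-second tick, i.e. 120 ticks per hour.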
   1901   int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
   1902   if (!data_->header.create_time || !data_->header.lru.filled) {
   1903     int cause = data_->header.create_time ? 0 : 1;
   1904     if (!data_->header.lru.filled)
   1905       cause |= 2;
   1906     CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
   1907     CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
   1908     return;
   1909   }
   1910 
   1911   // This is an up-to-date client that will report FirstEviction() data. After
   1912   // that event, it starts reporting the following:
   1913 
   1914   CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
   1915   // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
   1916   // time is the ratio of that bin's total count to the count in the same bin in
   1917   // the TotalTime histogram.
   1918   if (base::RandInt(0, 99) < hit_ratio_as_percentage)
   1919     CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));
   1920 
   1921   int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
   1922   stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
   1923 
   1924   // We may see users with no use_hours at this point if this is the first time
   1925   // we are running this code.
   1926   if (use_hours)
   1927     use_hours = total_hours - use_hours;
   1928 
   1929   if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
   1930     return;
   1931 
   1932   CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
   1933   // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
   1934   // is the ratio of that bin's total count to the count in the same bin in the
   1935   // UseTime histogram.
   1936   if (base::RandInt(0, 99) < hit_ratio_as_percentage)
   1937     CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
   1938   CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);
   1939 
   1940   int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
   1941   CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));
   1942 
   1943   int avg_size = data_->header.num_bytes / GetEntryCount();
   1944   CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
   1945   CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);
   1946 
   1947   CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
   1948             data_->header.num_entries * 100 / (mask_ + 1));
   1949 
   1950   int large_entries_bytes = stats_.GetLargeEntriesSize();
   1951   int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
   1952   CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);
   1953 
   1954   if (new_eviction_) {
   1955     CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
   1956     CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
   1957               data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
   1958     CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
   1959               data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
   1960     CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
   1961               data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
   1962     CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
   1963               data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
   1964   }
   1965 
   1966   stats_.ResetRatios();
   1967   stats_.SetCounter(Stats::TRIM_ENTRY, 0);
   1968 
   1969   if (cache_type_ == net::DISK_CACHE)
   1970     block_files_.ReportStats();
   1971 }
   1972 
   1973 void BackendImpl::UpgradeTo2_1() {
   1974   // 2.1 is basically the same as 2.0, except that new fields are actually
   1975   // updated by the new eviction algorithm.
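          // Versions are encoded major.minor in the high/low 16 bits, so
          // 0x20000 is 2.0 and 0x20001 is 2.1.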
   1976   DCHECK(0x20000 == data_->header.version);
   1977   data_->header.version = 0x20001;
   1978   data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
   1979 }
   1980 
   1981 bool BackendImpl::CheckIndex() {
   1982   DCHECK(data_);
   1983 
   1984   size_t current_size = index_->GetLength();
   1985   if (current_size < sizeof(Index)) {
   1986     LOG(ERROR) << "Corrupt Index file";
   1987     return false;
   1988   }
   1989 
   1990   if (new_eviction_) {
   1991     // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
   1992     if (kIndexMagic != data_->header.magic ||
   1993         kCurrentVersion >> 16 != data_->header.version >> 16) {
   1994       LOG(ERROR) << "Invalid file version or magic";
   1995       return false;
   1996     }
   1997     if (kCurrentVersion == data_->header.version) {
   1998       // We need file version 2.1 for the new eviction algorithm.
   1999       UpgradeTo2_1();
   2000     }
   2001   } else {
   2002     if (kIndexMagic != data_->header.magic ||
   2003         kCurrentVersion != data_->header.version) {
   2004       LOG(ERROR) << "Invalid file version or magic";
   2005       return false;
   2006     }
   2007   }
   2008 
   2009   if (!data_->header.table_len) {
   2010     LOG(ERROR) << "Invalid table size";
   2011     return false;
   2012   }
   2013 
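          // kBaseTableLen is a power of two, so the bitmask test below rejects
          // any table_len that is not a multiple of it.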
   2014   if (current_size < GetIndexSize(data_->header.table_len) ||
   2015       data_->header.table_len & (kBaseTableLen - 1)) {
   2016     LOG(ERROR) << "Corrupt Index file";
   2017     return false;
   2018   }
   2019 
   2020   AdjustMaxCacheSize(data_->header.table_len);
   2021 
   2022 #if !defined(NET_BUILD_STRESS_CACHE)
   2023   if (data_->header.num_bytes < 0 ||
   2024       (max_size_ < kint32max - kDefaultCacheSize &&
   2025        data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
   2026     LOG(ERROR) << "Invalid cache (current) size";
   2027     return false;
   2028   }
   2029 #endif
   2030 
   2031   if (data_->header.num_entries < 0) {
   2032     LOG(ERROR) << "Invalid number of entries";
   2033     return false;
   2034   }
   2035 
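          // The table lengths this backend creates are powers of two, so
          // table_len - 1 acts as an all-ones mask for indexing data_->table.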
   2036   if (!mask_)
   2037     mask_ = data_->header.table_len - 1;
   2038 
   2039   // Load the table into memory with a single read.
   2040   scoped_ptr<char[]> buf(new char[current_size]);
   2041   return index_->Read(buf.get(), current_size, 0);
   2042 }
   2043 
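        // Walks every hash chain in the index, validating each entry. Returns
        // the number of dirty entries found, or an error code on failure.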
   2044 int BackendImpl::CheckAllEntries() {
   2045   int num_dirty = 0;
   2046   int num_entries = 0;
   2047   DCHECK(mask_ < kuint32max);
   2048   for (unsigned int i = 0; i <= mask_; i++) {
   2049     Addr address(data_->table[i]);
   2050     if (!address.is_initialized())
   2051       continue;
   2052     for (;;) {
   2053       EntryImpl* tmp;
   2054       int ret = NewEntry(address, &tmp);
   2055       if (ret) {
   2056         STRESS_NOTREACHED();
   2057         return ret;
   2058       }
   2059       scoped_refptr<EntryImpl> cache_entry;
   2060       cache_entry.swap(&tmp);
   2061 
   2062       if (cache_entry->dirty())
   2063         num_dirty++;
   2064       else if (CheckEntry(cache_entry.get()))
   2065         num_entries++;
   2066       else
   2067         return ERR_INVALID_ENTRY;
   2068 
   2069       DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
   2070       address.set_value(cache_entry->GetNextAddress());
   2071       if (!address.is_initialized())
   2072         break;
   2073     }
   2074   }
   2075 
   2076   Trace("CheckAllEntries End");
   2077   if (num_entries + num_dirty != data_->header.num_entries) {
   2078     LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
   2079                   " " << data_->header.num_entries;
   2080     DCHECK_LT(num_entries, data_->header.num_entries);
   2081     return ERR_NUM_ENTRIES_MISMATCH;
   2082   }
   2083 
   2084   return num_dirty;
   2085 }
   2086 
   2087 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
   2088   bool ok = block_files_.IsValid(cache_entry->entry()->address());
   2089   ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
   2090   EntryStore* data = cache_entry->entry()->Data();
   2091   for (size_t i = 0; i < arraysize(data->data_addr); i++) {
   2092     if (data->data_addr[i]) {
   2093       Addr address(data->data_addr[i]);
   2094       if (address.is_block_file())
   2095         ok = ok && block_files_.IsValid(address);
   2096     }
   2097   }
   2098 
   2099   return ok && cache_entry->rankings()->VerifyHash();
   2100 }
   2101 
   2102 int BackendImpl::MaxBuffersSize() {
   2103   static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
   2104   static bool done = false;
   2105 
   2106   if (!done) {
   2107     const int kMaxBuffersSize = 30 * 1024 * 1024;
   2108 
   2109     // We want to use up to 2% of the computer's memory.
   2110     total_memory = total_memory * 2 / 100;
   2111     if (total_memory > kMaxBuffersSize || total_memory <= 0)
   2112       total_memory = kMaxBuffersSize;
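            // e.g., 1 GB of RAM yields about 21 MB of buffers; from roughly
            // 1.5 GB upward the 30 MB cap applies.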
   2113 
   2114     done = true;
   2115   }
   2116 
   2117   return static_cast<int>(total_memory);
   2118 }
   2119 
   2120 }  // namespace disk_cache
   2121