      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "net/disk_cache/blockfile/backend_impl.h"
      6 
      7 #include "base/bind.h"
      8 #include "base/bind_helpers.h"
      9 #include "base/files/file.h"
     10 #include "base/files/file_path.h"
     11 #include "base/files/file_util.h"
     12 #include "base/hash.h"
     13 #include "base/message_loop/message_loop.h"
     14 #include "base/metrics/field_trial.h"
     15 #include "base/metrics/histogram.h"
     16 #include "base/metrics/stats_counters.h"
     17 #include "base/rand_util.h"
     18 #include "base/single_thread_task_runner.h"
     19 #include "base/strings/string_util.h"
     20 #include "base/strings/stringprintf.h"
     21 #include "base/sys_info.h"
     22 #include "base/threading/thread_restrictions.h"
     23 #include "base/time/time.h"
     24 #include "base/timer/timer.h"
     25 #include "net/base/net_errors.h"
     26 #include "net/disk_cache/blockfile/disk_format.h"
     27 #include "net/disk_cache/blockfile/entry_impl.h"
     28 #include "net/disk_cache/blockfile/errors.h"
     29 #include "net/disk_cache/blockfile/experiments.h"
     30 #include "net/disk_cache/blockfile/file.h"
     31 #include "net/disk_cache/blockfile/histogram_macros.h"
     32 #include "net/disk_cache/blockfile/webfonts_histogram.h"
     33 #include "net/disk_cache/cache_util.h"
     34 
     35 // Provide a BackendImpl object to macros from histogram_macros.h.
     36 #define CACHE_UMA_BACKEND_IMPL_OBJ this
     37 
     38 using base::Time;
     39 using base::TimeDelta;
     40 using base::TimeTicks;
     41 
     42 namespace {
     43 
     44 const char* kIndexName = "index";
     45 
      46 // Roughly 240 MB corresponds to fewer than 50k entries for 99% of users.
     47 // Note that the actual target is to keep the index table load factor under 55%
     48 // for most users.
     49 const int k64kEntriesStore = 240 * 1000 * 1000;
     50 const int kBaseTableLen = 64 * 1024;
     51 
     52 // Avoid trimming the cache for the first 5 minutes (10 timer ticks).
     53 const int kTrimDelay = 10;
     54 
     55 int DesiredIndexTableLen(int32 storage_size) {
     56   if (storage_size <= k64kEntriesStore)
     57     return kBaseTableLen;
     58   if (storage_size <= k64kEntriesStore * 2)
     59     return kBaseTableLen * 2;
     60   if (storage_size <= k64kEntriesStore * 4)
     61     return kBaseTableLen * 4;
     62   if (storage_size <= k64kEntriesStore * 8)
     63     return kBaseTableLen * 8;
     64 
     65   // The biggest storage_size for int32 requires a 4 MB table.
     66   return kBaseTableLen * 16;
     67 }
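         // As an illustration of the mapping above: a 300 MB cache falls in
         // the second bucket (240 MB < size <= 480 MB) and gets a 128k-entry
         // table, which at the 55% target load factor leaves room for roughly
         // 70k entries.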
     68 
     69 int MaxStorageSizeForTable(int table_len) {
     70   return table_len * (k64kEntriesStore / kBaseTableLen);
     71 }
     72 
     73 size_t GetIndexSize(int table_len) {
     74   size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
     75   return sizeof(disk_cache::IndexHeader) + table_size;
     76 }
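         // With the base 64k-entry table and 4-byte cache addresses the index
         // is 256 KB of table plus the fixed IndexHeader; the largest table
         // (1M entries) gives the 4 MB figure mentioned above.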
     77 
     78 // ------------------------------------------------------------------------
     79 
     80 // Sets group for the current experiment. Returns false if the files should be
     81 // discarded.
     82 bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
     83   if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
     84       header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
     85     // Discard current cache.
     86     return false;
     87   }
     88 
     89   if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
     90           "ExperimentControl") {
     91     if (cache_created) {
     92       header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
     93       return true;
     94     }
     95     return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
     96   }
     97 
     98   header->experiment = disk_cache::NO_EXPERIMENT;
     99   return true;
    100 }
    101 
    102 // A callback to perform final cleanup on the background thread.
    103 void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
    104   backend->CleanupCache();
    105 }
    106 
    107 }  // namespace
    108 
    109 // ------------------------------------------------------------------------
    110 
    111 namespace disk_cache {
    112 
    113 BackendImpl::BackendImpl(
    114     const base::FilePath& path,
    115     const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    116     net::NetLog* net_log)
    117     : background_queue_(this, cache_thread),
    118       path_(path),
    119       block_files_(path),
    120       mask_(0),
    121       max_size_(0),
    122       up_ticks_(0),
    123       cache_type_(net::DISK_CACHE),
    124       uma_report_(0),
    125       user_flags_(0),
    126       init_(false),
    127       restarted_(false),
    128       unit_test_(false),
    129       read_only_(false),
    130       disabled_(false),
    131       new_eviction_(false),
    132       first_timer_(true),
    133       user_load_(false),
    134       net_log_(net_log),
    135       done_(true, false),
    136       ptr_factory_(this) {
    137 }
    138 
    139 BackendImpl::BackendImpl(
    140     const base::FilePath& path,
    141     uint32 mask,
    142     const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    143     net::NetLog* net_log)
    144     : background_queue_(this, cache_thread),
    145       path_(path),
    146       block_files_(path),
    147       mask_(mask),
    148       max_size_(0),
    149       up_ticks_(0),
    150       cache_type_(net::DISK_CACHE),
    151       uma_report_(0),
    152       user_flags_(kMask),
    153       init_(false),
    154       restarted_(false),
    155       unit_test_(false),
    156       read_only_(false),
    157       disabled_(false),
    158       new_eviction_(false),
    159       first_timer_(true),
    160       user_load_(false),
    161       net_log_(net_log),
    162       done_(true, false),
    163       ptr_factory_(this) {
    164 }
    165 
    166 BackendImpl::~BackendImpl() {
    167   if (user_flags_ & kNoRandom) {
    168     // This is a unit test, so we want to be strict about not leaking entries
    169     // and completing all the work.
    170     background_queue_.WaitForPendingIO();
    171   } else {
    172     // This is most likely not a test, so we want to do as little work as
    173     // possible at this time, at the price of leaving dirty entries behind.
    174     background_queue_.DropPendingIO();
    175   }
    176 
    177   if (background_queue_.BackgroundIsCurrentThread()) {
    178     // Unit tests may use the same thread for everything.
    179     CleanupCache();
    180   } else {
    181     background_queue_.background_thread()->PostTask(
    182         FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    183     // http://crbug.com/74623
    184     base::ThreadRestrictions::ScopedAllowWait allow_wait;
    185     done_.Wait();
    186   }
    187 }
    188 
    189 int BackendImpl::Init(const CompletionCallback& callback) {
    190   background_queue_.Init(callback);
    191   return net::ERR_IO_PENDING;
    192 }
    193 
    194 int BackendImpl::SyncInit() {
    195 #if defined(NET_BUILD_STRESS_CACHE)
    196   // Start evictions right away.
    197   up_ticks_ = kTrimDelay * 2;
    198 #endif
    199   DCHECK(!init_);
    200   if (init_)
    201     return net::ERR_FAILED;
    202 
    203   bool create_files = false;
    204   if (!InitBackingStore(&create_files)) {
    205     ReportError(ERR_STORAGE_ERROR);
    206     return net::ERR_FAILED;
    207   }
    208 
    209   num_refs_ = num_pending_io_ = max_refs_ = 0;
    210   entry_count_ = byte_count_ = 0;
    211 
    212   bool should_create_timer = false;
    213   if (!restarted_) {
    214     buffer_bytes_ = 0;
    215     trace_object_ = TraceObject::GetTraceObject();
    216     should_create_timer = true;
    217   }
    218 
    219   init_ = true;
    220   Trace("Init");
    221 
    222   if (data_->header.experiment != NO_EXPERIMENT &&
    223       cache_type_ != net::DISK_CACHE) {
    224     // No experiment for other caches.
    225     return net::ERR_FAILED;
    226   }
    227 
    228   if (!(user_flags_ & kNoRandom)) {
    229     // The unit test controls directly what to test.
    230     new_eviction_ = (cache_type_ == net::DISK_CACHE);
    231   }
    232 
    233   if (!CheckIndex()) {
    234     ReportError(ERR_INIT_FAILED);
    235     return net::ERR_FAILED;
    236   }
    237 
    238   if (!restarted_ && (create_files || !data_->header.num_entries))
    239     ReportError(ERR_CACHE_CREATED);
    240 
    241   if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
    242       !InitExperiment(&data_->header, create_files)) {
    243     return net::ERR_FAILED;
    244   }
    245 
    246   // We don't care if the value overflows. The only thing we care about is that
    247   // the id cannot be zero, because that value is used as "not dirty".
    248   // Increasing the value once per second gives us many years before we start
    249   // having collisions.
    250   data_->header.this_id++;
    251   if (!data_->header.this_id)
    252     data_->header.this_id++;
    253 
    254   bool previous_crash = (data_->header.crash != 0);
    255   data_->header.crash = 1;
    256 
    257   if (!block_files_.Init(create_files))
    258     return net::ERR_FAILED;
    259 
    260   // We want to minimize the changes to cache for an AppCache.
    261   if (cache_type() == net::APP_CACHE) {
    262     DCHECK(!new_eviction_);
    263     read_only_ = true;
    264   } else if (cache_type() == net::SHADER_CACHE) {
    265     DCHECK(!new_eviction_);
    266   }
    267 
    268   eviction_.Init(this);
    269 
     270   // stats_ and rankings_ may end up calling back to us, so we had better be enabled.
    271   disabled_ = false;
    272   if (!InitStats())
    273     return net::ERR_FAILED;
    274 
    275   disabled_ = !rankings_.Init(this, new_eviction_);
    276 
    277 #if defined(STRESS_CACHE_EXTENDED_VALIDATION)
    278   trace_object_->EnableTracing(false);
    279   int sc = SelfCheck();
    280   if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    281     NOTREACHED();
    282   trace_object_->EnableTracing(true);
    283 #endif
    284 
    285   if (previous_crash) {
    286     ReportError(ERR_PREVIOUS_CRASH);
    287   } else if (!restarted_) {
    288     ReportError(ERR_NO_ERROR);
    289   }
    290 
    291   FlushIndex();
    292 
    293   if (!disabled_ && should_create_timer) {
     294     // Create a recurring timer: 30 seconds normally, 1 second in unit tests.
    295     int timer_delay = unit_test_ ? 1000 : 30000;
    296     timer_.reset(new base::RepeatingTimer<BackendImpl>());
    297     timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
    298                   &BackendImpl::OnStatsTimer);
    299   }
    300 
    301   return disabled_ ? net::ERR_FAILED : net::OK;
    302 }
    303 
    304 void BackendImpl::CleanupCache() {
    305   Trace("Backend Cleanup");
    306   eviction_.Stop();
    307   timer_.reset();
    308 
    309   if (init_) {
    310     StoreStats();
    311     if (data_)
    312       data_->header.crash = 0;
    313 
    314     if (user_flags_ & kNoRandom) {
    315       // This is a net_unittest, verify that we are not 'leaking' entries.
    316       File::WaitForPendingIO(&num_pending_io_);
    317       DCHECK(!num_refs_);
    318     } else {
    319       File::DropPendingIO();
    320     }
    321   }
    322   block_files_.CloseFiles();
    323   FlushIndex();
    324   index_ = NULL;
    325   ptr_factory_.InvalidateWeakPtrs();
    326   done_.Signal();
    327 }
    328 
    329 // ------------------------------------------------------------------------
    330 
    331 int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
    332   DCHECK(entry);
    333   *entry = OpenEntryImpl(key);
    334   return (*entry) ? net::OK : net::ERR_FAILED;
    335 }
    336 
    337 int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
    338   DCHECK(entry);
    339   *entry = CreateEntryImpl(key);
    340   return (*entry) ? net::OK : net::ERR_FAILED;
    341 }
    342 
    343 int BackendImpl::SyncDoomEntry(const std::string& key) {
    344   if (disabled_)
    345     return net::ERR_FAILED;
    346 
    347   EntryImpl* entry = OpenEntryImpl(key);
    348   if (!entry)
    349     return net::ERR_FAILED;
    350 
    351   entry->DoomImpl();
    352   entry->Release();
    353   return net::OK;
    354 }
    355 
    356 int BackendImpl::SyncDoomAllEntries() {
    357   // This is not really an error, but it is an interesting condition.
    358   ReportError(ERR_CACHE_DOOMED);
    359   stats_.OnEvent(Stats::DOOM_CACHE);
    360   if (!num_refs_) {
    361     RestartCache(false);
    362     return disabled_ ? net::ERR_FAILED : net::OK;
    363   } else {
    364     if (disabled_)
    365       return net::ERR_FAILED;
    366 
    367     eviction_.TrimCache(true);
    368     return net::OK;
    369   }
    370 }
    371 
    372 int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
    373                                         const base::Time end_time) {
    374   DCHECK_NE(net::APP_CACHE, cache_type_);
    375   if (end_time.is_null())
    376     return SyncDoomEntriesSince(initial_time);
    377 
    378   DCHECK(end_time >= initial_time);
    379 
    380   if (disabled_)
    381     return net::ERR_FAILED;
    382 
    383   EntryImpl* node;
    384   scoped_ptr<Rankings::Iterator> iterator(new Rankings::Iterator());
    385   EntryImpl* next = OpenNextEntryImpl(iterator.get());
    386   if (!next)
    387     return net::OK;
    388 
    389   while (next) {
    390     node = next;
    391     next = OpenNextEntryImpl(iterator.get());
    392 
    393     if (node->GetLastUsed() >= initial_time &&
    394         node->GetLastUsed() < end_time) {
    395       node->DoomImpl();
    396     } else if (node->GetLastUsed() < initial_time) {
    397       if (next)
    398         next->Release();
    399       next = NULL;
    400       SyncEndEnumeration(iterator.Pass());
    401     }
    402 
    403     node->Release();
    404   }
    405 
    406   return net::OK;
    407 }
    408 
    409 // We use OpenNextEntryImpl to retrieve elements from the cache, until we get
    410 // entries that are too old.
    411 int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
    412   DCHECK_NE(net::APP_CACHE, cache_type_);
    413   if (disabled_)
    414     return net::ERR_FAILED;
    415 
    416   stats_.OnEvent(Stats::DOOM_RECENT);
    417   for (;;) {
    418     scoped_ptr<Rankings::Iterator> iterator(new Rankings::Iterator());
    419     EntryImpl* entry = OpenNextEntryImpl(iterator.get());
    420     if (!entry)
    421       return net::OK;
    422 
    423     if (initial_time > entry->GetLastUsed()) {
    424       entry->Release();
    425       SyncEndEnumeration(iterator.Pass());
    426       return net::OK;
    427     }
    428 
    429     entry->DoomImpl();
    430     entry->Release();
    431     SyncEndEnumeration(iterator.Pass());  // The doom invalidated the iterator.
    432   }
    433 }
    434 
    435 int BackendImpl::SyncOpenNextEntry(Rankings::Iterator* iterator,
    436                                    Entry** next_entry) {
    437   *next_entry = OpenNextEntryImpl(iterator);
    438   return (*next_entry) ? net::OK : net::ERR_FAILED;
    439 }
    440 
    441 void BackendImpl::SyncEndEnumeration(scoped_ptr<Rankings::Iterator> iterator) {
    442   iterator->Reset();
    443 }
    444 
    445 void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
    446   if (disabled_)
    447     return;
    448 
    449   uint32 hash = base::Hash(key);
    450   bool error;
    451   EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
    452   if (cache_entry) {
    453     if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
    454       UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    455     }
    456     cache_entry->Release();
    457   }
    458 }
    459 
    460 EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
    461   if (disabled_)
    462     return NULL;
    463 
    464   TimeTicks start = TimeTicks::Now();
    465   uint32 hash = base::Hash(key);
    466   Trace("Open hash 0x%x", hash);
    467 
    468   bool error;
    469   EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
    470   if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    471     // The entry was already evicted.
    472     cache_entry->Release();
    473     cache_entry = NULL;
    474     web_fonts_histogram::RecordEvictedEntry(key);
    475   } else if (!cache_entry) {
    476     web_fonts_histogram::RecordCacheMiss(key);
    477   }
    478 
    479   int current_size = data_->header.num_bytes / (1024 * 1024);
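             // The stats timer normally fires every 30 seconds, so 120 ticks of
             // Stats::TIMER correspond to one hour of wall-clock time.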
    480   int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
    481   int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
    482   int64 use_hours = total_hours - no_use_hours;
    483 
    484   if (!cache_entry) {
    485     CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    486     CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    487     CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
    488     CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
    489     stats_.OnEvent(Stats::OPEN_MISS);
    490     return NULL;
    491   }
    492 
    493   eviction_.OnOpenEntry(cache_entry);
    494   entry_count_++;
    495 
    496   Trace("Open hash 0x%x end: 0x%x", hash,
    497         cache_entry->entry()->address().value());
    498   CACHE_UMA(AGE_MS, "OpenTime", 0, start);
    499   CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
    500   CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
    501   CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
    502   stats_.OnEvent(Stats::OPEN_HIT);
    503   web_fonts_histogram::RecordCacheHit(cache_entry);
    504   SIMPLE_STATS_COUNTER("disk_cache.hit");
    505   return cache_entry;
    506 }
    507 
    508 EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
    509   if (disabled_ || key.empty())
    510     return NULL;
    511 
    512   TimeTicks start = TimeTicks::Now();
    513   uint32 hash = base::Hash(key);
    514   Trace("Create hash 0x%x", hash);
    515 
    516   scoped_refptr<EntryImpl> parent;
    517   Addr entry_address(data_->table[hash & mask_]);
    518   if (entry_address.is_initialized()) {
    519     // We have an entry already. It could be the one we are looking for, or just
    520     // a hash conflict.
    521     bool error;
    522     EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    523     if (old_entry)
    524       return ResurrectEntry(old_entry);
    525 
    526     EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    527     DCHECK(!error);
    528     if (parent_entry) {
    529       parent.swap(&parent_entry);
    530     } else if (data_->table[hash & mask_]) {
    531       // We should have corrected the problem.
    532       NOTREACHED();
    533       return NULL;
    534     }
    535   }
    536 
    537   // The general flow is to allocate disk space and initialize the entry data,
     538   // followed by saving that to disk, then linking the entry through the index
    539   // and finally through the lists. If there is a crash in this process, we may
    540   // end up with:
    541   // a. Used, unreferenced empty blocks on disk (basically just garbage).
    542   // b. Used, unreferenced but meaningful data on disk (more garbage).
    543   // c. A fully formed entry, reachable only through the index.
    544   // d. A fully formed entry, also reachable through the lists, but still dirty.
    545   //
    546   // Anything after (b) can be automatically cleaned up. We may consider saving
    547   // the current operation (as we do while manipulating the lists) so that we
     548   // can detect and clean up (a) and (b).
    549 
    550   int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
    551   if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    552     LOG(ERROR) << "Create entry failed " << key.c_str();
    553     stats_.OnEvent(Stats::CREATE_ERROR);
    554     return NULL;
    555   }
    556 
    557   Addr node_address(0);
    558   if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    559     block_files_.DeleteBlock(entry_address, false);
    560     LOG(ERROR) << "Create entry failed " << key.c_str();
    561     stats_.OnEvent(Stats::CREATE_ERROR);
    562     return NULL;
    563   }
    564 
    565   scoped_refptr<EntryImpl> cache_entry(
    566       new EntryImpl(this, entry_address, false));
    567   IncreaseNumRefs();
    568 
    569   if (!cache_entry->CreateEntry(node_address, key, hash)) {
    570     block_files_.DeleteBlock(entry_address, false);
    571     block_files_.DeleteBlock(node_address, false);
    572     LOG(ERROR) << "Create entry failed " << key.c_str();
    573     stats_.OnEvent(Stats::CREATE_ERROR);
    574     return NULL;
    575   }
    576 
    577   cache_entry->BeginLogging(net_log_, true);
    578 
    579   // We are not failing the operation; let's add this to the map.
    580   open_entries_[entry_address.value()] = cache_entry.get();
    581 
    582   // Save the entry.
    583   cache_entry->entry()->Store();
    584   cache_entry->rankings()->Store();
    585   IncreaseNumEntries();
    586   entry_count_++;
    587 
    588   // Link this entry through the index.
    589   if (parent.get()) {
    590     parent->SetNextAddress(entry_address);
    591   } else {
    592     data_->table[hash & mask_] = entry_address.value();
    593   }
    594 
    595   // Link this entry through the lists.
    596   eviction_.OnCreateEntry(cache_entry.get());
    597 
    598   CACHE_UMA(AGE_MS, "CreateTime", 0, start);
    599   stats_.OnEvent(Stats::CREATE_HIT);
    600   SIMPLE_STATS_COUNTER("disk_cache.miss");
    601   Trace("create entry hit ");
    602   FlushIndex();
    603   cache_entry->AddRef();
    604   return cache_entry.get();
    605 }
    606 
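         // Enumeration merges the heads of the rankings lists (with the new
         // eviction there are three: no-use, low-use and high-use) and returns
         // whichever candidate entry was used most recently, so callers see
         // entries in roughly most-recently-used order across all lists.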
    607 EntryImpl* BackendImpl::OpenNextEntryImpl(Rankings::Iterator* iterator) {
    608   if (disabled_)
    609     return NULL;
    610 
    611   const int kListsToSearch = 3;
    612   scoped_refptr<EntryImpl> entries[kListsToSearch];
    613   if (!iterator->my_rankings) {
    614     iterator->my_rankings = &rankings_;
    615     bool ret = false;
    616 
    617     // Get an entry from each list.
    618     for (int i = 0; i < kListsToSearch; i++) {
    619       EntryImpl* temp = NULL;
    620       ret |= OpenFollowingEntryFromList(static_cast<Rankings::List>(i),
    621                                         &iterator->nodes[i], &temp);
    622       entries[i].swap(&temp);  // The entry was already addref'd.
    623     }
    624     if (!ret) {
    625       iterator->Reset();
    626       return NULL;
    627     }
    628   } else {
    629     // Get the next entry from the last list, and the actual entries for the
    630     // elements on the other lists.
    631     for (int i = 0; i < kListsToSearch; i++) {
    632       EntryImpl* temp = NULL;
    633       if (iterator->list == i) {
    634           OpenFollowingEntryFromList(
    635               iterator->list, &iterator->nodes[i], &temp);
    636       } else {
    637         temp = GetEnumeratedEntry(iterator->nodes[i],
    638                                   static_cast<Rankings::List>(i));
    639       }
    640 
    641       entries[i].swap(&temp);  // The entry was already addref'd.
    642     }
    643   }
    644 
    645   int newest = -1;
    646   int oldest = -1;
    647   Time access_times[kListsToSearch];
    648   for (int i = 0; i < kListsToSearch; i++) {
    649     if (entries[i].get()) {
    650       access_times[i] = entries[i]->GetLastUsed();
    651       if (newest < 0) {
    652         DCHECK_LT(oldest, 0);
    653         newest = oldest = i;
    654         continue;
    655       }
    656       if (access_times[i] > access_times[newest])
    657         newest = i;
    658       if (access_times[i] < access_times[oldest])
    659         oldest = i;
    660     }
    661   }
    662 
    663   if (newest < 0 || oldest < 0) {
    664     iterator->Reset();
    665     return NULL;
    666   }
    667 
    668   EntryImpl* next_entry;
    669   next_entry = entries[newest].get();
    670   iterator->list = static_cast<Rankings::List>(newest);
    671   next_entry->AddRef();
    672   return next_entry;
    673 }
    674 
    675 bool BackendImpl::SetMaxSize(int max_bytes) {
    676   COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
    677   if (max_bytes < 0)
    678     return false;
    679 
    680   // Zero size means use the default.
    681   if (!max_bytes)
    682     return true;
    683 
    684   // Avoid a DCHECK later on.
    685   if (max_bytes >= kint32max - kint32max / 10)
    686     max_bytes = kint32max - kint32max / 10 - 1;
    687 
    688   user_flags_ |= kMaxSize;
    689   max_size_ = max_bytes;
    690   return true;
    691 }
    692 
    693 void BackendImpl::SetType(net::CacheType type) {
    694   DCHECK_NE(net::MEMORY_CACHE, type);
    695   cache_type_ = type;
    696 }
    697 
    698 base::FilePath BackendImpl::GetFileName(Addr address) const {
    699   if (!address.is_separate_file() || !address.is_initialized()) {
    700     NOTREACHED();
    701     return base::FilePath();
    702   }
    703 
    704   std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
    705   return path_.AppendASCII(tmp);
    706 }
    707 
    708 MappedFile* BackendImpl::File(Addr address) {
    709   if (disabled_)
    710     return NULL;
    711   return block_files_.GetFile(address);
    712 }
    713 
    714 base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
    715   return background_queue_.GetWeakPtr();
    716 }
    717 
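         // External files are named f_XXXXXX (hexadecimal); this walks forward
         // from the last number recorded in the header, skipping names that
         // already exist on disk, and stores the chosen number back in the
         // index header.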
    718 bool BackendImpl::CreateExternalFile(Addr* address) {
    719   int file_number = data_->header.last_file + 1;
    720   Addr file_address(0);
    721   bool success = false;
    722   for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    723     if (!file_address.SetFileNumber(file_number)) {
    724       file_number = 1;
    725       continue;
    726     }
    727     base::FilePath name = GetFileName(file_address);
    728     int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
    729                 base::File::FLAG_CREATE | base::File::FLAG_EXCLUSIVE_WRITE;
    730     base::File file(name, flags);
    731     if (!file.IsValid()) {
    732       base::File::Error error = file.error_details();
    733       if (error != base::File::FILE_ERROR_EXISTS) {
    734         LOG(ERROR) << "Unable to create file: " << error;
    735         return false;
    736       }
    737       continue;
    738     }
    739 
    740     success = true;
    741     break;
    742   }
    743 
    744   DCHECK(success);
    745   if (!success)
    746     return false;
    747 
    748   data_->header.last_file = file_number;
    749   address->set_value(file_address.value());
    750   return true;
    751 }
    752 
    753 bool BackendImpl::CreateBlock(FileType block_type, int block_count,
    754                              Addr* block_address) {
    755   return block_files_.CreateBlock(block_type, block_count, block_address);
    756 }
    757 
    758 void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
    759   block_files_.DeleteBlock(block_address, deep);
    760 }
    761 
    762 LruData* BackendImpl::GetLruData() {
    763   return &data_->header.lru;
    764 }
    765 
    766 void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
    767   if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
    768     return;
    769   eviction_.UpdateRank(entry, modified);
    770 }
    771 
    772 void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
    773   Addr address(rankings->Data()->contents);
    774   EntryImpl* cache_entry = NULL;
    775   if (NewEntry(address, &cache_entry)) {
    776     STRESS_NOTREACHED();
    777     return;
    778   }
    779 
    780   uint32 hash = cache_entry->GetHash();
    781   cache_entry->Release();
    782 
    783   // Anything on the table means that this entry is there.
    784   if (data_->table[hash & mask_])
    785     return;
    786 
    787   data_->table[hash & mask_] = address.value();
    788   FlushIndex();
    789 }
    790 
    791 void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
    792   uint32 hash = entry->GetHash();
    793   std::string key = entry->GetKey();
    794   Addr entry_addr = entry->entry()->address();
    795   bool error;
    796   EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
    797   CacheAddr child(entry->GetNextAddress());
    798 
    799   Trace("Doom entry 0x%p", entry);
    800 
    801   if (!entry->doomed()) {
    802     // We may have doomed this entry from within MatchEntry.
    803     eviction_.OnDoomEntry(entry);
    804     entry->InternalDoom();
    805     if (!new_eviction_) {
    806       DecreaseNumEntries();
    807     }
    808     stats_.OnEvent(Stats::DOOM_ENTRY);
    809   }
    810 
    811   if (parent_entry) {
    812     parent_entry->SetNextAddress(Addr(child));
    813     parent_entry->Release();
    814   } else if (!error) {
    815     data_->table[hash & mask_] = child;
    816   }
    817 
    818   FlushIndex();
    819 }
    820 
    821 #if defined(NET_BUILD_STRESS_CACHE)
    822 
    823 CacheAddr BackendImpl::GetNextAddr(Addr address) {
    824   EntriesMap::iterator it = open_entries_.find(address.value());
    825   if (it != open_entries_.end()) {
    826     EntryImpl* this_entry = it->second;
    827     return this_entry->GetNextAddress();
    828   }
    829   DCHECK(block_files_.IsValid(address));
    830   DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);
    831 
    832   CacheEntryBlock entry(File(address), address);
    833   CHECK(entry.Load());
    834   return entry.Data()->next;
    835 }
    836 
    837 void BackendImpl::NotLinked(EntryImpl* entry) {
    838   Addr entry_addr = entry->entry()->address();
    839   uint32 i = entry->GetHash() & mask_;
    840   Addr address(data_->table[i]);
    841   if (!address.is_initialized())
    842     return;
    843 
    844   for (;;) {
    845     DCHECK(entry_addr.value() != address.value());
    846     address.set_value(GetNextAddr(address));
    847     if (!address.is_initialized())
    848       break;
    849   }
    850 }
    851 #endif  // NET_BUILD_STRESS_CACHE
    852 
    853 // An entry may be linked on the DELETED list for a while after being doomed.
    854 // This function is called when we want to remove it.
    855 void BackendImpl::RemoveEntry(EntryImpl* entry) {
    856 #if defined(NET_BUILD_STRESS_CACHE)
    857   NotLinked(entry);
    858 #endif
    859   if (!new_eviction_)
    860     return;
    861 
    862   DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);
    863 
    864   Trace("Remove entry 0x%p", entry);
    865   eviction_.OnDestroyEntry(entry);
    866   DecreaseNumEntries();
    867 }
    868 
    869 void BackendImpl::OnEntryDestroyBegin(Addr address) {
    870   EntriesMap::iterator it = open_entries_.find(address.value());
    871   if (it != open_entries_.end())
    872     open_entries_.erase(it);
    873 }
    874 
    875 void BackendImpl::OnEntryDestroyEnd() {
    876   DecreaseNumRefs();
    877   if (data_->header.num_bytes > max_size_ && !read_only_ &&
    878       (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    879     eviction_.TrimCache(false);
    880 }
    881 
    882 EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
    883   DCHECK(rankings->HasData());
    884   EntriesMap::const_iterator it =
    885       open_entries_.find(rankings->Data()->contents);
    886   if (it != open_entries_.end()) {
    887     // We have this entry in memory.
    888     return it->second;
    889   }
    890 
    891   return NULL;
    892 }
    893 
    894 int32 BackendImpl::GetCurrentEntryId() const {
    895   return data_->header.this_id;
    896 }
    897 
    898 int BackendImpl::MaxFileSize() const {
    899   return cache_type() == net::PNACL_CACHE ? max_size_ : max_size_ / 8;
    900 }
    901 
    902 void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
    903   if (disabled_ || old_size == new_size)
    904     return;
    905   if (old_size > new_size)
    906     SubstractStorageSize(old_size - new_size);
    907   else
    908     AddStorageSize(new_size - old_size);
    909 
    910   FlushIndex();
    911 
    912   // Update the usage statistics.
    913   stats_.ModifyStorageStats(old_size, new_size);
    914 }
    915 
    916 void BackendImpl::TooMuchStorageRequested(int32 size) {
    917   stats_.ModifyStorageStats(0, size);
    918 }
    919 
    920 bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
    921   DCHECK_GT(new_size, current_size);
    922   if (user_flags_ & kNoBuffering)
    923     return false;
    924 
    925   int to_add = new_size - current_size;
    926   if (buffer_bytes_ + to_add > MaxBuffersSize())
    927     return false;
    928 
    929   buffer_bytes_ += to_add;
    930   CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
    931   return true;
    932 }
    933 
    934 void BackendImpl::BufferDeleted(int size) {
    935   buffer_bytes_ -= size;
    936   DCHECK_GE(size, 0);
    937 }
    938 
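         // The cache counts as "loaded" when more than five disk operations
         // are in flight, or when the previous stats-timer tick observed heavy
         // traffic (user_load_, set in OnStatsTimer).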
    939 bool BackendImpl::IsLoaded() const {
    940   CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
    941   if (user_flags_ & kNoLoadProtection)
    942     return false;
    943 
    944   return (num_pending_io_ > 5 || user_load_);
    945 }
    946 
    947 std::string BackendImpl::HistogramName(const char* name, int experiment) const {
    948   if (!experiment)
    949     return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
    950   return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
    951                             name, experiment);
    952 }
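         // For the plain disk cache this yields names such as
         // "DiskCache.0.OpenTime", or "DiskCache.0.OpenTime_23" when an
         // experiment id is set (the specific numbers here are illustrative).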
    953 
    954 base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
    955   return ptr_factory_.GetWeakPtr();
    956 }
    957 
    958 // We want to remove biases from some histograms so we only send data once per
    959 // week.
    960 bool BackendImpl::ShouldReportAgain() {
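             // uma_report_ acts as a small state machine: 0 means not checked
             // yet, 1 means we checked and it was not time to report, and 2
             // means this backend instance should report.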
    961   if (uma_report_)
    962     return uma_report_ == 2;
    963 
    964   uma_report_++;
    965   int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
    966   Time last_time = Time::FromInternalValue(last_report);
    967   if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    968     stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    969     uma_report_++;
    970     return true;
    971   }
    972   return false;
    973 }
    974 
    975 void BackendImpl::FirstEviction() {
    976   DCHECK(data_->header.create_time);
    977   if (!GetEntryCount())
    978     return;  // This is just for unit tests.
    979 
    980   Time create_time = Time::FromInternalValue(data_->header.create_time);
    981   CACHE_UMA(AGE, "FillupAge", 0, create_time);
    982 
    983   int64 use_time = stats_.GetCounter(Stats::TIMER);
    984   CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
    985   CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());
    986 
    987   if (!use_time)
    988     use_time = 1;
    989   CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
    990             static_cast<int>(data_->header.num_entries / use_time));
    991   CACHE_UMA(COUNTS, "FirstByteIORate", 0,
    992             static_cast<int>((data_->header.num_bytes / 1024) / use_time));
    993 
    994   int avg_size = data_->header.num_bytes / GetEntryCount();
    995   CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);
    996 
    997   int large_entries_bytes = stats_.GetLargeEntriesSize();
    998   int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
    999   CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);
   1000 
   1001   if (new_eviction_) {
   1002     CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
   1003     CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
   1004               data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
   1005     CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
   1006               data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
   1007     CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
   1008               data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
   1009   }
   1010 
   1011   stats_.ResetRatios();
   1012 }
   1013 
   1014 void BackendImpl::CriticalError(int error) {
   1015   STRESS_NOTREACHED();
   1016   LOG(ERROR) << "Critical error found " << error;
   1017   if (disabled_)
   1018     return;
   1019 
   1020   stats_.OnEvent(Stats::FATAL_ERROR);
   1021   LogStats();
   1022   ReportError(error);
   1023 
   1024   // Setting the index table length to an invalid value will force re-creation
   1025   // of the cache files.
   1026   data_->header.table_len = 1;
   1027   disabled_ = true;
   1028 
   1029   if (!num_refs_)
   1030     base::MessageLoop::current()->PostTask(
   1031         FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
   1032 }
   1033 
   1034 void BackendImpl::ReportError(int error) {
   1035   STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
   1036                 error == ERR_CACHE_CREATED);
   1037 
   1038   // We transmit positive numbers, instead of direct error codes.
   1039   DCHECK_LE(error, 0);
   1040   CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
   1041 }
   1042 
   1043 void BackendImpl::OnEvent(Stats::Counters an_event) {
   1044   stats_.OnEvent(an_event);
   1045 }
   1046 
   1047 void BackendImpl::OnRead(int32 bytes) {
   1048   DCHECK_GE(bytes, 0);
   1049   byte_count_ += bytes;
   1050   if (byte_count_ < 0)
   1051     byte_count_ = kint32max;
   1052 }
   1053 
   1054 void BackendImpl::OnWrite(int32 bytes) {
   1055   // We use the same implementation as OnRead... just log the number of bytes.
   1056   OnRead(bytes);
   1057 }
   1058 
   1059 void BackendImpl::OnStatsTimer() {
   1060   if (disabled_)
   1061     return;
   1062 
   1063   stats_.OnEvent(Stats::TIMER);
   1064   int64 time = stats_.GetCounter(Stats::TIMER);
   1065   int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);
   1066 
   1067   // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
   1068   // the bias towards 0.
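             // Each tick moves the stored value 1/50th of the way towards the
             // current number of references (and always by at least one), a
             // simple exponential smoothing of the sampled count.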
   1069   if (num_refs_ && (current != num_refs_)) {
   1070     int64 diff = (num_refs_ - current) / 50;
   1071     if (!diff)
   1072       diff = num_refs_ > current ? 1 : -1;
   1073     current = current + diff;
   1074     stats_.SetCounter(Stats::OPEN_ENTRIES, current);
   1075     stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
   1076   }
   1077 
   1078   CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);
   1079 
   1080   CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
   1081   CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);
   1082 
   1083   // These values cover about 99.5% of the population (Oct 2011).
   1084   user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
   1085   entry_count_ = 0;
   1086   byte_count_ = 0;
   1087   up_ticks_++;
   1088 
   1089   if (!data_)
   1090     first_timer_ = false;
   1091   if (first_timer_) {
   1092     first_timer_ = false;
   1093     if (ShouldReportAgain())
   1094       ReportStats();
   1095   }
   1096 
   1097   // Save stats to disk at 5 min intervals.
   1098   if (time % 10 == 0)
   1099     StoreStats();
   1100 }
   1101 
   1102 void BackendImpl::IncrementIoCount() {
   1103   num_pending_io_++;
   1104 }
   1105 
   1106 void BackendImpl::DecrementIoCount() {
   1107   num_pending_io_--;
   1108 }
   1109 
   1110 void BackendImpl::SetUnitTestMode() {
   1111   user_flags_ |= kUnitTestMode;
   1112   unit_test_ = true;
   1113 }
   1114 
   1115 void BackendImpl::SetUpgradeMode() {
   1116   user_flags_ |= kUpgradeMode;
   1117   read_only_ = true;
   1118 }
   1119 
   1120 void BackendImpl::SetNewEviction() {
   1121   user_flags_ |= kNewEviction;
   1122   new_eviction_ = true;
   1123 }
   1124 
   1125 void BackendImpl::SetFlags(uint32 flags) {
   1126   user_flags_ |= flags;
   1127 }
   1128 
   1129 void BackendImpl::ClearRefCountForTest() {
   1130   num_refs_ = 0;
   1131 }
   1132 
   1133 int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
   1134   background_queue_.FlushQueue(callback);
   1135   return net::ERR_IO_PENDING;
   1136 }
   1137 
   1138 int BackendImpl::RunTaskForTest(const base::Closure& task,
   1139                                 const CompletionCallback& callback) {
   1140   background_queue_.RunTask(task, callback);
   1141   return net::ERR_IO_PENDING;
   1142 }
   1143 
   1144 void BackendImpl::TrimForTest(bool empty) {
   1145   eviction_.SetTestMode();
   1146   eviction_.TrimCache(empty);
   1147 }
   1148 
   1149 void BackendImpl::TrimDeletedListForTest(bool empty) {
   1150   eviction_.SetTestMode();
   1151   eviction_.TrimDeletedList(empty);
   1152 }
   1153 
   1154 base::RepeatingTimer<BackendImpl>* BackendImpl::GetTimerForTest() {
   1155   return timer_.get();
   1156 }
   1157 
   1158 int BackendImpl::SelfCheck() {
   1159   if (!init_) {
   1160     LOG(ERROR) << "Init failed";
   1161     return ERR_INIT_FAILED;
   1162   }
   1163 
   1164   int num_entries = rankings_.SelfCheck();
   1165   if (num_entries < 0) {
   1166     LOG(ERROR) << "Invalid rankings list, error " << num_entries;
   1167 #if !defined(NET_BUILD_STRESS_CACHE)
   1168     return num_entries;
   1169 #endif
   1170   }
   1171 
   1172   if (num_entries != data_->header.num_entries) {
   1173     LOG(ERROR) << "Number of entries mismatch";
   1174 #if !defined(NET_BUILD_STRESS_CACHE)
   1175     return ERR_NUM_ENTRIES_MISMATCH;
   1176 #endif
   1177   }
   1178 
   1179   return CheckAllEntries();
   1180 }
   1181 
   1182 void BackendImpl::FlushIndex() {
   1183   if (index_.get() && !disabled_)
   1184     index_->Flush();
   1185 }
   1186 
   1187 // ------------------------------------------------------------------------
   1188 
   1189 net::CacheType BackendImpl::GetCacheType() const {
   1190   return cache_type_;
   1191 }
   1192 
   1193 int32 BackendImpl::GetEntryCount() const {
   1194   if (!index_.get() || disabled_)
   1195     return 0;
   1196   // num_entries includes entries already evicted.
   1197   int32 not_deleted = data_->header.num_entries -
   1198                       data_->header.lru.sizes[Rankings::DELETED];
   1199 
   1200   if (not_deleted < 0) {
   1201     NOTREACHED();
   1202     not_deleted = 0;
   1203   }
   1204 
   1205   return not_deleted;
   1206 }
   1207 
   1208 int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
   1209                            const CompletionCallback& callback) {
   1210   DCHECK(!callback.is_null());
   1211   background_queue_.OpenEntry(key, entry, callback);
   1212   return net::ERR_IO_PENDING;
   1213 }
   1214 
   1215 int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
   1216                              const CompletionCallback& callback) {
   1217   DCHECK(!callback.is_null());
   1218   background_queue_.CreateEntry(key, entry, callback);
   1219   return net::ERR_IO_PENDING;
   1220 }
   1221 
   1222 int BackendImpl::DoomEntry(const std::string& key,
   1223                            const CompletionCallback& callback) {
   1224   DCHECK(!callback.is_null());
   1225   background_queue_.DoomEntry(key, callback);
   1226   return net::ERR_IO_PENDING;
   1227 }
   1228 
   1229 int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
   1230   DCHECK(!callback.is_null());
   1231   background_queue_.DoomAllEntries(callback);
   1232   return net::ERR_IO_PENDING;
   1233 }
   1234 
   1235 int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
   1236                                     const base::Time end_time,
   1237                                     const CompletionCallback& callback) {
   1238   DCHECK(!callback.is_null());
   1239   background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
   1240   return net::ERR_IO_PENDING;
   1241 }
   1242 
   1243 int BackendImpl::DoomEntriesSince(const base::Time initial_time,
   1244                                   const CompletionCallback& callback) {
   1245   DCHECK(!callback.is_null());
   1246   background_queue_.DoomEntriesSince(initial_time, callback);
   1247   return net::ERR_IO_PENDING;
   1248 }
   1249 
   1250 class BackendImpl::IteratorImpl : public Backend::Iterator {
   1251  public:
   1252   explicit IteratorImpl(base::WeakPtr<InFlightBackendIO> background_queue)
   1253       : background_queue_(background_queue),
   1254         iterator_(new Rankings::Iterator()) {
   1255   }
   1256 
   1257   virtual ~IteratorImpl() {
   1258     if (background_queue_)
   1259       background_queue_->EndEnumeration(iterator_.Pass());
   1260   }
   1261 
   1262   virtual int OpenNextEntry(Entry** next_entry,
   1263                             const net::CompletionCallback& callback) OVERRIDE {
   1264     if (!background_queue_)
   1265       return net::ERR_FAILED;
   1266     background_queue_->OpenNextEntry(iterator_.get(), next_entry, callback);
   1267     return net::ERR_IO_PENDING;
   1268   }
   1269 
   1270  private:
   1271   const base::WeakPtr<InFlightBackendIO> background_queue_;
   1272   scoped_ptr<Rankings::Iterator> iterator_;
   1273 };
   1274 
   1275 scoped_ptr<Backend::Iterator> BackendImpl::CreateIterator() {
   1276   return scoped_ptr<Backend::Iterator>(new IteratorImpl(GetBackgroundQueue()));
   1277 }
   1278 
   1279 void BackendImpl::GetStats(StatsItems* stats) {
   1280   if (disabled_)
   1281     return;
   1282 
   1283   std::pair<std::string, std::string> item;
   1284 
   1285   item.first = "Entries";
   1286   item.second = base::StringPrintf("%d", data_->header.num_entries);
   1287   stats->push_back(item);
   1288 
   1289   item.first = "Pending IO";
   1290   item.second = base::StringPrintf("%d", num_pending_io_);
   1291   stats->push_back(item);
   1292 
   1293   item.first = "Max size";
   1294   item.second = base::StringPrintf("%d", max_size_);
   1295   stats->push_back(item);
   1296 
   1297   item.first = "Current size";
   1298   item.second = base::StringPrintf("%d", data_->header.num_bytes);
   1299   stats->push_back(item);
   1300 
   1301   item.first = "Cache type";
   1302   item.second = "Blockfile Cache";
   1303   stats->push_back(item);
   1304 
   1305   stats_.GetItems(stats);
   1306 }
   1307 
   1308 void BackendImpl::OnExternalCacheHit(const std::string& key) {
   1309   background_queue_.OnExternalCacheHit(key);
   1310 }
   1311 
   1312 // ------------------------------------------------------------------------
   1313 
   1314 // We just created a new file so we're going to write the header and set the
   1315 // file length to include the hash table (zero filled).
   1316 bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
   1317   AdjustMaxCacheSize(0);
   1318 
   1319   IndexHeader header;
   1320   header.table_len = DesiredIndexTableLen(max_size_);
   1321 
   1322   // We need file version 2.1 for the new eviction algorithm.
   1323   if (new_eviction_)
   1324     header.version = 0x20001;
   1325 
   1326   header.create_time = Time::Now().ToInternalValue();
   1327 
   1328   if (!file->Write(&header, sizeof(header), 0))
   1329     return false;
   1330 
   1331   return file->SetLength(GetIndexSize(header.table_len));
   1332 }
   1333 
   1334 bool BackendImpl::InitBackingStore(bool* file_created) {
   1335   if (!base::CreateDirectory(path_))
   1336     return false;
   1337 
   1338   base::FilePath index_name = path_.AppendASCII(kIndexName);
   1339 
   1340   int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
   1341               base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_EXCLUSIVE_WRITE;
   1342   base::File base_file(index_name, flags);
   1343   if (!base_file.IsValid())
   1344     return false;
   1345 
   1346   bool ret = true;
   1347   *file_created = base_file.created();
   1348 
   1349   scoped_refptr<disk_cache::File> file(new disk_cache::File(base_file.Pass()));
   1350   if (*file_created)
   1351     ret = CreateBackingStore(file.get());
   1352 
   1353   file = NULL;
   1354   if (!ret)
   1355     return false;
   1356 
   1357   index_ = new MappedFile();
   1358   data_ = static_cast<Index*>(index_->Init(index_name, 0));
   1359   if (!data_) {
   1360     LOG(ERROR) << "Unable to map Index file";
   1361     return false;
   1362   }
   1363 
   1364   if (index_->GetLength() < sizeof(Index)) {
    1365     // We verify this again in CheckIndex(), but it's easier to make sure now
   1366     // that the header is there.
   1367     LOG(ERROR) << "Corrupt Index file";
   1368     return false;
   1369   }
   1370 
   1371   return true;
   1372 }
   1373 
   1374 // The maximum cache size will be either set explicitly by the caller, or
   1375 // calculated by this code.
   1376 void BackendImpl::AdjustMaxCacheSize(int table_len) {
   1377   if (max_size_)
   1378     return;
   1379 
   1380   // If table_len is provided, the index file exists.
   1381   DCHECK(!table_len || data_->header.magic);
   1382 
    1383   // The user did not set the size, so figure it out.
   1384   int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
   1385   if (available < 0) {
   1386     max_size_ = kDefaultCacheSize;
   1387     return;
   1388   }
   1389 
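             // For an existing cache, the space it already occupies can be
             // reused, so count the current cache bytes as available before
             // picking a preferred size.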
   1390   if (table_len)
   1391     available += data_->header.num_bytes;
   1392 
   1393   max_size_ = PreferredCacheSize(available);
   1394 
   1395   if (!table_len)
   1396     return;
   1397 
   1398   // If we already have a table, adjust the size to it.
   1399   int current_max_size = MaxStorageSizeForTable(table_len);
   1400   if (max_size_ > current_max_size)
   1401     max_size_= current_max_size;
   1402 }
   1403 
   1404 bool BackendImpl::InitStats() {
   1405   Addr address(data_->header.stats);
   1406   int size = stats_.StorageSize();
   1407 
   1408   if (!address.is_initialized()) {
   1409     FileType file_type = Addr::RequiredFileType(size);
   1410     DCHECK_NE(file_type, EXTERNAL);
   1411     int num_blocks = Addr::RequiredBlocks(size, file_type);
   1412 
   1413     if (!CreateBlock(file_type, num_blocks, &address))
   1414       return false;
   1415 
   1416     data_->header.stats = address.value();
   1417     return stats_.Init(NULL, 0, address);
   1418   }
   1419 
   1420   if (!address.is_block_file()) {
   1421     NOTREACHED();
   1422     return false;
   1423   }
   1424 
   1425   // Load the required data.
   1426   size = address.num_blocks() * address.BlockSize();
   1427   MappedFile* file = File(address);
   1428   if (!file)
   1429     return false;
   1430 
   1431   scoped_ptr<char[]> data(new char[size]);
   1432   size_t offset = address.start_block() * address.BlockSize() +
   1433                   kBlockHeaderSize;
   1434   if (!file->Read(data.get(), size, offset))
   1435     return false;
   1436 
   1437   if (!stats_.Init(data.get(), size, address))
   1438     return false;
   1439   if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
   1440     stats_.InitSizeHistogram();
   1441   return true;
   1442 }
   1443 
   1444 void BackendImpl::StoreStats() {
   1445   int size = stats_.StorageSize();
   1446   scoped_ptr<char[]> data(new char[size]);
   1447   Addr address;
   1448   size = stats_.SerializeStats(data.get(), size, &address);
   1449   DCHECK(size);
   1450   if (!address.is_initialized())
   1451     return;
   1452 
   1453   MappedFile* file = File(address);
   1454   if (!file)
   1455     return;
   1456 
   1457   size_t offset = address.start_block() * address.BlockSize() +
   1458                   kBlockHeaderSize;
   1459   file->Write(data.get(), size, offset);  // ignore result.
   1460 }
   1461 
   1462 void BackendImpl::RestartCache(bool failure) {
   1463   int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
   1464   int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
   1465   int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
   1466   int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
   1467 
   1468   PrepareForRestart();
   1469   if (failure) {
   1470     DCHECK(!num_refs_);
   1471     DCHECK(!open_entries_.size());
   1472     DelayedCacheCleanup(path_);
   1473   } else {
   1474     DeleteCache(path_, false);
   1475   }
   1476 
   1477   // Don't call Init() if directed by the unit test: we are simulating a failure
   1478   // trying to re-enable the cache.
   1479   if (unit_test_)
   1480     init_ = true;  // Let the destructor do proper cleanup.
   1481   else if (SyncInit() == net::OK) {
   1482     stats_.SetCounter(Stats::FATAL_ERROR, errors);
   1483     stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
   1484     stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
   1485     stats_.SetCounter(Stats::LAST_REPORT, last_report);
   1486   }
   1487 }
   1488 
   1489 void BackendImpl::PrepareForRestart() {
   1490   // Reset the mask_ if it was not given by the user.
   1491   if (!(user_flags_ & kMask))
   1492     mask_ = 0;
   1493 
   1494   if (!(user_flags_ & kNewEviction))
   1495     new_eviction_ = false;
   1496 
   1497   disabled_ = true;
   1498   data_->header.crash = 0;
   1499   index_->Flush();
   1500   index_ = NULL;
   1501   data_ = NULL;
   1502   block_files_.CloseFiles();
   1503   rankings_.Reset();
   1504   init_ = false;
   1505   restarted_ = true;
   1506 }
   1507 
   1508 int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
   1509   EntriesMap::iterator it = open_entries_.find(address.value());
   1510   if (it != open_entries_.end()) {
   1511     // Easy job. This entry is already in memory.
   1512     EntryImpl* this_entry = it->second;
   1513     this_entry->AddRef();
   1514     *entry = this_entry;
   1515     return 0;
   1516   }
   1517 
   1518   STRESS_DCHECK(block_files_.IsValid(address));
   1519 
   1520   if (!address.SanityCheckForEntryV2()) {
   1521     LOG(WARNING) << "Wrong entry address.";
   1522     STRESS_NOTREACHED();
   1523     return ERR_INVALID_ADDRESS;
   1524   }
   1525 
   1526   scoped_refptr<EntryImpl> cache_entry(
   1527       new EntryImpl(this, address, read_only_));
   1528   IncreaseNumRefs();
   1529   *entry = NULL;
   1530 
   1531   TimeTicks start = TimeTicks::Now();
   1532   if (!cache_entry->entry()->Load())
   1533     return ERR_READ_FAILURE;
   1534 
   1535   if (IsLoaded()) {
   1536     CACHE_UMA(AGE_MS, "LoadTime", 0, start);
   1537   }
   1538 
   1539   if (!cache_entry->SanityCheck()) {
   1540     LOG(WARNING) << "Messed up entry found.";
   1541     STRESS_NOTREACHED();
   1542     return ERR_INVALID_ENTRY;
   1543   }
   1544 
   1545   STRESS_DCHECK(block_files_.IsValid(
   1546                     Addr(cache_entry->entry()->Data()->rankings_node)));
   1547 
   1548   if (!cache_entry->LoadNodeAddress())
   1549     return ERR_READ_FAILURE;
   1550 
   1551   if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
   1552     STRESS_NOTREACHED();
   1553     cache_entry->SetDirtyFlag(0);
   1554     // Don't remove this from the list (it is not linked properly). Instead,
   1555     // break the link back to the entry because it is going away, and leave the
   1556     // rankings node to be deleted if we find it through a list.
   1557     rankings_.SetContents(cache_entry->rankings(), 0);
   1558   } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
   1559     STRESS_NOTREACHED();
   1560     cache_entry->SetDirtyFlag(0);
   1561     rankings_.SetContents(cache_entry->rankings(), address.value());
   1562   }
   1563 
   1564   if (!cache_entry->DataSanityCheck()) {
   1565     LOG(WARNING) << "Messed up entry found.";
   1566     cache_entry->SetDirtyFlag(0);
   1567     cache_entry->FixForDelete();
   1568   }
   1569 
   1570   // Prevent overwriting the dirty flag in the destructor.
   1571   cache_entry->SetDirtyFlag(GetCurrentEntryId());
   1572 
   1573   if (cache_entry->dirty()) {
   1574     Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
   1575           address.value());
   1576   }
   1577 
   1578   open_entries_[address.value()] = cache_entry.get();
   1579 
   1580   cache_entry->BeginLogging(net_log_, false);
   1581   cache_entry.swap(entry);
   1582   return 0;
   1583 }
   1584 
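        // Walks the bucket chain that starts at data_->table[hash & mask_] looking
        // for |key|. Returns the matching entry, or its parent when |find_parent|
        // is true, removing dirty or unreadable entries from the chain along the
        // way. |*match_error| signals that the entry was not found where
        // |entry_addr| says it should be.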
   1585 EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
   1586                                    bool find_parent, Addr entry_addr,
   1587                                    bool* match_error) {
   1588   Addr address(data_->table[hash & mask_]);
   1589   scoped_refptr<EntryImpl> cache_entry, parent_entry;
   1590   EntryImpl* tmp = NULL;
   1591   bool found = false;
   1592   std::set<CacheAddr> visited;
   1593   *match_error = false;
   1594 
   1595   for (;;) {
   1596     if (disabled_)
   1597       break;
   1598 
   1599     if (visited.find(address.value()) != visited.end()) {
   1600       // It's possible for a buggy version of the code to write a loop. Just
   1601       // break it.
   1602       Trace("Hash collision loop 0x%x", address.value());
   1603       address.set_value(0);
   1604       parent_entry->SetNextAddress(address);
   1605     }
   1606     visited.insert(address.value());
   1607 
   1608     if (!address.is_initialized()) {
   1609       if (find_parent)
   1610         found = true;
   1611       break;
   1612     }
   1613 
   1614     int error = NewEntry(address, &tmp);
   1615     cache_entry.swap(&tmp);
   1616 
   1617     if (error || cache_entry->dirty()) {
   1618       // This entry is dirty on disk (it was not properly closed): we cannot
   1619       // trust it.
   1620       Addr child(0);
   1621       if (!error)
   1622         child.set_value(cache_entry->GetNextAddress());
   1623 
   1624       if (parent_entry.get()) {
   1625         parent_entry->SetNextAddress(child);
   1626         parent_entry = NULL;
   1627       } else {
   1628         data_->table[hash & mask_] = child.value();
   1629       }
   1630 
   1631       Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
   1632             address.value());
   1633 
   1634       if (!error) {
   1635         // It is important to call DestroyInvalidEntry after removing this
   1636         // entry from the table.
   1637         DestroyInvalidEntry(cache_entry.get());
   1638         cache_entry = NULL;
   1639       } else {
   1640         Trace("NewEntry failed on MatchEntry 0x%x", address.value());
   1641       }
   1642 
   1643       // Restart the search.
   1644       address.set_value(data_->table[hash & mask_]);
   1645       visited.clear();
   1646       continue;
   1647     }
   1648 
   1649     DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
   1650     if (cache_entry->IsSameEntry(key, hash)) {
   1651       if (!cache_entry->Update())
   1652         cache_entry = NULL;
   1653       found = true;
   1654       if (find_parent && entry_addr.value() != address.value()) {
   1655         Trace("Entry not on the index 0x%x", address.value());
   1656         *match_error = true;
   1657         parent_entry = NULL;
   1658       }
   1659       break;
   1660     }
   1661     if (!cache_entry->Update())
   1662       cache_entry = NULL;
   1663     parent_entry = cache_entry;
   1664     cache_entry = NULL;
   1665     if (!parent_entry.get())
   1666       break;
   1667 
   1668     address.set_value(parent_entry->GetNextAddress());
   1669   }
   1670 
   1671   if (parent_entry.get() && (!find_parent || !found))
   1672     parent_entry = NULL;
   1673 
   1674   if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
   1675     *match_error = true;
   1676     parent_entry = NULL;
   1677   }
   1678 
   1679   if (cache_entry.get() && (find_parent || !found))
   1680     cache_entry = NULL;
   1681 
   1682   find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
   1683   FlushIndex();
   1684   return tmp;
   1685 }
   1686 
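        // Advances the enumeration on |list|: replaces |*from_entry| with the next
        // rankings node and returns the corresponding entry through |next_entry|.
        // Returns false when the cache is disabled, the list cannot be used with
        // the current eviction mode, or there are no more entries.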
   1687 bool BackendImpl::OpenFollowingEntryFromList(Rankings::List list,
   1688                                              CacheRankingsBlock** from_entry,
   1689                                              EntryImpl** next_entry) {
   1690   if (disabled_)
   1691     return false;
   1692 
   1693   if (!new_eviction_ && Rankings::NO_USE != list)
   1694     return false;
   1695 
   1696   Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
   1697   CacheRankingsBlock* next_block = rankings_.GetNext(rankings.get(), list);
   1698   Rankings::ScopedRankingsBlock next(&rankings_, next_block);
   1699   *from_entry = NULL;
   1700 
   1701   *next_entry = GetEnumeratedEntry(next.get(), list);
   1702   if (!*next_entry)
   1703     return false;
   1704 
   1705   *from_entry = next.release();
   1706   return true;
   1707 }
   1708 
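        // Returns the entry referenced by the rankings node |next|, or NULL if the
        // entry cannot be loaded, is dirty, or fails to update its rankings;
        // broken nodes are removed from |list| as they are found.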
   1709 EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
   1710                                            Rankings::List list) {
   1711   if (!next || disabled_)
   1712     return NULL;
   1713 
   1714   EntryImpl* entry;
   1715   int rv = NewEntry(Addr(next->Data()->contents), &entry);
   1716   if (rv) {
   1717     STRESS_NOTREACHED();
   1718     rankings_.Remove(next, list, false);
   1719     if (rv == ERR_INVALID_ADDRESS) {
   1720       // There is nothing linked from the index. Delete the rankings node.
   1721       DeleteBlock(next->address(), true);
   1722     }
   1723     return NULL;
   1724   }
   1725 
   1726   if (entry->dirty()) {
   1727     // We cannot trust this entry.
   1728     InternalDoomEntry(entry);
   1729     entry->Release();
   1730     return NULL;
   1731   }
   1732 
   1733   if (!entry->Update()) {
   1734     STRESS_NOTREACHED();
   1735     entry->Release();
   1736     return NULL;
   1737   }
   1738 
   1739   // Note that it is unfortunate (but possible) for this entry to be clean, but
   1740   // not actually the real entry. In other words, we could have lost this entry
   1741   // from the index, and it could have been replaced with a newer one. It's not
   1742   // worth checking that this entry is "the real one", so we just return it and
   1743   // let the enumeration continue; this entry will be evicted at some point, and
   1744   // the regular path will work with the real entry. With time, this problem
   1745   // will disappear because this scenario is just a bug.
   1746 
   1747   // Make sure that we save the key for later.
   1748   entry->GetKey();
   1749 
   1750   return entry;
   1751 }
   1752 
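        // Called when a create operation finds an existing entry with the same
        // key. If that entry is still in a normal state the creation simply
        // misses; if it was previously deleted, the entry is revived and returned.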
   1753 EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
   1754   if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
   1755     deleted_entry->Release();
   1756     stats_.OnEvent(Stats::CREATE_MISS);
   1757     Trace("create entry miss ");
   1758     return NULL;
   1759   }
   1760 
   1761   // We are attempting to create an entry and found out that the entry was
   1762   // previously deleted.
   1763 
   1764   eviction_.OnCreateEntry(deleted_entry);
   1765   entry_count_++;
   1766 
   1767   stats_.OnEvent(Stats::RESURRECT_HIT);
   1768   Trace("Resurrect entry hit ");
   1769   return deleted_entry;
   1770 }
   1771 
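        // Dooms an entry that failed its consistency checks, notifying the
        // eviction code and updating the entry counters.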
   1772 void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
   1773   LOG(WARNING) << "Destroying invalid entry.";
   1774   Trace("Destroying invalid entry 0x%p", entry);
   1775 
   1776   entry->SetPointerForInvalidEntry(GetCurrentEntryId());
   1777 
   1778   eviction_.OnDoomEntry(entry);
   1779   entry->InternalDoom();
   1780 
   1781   if (!new_eviction_)
   1782     DecreaseNumEntries();
   1783   stats_.OnEvent(Stats::INVALID_ENTRY);
   1784 }
   1785 
   1786 void BackendImpl::AddStorageSize(int32 bytes) {
   1787   data_->header.num_bytes += bytes;
   1788   DCHECK_GE(data_->header.num_bytes, 0);
   1789 }
   1790 
   1791 void BackendImpl::SubstractStorageSize(int32 bytes) {
   1792   data_->header.num_bytes -= bytes;
   1793   DCHECK_GE(data_->header.num_bytes, 0);
   1794 }
   1795 
   1796 void BackendImpl::IncreaseNumRefs() {
   1797   num_refs_++;
   1798   if (max_refs_ < num_refs_)
   1799     max_refs_ = num_refs_;
   1800 }
   1801 
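        // Decrements the count of referenced entries; once the cache has been
        // disabled and the last reference goes away, a task is posted to restart
        // the cache (RestartCache(true) above).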
   1802 void BackendImpl::DecreaseNumRefs() {
   1803   DCHECK(num_refs_);
   1804   num_refs_--;
   1805 
   1806   if (!num_refs_ && disabled_)
   1807     base::MessageLoop::current()->PostTask(
   1808         FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
   1809 }
   1810 
   1811 void BackendImpl::IncreaseNumEntries() {
   1812   data_->header.num_entries++;
   1813   DCHECK_GT(data_->header.num_entries, 0);
   1814 }
   1815 
   1816 void BackendImpl::DecreaseNumEntries() {
   1817   data_->header.num_entries--;
   1818   if (data_->header.num_entries < 0) {
   1819     NOTREACHED();
   1820     data_->header.num_entries = 0;
   1821   }
   1822 }
   1823 
   1824 void BackendImpl::LogStats() {
   1825   StatsItems stats;
   1826   GetStats(&stats);
   1827 
   1828   for (size_t index = 0; index < stats.size(); index++)
   1829     VLOG(1) << stats[index].first << ": " << stats[index].second;
   1830 }
   1831 
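        // Reports periodic UMA histograms about the state of the cache: size,
        // hit ratio, error counters and LRU list composition.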
   1832 void BackendImpl::ReportStats() {
   1833   CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);
   1834 
   1835   int current_size = data_->header.num_bytes / (1024 * 1024);
   1836   int max_size = max_size_ / (1024 * 1024);
   1837   int hit_ratio_as_percentage = stats_.GetHitRatio();
   1838 
   1839   CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
   1840   // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the
   1841   // ratio of that bin's total count to the count in the same bin in the Size2
   1842   // histogram.
   1843   if (base::RandInt(0, 99) < hit_ratio_as_percentage)
   1844     CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
   1845   CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
   1846   if (!max_size)
   1847     max_size++;
   1848   CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);
   1849 
   1850   CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
   1851             static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
   1852   CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
   1853             static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
   1854   stats_.SetCounter(Stats::MAX_ENTRIES, 0);
   1855 
   1856   CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
   1857             static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
   1858   CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
   1859             static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
   1860   CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
   1861             static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
   1862   stats_.SetCounter(Stats::FATAL_ERROR, 0);
   1863   stats_.SetCounter(Stats::DOOM_CACHE, 0);
   1864   stats_.SetCounter(Stats::DOOM_RECENT, 0);
   1865 
   1866   int age = (Time::Now() -
   1867              Time::FromInternalValue(data_->header.create_time)).InHours();
   1868   if (age)
   1869     CACHE_UMA(HOURS, "FilesAge", 0, age);
   1870 
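          // Stats::TIMER is incremented every 30 seconds, so 120 ticks correspond
          // to one hour.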
   1871   int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
   1872   if (!data_->header.create_time || !data_->header.lru.filled) {
   1873     int cause = data_->header.create_time ? 0 : 1;
   1874     if (!data_->header.lru.filled)
   1875       cause |= 2;
   1876     CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
   1877     CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
   1878     return;
   1879   }
   1880 
   1881   // This is an up to date client that will report FirstEviction() data. After
   1882   // that event, start reporting this:
   1883 
   1884   CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
   1885   // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
   1886   // time is the ratio of that bin's total count to the count in the same bin in
   1887   // the TotalTime histogram.
   1888   if (base::RandInt(0, 99) < hit_ratio_as_percentage)
   1889     CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));
   1890 
   1891   int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
   1892   stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
   1893 
   1894   // We may see users with no use_hours at this point if this is the first time
   1895   // we are running this code.
   1896   if (use_hours)
   1897     use_hours = total_hours - use_hours;
   1898 
   1899   if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
   1900     return;
   1901 
   1902   CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
   1903   // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
   1904   // is the ratio of that bin's total count to the count in the same bin in the
   1905   // UseTime histogram.
   1906   if (base::RandInt(0, 99) < hit_ratio_as_percentage)
   1907     CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
   1908   CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);
   1909 
   1910   int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
   1911   CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));
   1912 
   1913   int avg_size = data_->header.num_bytes / GetEntryCount();
   1914   CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
   1915   CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);
   1916 
   1917   CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
   1918             data_->header.num_entries * 100 / (mask_ + 1));
   1919 
   1920   int large_entries_bytes = stats_.GetLargeEntriesSize();
   1921   int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
   1922   CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);
   1923 
   1924   if (new_eviction_) {
   1925     CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
   1926     CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
   1927               data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
   1928     CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
   1929               data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
   1930     CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
   1931               data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
   1932     CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
   1933               data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
   1934   }
   1935 
   1936   stats_.ResetRatios();
   1937   stats_.SetCounter(Stats::TRIM_ENTRY, 0);
   1938 
   1939   if (cache_type_ == net::DISK_CACHE)
   1940     block_files_.ReportStats();
   1941 }
   1942 
   1943 void BackendImpl::UpgradeTo2_1() {
   1944   // 2.1 is basically the same as 2.0, except that new fields are actually
   1945   // updated by the new eviction algorithm.
   1946   DCHECK(0x20000 == data_->header.version);
   1947   data_->header.version = 0x20001;
   1948   data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
   1949 }
   1950 
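        // Validates the header of the index file: magic number, version (upgrading
        // 2.0 to 2.1 when the new eviction algorithm is in use), table length and
        // the size counters. Returns false if the index cannot be trusted.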
   1951 bool BackendImpl::CheckIndex() {
   1952   DCHECK(data_);
   1953 
   1954   size_t current_size = index_->GetLength();
   1955   if (current_size < sizeof(Index)) {
   1956     LOG(ERROR) << "Corrupt Index file";
   1957     return false;
   1958   }
   1959 
   1960   if (new_eviction_) {
   1961     // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
   1962     if (kIndexMagic != data_->header.magic ||
   1963         kCurrentVersion >> 16 != data_->header.version >> 16) {
   1964       LOG(ERROR) << "Invalid file version or magic";
   1965       return false;
   1966     }
   1967     if (kCurrentVersion == data_->header.version) {
   1968       // We need file version 2.1 for the new eviction algorithm.
   1969       UpgradeTo2_1();
   1970     }
   1971   } else {
   1972     if (kIndexMagic != data_->header.magic ||
   1973         kCurrentVersion != data_->header.version) {
   1974       LOG(ERROR) << "Invalid file version or magic";
   1975       return false;
   1976     }
   1977   }
   1978 
   1979   if (!data_->header.table_len) {
   1980     LOG(ERROR) << "Invalid table size";
   1981     return false;
   1982   }
   1983 
   1984   if (current_size < GetIndexSize(data_->header.table_len) ||
   1985       data_->header.table_len & (kBaseTableLen - 1)) {
   1986     LOG(ERROR) << "Corrupt Index file";
   1987     return false;
   1988   }
   1989 
   1990   AdjustMaxCacheSize(data_->header.table_len);
   1991 
   1992 #if !defined(NET_BUILD_STRESS_CACHE)
   1993   if (data_->header.num_bytes < 0 ||
   1994       (max_size_ < kint32max - kDefaultCacheSize &&
   1995        data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
   1996     LOG(ERROR) << "Invalid cache (current) size";
   1997     return false;
   1998   }
   1999 #endif
   2000 
   2001   if (data_->header.num_entries < 0) {
   2002     LOG(ERROR) << "Invalid number of entries";
   2003     return false;
   2004   }
   2005 
   2006   if (!mask_)
   2007     mask_ = data_->header.table_len - 1;
   2008 
   2009   // Load the table into memory.
   2010   return index_->Preload();
   2011 }
   2012 
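        // Walks every hash bucket verifying each entry linked from it. Returns the
        // number of dirty entries found, or an error code if an entry cannot be
        // loaded, fails CheckEntry(), or the totals disagree with the header.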
   2013 int BackendImpl::CheckAllEntries() {
   2014   int num_dirty = 0;
   2015   int num_entries = 0;
   2016   DCHECK(mask_ < kuint32max);
   2017   for (unsigned int i = 0; i <= mask_; i++) {
   2018     Addr address(data_->table[i]);
   2019     if (!address.is_initialized())
   2020       continue;
   2021     for (;;) {
   2022       EntryImpl* tmp;
   2023       int ret = NewEntry(address, &tmp);
   2024       if (ret) {
   2025         STRESS_NOTREACHED();
   2026         return ret;
   2027       }
   2028       scoped_refptr<EntryImpl> cache_entry;
   2029       cache_entry.swap(&tmp);
   2030 
   2031       if (cache_entry->dirty())
   2032         num_dirty++;
   2033       else if (CheckEntry(cache_entry.get()))
   2034         num_entries++;
   2035       else
   2036         return ERR_INVALID_ENTRY;
   2037 
   2038       DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
   2039       address.set_value(cache_entry->GetNextAddress());
   2040       if (!address.is_initialized())
   2041         break;
   2042     }
   2043   }
   2044 
   2045   Trace("CheckAllEntries End");
   2046   if (num_entries + num_dirty != data_->header.num_entries) {
   2047     LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
   2048                   " " << data_->header.num_entries;
   2049     DCHECK_LT(num_entries, data_->header.num_entries);
   2050     return ERR_NUM_ENTRIES_MISMATCH;
   2051   }
   2052 
   2053   return num_dirty;
   2054 }
   2055 
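        // Returns true if every block-file address used by |cache_entry| (the
        // entry itself, its rankings node and any data streams stored in block
        // files) is valid and the rankings node hash verifies.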
   2056 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
   2057   bool ok = block_files_.IsValid(cache_entry->entry()->address());
   2058   ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
   2059   EntryStore* data = cache_entry->entry()->Data();
   2060   for (size_t i = 0; i < arraysize(data->data_addr); i++) {
   2061     if (data->data_addr[i]) {
   2062       Addr address(data->data_addr[i]);
   2063       if (address.is_block_file())
   2064         ok = ok && block_files_.IsValid(address);
   2065     }
   2066   }
   2067 
   2068   return ok && cache_entry->rankings()->VerifyHash();
   2069 }
   2070 
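        // Returns the overall memory budget for internal buffers: 2% of physical
        // memory, capped at 30 MB. The value is computed once and reused.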
   2071 int BackendImpl::MaxBuffersSize() {
   2072   static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
   2073   static bool done = false;
   2074 
   2075   if (!done) {
   2076     const int kMaxBuffersSize = 30 * 1024 * 1024;
   2077 
   2078     // We want to use up to 2% of the computer's memory.
   2079     total_memory = total_memory * 2 / 100;
   2080     if (total_memory > kMaxBuffersSize || total_memory <= 0)
   2081       total_memory = kMaxBuffersSize;
   2082 
   2083     done = true;
   2084   }
   2085 
   2086   return static_cast<int>(total_memory);
   2087 }
   2088 
   2089 }  // namespace disk_cache
   2090