      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "net/disk_cache/blockfile/backend_impl_v3.h"
      6 
      7 #include "base/bind.h"
      8 #include "base/bind_helpers.h"
      9 #include "base/files/file_path.h"
     10 #include "base/files/file_util.h"
     11 #include "base/hash.h"
     12 #include "base/message_loop/message_loop.h"
     13 #include "base/metrics/field_trial.h"
     14 #include "base/metrics/histogram.h"
     15 #include "base/metrics/stats_counters.h"
     16 #include "base/rand_util.h"
     17 #include "base/strings/string_util.h"
     18 #include "base/strings/stringprintf.h"
     19 #include "base/sys_info.h"
     20 #include "base/threading/thread_restrictions.h"
     21 #include "base/time/time.h"
     22 #include "base/timer/timer.h"
     23 #include "net/base/net_errors.h"
     24 #include "net/disk_cache/blockfile/disk_format_v3.h"
     25 #include "net/disk_cache/blockfile/entry_impl_v3.h"
     26 #include "net/disk_cache/blockfile/errors.h"
     27 #include "net/disk_cache/blockfile/experiments.h"
     28 #include "net/disk_cache/blockfile/file.h"
     29 #include "net/disk_cache/blockfile/histogram_macros_v3.h"
     30 #include "net/disk_cache/blockfile/index_table_v3.h"
     31 #include "net/disk_cache/blockfile/storage_block-inl.h"
     32 #include "net/disk_cache/cache_util.h"
     33 
     34 // Provide a BackendImpl object to macros from histogram_macros.h.
     35 #define CACHE_UMA_BACKEND_IMPL_OBJ this
     36 
     37 using base::Time;
     38 using base::TimeDelta;
     39 using base::TimeTicks;
     40 
     41 namespace {
     42 
     43 #if defined(V3_NOT_JUST_YET_READY)
     44 const int kDefaultCacheSize = 80 * 1024 * 1024;
     45 
     46 // Avoid trimming the cache for the first 5 minutes (10 timer ticks).
     47 const int kTrimDelay = 10;
     48 #endif  // defined(V3_NOT_JUST_YET_READY).
     49 
     50 }  // namespace
     51 
     52 // ------------------------------------------------------------------------
     53 
     54 namespace disk_cache {
     55 
     56 BackendImplV3::BackendImplV3(
     57     const base::FilePath& path,
     58     const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
     59     net::NetLog* net_log)
     60     : index_(NULL),
     61       path_(path),
     62       block_files_(),
     63       max_size_(0),
     64       up_ticks_(0),
     65       cache_type_(net::DISK_CACHE),
     66       uma_report_(0),
     67       user_flags_(0),
     68       init_(false),
     69       restarted_(false),
     70       read_only_(false),
     71       disabled_(false),
     72       lru_eviction_(true),
     73       first_timer_(true),
     74       user_load_(false),
     75       net_log_(net_log),
     76       ptr_factory_(this) {
     77 }
     78 
     79 BackendImplV3::~BackendImplV3() {
     80   CleanupCache();
     81 }
     82 
     83 int BackendImplV3::Init(const CompletionCallback& callback) {
     84   DCHECK(!init_);
     85   if (init_)
     86     return net::ERR_FAILED;
     87 
     88   return net::ERR_IO_PENDING;
     89 }
     90 
     91 // ------------------------------------------------------------------------
     92 
     93 bool BackendImplV3::SetMaxSize(int max_bytes) {
     94   COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
     95   if (max_bytes < 0)
     96     return false;
     97 
     98   // Zero size means use the default.
     99   if (!max_bytes)
    100     return true;
    101 
    102   // Avoid a DCHECK later on.
    103   if (max_bytes >= kint32max - kint32max / 10)
    104     max_bytes = kint32max - kint32max / 10 - 1;
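           // This caps the cache at roughly 90% of kint32max (about 1.9 GB).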
    105 
    106   user_flags_ |= MAX_SIZE;
    107   max_size_ = max_bytes;
    108   return true;
    109 }
    110 
    111 void BackendImplV3::SetType(net::CacheType type) {
    112   DCHECK_NE(net::MEMORY_CACHE, type);
    113   cache_type_ = type;
    114 }
    115 
    116 bool BackendImplV3::CreateBlock(FileType block_type, int block_count,
    117                                 Addr* block_address) {
    118   return block_files_.CreateBlock(block_type, block_count, block_address);
    119 }
    120 
    121 #if defined(V3_NOT_JUST_YET_READY)
    122 void BackendImplV3::UpdateRank(EntryImplV3* entry, bool modified) {
    123   if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
    124     return;
    125   eviction_.UpdateRank(entry, modified);
    126 }
    127 
    128 void BackendImplV3::InternalDoomEntry(EntryImplV3* entry) {
    129   uint32 hash = entry->GetHash();
    130   std::string key = entry->GetKey();
    131   Addr entry_addr = entry->entry()->address();
    132   bool error;
    133   EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
    134   CacheAddr child(entry->GetNextAddress());
    135 
    136   Trace("Doom entry 0x%p", entry);
    137 
    138   if (!entry->doomed()) {
    139     // We may have doomed this entry from within MatchEntry.
    140     eviction_.OnDoomEntry(entry);
    141     entry->InternalDoom();
    142     if (!new_eviction_) {
    143       DecreaseNumEntries();
    144     }
    145     stats_.OnEvent(Stats::DOOM_ENTRY);
    146   }
    147 
    148   if (parent_entry) {
    149     parent_entry->SetNextAddress(Addr(child));
    150     parent_entry->Release();
    151   } else if (!error) {
    152     data_->table[hash & mask_] = child;
    153   }
    154 
    155   FlushIndex();
    156 }
    157 
    158 void BackendImplV3::OnEntryDestroyBegin(Addr address) {
    159   EntriesMap::iterator it = open_entries_.find(address.value());
    160   if (it != open_entries_.end())
    161     open_entries_.erase(it);
    162 }
    163 
    164 void BackendImplV3::OnEntryDestroyEnd() {
    165   DecreaseNumRefs();
    166   if (data_->header.num_bytes > max_size_ && !read_only_ &&
    167       (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    168     eviction_.TrimCache(false);
    169 }
    170 
    171 EntryImplV3* BackendImplV3::GetOpenEntry(Addr address) const {
     172   DCHECK(address.is_initialized());
     173   EntriesMap::const_iterator it =
     174       open_entries_.find(address.value());
    175   if (it != open_entries_.end()) {
    176     // We have this entry in memory.
    177     return it->second;
    178   }
    179 
    180   return NULL;
    181 }
    182 
    183 int BackendImplV3::MaxFileSize() const {
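           // A single entry may use at most one eighth of the total cache size.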
    184   return max_size_ / 8;
    185 }
    186 
    187 void BackendImplV3::ModifyStorageSize(int32 old_size, int32 new_size) {
    188   if (disabled_ || old_size == new_size)
    189     return;
    190   if (old_size > new_size)
    191     SubstractStorageSize(old_size - new_size);
    192   else
    193     AddStorageSize(new_size - old_size);
    194 
    195   // Update the usage statistics.
    196   stats_.ModifyStorageStats(old_size, new_size);
    197 }
    198 
    199 void BackendImplV3::TooMuchStorageRequested(int32 size) {
    200   stats_.ModifyStorageStats(0, size);
    201 }
    202 
    203 bool BackendImplV3::IsAllocAllowed(int current_size, int new_size) {
    204   DCHECK_GT(new_size, current_size);
    205   if (user_flags_ & NO_BUFFERING)
    206     return false;
    207 
    208   int to_add = new_size - current_size;
    209   if (buffer_bytes_ + to_add > MaxBuffersSize())
    210     return false;
    211 
    212   buffer_bytes_ += to_add;
    213   CACHE_UMA(COUNTS_50000, "BufferBytes", buffer_bytes_ / 1024);
    214   return true;
    215 }
    216 #endif  // defined(V3_NOT_JUST_YET_READY).
    217 
    218 void BackendImplV3::BufferDeleted(int size) {
    219   DCHECK_GE(size, 0);
    220   buffer_bytes_ -= size;
    221   DCHECK_GE(buffer_bytes_, 0);
    222 }
    223 
    224 bool BackendImplV3::IsLoaded() const {
    225   if (user_flags_ & NO_LOAD_PROTECTION)
    226     return false;
    227 
    228   return user_load_;
    229 }
    230 
    231 std::string BackendImplV3::HistogramName(const char* name) const {
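           // |names| is indexed by |cache_type_|; the empty slot lines up with net::MEMORY_CACHE,
           // which the DCHECK below rules out. For example, HistogramName("Size") yields
           // "DiskCache3.Size_Http" for an HTTP disk cache.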
    232   static const char* names[] = { "Http", "", "Media", "AppCache", "Shader" };
    233   DCHECK_NE(cache_type_, net::MEMORY_CACHE);
    234   return base::StringPrintf("DiskCache3.%s_%s", name, names[cache_type_]);
    235 }
    236 
    237 base::WeakPtr<BackendImplV3> BackendImplV3::GetWeakPtr() {
    238   return ptr_factory_.GetWeakPtr();
    239 }
    240 
    241 #if defined(V3_NOT_JUST_YET_READY)
    242 // We want to remove biases from some histograms so we only send data once per
    243 // week.
    244 bool BackendImplV3::ShouldReportAgain() {
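           // |uma_report_| caches the decision: 1 means we already checked and are not reporting
           // this session; 2 means we are reporting.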
    245   if (uma_report_)
    246     return uma_report_ == 2;
    247 
    248   uma_report_++;
    249   int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
    250   Time last_time = Time::FromInternalValue(last_report);
    251   if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    252     stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    253     uma_report_++;
    254     return true;
    255   }
    256   return false;
    257 }
    258 
    259 void BackendImplV3::FirstEviction() {
    260   IndexHeaderV3* header = index_.header();
    261   header->flags |= CACHE_EVICTED;
    262   DCHECK(header->create_time);
    263   if (!GetEntryCount())
    264     return;  // This is just for unit tests.
    265 
    266   Time create_time = Time::FromInternalValue(header->create_time);
    267   CACHE_UMA(AGE, "FillupAge", create_time);
    268 
    269   int64 use_time = stats_.GetCounter(Stats::TIMER);
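           // The TIMER counter advances every 30 seconds (the stats timer interval), so 120 ticks
           // equal one hour.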
    270   CACHE_UMA(HOURS, "FillupTime", static_cast<int>(use_time / 120));
    271   CACHE_UMA(PERCENTAGE, "FirstHitRatio", stats_.GetHitRatio());
    272 
    273   if (!use_time)
    274     use_time = 1;
    275   CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate",
    276             static_cast<int>(header->num_entries / use_time));
    277   CACHE_UMA(COUNTS, "FirstByteIORate",
    278             static_cast<int>((header->num_bytes / 1024) / use_time));
    279 
    280   int avg_size = header->num_bytes / GetEntryCount();
    281   CACHE_UMA(COUNTS, "FirstEntrySize", avg_size);
    282 
    283   int large_entries_bytes = stats_.GetLargeEntriesSize();
    284   int large_ratio = large_entries_bytes * 100 / header->num_bytes;
    285   CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", large_ratio);
    286 
    287   if (!lru_eviction_) {
    288     CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", stats_.GetResurrectRatio());
    289     CACHE_UMA(PERCENTAGE, "FirstNoUseRatio",
    290               header->num_no_use_entries * 100 / header->num_entries);
    291     CACHE_UMA(PERCENTAGE, "FirstLowUseRatio",
    292               header->num_low_use_entries * 100 / header->num_entries);
    293     CACHE_UMA(PERCENTAGE, "FirstHighUseRatio",
    294               header->num_high_use_entries * 100 / header->num_entries);
    295   }
    296 
    297   stats_.ResetRatios();
    298 }
    299 
    300 void BackendImplV3::OnEvent(Stats::Counters an_event) {
    301   stats_.OnEvent(an_event);
    302 }
    303 
    304 void BackendImplV3::OnRead(int32 bytes) {
    305   DCHECK_GE(bytes, 0);
    306   byte_count_ += bytes;
    307   if (byte_count_ < 0)
    308     byte_count_ = kint32max;
    309 }
    310 
    311 void BackendImplV3::OnWrite(int32 bytes) {
    312   // We use the same implementation as OnRead... just log the number of bytes.
    313   OnRead(bytes);
    314 }
    315 
    316 void BackendImplV3::OnTimerTick() {
    317   stats_.OnEvent(Stats::TIMER);
    318   int64 time = stats_.GetCounter(Stats::TIMER);
    319   int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);
    320 
    321   // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
    322   // the bias towards 0.
    323   if (num_refs_ && (current != num_refs_)) {
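             // Nudge the stored average 1/50th (2%) of the way towards the current value,
             // moving by at least one.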
    324     int64 diff = (num_refs_ - current) / 50;
    325     if (!diff)
    326       diff = num_refs_ > current ? 1 : -1;
    327     current = current + diff;
    328     stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    329     stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
    330   }
    331 
    332   CACHE_UMA(COUNTS, "NumberOfReferences", num_refs_);
    333 
    334   CACHE_UMA(COUNTS_10000, "EntryAccessRate", entry_count_);
    335   CACHE_UMA(COUNTS, "ByteIORate", byte_count_ / 1024);
    336 
    337   // These values cover about 99.5% of the population (Oct 2011).
    338   user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
    339   entry_count_ = 0;
    340   byte_count_ = 0;
    341   up_ticks_++;
    342 
    343   if (!data_)
    344     first_timer_ = false;
    345   if (first_timer_) {
    346     first_timer_ = false;
    347     if (ShouldReportAgain())
    348       ReportStats();
    349   }
    350 
    351   // Save stats to disk at 5 min intervals.
    352   if (time % 10 == 0)
    353     StoreStats();
    354 }
    355 
    356 void BackendImplV3::SetUnitTestMode() {
    357   user_flags_ |= UNIT_TEST_MODE;
    358 }
    359 
    360 void BackendImplV3::SetUpgradeMode() {
    361   user_flags_ |= UPGRADE_MODE;
    362   read_only_ = true;
    363 }
    364 
    365 void BackendImplV3::SetNewEviction() {
    366   user_flags_ |= EVICTION_V2;
    367   lru_eviction_ = false;
    368 }
    369 
    370 void BackendImplV3::SetFlags(uint32 flags) {
    371   user_flags_ |= flags;
    372 }
    373 
    374 int BackendImplV3::FlushQueueForTest(const CompletionCallback& callback) {
    375   background_queue_.FlushQueue(callback);
    376   return net::ERR_IO_PENDING;
    377 }
    378 
    379 void BackendImplV3::TrimForTest(bool empty) {
    380   eviction_.SetTestMode();
    381   eviction_.TrimCache(empty);
    382 }
    383 
    384 void BackendImplV3::TrimDeletedListForTest(bool empty) {
    385   eviction_.SetTestMode();
    386   eviction_.TrimDeletedList(empty);
    387 }
    388 
    389 int BackendImplV3::SelfCheck() {
    390   if (!init_) {
    391     LOG(ERROR) << "Init failed";
    392     return ERR_INIT_FAILED;
    393   }
    394 
    395   int num_entries = rankings_.SelfCheck();
    396   if (num_entries < 0) {
    397     LOG(ERROR) << "Invalid rankings list, error " << num_entries;
    398 #if !defined(NET_BUILD_STRESS_CACHE)
    399     return num_entries;
    400 #endif
    401   }
    402 
    403   if (num_entries != data_->header.num_entries) {
    404     LOG(ERROR) << "Number of entries mismatch";
    405 #if !defined(NET_BUILD_STRESS_CACHE)
    406     return ERR_NUM_ENTRIES_MISMATCH;
    407 #endif
    408   }
    409 
    410   return CheckAllEntries();
    411 }
    412 
    413 // ------------------------------------------------------------------------
    414 
    415 net::CacheType BackendImplV3::GetCacheType() const {
    416   return cache_type_;
    417 }
    418 
    419 int32 BackendImplV3::GetEntryCount() const {
    420   if (disabled_)
    421     return 0;
    422   DCHECK(init_);
    423   return index_.header()->num_entries;
    424 }
    425 
    426 int BackendImplV3::OpenEntry(const std::string& key, Entry** entry,
    427                              const CompletionCallback& callback) {
    428   if (disabled_)
     429     return net::ERR_FAILED;
    430 
    431   TimeTicks start = TimeTicks::Now();
    432   uint32 hash = base::Hash(key);
    433   Trace("Open hash 0x%x", hash);
    434 
    435   bool error;
    436   EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
    437   if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    438     // The entry was already evicted.
    439     cache_entry->Release();
    440     cache_entry = NULL;
    441   }
    442 
    443   int current_size = data_->header.num_bytes / (1024 * 1024);
    444   int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
    445   int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
    446   int64 use_hours = total_hours - no_use_hours;
    447 
    448   if (!cache_entry) {
    449     CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    450     CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    451     CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
    452     CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
    453     stats_.OnEvent(Stats::OPEN_MISS);
     454     return net::ERR_FAILED;
    455   }
    456 
    457   eviction_.OnOpenEntry(cache_entry);
    458   entry_count_++;
    459 
    460   Trace("Open hash 0x%x end: 0x%x", hash,
    461         cache_entry->entry()->address().value());
    462   CACHE_UMA(AGE_MS, "OpenTime", 0, start);
    463   CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
    464   CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
    465   CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
    466   stats_.OnEvent(Stats::OPEN_HIT);
    467   SIMPLE_STATS_COUNTER("disk_cache.hit");
     468   *entry = cache_entry;
           return net::OK;
    469 }
    470 
    471 int BackendImplV3::CreateEntry(const std::string& key, Entry** entry,
    472                                const CompletionCallback& callback) {
    473   if (disabled_ || key.empty())
     474     return net::ERR_FAILED;
    475 
    476   TimeTicks start = TimeTicks::Now();
     477   uint32 hash = base::Hash(key);
           Trace("Create hash 0x%x", hash);
    478 
    479   scoped_refptr<EntryImpl> parent;
    480   Addr entry_address(data_->table[hash & mask_]);
    481   if (entry_address.is_initialized()) {
    482     // We have an entry already. It could be the one we are looking for, or just
    483     // a hash conflict.
    484     bool error;
    485     EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    486     if (old_entry)
    487       return ResurrectEntry(old_entry);
    488 
    489     EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    490     DCHECK(!error);
    491     if (parent_entry) {
    492       parent.swap(&parent_entry);
    493     } else if (data_->table[hash & mask_]) {
    494       // We should have corrected the problem.
    495       NOTREACHED();
     496       return net::ERR_FAILED;
    497     }
    498   }
    499 
    500   // The general flow is to allocate disk space and initialize the entry data,
     501   // followed by saving that to disk, then linking the entry through the index
    502   // and finally through the lists. If there is a crash in this process, we may
    503   // end up with:
    504   // a. Used, unreferenced empty blocks on disk (basically just garbage).
    505   // b. Used, unreferenced but meaningful data on disk (more garbage).
    506   // c. A fully formed entry, reachable only through the index.
    507   // d. A fully formed entry, also reachable through the lists, but still dirty.
    508   //
    509   // Anything after (b) can be automatically cleaned up. We may consider saving
    510   // the current operation (as we do while manipulating the lists) so that we
    511   // can detect and cleanup (a) and (b).
    512 
    513   int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
    514   if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    515     LOG(ERROR) << "Create entry failed " << key.c_str();
    516     stats_.OnEvent(Stats::CREATE_ERROR);
     517     return net::ERR_FAILED;
    518   }
    519 
    520   Addr node_address(0);
    521   if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    522     block_files_.DeleteBlock(entry_address, false);
    523     LOG(ERROR) << "Create entry failed " << key.c_str();
    524     stats_.OnEvent(Stats::CREATE_ERROR);
     525     return net::ERR_FAILED;
    526   }
    527 
    528   scoped_refptr<EntryImpl> cache_entry(
    529       new EntryImpl(this, entry_address, false));
    530   IncreaseNumRefs();
    531 
    532   if (!cache_entry->CreateEntry(node_address, key, hash)) {
    533     block_files_.DeleteBlock(entry_address, false);
    534     block_files_.DeleteBlock(node_address, false);
    535     LOG(ERROR) << "Create entry failed " << key.c_str();
    536     stats_.OnEvent(Stats::CREATE_ERROR);
     537     return net::ERR_FAILED;
    538   }
    539 
    540   cache_entry->BeginLogging(net_log_, true);
    541 
    542   // We are not failing the operation; let's add this to the map.
    543   open_entries_[entry_address.value()] = cache_entry.get();
    544 
    545   // Save the entry.
    546   cache_entry->entry()->Store();
    547   cache_entry->rankings()->Store();
    548   IncreaseNumEntries();
    549   entry_count_++;
    550 
    551   // Link this entry through the index.
    552   if (parent.get()) {
    553     parent->SetNextAddress(entry_address);
    554   } else {
    555     data_->table[hash & mask_] = entry_address.value();
    556   }
    557 
    558   // Link this entry through the lists.
    559   eviction_.OnCreateEntry(cache_entry.get());
    560 
    561   CACHE_UMA(AGE_MS, "CreateTime", 0, start);
    562   stats_.OnEvent(Stats::CREATE_HIT);
    563   SIMPLE_STATS_COUNTER("disk_cache.miss");
    564   Trace("create entry hit ");
    565   FlushIndex();
    566   cache_entry->AddRef();
     567   *entry = cache_entry.get();
           return net::OK;
    568 }
    569 
    570 int BackendImplV3::DoomEntry(const std::string& key,
    571                              const CompletionCallback& callback) {
    572   if (disabled_)
    573     return net::ERR_FAILED;
    574 
    575   EntryImpl* entry = OpenEntryImpl(key);
    576   if (!entry)
    577     return net::ERR_FAILED;
    578 
    579   entry->DoomImpl();
    580   entry->Release();
    581   return net::OK;
    582 }
    583 
    584 int BackendImplV3::DoomAllEntries(const CompletionCallback& callback) {
    585   // This is not really an error, but it is an interesting condition.
    586   ReportError(ERR_CACHE_DOOMED);
    587   stats_.OnEvent(Stats::DOOM_CACHE);
    588   if (!num_refs_) {
    589     RestartCache(false);
    590     return disabled_ ? net::ERR_FAILED : net::OK;
    591   } else {
    592     if (disabled_)
    593       return net::ERR_FAILED;
    594 
    595     eviction_.TrimCache(true);
    596     return net::OK;
    597   }
    598 }
    599 
    600 int BackendImplV3::DoomEntriesBetween(base::Time initial_time,
    601                                       base::Time end_time,
    602                                       const CompletionCallback& callback) {
    603   DCHECK_NE(net::APP_CACHE, cache_type_);
    604   if (end_time.is_null())
    605     return SyncDoomEntriesSince(initial_time);
    606 
    607   DCHECK(end_time >= initial_time);
    608 
    609   if (disabled_)
    610     return net::ERR_FAILED;
    611 
    612   EntryImpl* node;
    613   void* iter = NULL;
    614   EntryImpl* next = OpenNextEntryImpl(&iter);
    615   if (!next)
    616     return net::OK;
    617 
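           // The enumeration is expected to return entries from most to least recently used,
           // so an entry older than |initial_time| means there is nothing left to doom.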
    618   while (next) {
    619     node = next;
    620     next = OpenNextEntryImpl(&iter);
    621 
    622     if (node->GetLastUsed() >= initial_time &&
    623         node->GetLastUsed() < end_time) {
    624       node->DoomImpl();
    625     } else if (node->GetLastUsed() < initial_time) {
    626       if (next)
    627         next->Release();
    628       next = NULL;
    629       SyncEndEnumeration(iter);
    630     }
    631 
    632     node->Release();
    633   }
    634 
    635   return net::OK;
    636 }
    637 
    638 int BackendImplV3::DoomEntriesSince(base::Time initial_time,
    639                                     const CompletionCallback& callback) {
    640   DCHECK_NE(net::APP_CACHE, cache_type_);
    641   if (disabled_)
    642     return net::ERR_FAILED;
    643 
    644   stats_.OnEvent(Stats::DOOM_RECENT);
    645   for (;;) {
    646     void* iter = NULL;
    647     EntryImpl* entry = OpenNextEntryImpl(&iter);
    648     if (!entry)
    649       return net::OK;
    650 
    651     if (initial_time > entry->GetLastUsed()) {
    652       entry->Release();
    653       SyncEndEnumeration(iter);
    654       return net::OK;
    655     }
    656 
    657     entry->DoomImpl();
    658     entry->Release();
    659     SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
    660   }
    661 }
    662 
    663 class BackendImplV3::IteratorImpl : public Backend::Iterator {
    664  public:
    665   explicit IteratorImpl(base::WeakPtr<InFlightBackendIO> background_queue)
    666       : background_queue_(background_queue), data_(NULL) {
    667   }
    668 
    669   virtual int OpenNextEntry(Entry** next_entry,
    670                             const net::CompletionCallback& callback) OVERRIDE {
    671     if (!background_queue_)
    672       return net::ERR_FAILED;
    673     background_queue_->OpenNextEntry(&data_, next_entry, callback);
    674     return net::ERR_IO_PENDING;
    675   }
    676 
    677  private:
    678   const base::WeakPtr<InFlightBackendIO> background_queue_;
    679   void* data_;
    680 };
    681 
    682 scoped_ptr<Backend::Iterator> BackendImplV3::CreateIterator() {
    683   return scoped_ptr<Backend::Iterator>(new IteratorImpl(GetBackgroundQueue()));
    684 }
    685 
    686 void BackendImplV3::GetStats(StatsItems* stats) {
    687   if (disabled_)
    688     return;
    689 
    690   std::pair<std::string, std::string> item;
    691 
    692   item.first = "Entries";
    693   item.second = base::StringPrintf("%d", data_->header.num_entries);
    694   stats->push_back(item);
    695 
    696   item.first = "Pending IO";
    697   item.second = base::StringPrintf("%d", num_pending_io_);
    698   stats->push_back(item);
    699 
    700   item.first = "Max size";
    701   item.second = base::StringPrintf("%d", max_size_);
    702   stats->push_back(item);
    703 
    704   item.first = "Current size";
    705   item.second = base::StringPrintf("%d", data_->header.num_bytes);
    706   stats->push_back(item);
    707 
    708   item.first = "Cache type";
    709   item.second = "Blockfile Cache";
    710   stats->push_back(item);
    711 
    712   stats_.GetItems(stats);
    713 }
    714 
    715 void BackendImplV3::OnExternalCacheHit(const std::string& key) {
    716   if (disabled_)
    717     return;
    718 
    719   uint32 hash = base::Hash(key);
    720   bool error;
    721   EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
    722   if (cache_entry) {
    723     if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
    724       UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    725     }
    726     cache_entry->Release();
    727   }
    728 }
    729 
    730 // ------------------------------------------------------------------------
    731 
    732 // The maximum cache size will be either set explicitly by the caller, or
    733 // calculated by this code.
    734 void BackendImplV3::AdjustMaxCacheSize(int table_len) {
    735   if (max_size_)
    736     return;
    737 
    738   // If table_len is provided, the index file exists.
    739   DCHECK(!table_len || data_->header.magic);
    740 
    741   // The user is not setting the size, let's figure it out.
    742   int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
    743   if (available < 0) {
    744     max_size_ = kDefaultCacheSize;
    745     return;
    746   }
    747 
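           // Space already used by the existing cache counts as available as well, since it
           // can be reclaimed.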
    748   if (table_len)
    749     available += data_->header.num_bytes;
    750 
    751   max_size_ = PreferedCacheSize(available);
    752 
     753   // Let's not use more than the default size while we tune up the performance
    754   // of bigger caches. TODO(rvargas): remove this limit.
    755   if (max_size_ > kDefaultCacheSize * 4)
    756     max_size_ = kDefaultCacheSize * 4;
    757 
    758   if (!table_len)
    759     return;
    760 
    761   // If we already have a table, adjust the size to it.
    762   int current_max_size = MaxStorageSizeForTable(table_len);
    763   if (max_size_ > current_max_size)
     764     max_size_ = current_max_size;
    765 }
    766 
    767 bool BackendImplV3::InitStats() {
    768   Addr address(data_->header.stats);
    769   int size = stats_.StorageSize();
    770 
    771   if (!address.is_initialized()) {
    772     FileType file_type = Addr::RequiredFileType(size);
    773     DCHECK_NE(file_type, EXTERNAL);
    774     int num_blocks = Addr::RequiredBlocks(size, file_type);
    775 
    776     if (!CreateBlock(file_type, num_blocks, &address))
    777       return false;
    778     return stats_.Init(NULL, 0, address);
    779   }
    780 
    781   if (!address.is_block_file()) {
    782     NOTREACHED();
    783     return false;
    784   }
    785 
    786   // Load the required data.
    787   size = address.num_blocks() * address.BlockSize();
    788   MappedFile* file = File(address);
    789   if (!file)
    790     return false;
    791 
    792   scoped_ptr<char[]> data(new char[size]);
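           // Block data starts right after the block-file header.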
    793   size_t offset = address.start_block() * address.BlockSize() +
    794                   kBlockHeaderSize;
    795   if (!file->Read(data.get(), size, offset))
    796     return false;
    797 
    798   if (!stats_.Init(data.get(), size, address))
    799     return false;
    800   if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    801     stats_.InitSizeHistogram();
    802   return true;
    803 }
    804 
    805 void BackendImplV3::StoreStats() {
    806   int size = stats_.StorageSize();
    807   scoped_ptr<char[]> data(new char[size]);
    808   Addr address;
    809   size = stats_.SerializeStats(data.get(), size, &address);
    810   DCHECK(size);
    811   if (!address.is_initialized())
    812     return;
    813 
    814   MappedFile* file = File(address);
    815   if (!file)
    816     return;
    817 
    818   size_t offset = address.start_block() * address.BlockSize() +
    819                   kBlockHeaderSize;
    820   file->Write(data.get(), size, offset);  // ignore result.
    821 }
    822 
    823 void BackendImplV3::RestartCache(bool failure) {
    824   int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
    825   int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
    826   int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
    827   int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
    828 
    829   PrepareForRestart();
    830   if (failure) {
    831     DCHECK(!num_refs_);
    832     DCHECK(!open_entries_.size());
    833     DelayedCacheCleanup(path_);
    834   } else {
    835     DeleteCache(path_, false);
    836   }
    837 
    838   // Don't call Init() if directed by the unit test: we are simulating a failure
    839   // trying to re-enable the cache.
    840   if (unit_test_)
    841     init_ = true;  // Let the destructor do proper cleanup.
    842   else if (SyncInit() == net::OK) {
    843     stats_.SetCounter(Stats::FATAL_ERROR, errors);
    844     stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
    845     stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
    846     stats_.SetCounter(Stats::LAST_REPORT, last_report);
    847   }
    848 }
    849 
    850 void BackendImplV3::PrepareForRestart() {
    851   if (!(user_flags_ & EVICTION_V2))
    852     lru_eviction_ = true;
    853 
    854   disabled_ = true;
    855   data_->header.crash = 0;
    856   index_->Flush();
    857   index_ = NULL;
    858   data_ = NULL;
    859   block_files_.CloseFiles();
    860   rankings_.Reset();
    861   init_ = false;
    862   restarted_ = true;
    863 }
    864 
    865 void BackendImplV3::CleanupCache() {
    866   Trace("Backend Cleanup");
    867   eviction_.Stop();
    868   timer_.reset();
    869 
    870   if (init_) {
    871     StoreStats();
    872     if (data_)
    873       data_->header.crash = 0;
    874 
    875     if (user_flags_ & kNoRandom) {
     876       // This is a net_unittest; verify that we are not 'leaking' entries.
    877       File::WaitForPendingIO(&num_pending_io_);
    878       DCHECK(!num_refs_);
    879     } else {
    880       File::DropPendingIO();
    881     }
    882   }
    883   block_files_.CloseFiles();
    884   FlushIndex();
    885   index_ = NULL;
    886   ptr_factory_.InvalidateWeakPtrs();
    887   done_.Signal();
    888 }
    889 
    890 int BackendImplV3::NewEntry(Addr address, EntryImplV3** entry) {
    891   EntriesMap::iterator it = open_entries_.find(address.value());
    892   if (it != open_entries_.end()) {
    893     // Easy job. This entry is already in memory.
    894     EntryImpl* this_entry = it->second;
    895     this_entry->AddRef();
    896     *entry = this_entry;
    897     return 0;
    898   }
    899 
    900   STRESS_DCHECK(block_files_.IsValid(address));
    901 
    902   if (!address.SanityCheckForEntry()) {
    903     LOG(WARNING) << "Wrong entry address.";
    904     STRESS_NOTREACHED();
    905     return ERR_INVALID_ADDRESS;
    906   }
    907 
    908   scoped_refptr<EntryImpl> cache_entry(
    909       new EntryImpl(this, address, read_only_));
    910   IncreaseNumRefs();
    911   *entry = NULL;
    912 
    913   TimeTicks start = TimeTicks::Now();
    914   if (!cache_entry->entry()->Load())
    915     return ERR_READ_FAILURE;
    916 
    917   if (IsLoaded()) {
    918     CACHE_UMA(AGE_MS, "LoadTime", 0, start);
    919   }
    920 
    921   if (!cache_entry->SanityCheck()) {
    922     LOG(WARNING) << "Messed up entry found.";
    923     STRESS_NOTREACHED();
    924     return ERR_INVALID_ENTRY;
    925   }
    926 
    927   STRESS_DCHECK(block_files_.IsValid(
    928                     Addr(cache_entry->entry()->Data()->rankings_node)));
    929 
    930   if (!cache_entry->LoadNodeAddress())
    931     return ERR_READ_FAILURE;
    932 
    933   if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
    934     STRESS_NOTREACHED();
    935     cache_entry->SetDirtyFlag(0);
    936     // Don't remove this from the list (it is not linked properly). Instead,
    937     // break the link back to the entry because it is going away, and leave the
    938     // rankings node to be deleted if we find it through a list.
    939     rankings_.SetContents(cache_entry->rankings(), 0);
    940   } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
    941     STRESS_NOTREACHED();
    942     cache_entry->SetDirtyFlag(0);
    943     rankings_.SetContents(cache_entry->rankings(), address.value());
    944   }
    945 
    946   if (!cache_entry->DataSanityCheck()) {
    947     LOG(WARNING) << "Messed up entry found.";
    948     cache_entry->SetDirtyFlag(0);
    949     cache_entry->FixForDelete();
    950   }
    951 
     952   // Prevent overwriting the dirty flag in the destructor.
    953   cache_entry->SetDirtyFlag(GetCurrentEntryId());
    954 
    955   if (cache_entry->dirty()) {
    956     Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
    957           address.value());
    958   }
    959 
    960   open_entries_[address.value()] = cache_entry.get();
    961 
    962   cache_entry->BeginLogging(net_log_, false);
    963   cache_entry.swap(entry);
    964   return 0;
    965 }
    966 
    967 void BackendImplV3::AddStorageSize(int32 bytes) {
    968   data_->header.num_bytes += bytes;
    969   DCHECK_GE(data_->header.num_bytes, 0);
    970 }
    971 
    972 void BackendImplV3::SubstractStorageSize(int32 bytes) {
    973   data_->header.num_bytes -= bytes;
    974   DCHECK_GE(data_->header.num_bytes, 0);
    975 }
    976 
    977 void BackendImplV3::IncreaseNumRefs() {
    978   num_refs_++;
    979   if (max_refs_ < num_refs_)
    980     max_refs_ = num_refs_;
    981 }
    982 
    983 void BackendImplV3::DecreaseNumRefs() {
    984   DCHECK(num_refs_);
    985   num_refs_--;
    986 
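           // The cache can only be rebuilt once nothing references it; RestartCache() expects
           // num_refs_ to be zero.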
    987   if (!num_refs_ && disabled_)
    988     base::MessageLoop::current()->PostTask(
     989         FROM_HERE, base::Bind(&BackendImplV3::RestartCache, GetWeakPtr(), true));
    990 }
    991 
    992 void BackendImplV3::IncreaseNumEntries() {
    993   index_.header()->num_entries++;
    994   DCHECK_GT(index_.header()->num_entries, 0);
    995 }
    996 
    997 void BackendImplV3::DecreaseNumEntries() {
    998   index_.header()->num_entries--;
    999   if (index_.header()->num_entries < 0) {
   1000     NOTREACHED();
   1001     index_.header()->num_entries = 0;
   1002   }
   1003 }
   1004 
   1005 int BackendImplV3::SyncInit() {
   1006 #if defined(NET_BUILD_STRESS_CACHE)
   1007   // Start evictions right away.
   1008   up_ticks_ = kTrimDelay * 2;
   1009 #endif
   1010   DCHECK(!init_);
   1011   if (init_)
   1012     return net::ERR_FAILED;
   1013 
   1014   bool create_files = false;
   1015   if (!InitBackingStore(&create_files)) {
   1016     ReportError(ERR_STORAGE_ERROR);
   1017     return net::ERR_FAILED;
   1018   }
   1019 
   1020   num_refs_ = num_pending_io_ = max_refs_ = 0;
   1021   entry_count_ = byte_count_ = 0;
   1022 
   1023   if (!restarted_) {
   1024     buffer_bytes_ = 0;
   1025     trace_object_ = TraceObject::GetTraceObject();
   1026     // Create a recurrent timer of 30 secs.
   1027     int timer_delay = unit_test_ ? 1000 : 30000;
   1028     timer_.reset(new base::RepeatingTimer<BackendImplV3>());
   1029     timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
    1030                   &BackendImplV3::OnTimerTick);
   1031   }
   1032 
   1033   init_ = true;
   1034   Trace("Init");
   1035 
   1036   if (data_->header.experiment != NO_EXPERIMENT &&
   1037       cache_type_ != net::DISK_CACHE) {
   1038     // No experiment for other caches.
   1039     return net::ERR_FAILED;
   1040   }
   1041 
   1042   if (!(user_flags_ & kNoRandom)) {
   1043     // The unit test controls directly what to test.
   1044     new_eviction_ = (cache_type_ == net::DISK_CACHE);
   1045   }
   1046 
   1047   if (!CheckIndex()) {
   1048     ReportError(ERR_INIT_FAILED);
   1049     return net::ERR_FAILED;
   1050   }
   1051 
   1052   if (!restarted_ && (create_files || !data_->header.num_entries))
   1053     ReportError(ERR_CACHE_CREATED);
   1054 
   1055   if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
   1056       !InitExperiment(&data_->header, create_files)) {
   1057     return net::ERR_FAILED;
   1058   }
   1059 
   1060   // We don't care if the value overflows. The only thing we care about is that
   1061   // the id cannot be zero, because that value is used as "not dirty".
   1062   // Increasing the value once per second gives us many years before we start
   1063   // having collisions.
   1064   data_->header.this_id++;
   1065   if (!data_->header.this_id)
   1066     data_->header.this_id++;
   1067 
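           // The crash flag stays set while the cache is in use; CleanupCache() clears it on a
           // clean shutdown, so finding it already set means the previous session did not shut
           // down properly.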
   1068   bool previous_crash = (data_->header.crash != 0);
   1069   data_->header.crash = 1;
   1070 
   1071   if (!block_files_.Init(create_files))
   1072     return net::ERR_FAILED;
   1073 
    1074   // We want to minimize the changes to the cache for an AppCache.
   1075   if (cache_type() == net::APP_CACHE) {
   1076     DCHECK(!new_eviction_);
   1077     read_only_ = true;
   1078   } else if (cache_type() == net::SHADER_CACHE) {
   1079     DCHECK(!new_eviction_);
   1080   }
   1081 
   1082   eviction_.Init(this);
   1083 
    1084   // stats_ and rankings_ may end up calling back to us, so we'd better be enabled.
   1085   disabled_ = false;
   1086   if (!InitStats())
   1087     return net::ERR_FAILED;
   1088 
   1089   disabled_ = !rankings_.Init(this, new_eviction_);
   1090 
   1091 #if defined(STRESS_CACHE_EXTENDED_VALIDATION)
   1092   trace_object_->EnableTracing(false);
   1093   int sc = SelfCheck();
   1094   if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
   1095     NOTREACHED();
   1096   trace_object_->EnableTracing(true);
   1097 #endif
   1098 
   1099   if (previous_crash) {
   1100     ReportError(ERR_PREVIOUS_CRASH);
   1101   } else if (!restarted_) {
   1102     ReportError(ERR_NO_ERROR);
   1103   }
   1104 
   1105   FlushIndex();
   1106 
   1107   return disabled_ ? net::ERR_FAILED : net::OK;
   1108 }
   1109 
   1110 EntryImpl* BackendImplV3::ResurrectEntry(EntryImpl* deleted_entry) {
   1111   if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
   1112     deleted_entry->Release();
   1113     stats_.OnEvent(Stats::CREATE_MISS);
   1114     Trace("create entry miss ");
   1115     return NULL;
   1116   }
   1117 
   1118   // We are attempting to create an entry and found out that the entry was
   1119   // previously deleted.
   1120 
   1121   eviction_.OnCreateEntry(deleted_entry);
   1122   entry_count_++;
   1123 
   1124   stats_.OnEvent(Stats::RESURRECT_HIT);
   1125   Trace("Resurrect entry hit ");
   1126   return deleted_entry;
   1127 }
   1128 
   1129 EntryImpl* BackendImplV3::CreateEntryImpl(const std::string& key) {
   1130   if (disabled_ || key.empty())
   1131     return NULL;
   1132 
   1133   TimeTicks start = TimeTicks::Now();
    1134   uint32 hash = base::Hash(key);
           Trace("Create hash 0x%x", hash);
   1135 
   1136   scoped_refptr<EntryImpl> parent;
   1137   Addr entry_address(data_->table[hash & mask_]);
   1138   if (entry_address.is_initialized()) {
   1139     // We have an entry already. It could be the one we are looking for, or just
   1140     // a hash conflict.
   1141     bool error;
   1142     EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
   1143     if (old_entry)
   1144       return ResurrectEntry(old_entry);
   1145 
   1146     EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
   1147     DCHECK(!error);
   1148     if (parent_entry) {
   1149       parent.swap(&parent_entry);
   1150     } else if (data_->table[hash & mask_]) {
   1151       // We should have corrected the problem.
   1152       NOTREACHED();
   1153       return NULL;
   1154     }
   1155   }
   1156 
   1157   // The general flow is to allocate disk space and initialize the entry data,
    1158   // followed by saving that to disk, then linking the entry through the index
   1159   // and finally through the lists. If there is a crash in this process, we may
   1160   // end up with:
   1161   // a. Used, unreferenced empty blocks on disk (basically just garbage).
   1162   // b. Used, unreferenced but meaningful data on disk (more garbage).
   1163   // c. A fully formed entry, reachable only through the index.
   1164   // d. A fully formed entry, also reachable through the lists, but still dirty.
   1165   //
   1166   // Anything after (b) can be automatically cleaned up. We may consider saving
   1167   // the current operation (as we do while manipulating the lists) so that we
   1168   // can detect and cleanup (a) and (b).
   1169 
   1170   int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
   1171   if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
   1172     LOG(ERROR) << "Create entry failed " << key.c_str();
   1173     stats_.OnEvent(Stats::CREATE_ERROR);
   1174     return NULL;
   1175   }
   1176 
   1177   Addr node_address(0);
   1178   if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
   1179     block_files_.DeleteBlock(entry_address, false);
   1180     LOG(ERROR) << "Create entry failed " << key.c_str();
   1181     stats_.OnEvent(Stats::CREATE_ERROR);
   1182     return NULL;
   1183   }
   1184 
   1185   scoped_refptr<EntryImpl> cache_entry(
   1186       new EntryImpl(this, entry_address, false));
   1187   IncreaseNumRefs();
   1188 
   1189   if (!cache_entry->CreateEntry(node_address, key, hash)) {
   1190     block_files_.DeleteBlock(entry_address, false);
   1191     block_files_.DeleteBlock(node_address, false);
   1192     LOG(ERROR) << "Create entry failed " << key.c_str();
   1193     stats_.OnEvent(Stats::CREATE_ERROR);
   1194     return NULL;
   1195   }
   1196 
   1197   cache_entry->BeginLogging(net_log_, true);
   1198 
   1199   // We are not failing the operation; let's add this to the map.
   1200   open_entries_[entry_address.value()] = cache_entry;
   1201 
   1202   // Save the entry.
   1203   cache_entry->entry()->Store();
   1204   cache_entry->rankings()->Store();
   1205   IncreaseNumEntries();
   1206   entry_count_++;
   1207 
   1208   // Link this entry through the index.
   1209   if (parent.get()) {
   1210     parent->SetNextAddress(entry_address);
   1211   } else {
   1212     data_->table[hash & mask_] = entry_address.value();
   1213   }
   1214 
   1215   // Link this entry through the lists.
   1216   eviction_.OnCreateEntry(cache_entry);
   1217 
   1218   CACHE_UMA(AGE_MS, "CreateTime", 0, start);
   1219   stats_.OnEvent(Stats::CREATE_HIT);
   1220   SIMPLE_STATS_COUNTER("disk_cache.miss");
   1221   Trace("create entry hit ");
   1222   FlushIndex();
   1223   cache_entry->AddRef();
   1224   return cache_entry.get();
   1225 }
   1226 
   1227 void BackendImplV3::LogStats() {
   1228   StatsItems stats;
   1229   GetStats(&stats);
   1230 
   1231   for (size_t index = 0; index < stats.size(); index++)
   1232     VLOG(1) << stats[index].first << ": " << stats[index].second;
   1233 }
   1234 
   1235 void BackendImplV3::ReportStats() {
   1236   IndexHeaderV3* header = index_.header();
   1237   CACHE_UMA(COUNTS, "Entries", header->num_entries);
   1238 
   1239   int current_size = header->num_bytes / (1024 * 1024);
   1240   int max_size = max_size_ / (1024 * 1024);
   1241 
   1242   CACHE_UMA(COUNTS_10000, "Size", current_size);
   1243   CACHE_UMA(COUNTS_10000, "MaxSize", max_size);
   1244   if (!max_size)
   1245     max_size++;
   1246   CACHE_UMA(PERCENTAGE, "UsedSpace", current_size * 100 / max_size);
   1247 
   1248   CACHE_UMA(COUNTS_10000, "AverageOpenEntries",
   1249             static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
   1250   CACHE_UMA(COUNTS_10000, "MaxOpenEntries",
   1251             static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
   1252   stats_.SetCounter(Stats::MAX_ENTRIES, 0);
   1253 
   1254   CACHE_UMA(COUNTS_10000, "TotalFatalErrors",
   1255             static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
   1256   CACHE_UMA(COUNTS_10000, "TotalDoomCache",
   1257             static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
   1258   CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries",
   1259             static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
   1260   stats_.SetCounter(Stats::FATAL_ERROR, 0);
   1261   stats_.SetCounter(Stats::DOOM_CACHE, 0);
   1262   stats_.SetCounter(Stats::DOOM_RECENT, 0);
   1263 
   1264   int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
   1265   if (!(header->flags & CACHE_EVICTED)) {
   1266     CACHE_UMA(HOURS, "TotalTimeNotFull", static_cast<int>(total_hours));
   1267     return;
   1268   }
   1269 
    1270   // This is an up-to-date client that will report FirstEviction() data. After
   1271   // that event, start reporting this:
   1272 
   1273   CACHE_UMA(HOURS, "TotalTime", static_cast<int>(total_hours));
   1274 
   1275   int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
   1276   stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
   1277 
   1278   // We may see users with no use_hours at this point if this is the first time
   1279   // we are running this code.
   1280   if (use_hours)
   1281     use_hours = total_hours - use_hours;
   1282 
   1283   if (!use_hours || !GetEntryCount() || !header->num_bytes)
   1284     return;
   1285 
   1286   CACHE_UMA(HOURS, "UseTime", static_cast<int>(use_hours));
   1287 
   1288   int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
   1289   CACHE_UMA(COUNTS, "TrimRate", static_cast<int>(trim_rate));
   1290 
   1291   int avg_size = header->num_bytes / GetEntryCount();
   1292   CACHE_UMA(COUNTS, "EntrySize", avg_size);
   1293   CACHE_UMA(COUNTS, "EntriesFull", header->num_entries);
   1294 
   1295   int large_entries_bytes = stats_.GetLargeEntriesSize();
   1296   int large_ratio = large_entries_bytes * 100 / header->num_bytes;
   1297   CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", large_ratio);
   1298 
   1299   if (!lru_eviction_) {
   1300     CACHE_UMA(PERCENTAGE, "ResurrectRatio", stats_.GetResurrectRatio());
   1301     CACHE_UMA(PERCENTAGE, "NoUseRatio",
   1302               header->num_no_use_entries * 100 / header->num_entries);
   1303     CACHE_UMA(PERCENTAGE, "LowUseRatio",
   1304               header->num_low_use_entries * 100 / header->num_entries);
   1305     CACHE_UMA(PERCENTAGE, "HighUseRatio",
   1306               header->num_high_use_entries * 100 / header->num_entries);
   1307     CACHE_UMA(PERCENTAGE, "DeletedRatio",
   1308               header->num_evicted_entries * 100 / header->num_entries);
   1309   }
   1310 
   1311   stats_.ResetRatios();
   1312   stats_.SetCounter(Stats::TRIM_ENTRY, 0);
   1313 
   1314   if (cache_type_ == net::DISK_CACHE)
   1315     block_files_.ReportStats();
   1316 }
   1317 
   1318 void BackendImplV3::ReportError(int error) {
   1319   STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
   1320                 error == ERR_CACHE_CREATED);
   1321 
   1322   // We transmit positive numbers, instead of direct error codes.
   1323   DCHECK_LE(error, 0);
   1324   CACHE_UMA(CACHE_ERROR, "Error", error * -1);
   1325 }
   1326 
   1327 bool BackendImplV3::CheckIndex() {
   1328   DCHECK(data_);
   1329 
   1330   size_t current_size = index_->GetLength();
   1331   if (current_size < sizeof(Index)) {
   1332     LOG(ERROR) << "Corrupt Index file";
   1333     return false;
   1334   }
   1335 
   1336   if (new_eviction_) {
   1337     // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
   1338     if (kIndexMagic != data_->header.magic ||
   1339         kCurrentVersion >> 16 != data_->header.version >> 16) {
   1340       LOG(ERROR) << "Invalid file version or magic";
   1341       return false;
   1342     }
   1343     if (kCurrentVersion == data_->header.version) {
   1344       // We need file version 2.1 for the new eviction algorithm.
   1345       UpgradeTo2_1();
   1346     }
   1347   } else {
   1348     if (kIndexMagic != data_->header.magic ||
   1349         kCurrentVersion != data_->header.version) {
   1350       LOG(ERROR) << "Invalid file version or magic";
   1351       return false;
   1352     }
   1353   }
   1354 
   1355   if (!data_->header.table_len) {
   1356     LOG(ERROR) << "Invalid table size";
   1357     return false;
   1358   }
   1359 
   1360   if (current_size < GetIndexSize(data_->header.table_len) ||
   1361       data_->header.table_len & (kBaseTableLen - 1)) {
   1362     LOG(ERROR) << "Corrupt Index file";
   1363     return false;
   1364   }
   1365 
   1366   AdjustMaxCacheSize(data_->header.table_len);
   1367 
   1368 #if !defined(NET_BUILD_STRESS_CACHE)
   1369   if (data_->header.num_bytes < 0 ||
   1370       (max_size_ < kint32max - kDefaultCacheSize &&
   1371        data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
   1372     LOG(ERROR) << "Invalid cache (current) size";
   1373     return false;
   1374   }
   1375 #endif
   1376 
   1377   if (data_->header.num_entries < 0) {
   1378     LOG(ERROR) << "Invalid number of entries";
   1379     return false;
   1380   }
   1381 
   1382   if (!mask_)
   1383     mask_ = data_->header.table_len - 1;
   1384 
   1385   // Load the table into memory with a single read.
   1386   scoped_ptr<char[]> buf(new char[current_size]);
   1387   return index_->Read(buf.get(), current_size, 0);
   1388 }
   1389 
   1390 int BackendImplV3::CheckAllEntries() {
   1391   int num_dirty = 0;
   1392   int num_entries = 0;
   1393   DCHECK(mask_ < kuint32max);
   1394   for (unsigned int i = 0; i <= mask_; i++) {
   1395     Addr address(data_->table[i]);
   1396     if (!address.is_initialized())
   1397       continue;
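             // Walk the chain of entries sharing this table bucket via GetNextAddress().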
   1398     for (;;) {
   1399       EntryImpl* tmp;
   1400       int ret = NewEntry(address, &tmp);
   1401       if (ret) {
   1402         STRESS_NOTREACHED();
   1403         return ret;
   1404       }
   1405       scoped_refptr<EntryImpl> cache_entry;
   1406       cache_entry.swap(&tmp);
   1407 
   1408       if (cache_entry->dirty())
   1409         num_dirty++;
   1410       else if (CheckEntry(cache_entry.get()))
   1411         num_entries++;
   1412       else
   1413         return ERR_INVALID_ENTRY;
   1414 
   1415       DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
   1416       address.set_value(cache_entry->GetNextAddress());
   1417       if (!address.is_initialized())
   1418         break;
   1419     }
   1420   }
   1421 
   1422   Trace("CheckAllEntries End");
   1423   if (num_entries + num_dirty != data_->header.num_entries) {
   1424     LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
   1425                   " " << data_->header.num_entries;
   1426     DCHECK_LT(num_entries, data_->header.num_entries);
   1427     return ERR_NUM_ENTRIES_MISMATCH;
   1428   }
   1429 
   1430   return num_dirty;
   1431 }
   1432 
   1433 bool BackendImplV3::CheckEntry(EntryImpl* cache_entry) {
   1434   bool ok = block_files_.IsValid(cache_entry->entry()->address());
   1435   ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
   1436   EntryStore* data = cache_entry->entry()->Data();
   1437   for (size_t i = 0; i < arraysize(data->data_addr); i++) {
   1438     if (data->data_addr[i]) {
   1439       Addr address(data->data_addr[i]);
   1440       if (address.is_block_file())
   1441         ok = ok && block_files_.IsValid(address);
   1442     }
   1443   }
   1444 
   1445   return ok && cache_entry->rankings()->VerifyHash();
   1446 }
   1447 
   1448 int BackendImplV3::MaxBuffersSize() {
   1449   static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
   1450   static bool done = false;
   1451 
   1452   if (!done) {
   1453     const int kMaxBuffersSize = 30 * 1024 * 1024;
   1454 
   1455     // We want to use up to 2% of the computer's memory.
   1456     total_memory = total_memory * 2 / 100;
   1457     if (total_memory > kMaxBuffersSize || total_memory <= 0)
   1458       total_memory = kMaxBuffersSize;
   1459 
   1460     done = true;
   1461   }
   1462 
   1463   return static_cast<int>(total_memory);
   1464 }
   1465 
   1466 #endif  // defined(V3_NOT_JUST_YET_READY).
   1467 
   1468 bool BackendImplV3::IsAllocAllowed(int current_size, int new_size) {
   1469   return false;
   1470 }
   1471 
   1472 net::CacheType BackendImplV3::GetCacheType() const {
   1473   return cache_type_;
   1474 }
   1475 
   1476 int32 BackendImplV3::GetEntryCount() const {
   1477   return 0;
   1478 }
   1479 
   1480 int BackendImplV3::OpenEntry(const std::string& key, Entry** entry,
   1481                              const CompletionCallback& callback) {
   1482   return net::ERR_FAILED;
   1483 }
   1484 
   1485 int BackendImplV3::CreateEntry(const std::string& key, Entry** entry,
   1486                                const CompletionCallback& callback) {
   1487   return net::ERR_FAILED;
   1488 }
   1489 
   1490 int BackendImplV3::DoomEntry(const std::string& key,
   1491                              const CompletionCallback& callback) {
   1492   return net::ERR_FAILED;
   1493 }
   1494 
   1495 int BackendImplV3::DoomAllEntries(const CompletionCallback& callback) {
   1496   return net::ERR_FAILED;
   1497 }
   1498 
   1499 int BackendImplV3::DoomEntriesBetween(base::Time initial_time,
   1500                                       base::Time end_time,
   1501                                       const CompletionCallback& callback) {
   1502   return net::ERR_FAILED;
   1503 }
   1504 
   1505 int BackendImplV3::DoomEntriesSince(base::Time initial_time,
   1506                                     const CompletionCallback& callback) {
   1507   return net::ERR_FAILED;
   1508 }
   1509 
   1511 class BackendImplV3::NotImplementedIterator : public Backend::Iterator {
   1512  public:
   1513   virtual int OpenNextEntry(disk_cache::Entry** next_entry,
   1514                             const net::CompletionCallback& callback) OVERRIDE {
   1515     return net::ERR_NOT_IMPLEMENTED;
   1516   }
   1517 };
   1518 
   1519 scoped_ptr<Backend::Iterator> BackendImplV3::CreateIterator() {
   1520   return scoped_ptr<Iterator>(new NotImplementedIterator());
   1521 }
   1522 
   1523 void BackendImplV3::GetStats(StatsItems* stats) {
   1524   NOTIMPLEMENTED();
   1525 }
   1526 
   1527 void BackendImplV3::OnExternalCacheHit(const std::string& key) {
   1528   NOTIMPLEMENTED();
   1529 }
   1530 
   1531 void BackendImplV3::CleanupCache() {
   1532 }
   1533 
   1534 }  // namespace disk_cache
   1535