// Home | History | Annotate | Download | only in disk_cache
      1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "net/disk_cache/stats.h"
      6 
      7 #include "base/format_macros.h"
      8 #include "base/logging.h"
      9 #include "base/string_util.h"
     10 #include "base/stringprintf.h"
     11 #include "net/disk_cache/backend_impl.h"
     12 
     13 namespace {
     14 
     15 const int32 kDiskSignature = 0xF01427E0;
     16 
     17 struct OnDiskStats {
     18   int32 signature;
     19   int size;
     20   int data_sizes[disk_cache::Stats::kDataSizesLength];
     21   int64 counters[disk_cache::Stats::MAX_COUNTER];
     22 };
     23 COMPILE_ASSERT(sizeof(OnDiskStats) < 512, needs_more_than_2_blocks);
     24 
     25 // Returns the "floor" (as opposed to "ceiling") of log base 2 of number.
     26 int LogBase2(int32 number) {
     27   unsigned int value = static_cast<unsigned int>(number);
     28   const unsigned int mask[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
     29   const unsigned int s[] = {1, 2, 4, 8, 16};
     30 
     31   unsigned int result = 0;
     32   for (int i = 4; i >= 0; i--) {
     33     if (value & mask[i]) {
     34       value >>= s[i];
     35       result |= s[i];
     36     }
     37   }
     38   return static_cast<int>(result);
     39 }
     40 
     41 // WARNING: Add new stats only at the end, or change LoadStats().
     42 static const char* kCounterNames[] = {
     43   "Open miss",
     44   "Open hit",
     45   "Create miss",
     46   "Create hit",
     47   "Resurrect hit",
     48   "Create error",
     49   "Trim entry",
     50   "Doom entry",
     51   "Doom cache",
     52   "Invalid entry",
     53   "Open entries",
     54   "Max entries",
     55   "Timer",
     56   "Read data",
     57   "Write data",
     58   "Open rankings",
     59   "Get rankings",
     60   "Fatal error",
     61   "Last report",
     62   "Last report timer",
     63   "Doom recent entries"
     64 };
     65 COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER,
     66                update_the_names);
     67 
     68 }  // namespace
     69 
     70 namespace disk_cache {
     71 
     72 bool LoadStats(BackendImpl* backend, Addr address, OnDiskStats* stats) {
     73   MappedFile* file = backend->File(address);
     74   if (!file)
     75     return false;
     76 
     77   size_t offset = address.start_block() * address.BlockSize() +
     78                   kBlockHeaderSize;
     79   memset(stats, 0, sizeof(*stats));
     80   if (!file->Read(stats, sizeof(*stats), offset))
     81     return false;
     82 
     83   if (stats->signature != kDiskSignature)
     84     return false;
     85 
     86   // We don't want to discard the whole cache every time we have one extra
     87   // counter; we keep old data if we can.
     88   if (static_cast<unsigned int>(stats->size) > sizeof(*stats)) {
     89     memset(stats, 0, sizeof(*stats));
     90   }
     91 
     92   return true;
     93 }
     94 
     95 bool StoreStats(BackendImpl* backend, Addr address, OnDiskStats* stats) {
     96   MappedFile* file = backend->File(address);
     97   if (!file)
     98     return false;
     99 
    100   size_t offset = address.start_block() * address.BlockSize() +
    101                   kBlockHeaderSize;
    102   return file->Write(stats, sizeof(*stats), offset);
    103 }
    104 
// Allocates a fresh two-block record for the stats, initializes |stats| to an
// empty record and persists it. On success, |*address| holds the new record's
// location.
bool CreateStats(BackendImpl* backend, Addr* address, OnDiskStats* stats) {
  if (!backend->CreateBlock(BLOCK_256, 2, address))
    return false;

  // If we have more than 512 bytes of counters, change kDiskSignature so we
  // don't overwrite something else (LoadStats must fail).
  COMPILE_ASSERT(sizeof(*stats) <= 256 * 2, use_more_blocks);
  memset(stats, 0, sizeof(*stats));
  stats->signature = kDiskSignature;
  stats->size = sizeof(*stats);

  return StoreStats(backend, *address, stats);
}
    118 
// Members stay NULL until Init() succeeds; most methods are no-ops or unsafe
// before that.
Stats::Stats() : backend_(NULL), size_histogram_(NULL) {
}
    121 
// Intentionally empty: counters are not persisted here; Store() must be
// called explicitly to write them back to disk.
Stats::~Stats() {
}
    124 
// Loads the stats record referenced by |*storage_addr|, or creates a new one
// when the address is uninitialized (writing the new address back through
// |storage_addr|). Returns false on any disk failure.
bool Stats::Init(BackendImpl* backend, uint32* storage_addr) {
  OnDiskStats stats;
  Addr address(*storage_addr);
  if (address.is_initialized()) {
    if (!LoadStats(backend, address, &stats))
      return false;
  } else {
    if (!CreateStats(backend, &address, &stats))
      return false;
    *storage_addr = address.value();
  }

  storage_addr_ = address.value();
  backend_ = backend;

  // Copy the persisted values into the live in-memory arrays.
  memcpy(data_sizes_, stats.data_sizes, sizeof(data_sizes_));
  memcpy(counters_, stats.counters, sizeof(counters_));

  // It seems impossible to support this histogram for more than one
  // simultaneous objects with the current infrastructure.
  static bool first_time = true;
  if (first_time) {
    // Clear the flag before the calls below: ShouldReportAgain() will
    // re-enter this object.
    first_time = false;
    if (!size_histogram_ && backend->cache_type() == net::DISK_CACHE &&
        backend->ShouldReportAgain()) {
      // Stats may be reused when the cache is re-created, but we want only one
      // histogram at any given time.
      size_histogram_ =
          StatsHistogram::StatsHistogramFactoryGet("DiskCache.SizeStats");
      size_histogram_->Init(this);
    }
  }

  return true;
}
    161 
    162 void Stats::ModifyStorageStats(int32 old_size, int32 new_size) {
    163   // We keep a counter of the data block size on an array where each entry is
    164   // the adjusted log base 2 of the size. The first entry counts blocks of 256
    165   // bytes, the second blocks up to 512 bytes, etc. With 20 entries, the last
    166   // one stores entries of more than 64 MB
    167   int new_index = GetStatsBucket(new_size);
    168   int old_index = GetStatsBucket(old_size);
    169 
    170   if (new_size)
    171     data_sizes_[new_index]++;
    172 
    173   if (old_size)
    174     data_sizes_[old_index]--;
    175 }
    176 
    177 void Stats::OnEvent(Counters an_event) {
    178   DCHECK(an_event > MIN_COUNTER || an_event < MAX_COUNTER);
    179   counters_[an_event]++;
    180 }
    181 
    182 void Stats::SetCounter(Counters counter, int64 value) {
    183   DCHECK(counter > MIN_COUNTER || counter < MAX_COUNTER);
    184   counters_[counter] = value;
    185 }
    186 
    187 int64 Stats::GetCounter(Counters counter) const {
    188   DCHECK(counter > MIN_COUNTER || counter < MAX_COUNTER);
    189   return counters_[counter];
    190 }
    191 
    192 void Stats::GetItems(StatsItems* items) {
    193   std::pair<std::string, std::string> item;
    194   for (int i = 0; i < kDataSizesLength; i++) {
    195     item.first = base::StringPrintf("Size%02d", i);
    196     item.second = base::StringPrintf("0x%08x", data_sizes_[i]);
    197     items->push_back(item);
    198   }
    199 
    200   for (int i = MIN_COUNTER + 1; i < MAX_COUNTER; i++) {
    201     item.first = kCounterNames[i];
    202     item.second = base::StringPrintf("0x%" PRIx64, counters_[i]);
    203     items->push_back(item);
    204   }
    205 }
    206 
// Returns the percentage of open attempts that were hits.
int Stats::GetHitRatio() const {
  return GetRatio(OPEN_HIT, OPEN_MISS);
}
    210 
// Returns resurrect hits as a percentage of resurrect hits plus create hits.
int Stats::GetResurrectRatio() const {
  return GetRatio(RESURRECT_HIT, CREATE_HIT);
}
    214 
// Zeroes the counters that feed GetHitRatio() and GetResurrectRatio(), so the
// ratios start fresh.
void Stats::ResetRatios() {
  SetCounter(OPEN_HIT, 0);
  SetCounter(OPEN_MISS, 0);
  SetCounter(RESURRECT_HIT, 0);
  SetCounter(CREATE_HIT, 0);
}
    221 
    222 int Stats::GetLargeEntriesSize() {
    223   int total = 0;
    224   // data_sizes_[20] stores values between 512 KB and 1 MB (see comment before
    225   // GetStatsBucket()).
    226   for (int bucket = 20; bucket < kDataSizesLength; bucket++)
    227     total += data_sizes_[bucket] * GetBucketRange(bucket);
    228 
    229   return total;
    230 }
    231 
    232 void Stats::Store() {
    233   if (!backend_)
    234     return;
    235 
    236   OnDiskStats stats;
    237   stats.signature = kDiskSignature;
    238   stats.size = sizeof(stats);
    239   memcpy(stats.data_sizes, data_sizes_, sizeof(data_sizes_));
    240   memcpy(stats.counters, counters_, sizeof(counters_));
    241 
    242   Addr address(storage_addr_);
    243   StoreStats(backend_, address, &stats);
    244 }
    245 
    246 int Stats::GetBucketRange(size_t i) const {
    247   if (i < 2)
    248     return static_cast<int>(1024 * i);
    249 
    250   if (i < 12)
    251     return static_cast<int>(2048 * (i - 1));
    252 
    253   if (i < 17)
    254     return static_cast<int>(4096 * (i - 11)) + 20 * 1024;
    255 
    256   int n = 64 * 1024;
    257   if (i > static_cast<size_t>(kDataSizesLength)) {
    258     NOTREACHED();
    259     i = kDataSizesLength;
    260   }
    261 
    262   i -= 17;
    263   n <<= i;
    264   return n;
    265 }
    266 
    267 void Stats::Snapshot(StatsHistogram::StatsSamples* samples) const {
    268   samples->GetCounts()->resize(kDataSizesLength);
    269   for (int i = 0; i < kDataSizesLength; i++) {
    270     int count = data_sizes_[i];
    271     if (count < 0)
    272       count = 0;
    273     samples->GetCounts()->at(i) = count;
    274   }
    275 }
    276 
    277 // The array will be filled this way:
    278 //  index      size
    279 //    0       [0, 1024)
    280 //    1    [1024, 2048)
    281 //    2    [2048, 4096)
    282 //    3      [4K, 6K)
    283 //      ...
    284 //   10     [18K, 20K)
    285 //   11     [20K, 24K)
    286 //   12     [24k, 28K)
    287 //      ...
    288 //   15     [36k, 40K)
    289 //   16     [40k, 64K)
    290 //   17     [64K, 128K)
    291 //   18    [128K, 256K)
    292 //      ...
    293 //   23      [4M, 8M)
    294 //   24      [8M, 16M)
    295 //   25     [16M, 32M)
    296 //   26     [32M, 64M)
    297 //   27     [64M, ...)
    298 int Stats::GetStatsBucket(int32 size) {
    299   if (size < 1024)
    300     return 0;
    301 
    302   // 10 slots more, until 20K.
    303   if (size < 20 * 1024)
    304     return size / 2048 + 1;
    305 
    306   // 5 slots more, from 20K to 40K.
    307   if (size < 40 * 1024)
    308     return (size - 20 * 1024) / 4096 + 11;
    309 
    310   // From this point on, use a logarithmic scale.
    311   int result =  LogBase2(size) + 1;
    312 
    313   COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale);
    314   if (result >= kDataSizesLength)
    315     result = kDataSizesLength - 1;
    316 
    317   return result;
    318 }
    319 
    320 int Stats::GetRatio(Counters hit, Counters miss) const {
    321   int64 ratio = GetCounter(hit) * 100;
    322   if (!ratio)
    323     return 0;
    324 
    325   ratio /= (GetCounter(hit) + GetCounter(miss));
    326   return static_cast<int>(ratio);
    327 }
    328 
    329 }  // namespace disk_cache
    330