1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #include "net/disk_cache/stats.h" 6 7 #include "base/format_macros.h" 8 #include "base/logging.h" 9 #include "base/metrics/histogram_samples.h" 10 #include "base/strings/string_util.h" 11 #include "base/strings/stringprintf.h" 12 13 namespace { 14 15 const int32 kDiskSignature = 0xF01427E0; 16 17 struct OnDiskStats { 18 int32 signature; 19 int size; 20 int data_sizes[disk_cache::Stats::kDataSizesLength]; 21 int64 counters[disk_cache::Stats::MAX_COUNTER]; 22 }; 23 COMPILE_ASSERT(sizeof(OnDiskStats) < 512, needs_more_than_2_blocks); 24 25 // Returns the "floor" (as opposed to "ceiling") of log base 2 of number. 26 int LogBase2(int32 number) { 27 unsigned int value = static_cast<unsigned int>(number); 28 const unsigned int mask[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000}; 29 const unsigned int s[] = {1, 2, 4, 8, 16}; 30 31 unsigned int result = 0; 32 for (int i = 4; i >= 0; i--) { 33 if (value & mask[i]) { 34 value >>= s[i]; 35 result |= s[i]; 36 } 37 } 38 return static_cast<int>(result); 39 } 40 41 // WARNING: Add new stats only at the end, or change LoadStats(). 
// Human-readable labels for each Counters enum value, used by GetItems() to
// report the counters as name/value pairs. Order must match the Counters
// enum (see the WARNING above: append only, or change LoadStats()).
static const char* kCounterNames[] = {
  "Open miss",
  "Open hit",
  "Create miss",
  "Create hit",
  "Resurrect hit",
  "Create error",
  "Trim entry",
  "Doom entry",
  "Doom cache",
  "Invalid entry",
  "Open entries",
  "Max entries",
  "Timer",
  "Read data",
  "Write data",
  "Open rankings",
  "Get rankings",
  "Fatal error",
  "Last report",
  "Last report timer",
  "Doom recent entries",
  "unused"
};
COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER,
               update_the_names);

}  // namespace

namespace disk_cache {

// Sanity-checks (and when possible repairs, in place) a stats record read
// from disk. Returns false only on a signature mismatch; size mismatches are
// tolerated so that existing data survives version changes.
bool VerifyStats(OnDiskStats* stats) {
  if (stats->signature != kDiskSignature)
    return false;

  // We don't want to discard the whole cache every time we have one extra
  // counter; we keep old data if we can.
  if (static_cast<unsigned int>(stats->size) > sizeof(*stats)) {
    // The record claims to be bigger than the current format (written by a
    // newer version, or corrupt; a negative size also lands here after the
    // unsigned cast): start over with a zeroed record.
    memset(stats, 0, sizeof(*stats));
    stats->signature = kDiskSignature;
  } else if (static_cast<unsigned int>(stats->size) != sizeof(*stats)) {
    // The record is smaller (written by an older version): keep the data that
    // is there and zero-fill the missing tail.
    size_t delta = sizeof(*stats) - static_cast<unsigned int>(stats->size);
    memset(reinterpret_cast<char*>(stats) + stats->size, 0, delta);
    stats->size = sizeof(*stats);
  }

  return true;
}

Stats::Stats() : size_histogram_(NULL) {
}

Stats::~Stats() {
}

// Initializes this object from |data|, a buffer of |num_bytes| bytes read
// from cache address |address|. With num_bytes == 0 a fresh, zeroed record is
// used instead. Returns false if the buffer is non-empty but too small, or if
// VerifyStats() rejects it.
bool Stats::Init(void* data, int num_bytes, Addr address) {
  OnDiskStats local_stats;
  OnDiskStats* stats = &local_stats;
  if (!num_bytes) {
    // No stored stats: start from an empty, correctly tagged record.
    memset(stats, 0, sizeof(local_stats));
    local_stats.signature = kDiskSignature;
    local_stats.size = sizeof(local_stats);
  } else if (num_bytes >= static_cast<int>(sizeof(*stats))) {
    stats = reinterpret_cast<OnDiskStats*>(data);
    if (!VerifyStats(stats))
      return false;
  } else {
    return false;
  }

  storage_addr_ = address;

  // Copy the persisted arrays into the in-memory counters.
  memcpy(data_sizes_, stats->data_sizes, sizeof(data_sizes_));
  memcpy(counters_, stats->counters, sizeof(counters_));

  // Clean up old value.
  SetCounter(UNUSED, 0);
  return true;
}

// Registers the DiskCache.SizeStats histogram, at most once per process.
// NOTE(review): |first_time| is an unsynchronized function-local static;
// this assumes single-threaded initialization — confirm with callers.
void Stats::InitSizeHistogram() {
  // It seems impossible to support this histogram for more than one
  // simultaneous objects with the current infrastructure.
  static bool first_time = true;
  if (first_time) {
    first_time = false;
    if (!size_histogram_) {
      // Stats may be reused when the cache is re-created, but we want only one
      // histogram at any given time.
      size_histogram_ = StatsHistogram::FactoryGet("DiskCache.SizeStats", this);
    }
  }
}

// Returns the number of bytes reserved on disk for the stats record: two
// 256-byte blocks.
int Stats::StorageSize() {
  // If we have more than 512 bytes of counters, change kDiskSignature so we
  // don't overwrite something else (LoadStats must fail).
  COMPILE_ASSERT(sizeof(OnDiskStats) <= 256 * 2, use_more_blocks);
  return 256 * 2;
}

// Moves one entry between size buckets: decrements the bucket for |old_size|
// and increments the one for |new_size|. A size of 0 means "no entry" on that
// side, so only the other bucket is touched.
void Stats::ModifyStorageStats(int32 old_size, int32 new_size) {
  // We keep a counter of the data block size on an array where each entry is
  // the adjusted log base 2 of the size. See the bucket table before
  // GetStatsBucket(): the first bucket counts entries under 1 KB and the last
  // one counts entries of 64 MB and more.
  int new_index = GetStatsBucket(new_size);
  int old_index = GetStatsBucket(old_size);

  if (new_size)
    data_sizes_[new_index]++;

  if (old_size)
    data_sizes_[old_index]--;
}

// Increments the counter for |an_event|.
void Stats::OnEvent(Counters an_event) {
  DCHECK(an_event >= MIN_COUNTER && an_event < MAX_COUNTER);
  counters_[an_event]++;
}

// Sets |counter| to an absolute |value|.
void Stats::SetCounter(Counters counter, int64 value) {
  DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
  counters_[counter] = value;
}

int64 Stats::GetCounter(Counters counter) const {
  DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
  return counters_[counter];
}

// Appends all stats to |items| as (name, hex value) string pairs: first the
// size buckets ("Size00".."SizeNN"), then the named counters.
void Stats::GetItems(StatsItems* items) {
  std::pair<std::string, std::string> item;
  for (int i = 0; i < kDataSizesLength; i++) {
    item.first = base::StringPrintf("Size%02d", i);
    item.second = base::StringPrintf("0x%08x", data_sizes_[i]);
    items->push_back(item);
  }

  for (int i = MIN_COUNTER; i < MAX_COUNTER; i++) {
    item.first = kCounterNames[i];
    item.second = base::StringPrintf("0x%" PRIx64, counters_[i]);
    items->push_back(item);
  }
}

// Percentage of open attempts that hit (see GetRatio()).
int Stats::GetHitRatio() const {
  return GetRatio(OPEN_HIT, OPEN_MISS);
}

// Percentage of created entries that were resurrected (see GetRatio()).
int Stats::GetResurrectRatio() const {
  return GetRatio(RESURRECT_HIT, CREATE_HIT);
}

// Zeroes the counters that feed GetHitRatio() and GetResurrectRatio().
void Stats::ResetRatios() {
  SetCounter(OPEN_HIT, 0);
  SetCounter(OPEN_MISS, 0);
  SetCounter(RESURRECT_HIT, 0);
  SetCounter(CREATE_HIT, 0);
}

// Approximates the total bytes used by entries of at least 512 KB, summing
// each bucket's count times that bucket's lower size bound.
int Stats::GetLargeEntriesSize() {
  int total = 0;
  // data_sizes_[20] stores values between 512 KB and 1 MB (see comment before
  // GetStatsBucket()).
  for (int bucket = 20; bucket < kDataSizesLength; bucket++)
    total += data_sizes_[bucket] * GetBucketRange(bucket);

  return total;
}

// Writes the current stats into |data| in the on-disk format and reports the
// storage address through |address|. Returns the number of bytes written, or
// 0 if |num_bytes| is too small.
int Stats::SerializeStats(void* data, int num_bytes, Addr* address) {
  OnDiskStats* stats = reinterpret_cast<OnDiskStats*>(data);
  if (num_bytes < static_cast<int>(sizeof(*stats)))
    return 0;

  stats->signature = kDiskSignature;
  stats->size = sizeof(*stats);
  memcpy(stats->data_sizes, data_sizes_, sizeof(data_sizes_));
  memcpy(stats->counters, counters_, sizeof(counters_));

  *address = storage_addr_;
  return sizeof(*stats);
}

// Returns the lower bound, in bytes, of size bucket |i|; this is the inverse
// of GetStatsBucket() and must match the table before it: linear steps of
// 1 KB, then 2 KB, then 4 KB, and powers of two from 64 KB on.
int Stats::GetBucketRange(size_t i) const {
  if (i < 2)
    return static_cast<int>(1024 * i);

  if (i < 12)
    return static_cast<int>(2048 * (i - 1));

  if (i < 17)
    return static_cast<int>(4096 * (i - 11)) + 20 * 1024;

  int n = 64 * 1024;
  if (i > static_cast<size_t>(kDataSizesLength)) {
    NOTREACHED();
    i = kDataSizesLength;
  }

  i -= 17;
  n <<= i;
  return n;
}

// Copies the size-bucket counts into |samples|, keyed by each bucket's lower
// bound. Negative counts are defensively clamped to zero before reporting.
void Stats::Snapshot(base::HistogramSamples* samples) const {
  for (int i = 0; i < kDataSizesLength; i++) {
    int count = data_sizes_[i];
    if (count < 0)
      count = 0;
    samples->Accumulate(GetBucketRange(i), count);
  }
}

// The array will be filled this way:
//  index      size
//    0       [0, 1024)
//    1       [1024, 2048)
//    2       [2048, 4096)
//    3       [4K, 6K)
//      ...
//    10      [18K, 20K)
//    11      [20K, 24K)
//    12      [24k, 28K)
//      ...
//    15      [36k, 40K)
//    16      [40k, 64K)
//    17      [64K, 128K)
//    18      [128K, 256K)
//      ...
//    23      [4M, 8M)
//    24      [8M, 16M)
//    25      [16M, 32M)
//    26      [32M, 64M)
//    27      [64M, ...)
int Stats::GetStatsBucket(int32 size) {
  if (size < 1024)
    return 0;

  // 10 slots more, until 20K.
  if (size < 20 * 1024)
    return size / 2048 + 1;

  // 5 slots more, from 20K to 40K.
  if (size < 40 * 1024)
    return (size - 20 * 1024) / 4096 + 11;

  // From this point on, use a logarithmic scale.
  int result = LogBase2(size) + 1;

  COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale);
  if (result >= kDataSizesLength)
    result = kDataSizesLength - 1;

  return result;
}

// Returns hit * 100 / (hit + miss), i.e. the hit percentage, or 0 when no
// hits were recorded.
int Stats::GetRatio(Counters hit, Counters miss) const {
  int64 ratio = GetCounter(hit) * 100;
  if (!ratio)
    return 0;

  ratio /= (GetCounter(hit) + GetCounter(miss));
  return static_cast<int>(ratio);
}

}  // namespace disk_cache