// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/backend_impl.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/stats_counters.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/errors.h"
#include "net/disk_cache/experiments.h"
#include "net/disk_cache/file.h"

// This has to be defined before including histogram_macros.h from this file.
#define NET_DISK_CACHE_BACKEND_IMPL_CC_
#include "net/disk_cache/histogram_macros.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const char* kIndexName = "index";

// Seems like ~240 MB correspond to less than 50k entries for 99% of the
// people. Note that the actual target is to keep the index table load factor
// under 55% for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
const int kDefaultCacheSize = 80 * 1024 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
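  // (Added, illustrative arithmetic: the index stores one 4-byte CacheAddr
  // per bucket, so 16 * 64 Ki = 1 Mi buckets is 4 MB of table. With one
  // ~240 MB step per doubling above, e.g. a 500 MB cache gets the
  // 256 Ki-bucket table.)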
  return kBaseTableLen * 16;
}

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

BackendImpl::BackendImpl(const base::FilePath& path,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::BackendImpl(const base::FilePath& path,
                         uint32 mask,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(mask),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(kMask),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

int BackendImpl::Init(const CompletionCallback& callback) {
  background_queue_.Init(callback);
  return net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenPrevEntry(iter, prev_entry, callback);
  return net::ERR_IO_PENDING;
}

bool BackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}
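
// (Added note on the kint32max cap in SetMaxSize() above: kint32max -
// kint32max / 10 is about 1.93e9, so max_size_ stays below ~1.8 GiB; that
// headroom keeps later int32 byte arithmetic, such as max_size_ +
// kDefaultCacheSize in CheckIndex(), from overflowing.)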

void BackendImpl::SetType(net::CacheType type) {
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
    return;
  eviction_.UpdateRank(entry, modified);
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->entry()->address();
  bool error;
  EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  if (!entry->doomed()) {
    // We may have doomed this entry from within MatchEntry.
    eviction_.OnDoomEntry(entry);
    entry->InternalDoom();
    if (!new_eviction_) {
      DecreaseNumEntries();
    }
    stats_.OnEvent(Stats::DOOM_ENTRY);
  }

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else if (!error) {
    data_->table[hash & mask_] = child;
  }

  FlushIndex();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (data_->header.num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    eviction_.TrimCache(false);
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

int BackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  FlushIndex();

  // Update the usage statistics.
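  // (Added note, an assumption about Stats: ModifyStorageStats() keeps
  // per-size-bucket counters, so a resize is recorded as old_size leaving its
  // bucket and new_size entering its bucket, not as a single delta.)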
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(size, 0);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return (num_pending_io_ > 5 || user_load_);
}

std::string BackendImpl::HistogramName(const char* name, int experiment) const {
  if (!experiment)
    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
                            name, experiment);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }
  return false;
}

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.
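
  // (Added arithmetic note: Stats::TIMER advances once per 30-second
  // OnStatsTimer() tick, so dividing by 120 below converts ticks to hours;
  // a full day of uptime is 2880 ticks = 24 hours.)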

  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(data_->header.num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((data_->header.num_bytes / 1024) / use_time));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImpl::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals.
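  // (Added arithmetic: with one tick every 30 seconds, time % 10 == 0 holds
  // once every 10 ticks, i.e. every 5 minutes.)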
  if (time % 10 == 0)
    StoreStats();
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}

int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
  background_queue_.FlushQueue(callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimCache(empty);
}

void BackendImpl::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}

int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }

  return CheckAllEntries();
}

// ------------------------------------------------------------------------

net::CacheType BackendImpl::GetCacheType() const {
  return cache_type_;
}

int32 BackendImpl::GetEntryCount() const {
  if (!index_.get() || disabled_)
    return 0;
  // num_entries includes entries already evicted.
  int32 not_deleted = data_->header.num_entries -
                      data_->header.lru.sizes[Rankings::DELETED];

  if (not_deleted < 0) {
    NOTREACHED();
    not_deleted = 0;
  }

  return not_deleted;
}

EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
  if (disabled_)
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
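    // (Added note from the surrounding code: under the new eviction a match
    // can come back in a deleted state, e.g. ENTRY_EVICTED; Open treats it as
    // a miss, while the create path may revive it - see ResurrectEntry().)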
    cache_entry->Release();
    cache_entry = NULL;
  }

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  int64 use_hours = total_hours - no_use_hours;

  if (!cache_entry) {
    CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
    CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  eviction_.OnOpenEntry(cache_entry);
  entry_count_++;

  Trace("Open hash 0x%x end: 0x%x", hash,
        cache_entry->entry()->address().value());
  CACHE_UMA(AGE_MS, "OpenTime", 0, start);
  CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
  CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
  CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
  stats_.OnEvent(Stats::OPEN_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.hit");
  return cache_entry;
}

EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
  if (disabled_ || key.empty())
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or
    // just a hash conflict.
    bool error;
    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(old_entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (parent_entry) {
      parent.swap(&parent_entry);
    } else if (data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return NULL;
    }
  }

  // The general flow is to allocate disk space and initialize the entry data,
  // followed by saving that to disk, then linking the entry through the index
  // and finally through the lists. If there is a crash in this process, we may
  // end up with:
  // a. Used, unreferenced empty blocks on disk (basically just garbage).
  // b. Used, unreferenced but meaningful data on disk (more garbage).
  // c. A fully formed entry, reachable only through the index.
  // d. A fully formed entry, also reachable through the lists, but still dirty.
  //
  // Anything after (b) can be automatically cleaned up. We may consider saving
  // the current operation (as we do while manipulating the lists) so that we
  // can detect and cleanup (a) and (b).
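  //
  // (Added note: case (d) relies on the dirty-id scheme: SyncInit()
  // increments data_->header.this_id each time the cache is opened, and
  // NewEntry() flags an entry stamped with a stale id as dirty when loading.)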

  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, entry_address, false));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry.get();

  // Save the entry.
  cache_entry->entry()->Store();
  cache_entry->rankings()->Store();
  IncreaseNumEntries();
  entry_count_++;

  // Link this entry through the index.
  if (parent.get()) {
    parent->SetNextAddress(entry_address);
  } else {
    data_->table[hash & mask_] = entry_address.value();
  }

  // Link this entry through the lists.
  eviction_.OnCreateEntry(cache_entry.get());

  CACHE_UMA(AGE_MS, "CreateTime", 0, start);
  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  FlushIndex();
  cache_entry->AddRef();
  return cache_entry.get();
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  entry->Release();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* node;
  void* iter = NULL;
  EntryImpl* next = OpenNextEntryImpl(&iter);
  if (!next)
    return net::OK;

  while (next) {
    node = next;
    next = OpenNextEntryImpl(&iter);

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Release();
      next = NULL;
      SyncEndEnumeration(iter);
    }

    node->Release();
  }

  return net::OK;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
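// (Added illustration: calling SyncDoomEntriesSince() with
// Time::Now() - TimeDelta::FromHours(1) dooms everything used in the last
// hour; the enumeration restarts after every doom because dooming invalidates
// the iterator.)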
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    void* iter = NULL;
    EntryImpl* entry = OpenNextEntryImpl(&iter);
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry->Release();
      SyncEndEnumeration(iter);
      return net::OK;
    }

    entry->DoomImpl();
    entry->Release();
    SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
  }
}

int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenNextEntry(iter, next_entry, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::EndEnumeration(void** iter) {
  background_queue_.EndEnumeration(*iter);
  *iter = NULL;
}

void BackendImpl::GetStats(StatsItems* stats) {
  if (disabled_)
    return;

  std::pair<std::string, std::string> item;

  item.first = "Entries";
  item.second = base::StringPrintf("%d", data_->header.num_entries);
  stats->push_back(item);

  item.first = "Pending IO";
  item.second = base::StringPrintf("%d", num_pending_io_);
  stats->push_back(item);

  item.first = "Max size";
  item.second = base::StringPrintf("%d", max_size_);
  stats->push_back(item);

  item.first = "Current size";
  item.second = base::StringPrintf("%d", data_->header.num_bytes);
  stats->push_back(item);

  item.first = "Cache type";
  item.second = "Blockfile Cache";
  stats->push_back(item);

  stats_.GetItems(stats);
}

void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
  if (disabled_)
    return;

  uint32 hash = base::Hash(key);
  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry) {
    if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
      UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    }
    cache_entry->Release();
  }
}

// ------------------------------------------------------------------------

// The maximum cache size will be either set explicitly by the caller, or
// calculated by this code.
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // If table_len is provided, the index file exists.
  DCHECK(!table_len || data_->header.magic);

  // The user is not setting the size, let's figure it out.
  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
  if (available < 0) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  if (table_len)
    available += data_->header.num_bytes;

  max_size_ = PreferedCacheSize(available);

  // Let's not use more than the default size while we tune-up the performance
  // of bigger caches. TODO(rvargas): remove this limit.
  if (max_size_ > kDefaultCacheSize * 4)
    max_size_ = kDefaultCacheSize * 4;

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_ = current_max_size;
}

bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

void BackendImpl::StoreStats() {
  int size = stats_.StorageSize();
  scoped_ptr<char[]> data(new char[size]);
  Addr address;
  size = stats_.SerializeStats(data.get(), size, &address);
  DCHECK(size);
  if (!address.is_initialized())
    return;

  MappedFile* file = File(address);
  if (!file)
    return;

  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  file->Write(data.get(), size, offset);  // ignore result.
}

void BackendImpl::RestartCache(bool failure) {
  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
  int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
  int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);

  PrepareForRestart();
  if (failure) {
    DCHECK(!num_refs_);
    DCHECK(!open_entries_.size());
    DelayedCacheCleanup(path_);
  } else {
    DeleteCache(path_, false);
  }

  // Don't call Init() if directed by the unit test: we are simulating a
  // failure trying to re-enable the cache.
  if (unit_test_)
    init_ = true;  // Let the destructor do proper cleanup.
  else if (SyncInit() == net::OK) {
    stats_.SetCounter(Stats::FATAL_ERROR, errors);
    stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
    stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
    stats_.SetCounter(Stats::LAST_REPORT, last_report);
  }
}

void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // Easy job. This entry is already in memory.
    EntryImpl* this_entry = it->second;
    this_entry->AddRef();
    *entry = this_entry;
    return 0;
  }

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntry()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, address, read_only_));
  IncreaseNumRefs();
  *entry = NULL;

  TimeTicks start = TimeTicks::Now();
  if (!cache_entry->entry()->Load())
    return ERR_READ_FAILURE;

  if (IsLoaded()) {
    CACHE_UMA(AGE_MS, "LoadTime", 0, start);
  }

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
      Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->LoadNodeAddress())
    return ERR_READ_FAILURE;

  if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    // Don't remove this from the list (it is not linked properly). Instead,
    // break the link back to the entry because it is going away, and leave the
    // rankings node to be deleted if we find it through a list.
    rankings_.SetContents(cache_entry->rankings(), 0);
  } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    rankings_.SetContents(cache_entry->rankings(), address.value());
  }

  if (!cache_entry->DataSanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->SetDirtyFlag(0);
    cache_entry->FixForDelete();
  }

  // Prevent overwriting the dirty flag on the destructor.
  cache_entry->SetDirtyFlag(GetCurrentEntryId());

  if (cache_entry->dirty()) {
    Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
          address.value());
  }

  open_entries_[address.value()] = cache_entry.get();

  cache_entry->BeginLogging(net_log_, false);
  cache_entry.swap(entry);
  return 0;
}

// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
  if (disabled_)
    return NULL;

  DCHECK(iter);

  const int kListsToSearch = 3;
  scoped_refptr<EntryImpl> entries[kListsToSearch];
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(*iter));
  *iter = NULL;

  if (!iterator.get()) {
    iterator.reset(new Rankings::Iterator(&rankings_));
    bool ret = false;

    // Get an entry from each list.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
                                        &iterator->nodes[i], &temp);
      entries[i].swap(&temp);  // The entry was already addref'd.
    }
    if (!ret)
      return NULL;
  } else {
    // Get the next entry from the last list, and the actual entries for the
    // elements on the other lists.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      if (iterator->list == i) {
        OpenFollowingEntryFromList(forward, iterator->list,
                                   &iterator->nodes[i], &temp);
      } else {
        temp = GetEnumeratedEntry(iterator->nodes[i],
                                  static_cast<Rankings::List>(i));
      }

      entries[i].swap(&temp);  // The entry was already addref'd.
    }
  }

  int newest = -1;
  int oldest = -1;
  Time access_times[kListsToSearch];
  for (int i = 0; i < kListsToSearch; i++) {
    if (entries[i].get()) {
      access_times[i] = entries[i]->GetLastUsed();
      if (newest < 0) {
        DCHECK_LT(oldest, 0);
        newest = oldest = i;
        continue;
      }
      if (access_times[i] > access_times[newest])
        newest = i;
      if (access_times[i] < access_times[oldest])
        oldest = i;
    }
  }

  if (newest < 0 || oldest < 0)
    return NULL;

  EntryImpl* next_entry;
  if (forward) {
    next_entry = entries[newest].get();
    iterator->list = static_cast<Rankings::List>(newest);
  } else {
    next_entry = entries[oldest].get();
    iterator->list = static_cast<Rankings::List>(oldest);
  }

  *iter = iterator.release();
  next_entry->AddRef();
  return next_entry;
}

void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
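    // (Added note: the experiment id stamped in the index is only meaningful
    // for the regular disk cache, so other cache types fail Init() instead of
    // reinterpreting it.)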
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be
  // enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  return disabled_ ? net::ERR_FAILED : net::OK;
}

EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return NULL;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.

  eviction_.OnCreateEntry(deleted_entry);
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  Trace("Resurrect entry hit ");
  return deleted_entry;
}
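
// (Added note: when CreateEntryImpl() finds a previously deleted match,
// ResurrectEntry() above revives the existing entry in place, so the create
// path returns it without allocating new blocks for the key.)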

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++)
    VLOG(1) << stats[index].first << ": " << stats[index].second;
}

void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);
  int hit_ratio_as_percentage = stats_.GetHitRatio();

  CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
  // For any bin in HitRatioBySize2, the hit ratio of caches of that size is
  // the ratio of that bin's total count to the count in the same bin in the
  // Size2 histogram.
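  // (Added worked example: a client with a 40% hit ratio lands in
  // HitRatioBySize2 on about 40 of every 100 reports, so dividing each
  // HitRatioBySize2 bin by the matching Size2 bin recovers the average hit
  // ratio for caches of that size.)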
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
  if (!max_size)
    max_size++;
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    int cause = data_->header.create_time ? 0 : 1;
    if (!data_->header.lru.filled)
      cause |= 2;
    CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
  // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
  // time is the ratio of that bin's total count to the count in the same bin
  // in the TotalTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
  // is the ratio of that bin's total count to the count in the same bin in the
  // UseTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
            data_->header.num_entries * 100 / (mask_ + 1));

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
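      // (Added reading of this check: kCurrentVersion is the 2.0 layout here,
      // so a 2.0 index is upgraded in place while a 2.1 file falls through
      // unchanged.)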
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory with a single read.
  scoped_ptr<char[]> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
}

int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
                  " " << data_->header.num_entries;
    DCHECK_LT(num_entries, data_->header.num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
}

int BackendImpl::MaxBuffersSize() {
  static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
  static bool done = false;

  if (!done) {
    const int kMaxBuffersSize = 30 * 1024 * 1024;

    // We want to use up to 2% of the computer's memory.
    total_memory = total_memory * 2 / 100;
    if (total_memory > kMaxBuffersSize || total_memory <= 0)
      total_memory = kMaxBuffersSize;

    done = true;
  }

  return static_cast<int>(total_memory);
}

}  // namespace disk_cache