// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/backend_impl.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/stats_counters.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_format.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/errors.h"
#include "net/disk_cache/experiments.h"
#include "net/disk_cache/file.h"

// This has to be defined before including histogram_macros.h from this file.
#define NET_DISK_CACHE_BACKEND_IMPL_CC_
#include "net/disk_cache/histogram_macros.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const char* kIndexName = "index";

// Seems like ~240 MB correspond to less than 50k entries for 99% of the people.
// Note that the actual target is to keep the index table load factor under 55%
// for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}

// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
          "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    }
    return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}

// A callback to perform final cleanup on the background thread.
void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
  backend->CleanupCache();
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

BackendImpl::BackendImpl(const base::FilePath& path,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::BackendImpl(const base::FilePath& path,
                         uint32 mask,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(mask),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(kMask),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

int BackendImpl::Init(const CompletionCallback& callback) {
  background_queue_.Init(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

// ------------------------------------------------------------------------

int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenPrevEntry(iter, prev_entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = OpenEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = CreateEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  entry->Release();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* node;
  void* iter = NULL;
  EntryImpl* next = OpenNextEntryImpl(&iter);
  if (!next)
    return net::OK;

  while (next) {
    node = next;
    next = OpenNextEntryImpl(&iter);

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Release();
      next = NULL;
      SyncEndEnumeration(iter);
    }

    node->Release();
  }

  return net::OK;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    void* iter = NULL;
    EntryImpl* entry = OpenNextEntryImpl(&iter);
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry->Release();
      SyncEndEnumeration(iter);
      return net::OK;
    }

    entry->DoomImpl();
    entry->Release();
    SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
  }
}

int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
  *next_entry = OpenNextEntryImpl(iter);
  return (*next_entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
  *prev_entry = OpenPrevEntryImpl(iter);
  return (*prev_entry) ? net::OK : net::ERR_FAILED;
}

void BackendImpl::SyncEndEnumeration(void* iter) {
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(iter));
}

void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
  if (disabled_)
    return;

  uint32 hash = base::Hash(key);
  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry) {
    if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
      UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    }
    cache_entry->Release();
  }
}

EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
  if (disabled_)
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry->Release();
    cache_entry = NULL;
  }

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  int64 use_hours = total_hours - no_use_hours;

  if (!cache_entry) {
    CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
    CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  eviction_.OnOpenEntry(cache_entry);
  entry_count_++;

  Trace("Open hash 0x%x end: 0x%x", hash,
        cache_entry->entry()->address().value());
  CACHE_UMA(AGE_MS, "OpenTime", 0, start);
  CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
  CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
  CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
  stats_.OnEvent(Stats::OPEN_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.hit");
  return cache_entry;
}

EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
  if (disabled_ || key.empty())
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or just
    // a hash conflict.
    bool error;
    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(old_entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (parent_entry) {
      parent.swap(&parent_entry);
    } else if (data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return NULL;
    }
  }

  // The general flow is to allocate disk space and initialize the entry data,
  // followed by saving that to disk, then linking the entry through the index
  // and finally through the lists. If there is a crash in this process, we may
  // end up with:
  // a. Used, unreferenced empty blocks on disk (basically just garbage).
  // b. Used, unreferenced but meaningful data on disk (more garbage).
  // c. A fully formed entry, reachable only through the index.
  // d. A fully formed entry, also reachable through the lists, but still dirty.
  //
  // Anything after (b) can be automatically cleaned up. We may consider saving
  // the current operation (as we do while manipulating the lists) so that we
  // can detect and cleanup (a) and (b).

  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, entry_address, false));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry.get();

  // Save the entry.
  cache_entry->entry()->Store();
  cache_entry->rankings()->Store();
  IncreaseNumEntries();
  entry_count_++;

  // Link this entry through the index.
  if (parent.get()) {
    parent->SetNextAddress(entry_address);
  } else {
    data_->table[hash & mask_] = entry_address.value();
  }

  // Link this entry through the lists.
  eviction_.OnCreateEntry(cache_entry.get());

  CACHE_UMA(AGE_MS, "CreateTime", 0, start);
  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  FlushIndex();
  cache_entry->AddRef();
  return cache_entry.get();
}

EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
  return OpenFollowingEntry(true, iter);
}

EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
  return OpenFollowingEntry(false, iter);
}

bool BackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
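  // (Presumably the cap keeps the limit roughly 10% below kint32max so that
  // later arithmetic on max_size_ cannot overflow a 32-bit int.)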
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}

void BackendImpl::SetType(net::CacheType type) {
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}

base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return NULL;
  return block_files_.GetFile(address);
}

base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
  return background_queue_.GetWeakPtr();
}

bool BackendImpl::CreateExternalFile(Addr* address) {
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    base::FilePath name = GetFileName(file_address);
    int flags = base::PLATFORM_FILE_READ |
                base::PLATFORM_FILE_WRITE |
                base::PLATFORM_FILE_CREATE |
                base::PLATFORM_FILE_EXCLUSIVE_WRITE;
    base::PlatformFileError error;
    scoped_refptr<disk_cache::File> file(new disk_cache::File(
        base::CreatePlatformFile(name, flags, NULL, &error)));
    if (!file->IsValid()) {
      if (error != base::PLATFORM_FILE_ERROR_EXISTS) {
        LOG(ERROR) << "Unable to create file: " << error;
        return false;
      }
      continue;
    }

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
    return;
  eviction_.UpdateRank(entry, modified);
}

void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  EntryImpl* cache_entry = NULL;
  if (NewEntry(address, &cache_entry)) {
    STRESS_NOTREACHED();
    return;
  }

  uint32 hash = cache_entry->GetHash();
  cache_entry->Release();

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
  FlushIndex();
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->entry()->address();
  bool error;
  EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  if (!entry->doomed()) {
    // We may have doomed this entry from within MatchEntry.
    eviction_.OnDoomEntry(entry);
    entry->InternalDoom();
    if (!new_eviction_) {
      DecreaseNumEntries();
    }
    stats_.OnEvent(Stats::DOOM_ENTRY);
  }

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else if (!error) {
    data_->table[hash & mask_] = child;
  }

  FlushIndex();
}

#if defined(NET_BUILD_STRESS_CACHE)

CacheAddr BackendImpl::GetNextAddr(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    EntryImpl* this_entry = it->second;
    return this_entry->GetNextAddress();
  }
  DCHECK(block_files_.IsValid(address));
  DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);

  CacheEntryBlock entry(File(address), address);
  CHECK(entry.Load());
  return entry.Data()->next;
}

void BackendImpl::NotLinked(EntryImpl* entry) {
  Addr entry_addr = entry->entry()->address();
  uint32 i = entry->GetHash() & mask_;
  Addr address(data_->table[i]);
  if (!address.is_initialized())
    return;

  for (;;) {
    DCHECK(entry_addr.value() != address.value());
    address.set_value(GetNextAddr(address));
    if (!address.is_initialized())
      break;
  }
}
#endif  // NET_BUILD_STRESS_CACHE

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
#if defined(NET_BUILD_STRESS_CACHE)
  NotLinked(entry);
#endif
  if (!new_eviction_)
    return;

  DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);

  Trace("Remove entry 0x%p", entry);
  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (data_->header.num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    eviction_.TrimCache(false);
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

int32 BackendImpl::GetCurrentEntryId() const {
  return data_->header.this_id;
}

int BackendImpl::MaxFileSize() const {
  return cache_type() == net::PNACL_CACHE ? max_size_ : max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  FlushIndex();

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(size, 0);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return (num_pending_io_ > 5 || user_load_);
}

std::string BackendImpl::HistogramName(const char* name, int experiment) const {
  if (!experiment)
    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
                            name, experiment);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }
  return false;
}

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.
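
  // Record fill-up statistics (age, elapsed timer ticks, hit ratio, entry
  // sizes) the first time the cache has to evict something.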
  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(data_->header.num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((data_->header.num_bytes / 1024) / use_time));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  STRESS_NOTREACHED();
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  stats_.OnEvent(Stats::FATAL_ERROR);
  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImpl::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    StoreStats();
}

void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}

void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}

void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}

int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
  background_queue_.FlushQueue(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::RunTaskForTest(const base::Closure& task,
                                const CompletionCallback& callback) {
  background_queue_.RunTask(task, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimCache(empty);
}

void BackendImpl::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}

int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }

  return CheckAllEntries();
}

void BackendImpl::FlushIndex() {
  if (index_.get() && !disabled_)
    index_->Flush();
}

// ------------------------------------------------------------------------

net::CacheType BackendImpl::GetCacheType() const {
  return cache_type_;
}

int32 BackendImpl::GetEntryCount() const {
  if (!index_.get() || disabled_)
    return 0;
  // num_entries includes entries already evicted.
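  // Subtract the DELETED list so that only live entries are reported.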
  int32 not_deleted = data_->header.num_entries -
                      data_->header.lru.sizes[Rankings::DELETED];

  if (not_deleted < 0) {
    NOTREACHED();
    not_deleted = 0;
  }

  return not_deleted;
}

int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.CreateEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntry(const std::string& key,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntry(key, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomAllEntries(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                    const base::Time end_time,
                                    const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesSince(const base::Time initial_time,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesSince(initial_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenNextEntry(iter, next_entry, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::EndEnumeration(void** iter) {
  background_queue_.EndEnumeration(*iter);
  *iter = NULL;
}

void BackendImpl::GetStats(StatsItems* stats) {
  if (disabled_)
    return;

  std::pair<std::string, std::string> item;

  item.first = "Entries";
  item.second = base::StringPrintf("%d", data_->header.num_entries);
  stats->push_back(item);

  item.first = "Pending IO";
  item.second = base::StringPrintf("%d", num_pending_io_);
  stats->push_back(item);

  item.first = "Max size";
  item.second = base::StringPrintf("%d", max_size_);
  stats->push_back(item);

  item.first = "Current size";
  item.second = base::StringPrintf("%d", data_->header.num_bytes);
  stats->push_back(item);

  item.first = "Cache type";
  item.second = "Blockfile Cache";
  stats->push_back(item);

  stats_.GetItems(stats);
}

void BackendImpl::OnExternalCacheHit(const std::string& key) {
  background_queue_.OnExternalCacheHit(key);
}

// ------------------------------------------------------------------------

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!base::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created, NULL)));

  if (!file->IsValid())
    return false;

  bool ret = true;
  if (*file_created)
    ret = CreateBackingStore(file.get());

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}

// The maximum cache size will be either set explicitly by the caller, or
// calculated by this code.
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // If table_len is provided, the index file exists.
  DCHECK(!table_len || data_->header.magic);

  // The user is not setting the size, let's figure it out.
  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
  if (available < 0) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  if (table_len)
    available += data_->header.num_bytes;

  max_size_ = PreferredCacheSize(available);

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_ = current_max_size;
}

bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;

    data_->header.stats = address.value();
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
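  // (The stats record lives inside a block file: compute its size and offset
  // from the stored address and read it into a temporary buffer.)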
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

void BackendImpl::StoreStats() {
  int size = stats_.StorageSize();
  scoped_ptr<char[]> data(new char[size]);
  Addr address;
  size = stats_.SerializeStats(data.get(), size, &address);
  DCHECK(size);
  if (!address.is_initialized())
    return;

  MappedFile* file = File(address);
  if (!file)
    return;

  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  file->Write(data.get(), size, offset);  // ignore result.
}

void BackendImpl::RestartCache(bool failure) {
  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
  int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
  int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);

  PrepareForRestart();
  if (failure) {
    DCHECK(!num_refs_);
    DCHECK(!open_entries_.size());
    DelayedCacheCleanup(path_);
  } else {
    DeleteCache(path_, false);
  }

  // Don't call Init() if directed by the unit test: we are simulating a failure
  // trying to re-enable the cache.
  if (unit_test_)
    init_ = true;  // Let the destructor do proper cleanup.
  else if (SyncInit() == net::OK) {
    stats_.SetCounter(Stats::FATAL_ERROR, errors);
    stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
    stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
    stats_.SetCounter(Stats::LAST_REPORT, last_report);
  }
}

void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}

int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // Easy job. This entry is already in memory.
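    // Hand out the already-open EntryImpl, adding a reference for the caller.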
    EntryImpl* this_entry = it->second;
    this_entry->AddRef();
    *entry = this_entry;
    return 0;
  }

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntryV2()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, address, read_only_));
  IncreaseNumRefs();
  *entry = NULL;

  TimeTicks start = TimeTicks::Now();
  if (!cache_entry->entry()->Load())
    return ERR_READ_FAILURE;

  if (IsLoaded()) {
    CACHE_UMA(AGE_MS, "LoadTime", 0, start);
  }

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
      Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->LoadNodeAddress())
    return ERR_READ_FAILURE;

  if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    // Don't remove this from the list (it is not linked properly). Instead,
    // break the link back to the entry because it is going away, and leave the
    // rankings node to be deleted if we find it through a list.
    rankings_.SetContents(cache_entry->rankings(), 0);
  } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    rankings_.SetContents(cache_entry->rankings(), address.value());
  }

  if (!cache_entry->DataSanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->SetDirtyFlag(0);
    cache_entry->FixForDelete();
  }

  // Prevent overwriting the dirty flag on the destructor.
  cache_entry->SetDirtyFlag(GetCurrentEntryId());

  if (cache_entry->dirty()) {
    Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
          address.value());
  }

  open_entries_[address.value()] = cache_entry.get();

  cache_entry->BeginLogging(net_log_, false);
  cache_entry.swap(entry);
  return 0;
}

EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
                                   bool find_parent, Addr entry_addr,
                                   bool* match_error) {
  Addr address(data_->table[hash & mask_]);
  scoped_refptr<EntryImpl> cache_entry, parent_entry;
  EntryImpl* tmp = NULL;
  bool found = false;
  std::set<CacheAddr> visited;
  *match_error = false;

  for (;;) {
    if (disabled_)
      break;

    if (visited.find(address.value()) != visited.end()) {
      // It's possible for a buggy version of the code to write a loop. Just
      // break it.
      Trace("Hash collision loop 0x%x", address.value());
      address.set_value(0);
      parent_entry->SetNextAddress(address);
    }
    visited.insert(address.value());

    if (!address.is_initialized()) {
      if (find_parent)
        found = true;
      break;
    }

    int error = NewEntry(address, &tmp);
    cache_entry.swap(&tmp);

    if (error || cache_entry->dirty()) {
      // This entry is dirty on disk (it was not properly closed): we cannot
      // trust it.
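      // Unlink it from the hash chain (point the parent, or the table slot,
      // at its successor), destroy it if it loaded, and restart the walk.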
      Addr child(0);
      if (!error)
        child.set_value(cache_entry->GetNextAddress());

      if (parent_entry.get()) {
        parent_entry->SetNextAddress(child);
        parent_entry = NULL;
      } else {
        data_->table[hash & mask_] = child.value();
      }

      Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
            address.value());

      if (!error) {
        // It is important to call DestroyInvalidEntry after removing this
        // entry from the table.
        DestroyInvalidEntry(cache_entry.get());
        cache_entry = NULL;
      } else {
        Trace("NewEntry failed on MatchEntry 0x%x", address.value());
      }

      // Restart the search.
      address.set_value(data_->table[hash & mask_]);
      visited.clear();
      continue;
    }

    DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
    if (cache_entry->IsSameEntry(key, hash)) {
      if (!cache_entry->Update())
        cache_entry = NULL;
      found = true;
      if (find_parent && entry_addr.value() != address.value()) {
        Trace("Entry not on the index 0x%x", address.value());
        *match_error = true;
        parent_entry = NULL;
      }
      break;
    }
    if (!cache_entry->Update())
      cache_entry = NULL;
    parent_entry = cache_entry;
    cache_entry = NULL;
    if (!parent_entry.get())
      break;

    address.set_value(parent_entry->GetNextAddress());
  }

  if (parent_entry.get() && (!find_parent || !found))
    parent_entry = NULL;

  if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
    *match_error = true;
    parent_entry = NULL;
  }

  if (cache_entry.get() && (find_parent || !found))
    cache_entry = NULL;

  find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
  FlushIndex();
  return tmp;
}

// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
  if (disabled_)
    return NULL;

  DCHECK(iter);

  const int kListsToSearch = 3;
  scoped_refptr<EntryImpl> entries[kListsToSearch];
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(*iter));
  *iter = NULL;

  if (!iterator.get()) {
    iterator.reset(new Rankings::Iterator(&rankings_));
    bool ret = false;

    // Get an entry from each list.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
                                        &iterator->nodes[i], &temp);
      entries[i].swap(&temp);  // The entry was already addref'd.
    }
    if (!ret)
      return NULL;
  } else {
    // Get the next entry from the last list, and the actual entries for the
    // elements on the other lists.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      if (iterator->list == i) {
        OpenFollowingEntryFromList(forward, iterator->list,
                                   &iterator->nodes[i], &temp);
      } else {
        temp = GetEnumeratedEntry(iterator->nodes[i],
                                  static_cast<Rankings::List>(i));
      }

      entries[i].swap(&temp);  // The entry was already addref'd.
    }
  }

  int newest = -1;
  int oldest = -1;
  Time access_times[kListsToSearch];
  for (int i = 0; i < kListsToSearch; i++) {
    if (entries[i].get()) {
      access_times[i] = entries[i]->GetLastUsed();
      if (newest < 0) {
        DCHECK_LT(oldest, 0);
        newest = oldest = i;
        continue;
      }
      if (access_times[i] > access_times[newest])
        newest = i;
      if (access_times[i] < access_times[oldest])
        oldest = i;
    }
  }

  if (newest < 0 || oldest < 0)
    return NULL;

  EntryImpl* next_entry;
  if (forward) {
    next_entry = entries[newest].get();
    iterator->list = static_cast<Rankings::List>(newest);
  } else {
    next_entry = entries[oldest].get();
    iterator->list = static_cast<Rankings::List>(oldest);
  }

  *iter = iterator.release();
  next_entry->AddRef();
  return next_entry;
}

bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
                                             CacheRankingsBlock** from_entry,
                                             EntryImpl** next_entry) {
  if (disabled_)
    return false;

  if (!new_eviction_ && Rankings::NO_USE != list)
    return false;

  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
  CacheRankingsBlock* next_block = forward ?
      rankings_.GetNext(rankings.get(), list) :
      rankings_.GetPrev(rankings.get(), list);
  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
  *from_entry = NULL;

  *next_entry = GetEnumeratedEntry(next.get(), list);
  if (!*next_entry)
    return false;

  *from_entry = next.release();
  return true;
}

EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
                                           Rankings::List list) {
  if (!next || disabled_)
    return NULL;

  EntryImpl* entry;
  int rv = NewEntry(Addr(next->Data()->contents), &entry);
  if (rv) {
    STRESS_NOTREACHED();
    rankings_.Remove(next, list, false);
    if (rv == ERR_INVALID_ADDRESS) {
      // There is nothing linked from the index. Delete the rankings node.
      DeleteBlock(next->address(), true);
    }
    return NULL;
  }

  if (entry->dirty()) {
    // We cannot trust this entry.
    InternalDoomEntry(entry);
    entry->Release();
    return NULL;
  }

  if (!entry->Update()) {
    STRESS_NOTREACHED();
    entry->Release();
    return NULL;
  }

  // Note that it is unfortunate (but possible) for this entry to be clean, but
  // not actually the real entry. In other words, we could have lost this entry
  // from the index, and it could have been replaced with a newer one. It's not
  // worth checking that this entry is "the real one", so we just return it and
  // let the enumeration continue; this entry will be evicted at some point, and
  // the regular path will work with the real entry. With time, this problem
  // will disappear because this scenario is just a bug.

  // Make sure that we save the key for later.
  entry->GetKey();

  return entry;
}

EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return NULL;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.
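
  // Reuse the deleted record: relink it through the eviction lists and count
  // it as a resurrect hit rather than a brand new entry.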
  eviction_.OnCreateEntry(deleted_entry);
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  Trace("Resurrect entry hit ");
  return deleted_entry;
}

void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++)
    VLOG(1) << stats[index].first << ": " << stats[index].second;
}

void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);
  int hit_ratio_as_percentage = stats_.GetHitRatio();

  CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
  // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the
  // ratio of that bin's total count to the count in the same bin in the Size2
  // histogram.
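  // (In other words, each sample is recorded with probability equal to the hit
  // ratio, so dividing the two histograms bin by bin recovers that ratio.)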
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
  if (!max_size)
    max_size++;
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  int age = (Time::Now() -
             Time::FromInternalValue(data_->header.create_time)).InHours();
  if (age)
    CACHE_UMA(HOURS, "FilesAge", 0, age);

  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    int cause = data_->header.create_time ? 0 : 1;
    if (!data_->header.lru.filled)
      cause |= 2;
    CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
  // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
  // time is the ratio of that bin's total count to the count in the same bin in
  // the TotalTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
  // is the ratio of that bin's total count to the count in the same bin in the
  // UseTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
            data_->header.num_entries * 100 / (mask_ + 1));

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}

void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that new fields are actually
  // updated by the new eviction algorithm.
  DCHECK(0x20000 == data_->header.version);
  data_->header.version = 0x20001;
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
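      // The upgrade happens in place (see UpgradeTo2_1() above): only the
      // version number and the NO_USE list count are touched, so the existing
      // index does not need to be rebuilt.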
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory with a single read.
  scoped_ptr<char[]> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
}

int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
                  " " << data_->header.num_entries;
    DCHECK_LT(num_entries, data_->header.num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
}

int BackendImpl::MaxBuffersSize() {
  static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
  static bool done = false;

  if (!done) {
    const int kMaxBuffersSize = 30 * 1024 * 1024;

    // We want to use up to 2% of the computer's memory.
    total_memory = total_memory * 2 / 100;
    if (total_memory > kMaxBuffersSize || total_memory <= 0)
      total_memory = kMaxBuffersSize;

    done = true;
  }

  return static_cast<int>(total_memory);
}

}  // namespace disk_cache