// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/backend_impl.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/stats_counters.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/errors.h"
#include "net/disk_cache/blockfile/experiments.h"
#include "net/disk_cache/blockfile/file.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/webfonts_histogram.h"
#include "net/disk_cache/cache_util.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ this

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const char* kIndexName = "index";

// Seems like ~240 MB corresponds to less than 50k entries for 99% of the
// people. Note that the actual target is to keep the index table load factor
// under 55% for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}

// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
      "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    }
    return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}

// A callback to perform final cleanup on the background thread.
void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
  backend->CleanupCache();
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

BackendImpl::BackendImpl(const base::FilePath& path,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::BackendImpl(const base::FilePath& path,
                         uint32 mask,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(mask),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(kMask),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

int BackendImpl::Init(const CompletionCallback& callback) {
  background_queue_.Init(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  bool should_create_timer = false;
  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    should_create_timer = true;
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be
  // enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  if (!disabled_ && should_create_timer) {
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

// ------------------------------------------------------------------------

int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenPrevEntry(iter, prev_entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = OpenEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = CreateEntryImpl(key);
  return (*entry) ?
      net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  entry->Release();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* node;
  void* iter = NULL;
  EntryImpl* next = OpenNextEntryImpl(&iter);
  if (!next)
    return net::OK;

  while (next) {
    node = next;
    next = OpenNextEntryImpl(&iter);

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Release();
      next = NULL;
      SyncEndEnumeration(iter);
    }

    node->Release();
  }

  return net::OK;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    void* iter = NULL;
    EntryImpl* entry = OpenNextEntryImpl(&iter);
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry->Release();
      SyncEndEnumeration(iter);
      return net::OK;
    }

    entry->DoomImpl();
    entry->Release();
    SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
  }
}

int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
  *next_entry = OpenNextEntryImpl(iter);
  return (*next_entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
  *prev_entry = OpenPrevEntryImpl(iter);
  return (*prev_entry) ?
      net::OK : net::ERR_FAILED;
}

void BackendImpl::SyncEndEnumeration(void* iter) {
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(iter));
}

void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
  if (disabled_)
    return;

  uint32 hash = base::Hash(key);
  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry) {
    if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
      UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    }
    cache_entry->Release();
  }
}

EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
  if (disabled_)
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry->Release();
    cache_entry = NULL;
    web_fonts_histogram::RecordEvictedEntry(key);
  } else if (!cache_entry) {
    web_fonts_histogram::RecordCacheMiss(key);
  }

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  int64 use_hours = total_hours - no_use_hours;

  if (!cache_entry) {
    CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
    CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  eviction_.OnOpenEntry(cache_entry);
  entry_count_++;

  Trace("Open hash 0x%x end: 0x%x", hash,
        cache_entry->entry()->address().value());
  CACHE_UMA(AGE_MS, "OpenTime", 0, start);
  CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
  CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
  CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
  stats_.OnEvent(Stats::OPEN_HIT);
  web_fonts_histogram::RecordCacheHit(cache_entry);
  SIMPLE_STATS_COUNTER("disk_cache.hit");
  return cache_entry;
}

EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
  if (disabled_ || key.empty())
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or
    // just a hash conflict.
    bool error;
    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(old_entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (parent_entry) {
      parent.swap(&parent_entry);
    } else if (data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return NULL;
    }
  }

  // The general flow is to allocate disk space and initialize the entry data,
  // followed by saving that to disk, then linking the entry through the index
  // and finally through the lists.
  // If there is a crash in this process, we may end up with:
  // a. Used, unreferenced empty blocks on disk (basically just garbage).
  // b. Used, unreferenced but meaningful data on disk (more garbage).
  // c. A fully formed entry, reachable only through the index.
  // d. A fully formed entry, also reachable through the lists, but still
  //    dirty.
  //
  // Anything after (b) can be automatically cleaned up. We may consider saving
  // the current operation (as we do while manipulating the lists) so that we
  // can detect and cleanup (a) and (b).

  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, entry_address, false));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry.get();

  // Save the entry.
  cache_entry->entry()->Store();
  cache_entry->rankings()->Store();
  IncreaseNumEntries();
  entry_count_++;

  // Link this entry through the index.
  if (parent.get()) {
    parent->SetNextAddress(entry_address);
  } else {
    data_->table[hash & mask_] = entry_address.value();
  }

  // Link this entry through the lists.
  eviction_.OnCreateEntry(cache_entry.get());

  CACHE_UMA(AGE_MS, "CreateTime", 0, start);
  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  FlushIndex();
  cache_entry->AddRef();
  return cache_entry.get();
}

EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
  return OpenFollowingEntry(true, iter);
}

EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
  return OpenFollowingEntry(false, iter);
}

bool BackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
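  // (Presumably the backend's size arithmetic can briefly exceed max_size_,
  // so clamping roughly 10% below kint32max leaves int32 headroom for it.)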
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}

void BackendImpl::SetType(net::CacheType type) {
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}

base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return NULL;
  return block_files_.GetFile(address);
}

base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
  return background_queue_.GetWeakPtr();
}

bool BackendImpl::CreateExternalFile(Addr* address) {
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    base::FilePath name = GetFileName(file_address);
    int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
                base::File::FLAG_CREATE | base::File::FLAG_EXCLUSIVE_WRITE;
    base::File file(name, flags);
    if (!file.IsValid()) {
      base::File::Error error = file.error_details();
      if (error != base::File::FILE_ERROR_EXISTS) {
        LOG(ERROR) << "Unable to create file: " << error;
        return false;
      }
      continue;
    }

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
    return;
  eviction_.UpdateRank(entry, modified);
}

void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  EntryImpl* cache_entry = NULL;
  if (NewEntry(address, &cache_entry)) {
    STRESS_NOTREACHED();
    return;
  }

  uint32 hash = cache_entry->GetHash();
  cache_entry->Release();

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
  FlushIndex();
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->entry()->address();
  bool error;
  EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  if (!entry->doomed()) {
    // We may have doomed this entry from within MatchEntry.
    eviction_.OnDoomEntry(entry);
    entry->InternalDoom();
    if (!new_eviction_) {
      DecreaseNumEntries();
    }
    stats_.OnEvent(Stats::DOOM_ENTRY);
  }

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else if (!error) {
    data_->table[hash & mask_] = child;
  }

  FlushIndex();
}

#if defined(NET_BUILD_STRESS_CACHE)

CacheAddr BackendImpl::GetNextAddr(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    EntryImpl* this_entry = it->second;
    return this_entry->GetNextAddress();
  }
  DCHECK(block_files_.IsValid(address));
  DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);

  CacheEntryBlock entry(File(address), address);
  CHECK(entry.Load());
  return entry.Data()->next;
}

void BackendImpl::NotLinked(EntryImpl* entry) {
  Addr entry_addr = entry->entry()->address();
  uint32 i = entry->GetHash() & mask_;
  Addr address(data_->table[i]);
  if (!address.is_initialized())
    return;

  for (;;) {
    DCHECK(entry_addr.value() != address.value());
    address.set_value(GetNextAddr(address));
    if (!address.is_initialized())
      break;
  }
}
#endif  // NET_BUILD_STRESS_CACHE

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
#if defined(NET_BUILD_STRESS_CACHE)
  NotLinked(entry);
#endif
  if (!new_eviction_)
    return;

  DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);

  Trace("Remove entry 0x%p", entry);
  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (data_->header.num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    eviction_.TrimCache(false);
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

int32 BackendImpl::GetCurrentEntryId() const {
  return data_->header.this_id;
}

int BackendImpl::MaxFileSize() const {
  return cache_type() == net::PNACL_CACHE ? max_size_ : max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  FlushIndex();

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(size, 0);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return (num_pending_io_ > 5 || user_load_);
}

std::string BackendImpl::HistogramName(const char* name, int experiment) const {
  if (!experiment)
    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
                            name, experiment);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }
  return false;
}

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.
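
  // The stats timer fires every 30 seconds (see SyncInit), so Stats::TIMER
  // advances 120 ticks per hour; the divisions by 120 below convert the
  // counter to hours.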
  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(data_->header.num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((data_->header.num_bytes / 1024) / use_time));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  STRESS_NOTREACHED();
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  stats_.OnEvent(Stats::FATAL_ERROR);
  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImpl::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  if (disabled_)
    return;

  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ?
          1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    StoreStats();
}

void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}

void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}

void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}

int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
  background_queue_.FlushQueue(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::RunTaskForTest(const base::Closure& task,
                                const CompletionCallback& callback) {
  background_queue_.RunTask(task, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimCache(empty);
}

void BackendImpl::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}

base::RepeatingTimer<BackendImpl>* BackendImpl::GetTimerForTest() {
  return timer_.get();
}

int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }

  return CheckAllEntries();
}

void BackendImpl::FlushIndex() {
  if (index_.get() && !disabled_)
    index_->Flush();
}

// ------------------------------------------------------------------------

net::CacheType BackendImpl::GetCacheType() const {
  return cache_type_;
}

int32 BackendImpl::GetEntryCount() const {
  if (!index_.get() || disabled_)
    return 0;
  // num_entries includes entries already evicted.
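  // Under the new eviction scheme, doomed entries linger on the DELETED list
  // (and remain counted in num_entries) for a while, so subtract them here.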
  int32 not_deleted = data_->header.num_entries -
                      data_->header.lru.sizes[Rankings::DELETED];

  if (not_deleted < 0) {
    NOTREACHED();
    not_deleted = 0;
  }

  return not_deleted;
}

int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.CreateEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntry(const std::string& key,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntry(key, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomAllEntries(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                    const base::Time end_time,
                                    const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesSince(const base::Time initial_time,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesSince(initial_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenNextEntry(iter, next_entry, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::EndEnumeration(void** iter) {
  background_queue_.EndEnumeration(*iter);
  *iter = NULL;
}

void BackendImpl::GetStats(StatsItems* stats) {
  if (disabled_)
    return;

  std::pair<std::string, std::string> item;

  item.first = "Entries";
  item.second = base::StringPrintf("%d", data_->header.num_entries);
  stats->push_back(item);

  item.first = "Pending IO";
  item.second = base::StringPrintf("%d", num_pending_io_);
  stats->push_back(item);

  item.first = "Max size";
  item.second = base::StringPrintf("%d", max_size_);
  stats->push_back(item);

  item.first = "Current size";
  item.second = base::StringPrintf("%d", data_->header.num_bytes);
  stats->push_back(item);

  item.first = "Cache type";
  item.second = "Blockfile Cache";
  stats->push_back(item);

  stats_.GetItems(stats);
}

void BackendImpl::OnExternalCacheHit(const std::string& key) {
  background_queue_.OnExternalCacheHit(key);
}

// ------------------------------------------------------------------------

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
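  // (The on-disk version packs major << 16 | minor, so 0x20001 is 2.1; see
  // the version checks in CheckIndex() and UpgradeTo2_1().)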
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!base::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
              base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_EXCLUSIVE_WRITE;
  base::File base_file(index_name, flags);
  if (!base_file.IsValid())
    return false;

  bool ret = true;
  *file_created = base_file.created();

  scoped_refptr<disk_cache::File> file(new disk_cache::File(base_file.Pass()));
  if (*file_created)
    ret = CreateBackingStore(file.get());

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = static_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}

// The maximum cache size will be either set explicitly by the caller, or
// calculated by this code.
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // If table_len is provided, the index file exists.
  DCHECK(!table_len || data_->header.magic);

  // The user is not setting the size, let's figure it out.
  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
  if (available < 0) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  if (table_len)
    available += data_->header.num_bytes;

  max_size_ = PreferredCacheSize(available);

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_ = current_max_size;
}

bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;

    data_->header.stats = address.value();
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
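  // The stats were saved to a regular block file, so the serialized data sits
  // at the entry's block offset, past the kBlockHeaderSize-byte file header.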
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

void BackendImpl::StoreStats() {
  int size = stats_.StorageSize();
  scoped_ptr<char[]> data(new char[size]);
  Addr address;
  size = stats_.SerializeStats(data.get(), size, &address);
  DCHECK(size);
  if (!address.is_initialized())
    return;

  MappedFile* file = File(address);
  if (!file)
    return;

  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  file->Write(data.get(), size, offset);  // ignore result.
}

void BackendImpl::RestartCache(bool failure) {
  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
  int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
  int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);

  PrepareForRestart();
  if (failure) {
    DCHECK(!num_refs_);
    DCHECK(!open_entries_.size());
    DelayedCacheCleanup(path_);
  } else {
    DeleteCache(path_, false);
  }

  // Don't call Init() if directed by the unit test: we are simulating a
  // failure trying to re-enable the cache.
  if (unit_test_)
    init_ = true;  // Let the destructor do proper cleanup.
  else if (SyncInit() == net::OK) {
    stats_.SetCounter(Stats::FATAL_ERROR, errors);
    stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
    stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
    stats_.SetCounter(Stats::LAST_REPORT, last_report);
  }
}

void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}

int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // Easy job. This entry is already in memory.
    EntryImpl* this_entry = it->second;
    this_entry->AddRef();
    *entry = this_entry;
    return 0;
  }

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntryV2()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, address, read_only_));
  IncreaseNumRefs();
  *entry = NULL;

  TimeTicks start = TimeTicks::Now();
  if (!cache_entry->entry()->Load())
    return ERR_READ_FAILURE;

  if (IsLoaded()) {
    CACHE_UMA(AGE_MS, "LoadTime", 0, start);
  }

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
      Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->LoadNodeAddress())
    return ERR_READ_FAILURE;

  if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    // Don't remove this from the list (it is not linked properly). Instead,
    // break the link back to the entry because it is going away, and leave the
    // rankings node to be deleted if we find it through a list.
    rankings_.SetContents(cache_entry->rankings(), 0);
  } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    rankings_.SetContents(cache_entry->rankings(), address.value());
  }

  if (!cache_entry->DataSanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->SetDirtyFlag(0);
    cache_entry->FixForDelete();
  }

  // Prevent overwriting the dirty flag on the destructor.
  cache_entry->SetDirtyFlag(GetCurrentEntryId());

  if (cache_entry->dirty()) {
    Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
          address.value());
  }

  open_entries_[address.value()] = cache_entry.get();

  cache_entry->BeginLogging(net_log_, false);
  cache_entry.swap(entry);
  return 0;
}

EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
                                   bool find_parent, Addr entry_addr,
                                   bool* match_error) {
  Addr address(data_->table[hash & mask_]);
  scoped_refptr<EntryImpl> cache_entry, parent_entry;
  EntryImpl* tmp = NULL;
  bool found = false;
  std::set<CacheAddr> visited;
  *match_error = false;

  for (;;) {
    if (disabled_)
      break;

    if (visited.find(address.value()) != visited.end()) {
      // It's possible for a buggy version of the code to write a loop. Just
      // break it.
      Trace("Hash collision loop 0x%x", address.value());
      address.set_value(0);
      parent_entry->SetNextAddress(address);
    }
    visited.insert(address.value());

    if (!address.is_initialized()) {
      if (find_parent)
        found = true;
      break;
    }

    int error = NewEntry(address, &tmp);
    cache_entry.swap(&tmp);

    if (error || cache_entry->dirty()) {
      // This entry is dirty on disk (it was not properly closed): we cannot
      // trust it.
      Addr child(0);
      if (!error)
        child.set_value(cache_entry->GetNextAddress());

      if (parent_entry.get()) {
        parent_entry->SetNextAddress(child);
        parent_entry = NULL;
      } else {
        data_->table[hash & mask_] = child.value();
      }

      Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
            address.value());

      if (!error) {
        // It is important to call DestroyInvalidEntry after removing this
        // entry from the table.
        DestroyInvalidEntry(cache_entry.get());
        cache_entry = NULL;
      } else {
        Trace("NewEntry failed on MatchEntry 0x%x", address.value());
      }

      // Restart the search.
      address.set_value(data_->table[hash & mask_]);
      visited.clear();
      continue;
    }

    DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
    if (cache_entry->IsSameEntry(key, hash)) {
      if (!cache_entry->Update())
        cache_entry = NULL;
      found = true;
      if (find_parent && entry_addr.value() != address.value()) {
        Trace("Entry not on the index 0x%x", address.value());
        *match_error = true;
        parent_entry = NULL;
      }
      break;
    }
    if (!cache_entry->Update())
      cache_entry = NULL;
    parent_entry = cache_entry;
    cache_entry = NULL;
    if (!parent_entry.get())
      break;

    address.set_value(parent_entry->GetNextAddress());
  }

  if (parent_entry.get() && (!find_parent || !found))
    parent_entry = NULL;

  if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
    *match_error = true;
    parent_entry = NULL;
  }

  if (cache_entry.get() && (find_parent || !found))
    cache_entry = NULL;

  find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
  FlushIndex();
  return tmp;
}

// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
  if (disabled_)
    return NULL;

  DCHECK(iter);

  const int kListsToSearch = 3;
  scoped_refptr<EntryImpl> entries[kListsToSearch];
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(*iter));
  *iter = NULL;

  if (!iterator.get()) {
    iterator.reset(new Rankings::Iterator(&rankings_));
    bool ret = false;

    // Get an entry from each list.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
                                        &iterator->nodes[i], &temp);
      entries[i].swap(&temp);  // The entry was already addref'd.
    }
    if (!ret)
      return NULL;
  } else {
    // Get the next entry from the last list, and the actual entries for the
    // elements on the other lists.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      if (iterator->list == i) {
        OpenFollowingEntryFromList(forward, iterator->list,
                                   &iterator->nodes[i], &temp);
      } else {
        temp = GetEnumeratedEntry(iterator->nodes[i],
                                  static_cast<Rankings::List>(i));
      }

      entries[i].swap(&temp);  // The entry was already addref'd.
    }
  }

  int newest = -1;
  int oldest = -1;
  Time access_times[kListsToSearch];
  for (int i = 0; i < kListsToSearch; i++) {
    if (entries[i].get()) {
      access_times[i] = entries[i]->GetLastUsed();
      if (newest < 0) {
        DCHECK_LT(oldest, 0);
        newest = oldest = i;
        continue;
      }
      if (access_times[i] > access_times[newest])
        newest = i;
      if (access_times[i] < access_times[oldest])
        oldest = i;
    }
  }

  if (newest < 0 || oldest < 0)
    return NULL;

  EntryImpl* next_entry;
  if (forward) {
    next_entry = entries[newest].get();
    iterator->list = static_cast<Rankings::List>(newest);
  } else {
    next_entry = entries[oldest].get();
    iterator->list = static_cast<Rankings::List>(oldest);
  }

  *iter = iterator.release();
  next_entry->AddRef();
  return next_entry;
}

bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
                                             CacheRankingsBlock** from_entry,
                                             EntryImpl** next_entry) {
  if (disabled_)
    return false;

  if (!new_eviction_ && Rankings::NO_USE != list)
    return false;

  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
  CacheRankingsBlock* next_block = forward ?
      rankings_.GetNext(rankings.get(), list) :
      rankings_.GetPrev(rankings.get(), list);
  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
  *from_entry = NULL;

  *next_entry = GetEnumeratedEntry(next.get(), list);
  if (!*next_entry)
    return false;

  *from_entry = next.release();
  return true;
}

EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
                                           Rankings::List list) {
  if (!next || disabled_)
    return NULL;

  EntryImpl* entry;
  int rv = NewEntry(Addr(next->Data()->contents), &entry);
  if (rv) {
    STRESS_NOTREACHED();
    rankings_.Remove(next, list, false);
    if (rv == ERR_INVALID_ADDRESS) {
      // There is nothing linked from the index. Delete the rankings node.
      DeleteBlock(next->address(), true);
    }
    return NULL;
  }

  if (entry->dirty()) {
    // We cannot trust this entry.
    InternalDoomEntry(entry);
    entry->Release();
    return NULL;
  }

  if (!entry->Update()) {
    STRESS_NOTREACHED();
    entry->Release();
    return NULL;
  }

  // Note that it is unfortunate (but possible) for this entry to be clean, but
  // not actually the real entry. In other words, we could have lost this entry
  // from the index, and it could have been replaced with a newer one. It's not
  // worth checking that this entry is "the real one", so we just return it and
  // let the enumeration continue; this entry will be evicted at some point,
  // and the regular path will work with the real entry. With time, this
  // problem will disappear because this scenario is just a bug.

  // Make sure that we save the key for later.
  entry->GetKey();

  return entry;
}

EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return NULL;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.
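  // Rather than allocating fresh blocks, reuse the deleted entry's storage
  // and relink it through the eviction lists below (a "resurrect hit").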

  eviction_.OnCreateEntry(deleted_entry);
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  Trace("Resurrect entry hit ");
  return deleted_entry;
}

void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++)
    VLOG(1) << stats[index].first << ": " << stats[index].second;
}

void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);
  int hit_ratio_as_percentage = stats_.GetHitRatio();

  CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
  // For any bin in HitRatioBySize2, the hit ratio of caches of that size is
  // the ratio of that bin's total count to the count in the same bin in the
  // Size2 histogram.
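  // (Emitting the sample with probability equal to the hit ratio makes each
  // bin grow at a rate proportional to hit ratio times population, which is
  // what lets that division recover the average hit ratio per size.)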
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
  if (!max_size)
    max_size++;
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  int age = (Time::Now() -
             Time::FromInternalValue(data_->header.create_time)).InHours();
  if (age)
    CACHE_UMA(HOURS, "FilesAge", 0, age);

  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    int cause = data_->header.create_time ? 0 : 1;
    if (!data_->header.lru.filled)
      cause |= 2;
    CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
  // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
  // time is the ratio of that bin's total count to the count in the same bin
  // in the TotalTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
  // is the ratio of that bin's total count to the count in the same bin in the
  // UseTime histogram.
  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
  // is the ratio of that bin's total count to the count in the same bin in the
  // UseTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);

  // Number of entries trimmed per hour of active use.
  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
            data_->header.num_entries * 100 / (mask_ + 1));

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}

void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that the new fields are actually
  // updated by the new eviction algorithm.
  DCHECK(0x20000 == data_->header.version);
  data_->header.version = 0x20001;
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}
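
// A note on the version encoding, derived from the constants used above:
// the major version lives in the high 16 bits and the minor version in the
// low 16 bits, so 0x20000 is 2.0, 0x20001 is 2.1, and both satisfy
// (version >> 16) == 2. That is why CheckIndex() below compares
// (version >> 16) when the new eviction algorithm is enabled, accepting any
// 2.x file, while the in-place upgrade only needs to bump the low bits.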

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  // table_len is expected to be a power of two, so this produces the bit mask
  // used to map a hash to its bucket (a 0x10000-entry table yields the mask
  // 0xffff).
  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory.
  return index_->Preload();
}

int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    // Walk this bucket's chain of entries.
    for (;;) {
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
                  " " << data_->header.num_entries;
    DCHECK_LT(num_entries, data_->header.num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
}

int BackendImpl::MaxBuffersSize() {
  static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
  static bool done = false;

  if (!done) {
    const int kMaxBuffersSize = 30 * 1024 * 1024;

    // We want to use up to 2% of the computer's memory. For example, 2% of
    // 4 GB is about 82 MB, which the 30 MB cap overrides, while 2% of 1 GB
    // is about 20 MB and is used as-is.
    total_memory = total_memory * 2 / 100;
    if (total_memory > kMaxBuffersSize || total_memory <= 0)
      total_memory = kMaxBuffersSize;

    done = true;
  }

  return static_cast<int>(total_memory);
}

}  // namespace disk_cache
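
// ------------------------------------------------------------------------

// Illustrative sketch, not code from this file: how the consistency checks
// above compose when the backend verifies itself.
//
//   if (!CheckIndex())           // Validates the header and table geometry.
//     ...                        // The index file cannot be trusted.
//   int rv = CheckAllEntries();  // Walks every bucket's entry chain.
//   if (rv < 0)
//     ...                        // e.g. ERR_NUM_ENTRIES_MISMATCH.
//   // Otherwise rv is the number of dirty entries that were found.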