// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/backend_impl.h"

#include "base/field_trial.h"
#include "base/file_path.h"
#include "base/file_util.h"
#include "base/histogram.h"
#include "base/message_loop.h"
#include "base/rand_util.h"
#include "base/string_util.h"
#include "base/sys_info.h"
#include "base/timer.h"
#include "base/worker_pool.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/errors.h"
#include "net/disk_cache/hash.h"
#include "net/disk_cache/file.h"

// This has to be defined before including histogram_macros.h from this file.
#define NET_DISK_CACHE_BACKEND_IMPL_CC_
#include "net/disk_cache/histogram_macros.h"

using base::Time;
using base::TimeDelta;

namespace {

const char* kIndexName = "index";
const int kMaxOldFolders = 100;

// Seems like ~240 MB correspond to less than 50k entries for 99% of the people.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
const int kDefaultCacheSize = 80 * 1024 * 1024;

int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}
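
// For instance, with these helpers a cache whose target size is at most
// 240 MB (k64kEntriesStore) keeps the base 64k-bucket table, so its index file
// occupies sizeof(disk_cache::IndexHeader) plus
// 64 * 1024 * sizeof(disk_cache::CacheAddr) bytes; each doubling of the
// storage size doubles the table, up to 16x the base length (the 4 MB table
// mentioned above).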

// ------------------------------------------------------------------------

// Returns a fully qualified name from path and name, using a given name prefix
// and index number. For instance, if the arguments are "/foo", "bar" and 5, it
// will return "/foo/old_bar_005".
FilePath GetPrefixedName(const FilePath& path, const std::string& name,
                         int index) {
  std::string tmp = StringPrintf("%s%s_%03d", "old_", name.c_str(), index);
  return path.AppendASCII(tmp);
}

// This is a simple Task to cleanup old caches.
class CleanupTask : public Task {
 public:
  CleanupTask(const FilePath& path, const std::string& name)
      : path_(path), name_(name) {}

  virtual void Run();

 private:
  FilePath path_;
  std::string name_;
  DISALLOW_EVIL_CONSTRUCTORS(CleanupTask);
};

void CleanupTask::Run() {
  for (int i = 0; i < kMaxOldFolders; i++) {
    FilePath to_delete = GetPrefixedName(path_, name_, i);
    disk_cache::DeleteCache(to_delete, true);
  }
}

// Returns a full path to rename the current cache, in order to delete it. path
// is the current folder location, and name is the current folder name.
FilePath GetTempCacheName(const FilePath& path, const std::string& name) {
  // We'll attempt to have up to kMaxOldFolders folders for deletion.
  for (int i = 0; i < kMaxOldFolders; i++) {
    FilePath to_delete = GetPrefixedName(path, name, i);
    if (!file_util::PathExists(to_delete))
      return to_delete;
  }
  return FilePath();
}

// Moves the cache files to a new folder and creates a task to delete them.
bool DelayedCacheCleanup(const FilePath& full_path) {
  FilePath current_path = full_path.StripTrailingSeparators();

  FilePath path = current_path.DirName();
  FilePath name = current_path.BaseName();
#if defined(OS_POSIX)
  std::string name_str = name.value();
#elif defined(OS_WIN)
  // We created this file so it should only contain ASCII.
  std::string name_str = WideToASCII(name.value());
#endif

  FilePath to_delete = GetTempCacheName(path, name_str);
  if (to_delete.empty()) {
    LOG(ERROR) << "Unable to get another cache folder";
    return false;
  }

  if (!disk_cache::MoveCache(full_path, to_delete)) {
    LOG(ERROR) << "Unable to rename cache folder";
    return false;
  }

#if defined(OS_WIN)
  WorkerPool::PostTask(FROM_HERE, new CleanupTask(path, name_str), true);
#elif defined(OS_POSIX)
  // TODO(rvargas): Use the worker pool.
  MessageLoop::current()->PostTask(FROM_HERE, new CleanupTask(path, name_str));
#endif
  return true;
}

// Sets |current_group| for the current experiment. Returns false if the files
// should be discarded.
bool InitExperiment(int* current_group) {
  if (*current_group == 3 || *current_group == 4) {
    // Discard current cache for groups 3 and 4.
    return false;
  }

  // There is no experiment.
  *current_group = 0;
  return true;
}

// Initializes the field trial structures to allow performance measurements
// for the current cache configuration.
void SetFieldTrialInfo(int size_group) {
  static bool first = true;
  if (!first)
    return;

  // Field trials involve static objects so we have to do this only once.
  first = false;
  scoped_refptr<FieldTrial> trial1 = new FieldTrial("CacheSize", 10);
  std::string group1 = StringPrintf("CacheSizeGroup_%d", size_group);
  trial1->AppendGroup(group1, FieldTrial::kAllRemainingProbability);
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

Backend* CreateCacheBackend(const FilePath& full_path, bool force,
                            int max_bytes, net::CacheType type) {
  // Create a backend without extra flags.
  return BackendImpl::CreateBackend(full_path, force, max_bytes, type, kNone);
}

int PreferedCacheSize(int64 available) {
  // If there is not enough space to use kDefaultCacheSize, use 80% of the
  // available space.
  if (available < kDefaultCacheSize)
    return static_cast<int32>(available * 8 / 10);

  // Don't use more than 10% of the available space.
  if (available < 10 * kDefaultCacheSize)
    return kDefaultCacheSize;

  // Use 10% of the free space until we reach 2.5 * kDefaultCacheSize.
  if (available < static_cast<int64>(kDefaultCacheSize) * 25)
    return static_cast<int32>(available / 10);

  // After reaching our target size (2.5 * kDefaultCacheSize), attempt to use
  // 1% of the available space.
  if (available < static_cast<int64>(kDefaultCacheSize) * 100)
    return kDefaultCacheSize * 5 / 2;

  int64 one_percent = available / 100;
  if (one_percent > kint32max)
    return kint32max;

  return static_cast<int32>(one_percent);
}
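
// For instance, with the default size of 80 MB: 50 MB free -> 40 MB (80% of
// the free space), 500 MB free -> 80 MB, 1 GB free -> ~100 MB (10% of the free
// space), 4 GB free -> 200 MB (2.5 * kDefaultCacheSize); only above 8 GB of
// free space does the 1% rule kick in.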

// ------------------------------------------------------------------------

// If the initialization of the cache fails, and force is true, we will discard
// the whole cache and create a new one. In order to process a potentially large
// number of files, we'll rename the cache folder to old_ + original_name +
// number (located in the same parent folder), and spawn a worker thread to
// delete all the files on all the stale cache folders. The whole process can
// still fail if we are not able to rename the cache folder (for instance due to
// a sharing violation), and in that case a cache for this profile (on the
// desired path) cannot be created.
//
// Static.
Backend* BackendImpl::CreateBackend(const FilePath& full_path, bool force,
                                    int max_bytes, net::CacheType type,
                                    BackendFlags flags) {
  BackendImpl* cache = new BackendImpl(full_path);
  cache->SetMaxSize(max_bytes);
  cache->SetType(type);
  cache->SetFlags(flags);
  if (cache->Init())
    return cache;

  delete cache;
  if (!force)
    return NULL;

  if (!DelayedCacheCleanup(full_path))
    return NULL;

  // The worker thread will start deleting files soon, but the original folder
  // is not there anymore... let's create a new set of files.
  cache = new BackendImpl(full_path);
  cache->SetMaxSize(max_bytes);
  cache->SetType(type);
  cache->SetFlags(flags);
  if (cache->Init())
    return cache;

  delete cache;
  LOG(ERROR) << "Unable to create cache";
  return NULL;
}
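
// Illustrative only: a typical client creates the cache through the public
// factory above rather than through this class, e.g.
//   disk_cache::Backend* cache =
//       disk_cache::CreateCacheBackend(path, true /* force */, 0,
//                                      net::DISK_CACHE);
// where |path| is the caller's cache directory and a |max_bytes| of zero lets
// SetMaxSize() fall back to the automatic sizing done by AdjustMaxCacheSize().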

bool BackendImpl::Init() {
  DCHECK(!init_);
  if (init_)
    return false;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return false;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;

  if (!restarted_) {
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.Start(TimeDelta::FromMilliseconds(timer_delay), this,
                 &BackendImpl::OnStatsTimer);
  }

  init_ = true;

  if (data_->header.experiment != 0 && cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return false;
  }

  if (!(user_flags_ & disk_cache::kNoRandom)) {
    // The unit test controls directly what to test.
    if (!InitExperiment(&data_->header.experiment))
      return false;

    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return false;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  if (data_->header.crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else {
    ReportError(0);
    data_->header.crash = 1;
  }

  if (!block_files_.Init(create_files))
    return false;

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!stats_.Init(this, &data_->header.stats))
    return false;

  disabled_ = !rankings_.Init(this, new_eviction_);
  eviction_.Init(this);

  // Setup load-time data only for the main cache.
  if (cache_type() == net::DISK_CACHE)
    SetFieldTrialInfo(GetSizeGroup());

  return !disabled_;
}

BackendImpl::~BackendImpl() {
  Trace("Backend destructor");
  if (!init_)
    return;

  if (data_)
    data_->header.crash = 0;

  timer_.Stop();

  File::WaitForPendingIO(&num_pending_io_);
  DCHECK(!num_refs_);
}

// ------------------------------------------------------------------------

int32 BackendImpl::GetEntryCount() const {
  if (!index_)
    return 0;
  // num_entries includes entries already evicted.
  int32 not_deleted = data_->header.num_entries -
                      data_->header.lru.sizes[Rankings::DELETED];

  if (not_deleted < 0) {
    NOTREACHED();
    not_deleted = 0;
  }

  return not_deleted;
}

bool BackendImpl::OpenEntry(const std::string& key, Entry** entry) {
  if (disabled_)
    return false;

  Time start = Time::Now();
  uint32 hash = Hash(key);

  EntryImpl* cache_entry = MatchEntry(key, hash, false);
  if (!cache_entry) {
    stats_.OnEvent(Stats::OPEN_MISS);
    return false;
  }

  if (ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry->Release();
    stats_.OnEvent(Stats::OPEN_MISS);
    return false;
  }

  eviction_.OnOpenEntry(cache_entry);
  DCHECK(entry);
  *entry = cache_entry;

  CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start);
  stats_.OnEvent(Stats::OPEN_HIT);
  return true;
}

int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
                           CompletionCallback* callback) {
  if (OpenEntry(key, entry))
    return net::OK;

  return net::ERR_FAILED;
}

bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) {
  if (disabled_ || key.empty())
    return false;

  DCHECK(entry);
  *entry = NULL;

  Time start = Time::Now();
  uint32 hash = Hash(key);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or just
    // a hash conflict.
    EntryImpl* old_entry = MatchEntry(key, hash, false);
    if (old_entry)
      return ResurrectEntry(old_entry, entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true);
    if (!parent_entry) {
      NOTREACHED();
      return false;
    }
    parent.swap(&parent_entry);
  }

  int num_blocks;
  size_t key1_len = sizeof(EntryStore) - offsetof(EntryStore, key);
  if (key.size() < key1_len ||
      key.size() > static_cast<size_t>(kMaxInternalKeyLength))
    num_blocks = 1;
  else
    num_blocks = static_cast<int>((key.size() - key1_len) / 256 + 2);

  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return false;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return false;
  }

  scoped_refptr<EntryImpl> cache_entry(new EntryImpl(this, entry_address));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return false;
  }

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry;

  if (parent.get())
    parent->SetNextAddress(entry_address);

  block_files_.GetFile(entry_address)->Store(cache_entry->entry());
  block_files_.GetFile(node_address)->Store(cache_entry->rankings());

  IncreaseNumEntries();
  eviction_.OnCreateEntry(cache_entry);
  if (!parent.get())
    data_->table[hash & mask_] = entry_address.value();

  cache_entry.swap(reinterpret_cast<EntryImpl**>(entry));

  CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start);
  stats_.OnEvent(Stats::CREATE_HIT);
  Trace("create entry hit ");
  return true;
}

int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
                             CompletionCallback* callback) {
  if (CreateEntry(key, entry))
    return net::OK;

  return net::ERR_FAILED;
}

bool BackendImpl::DoomEntry(const std::string& key) {
  if (disabled_)
    return false;

  Entry* entry;
  if (!OpenEntry(key, &entry))
    return false;

  // Note that you'd think you could just pass &entry_impl to OpenEntry,
  // but that triggers strict aliasing problems with gcc.
  EntryImpl* entry_impl = reinterpret_cast<EntryImpl*>(entry);
  entry_impl->Doom();
  entry_impl->Release();
  return true;
}

int BackendImpl::DoomEntry(const std::string& key,
                           CompletionCallback* callback) {
  if (DoomEntry(key))
    return net::OK;

  return net::ERR_FAILED;
}

bool BackendImpl::DoomAllEntries() {
  if (!num_refs_) {
    PrepareForRestart();
    DeleteCache(path_, false);
    return Init();
  } else {
    if (disabled_)
      return false;

    eviction_.TrimCache(true);
    stats_.OnEvent(Stats::DOOM_CACHE);
    return true;
  }
}

int BackendImpl::DoomAllEntries(CompletionCallback* callback) {
  if (DoomAllEntries())
    return net::OK;

  return net::ERR_FAILED;
}

bool BackendImpl::DoomEntriesBetween(const Time initial_time,
                                     const Time end_time) {
  if (end_time.is_null())
    return DoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return false;

  Entry* node, *next;
  void* iter = NULL;
  if (!OpenNextEntry(&iter, &next))
    return true;

  while (next) {
    node = next;
    if (!OpenNextEntry(&iter, &next))
      next = NULL;

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->Doom();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Close();
      next = NULL;
      EndEnumeration(&iter);
    }

    node->Close();
  }

  return true;
}

int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                    const base::Time end_time,
                                    CompletionCallback* callback) {
  if (DoomEntriesBetween(initial_time, end_time))
    return net::OK;

  return net::ERR_FAILED;
}

// We use OpenNextEntry to retrieve elements from the cache, until we get
// entries that are too old.
bool BackendImpl::DoomEntriesSince(const Time initial_time) {
  if (disabled_)
    return false;

  for (;;) {
    Entry* entry;
    void* iter = NULL;
    if (!OpenNextEntry(&iter, &entry))
      return true;

    if (initial_time > entry->GetLastUsed()) {
      entry->Close();
      EndEnumeration(&iter);
      return true;
    }

    entry->Doom();
    entry->Close();
    EndEnumeration(&iter);  // Dooming the entry invalidates the iterator.
  }
}

int BackendImpl::DoomEntriesSince(const base::Time initial_time,
                                  CompletionCallback* callback) {
  if (DoomEntriesSince(initial_time))
    return net::OK;

  return net::ERR_FAILED;
}

bool BackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
  return OpenFollowingEntry(true, iter, next_entry);
}

int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
                               CompletionCallback* callback) {
  if (OpenNextEntry(iter, next_entry))
    return net::OK;

  return net::ERR_FAILED;
}

void BackendImpl::EndEnumeration(void** iter) {
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(*iter));
  *iter = NULL;
}

void BackendImpl::GetStats(StatsItems* stats) {
  if (disabled_)
    return;

  std::pair<std::string, std::string> item;

  item.first = "Entries";
  item.second = StringPrintf("%d", data_->header.num_entries);
  stats->push_back(item);

  item.first = "Pending IO";
  item.second = StringPrintf("%d", num_pending_io_);
  stats->push_back(item);

  item.first = "Max size";
  item.second = StringPrintf("%d", max_size_);
  stats->push_back(item);

  item.first = "Current size";
  item.second = StringPrintf("%d", data_->header.num_bytes);
  stats->push_back(item);

  stats_.GetItems(stats);
}

// ------------------------------------------------------------------------

bool BackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}

void BackendImpl::SetType(net::CacheType type) {
  DCHECK(type != net::MEMORY_CACHE);
  cache_type_ = type;
}

FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return FilePath();
  }

  std::string tmp = StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return NULL;
  return block_files_.GetFile(address);
}

bool BackendImpl::CreateExternalFile(Addr* address) {
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    FilePath name = GetFileName(file_address);
    int flags = base::PLATFORM_FILE_READ |
                base::PLATFORM_FILE_WRITE |
                base::PLATFORM_FILE_CREATE |
                base::PLATFORM_FILE_EXCLUSIVE_WRITE;
    scoped_refptr<disk_cache::File> file(new disk_cache::File(
        base::CreatePlatformFile(name, flags, NULL)));
    if (!file->IsValid())
      continue;

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (!read_only_) {
    eviction_.UpdateRank(entry, modified);
  }
}

void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  EntryImpl* cache_entry = NULL;
  bool dirty;
  if (NewEntry(address, &cache_entry, &dirty))
    return;

  uint32 hash = cache_entry->GetHash();
  cache_entry->Release();

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  EntryImpl* parent_entry = MatchEntry(key, hash, true);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else {
    data_->table[hash & mask_] = child;
  }

  if (!new_eviction_) {
    DecreaseNumEntries();
  }

  stats_.OnEvent(Stats::DOOM_ENTRY);
}

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
  if (!new_eviction_)
    return;

  DCHECK(ENTRY_NORMAL != entry->entry()->Data()->state);

  Trace("Remove entry 0x%p", entry);
  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::CacheEntryDestroyed(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
  DecreaseNumRefs();
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

int32 BackendImpl::GetCurrentEntryId() const {
  return data_->header.this_id;
}

int BackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", GetSizeGroup(), num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return num_pending_io_ > 5;
}

std::string BackendImpl::HistogramName(const char* name,
                                       int experiment) const {
  if (!experiment)
    return StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return StringPrintf("DiskCache.%d.%s_%d", cache_type_, name, experiment);
}

int BackendImpl::GetSizeGroup() const {
  if (disabled_)
    return 0;

  // We want to report times grouped by the current cache size (50 MB groups).
  int group = data_->header.num_bytes / (50 * 1024 * 1024);
  if (group > 6)
    group = 6;  // Limit the number of groups, just in case.
  return group;
}

// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }
  return false;
}

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);

  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_hours = stats_.GetCounter(Stats::TIMER) / 120;
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    MessageLoop::current()->PostTask(FROM_HERE,
        factory_.NewRunnableMethod(&BackendImpl::RestartCache));
}

void BackendImpl::ReportError(int error) {
  // We transmit positive numbers, instead of direct error codes.
  DCHECK(error <= 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}
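
// OnStatsTimer fires every 30 seconds (1 second in unit tests; see Init()), so
// Stats::TIMER advances 120 ticks per hour, which is why elapsed hours are
// computed as TIMER / 120 elsewhere in this file. The OPEN_ENTRIES counter
// below is nudged 1/50th of the way toward the current number of references on
// every tick, producing a smoothed average rather than a point sample.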
void BackendImpl::OnStatsTimer() {
  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    stats_.Store();
}

void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}

void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}

void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}

int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
    return num_entries;
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return CheckAllEntries();
}

bool BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry) {
  return OpenFollowingEntry(false, iter, prev_entry);
}

// ------------------------------------------------------------------------

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

bool BackendImpl::InitBackingStore(bool* file_created) {
  file_util::CreateDirectory(path_);

  FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created)));

  if (!file->IsValid())
    return false;

  bool ret = true;
  if (*file_created)
    ret = CreateBackingStore(file);

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }
  return true;
}

// The maximum cache size will be either set explicitly by the caller, or
// calculated by this code.
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // If table_len is provided, the index file exists.
  DCHECK(!table_len || data_->header.magic);

  // The user is not setting the size, let's figure it out.
  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
  if (available < 0) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  if (table_len)
    available += data_->header.num_bytes;

  max_size_ = PreferedCacheSize(available);

  // Let's not use more than the default size while we tune-up the performance
  // of bigger caches. TODO(rvargas): remove this limit.
  if (max_size_ > kDefaultCacheSize * 4)
    max_size_ = kDefaultCacheSize * 4;

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_ = current_max_size;
}

// We always execute this method from the message loop so that we can freely
// release files, memory pointers etc.
void BackendImpl::RestartCache() {
  DCHECK(!num_refs_);
  DCHECK(!open_entries_.size());
  PrepareForRestart();
  DelayedCacheCleanup(path_);

  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);

  // Don't call Init() if directed by the unit test: we are simulating a failure
  // trying to re-enable the cache.
  if (unit_test_)
    init_ = true;  // Let the destructor do proper cleanup.
  else if (Init())
    stats_.SetCounter(Stats::FATAL_ERROR, errors + 1);
}

void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  data_->header.crash = 0;
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}

int BackendImpl::NewEntry(Addr address, EntryImpl** entry, bool* dirty) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // Easy job. This entry is already in memory.
    EntryImpl* this_entry = it->second;
    this_entry->AddRef();
    *entry = this_entry;
    *dirty = false;
    return 0;
  }

  scoped_refptr<EntryImpl> cache_entry(new EntryImpl(this, address));
  IncreaseNumRefs();
  *entry = NULL;

  if (!address.is_initialized() || address.is_separate_file() ||
      address.file_type() != BLOCK_256) {
    LOG(WARNING) << "Wrong entry address.";
    return ERR_INVALID_ADDRESS;
  }

  if (!cache_entry->entry()->Load())
    return ERR_READ_FAILURE;

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    return ERR_INVALID_ENTRY;
  }

  if (!cache_entry->LoadNodeAddress())
    return ERR_READ_FAILURE;

  *dirty = cache_entry->IsDirty(GetCurrentEntryId());

  // Prevent overwriting the dirty flag on the destructor.
  cache_entry->ClearDirtyFlag();

  if (!rankings_.SanityCheck(cache_entry->rankings(), false))
    return ERR_INVALID_LINKS;

  // We only add clean entries to the map.
  if (!*dirty)
    open_entries_[address.value()] = cache_entry;

  cache_entry.swap(entry);
  return 0;
}
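
// Entries that hash to the same bucket of |data_->table| are chained through
// each entry's "next" address, so looking up a key means walking that chain.
// MatchEntry() returns the entry matching |key| (or, when |find_parent| is
// true, the last entry of the chain, which would become the parent of a new
// entry). Dirty or unreadable entries found along the way are unlinked from
// the chain and destroyed, and the walk restarts from the table.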
EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
                                   bool find_parent) {
  Addr address(data_->table[hash & mask_]);
  scoped_refptr<EntryImpl> cache_entry, parent_entry;
  EntryImpl* tmp = NULL;
  bool found = false;

  for (;;) {
    if (disabled_)
      break;

    if (!address.is_initialized()) {
      if (find_parent)
        found = true;
      break;
    }

    bool dirty;
    int error = NewEntry(address, &tmp, &dirty);
    cache_entry.swap(&tmp);

    if (error || dirty) {
      // This entry is dirty on disk (it was not properly closed): we cannot
      // trust it.
      Addr child(0);
      if (!error)
        child.set_value(cache_entry->GetNextAddress());

      if (parent_entry) {
        parent_entry->SetNextAddress(child);
        parent_entry = NULL;
      } else {
        data_->table[hash & mask_] = child.value();
      }

      if (!error) {
        // It is important to call DestroyInvalidEntry after removing this
        // entry from the table.
        DestroyInvalidEntry(cache_entry);
        cache_entry = NULL;
      } else {
        Trace("NewEntry failed on MatchEntry 0x%x", address.value());
      }

      // Restart the search.
      address.set_value(data_->table[hash & mask_]);
      continue;
    }

    if (cache_entry->IsSameEntry(key, hash)) {
      if (!cache_entry->Update())
        cache_entry = NULL;
      found = true;
      break;
    }
    if (!cache_entry->Update())
      cache_entry = NULL;
    parent_entry = cache_entry;
    cache_entry = NULL;
    if (!parent_entry)
      break;

    address.set_value(parent_entry->GetNextAddress());
  }

  if (parent_entry && (!find_parent || !found))
    parent_entry = NULL;

  if (cache_entry && (find_parent || !found))
    cache_entry = NULL;

  find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
  return tmp;
}

// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
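// With the new eviction algorithm there are three in-use rankings lists to
// consider (the no-use, low-use and high-use lists), so the iterator keeps one
// candidate node per list and returns whichever entry was used most recently
// (or least recently, when iterating backwards); with the old eviction only
// the NO_USE list is walked. |iter| owns a Rankings::Iterator allocated on the
// first call and released by EndEnumeration().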
bool BackendImpl::OpenFollowingEntry(bool forward, void** iter,
                                     Entry** next_entry) {
  if (disabled_)
    return false;

  DCHECK(iter);
  DCHECK(next_entry);
  *next_entry = NULL;

  const int kListsToSearch = 3;
  scoped_refptr<EntryImpl> entries[kListsToSearch];
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(*iter));
  *iter = NULL;

  if (!iterator.get()) {
    iterator.reset(new Rankings::Iterator(&rankings_));
    bool ret = false;

    // Get an entry from each list.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
                                        &iterator->nodes[i], &temp);
      entries[i].swap(&temp);  // The entry was already addref'd.
    }
    if (!ret)
      return false;
  } else {
    // Get the next entry from the last list, and the actual entries for the
    // elements on the other lists.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      if (iterator->list == i) {
        OpenFollowingEntryFromList(forward, iterator->list,
                                   &iterator->nodes[i], &temp);
      } else {
        temp = GetEnumeratedEntry(iterator->nodes[i], false);
      }

      entries[i].swap(&temp);  // The entry was already addref'd.
    }
  }

  int newest = -1;
  int oldest = -1;
  Time access_times[kListsToSearch];
  for (int i = 0; i < kListsToSearch; i++) {
    if (entries[i].get()) {
      access_times[i] = entries[i]->GetLastUsed();
      if (newest < 0) {
        DCHECK(oldest < 0);
        newest = oldest = i;
        continue;
      }
      if (access_times[i] > access_times[newest])
        newest = i;
      if (access_times[i] < access_times[oldest])
        oldest = i;
    }
  }

  if (newest < 0 || oldest < 0)
    return false;

  if (forward) {
    entries[newest].swap(reinterpret_cast<EntryImpl**>(next_entry));
    iterator->list = static_cast<Rankings::List>(newest);
  } else {
    entries[oldest].swap(reinterpret_cast<EntryImpl**>(next_entry));
    iterator->list = static_cast<Rankings::List>(oldest);
  }

  *iter = iterator.release();
  return true;
}

bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
                                             CacheRankingsBlock** from_entry,
                                             EntryImpl** next_entry) {
  if (disabled_)
    return false;

  if (!new_eviction_ && Rankings::NO_USE != list)
    return false;

  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
  CacheRankingsBlock* next_block = forward ?
      rankings_.GetNext(rankings.get(), list) :
      rankings_.GetPrev(rankings.get(), list);
  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
  *from_entry = NULL;

  *next_entry = GetEnumeratedEntry(next.get(), false);
  if (!*next_entry)
    return false;

  *from_entry = next.release();
  return true;
}

EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
                                           bool to_evict) {
  if (!next || disabled_)
    return NULL;

  EntryImpl* entry;
  bool dirty;
  if (NewEntry(Addr(next->Data()->contents), &entry, &dirty))
    return NULL;

  if (dirty) {
    // We cannot trust this entry. This code also releases the reference.
    DestroyInvalidEntryFromEnumeration(entry);
    return NULL;
  }

  // There is no need to store the entry to disk if we want to delete it.
  if (!to_evict && !entry->Update()) {
    entry->Release();
    return NULL;
  }

  return entry;
}

bool BackendImpl::ResurrectEntry(EntryImpl* deleted_entry, Entry** entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return false;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.

  eviction_.OnCreateEntry(deleted_entry);
  *entry = deleted_entry;

  stats_.OnEvent(Stats::CREATE_HIT);
  Trace("Resurrect entry hit ");
  return true;
}

void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

// This is kind of ugly. The entry may or may not be part of the cache index
// table, and it may even have corrupt fields. If we just doom it, we may end up
// deleting it twice (if all fields are right, and when looking up the parent of
// chained entries we see this one... and we delete it because it is dirty). If
// we ignore it, we may leave it here forever. So we're going to attempt to
// delete it through the provided object, without touching the index table
// (because we cannot just call MatchEntry()), and also attempt to delete it
// from the table through the key: this may find a new entry (too bad), or an
// entry that was just deleted, and consider it a very corrupt entry.
void BackendImpl::DestroyInvalidEntryFromEnumeration(EntryImpl* entry) {
  std::string key = entry->GetKey();
  entry->SetPointerForInvalidEntry(GetCurrentEntryId());
  CacheAddr next_entry = entry->entry()->Data()->next;
  if (!next_entry) {
    DestroyInvalidEntry(entry);
    entry->Release();
  }
  DoomEntry(key);

  if (!next_entry)
    return;

  // We have a chained entry so instead of destroying first this entry and then
  // anything with this key, we just called DoomEntry() first. If that call
  // deleted everything, |entry| has invalid data. Let's see if there is
  // something else to do. We started with just a rankings node (we come from
  // an enumeration), so that one may still be there.
  CacheRankingsBlock* rankings = entry->rankings();
  rankings->Load();
  if (rankings->Data()->contents) {
    // We still have something. Clean this up.
    DestroyInvalidEntry(entry);
  }
  entry->Release();
}

void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK(data_->header.num_bytes >= 0);

  if (data_->header.num_bytes > max_size_)
    eviction_.TrimCache(false);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK(data_->header.num_bytes >= 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    MessageLoop::current()->PostTask(FROM_HERE,
        factory_.NewRunnableMethod(&BackendImpl::RestartCache));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK(data_->header.num_entries > 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++) {
    LOG(INFO) << stats[index].first << ": " << stats[index].second;
  }
}
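
// Sends the periodic UMA data. This runs from OnStatsTimer(), at most once a
// week (see ShouldReportAgain()). LAST_REPORT_TIMER remembers the TIMER value
// at the previous report, so |use_hours| below measures the hours of use since
// that report rather than since the cache was created.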
void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);
  CACHE_UMA(COUNTS, "Size", 0, data_->header.num_bytes / (1024 * 1024));
  CACHE_UMA(COUNTS, "MaxSize", 0, max_size_ / (1024 * 1024));

  CACHE_UMA(COUNTS, "AverageOpenEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS, "MaxOpenEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  if (!data_->header.create_time || !data_->header.lru.filled)
    return;

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", 0, stats_.GetHitRatio());

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);
}

void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that new fields are actually
  // updated by the new eviction algorithm.
  DCHECK(0x20000 == data_->header.version);
  data_->header.version = 0x20001;
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

  if (data_->header.num_bytes < 0) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  return true;
}

int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (int i = 0; i <= static_cast<int>(mask_); i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      bool dirty;
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp, &dirty);
      if (ret)
        return ret;
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (dirty)
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
  RankingsNode* rankings = cache_entry->rankings()->Data();
  return !rankings->dummy;
}

}  // namespace disk_cache