1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #include "net/disk_cache/backend_impl.h" 6 7 #include "base/file_path.h" 8 #include "base/file_util.h" 9 #include "base/message_loop.h" 10 #include "base/metrics/field_trial.h" 11 #include "base/metrics/histogram.h" 12 #include "base/metrics/stats_counters.h" 13 #include "base/rand_util.h" 14 #include "base/string_util.h" 15 #include "base/stringprintf.h" 16 #include "base/sys_info.h" 17 #include "base/threading/worker_pool.h" 18 #include "base/threading/thread_restrictions.h" 19 #include "base/time.h" 20 #include "base/timer.h" 21 #include "net/base/net_errors.h" 22 #include "net/disk_cache/cache_util.h" 23 #include "net/disk_cache/entry_impl.h" 24 #include "net/disk_cache/errors.h" 25 #include "net/disk_cache/experiments.h" 26 #include "net/disk_cache/file.h" 27 #include "net/disk_cache/hash.h" 28 #include "net/disk_cache/mem_backend_impl.h" 29 30 // This has to be defined before including histogram_macros.h from this file. 31 #define NET_DISK_CACHE_BACKEND_IMPL_CC_ 32 #include "net/disk_cache/histogram_macros.h" 33 34 using base::Time; 35 using base::TimeDelta; 36 using base::TimeTicks; 37 38 namespace { 39 40 const char* kIndexName = "index"; 41 const int kMaxOldFolders = 100; 42 43 // Seems like ~240 MB correspond to less than 50k entries for 99% of the people. 44 // Note that the actual target is to keep the index table load factor under 55% 45 // for most users. 46 const int k64kEntriesStore = 240 * 1000 * 1000; 47 const int kBaseTableLen = 64 * 1024; 48 const int kDefaultCacheSize = 80 * 1024 * 1024; 49 50 int DesiredIndexTableLen(int32 storage_size) { 51 if (storage_size <= k64kEntriesStore) 52 return kBaseTableLen; 53 if (storage_size <= k64kEntriesStore * 2) 54 return kBaseTableLen * 2; 55 if (storage_size <= k64kEntriesStore * 4) 56 return kBaseTableLen * 4; 57 if (storage_size <= k64kEntriesStore * 8) 58 return kBaseTableLen * 8; 59 60 // The biggest storage_size for int32 requires a 4 MB table. 61 return kBaseTableLen * 16; 62 } 63 64 int MaxStorageSizeForTable(int table_len) { 65 return table_len * (k64kEntriesStore / kBaseTableLen); 66 } 67 68 size_t GetIndexSize(int table_len) { 69 size_t table_size = sizeof(disk_cache::CacheAddr) * table_len; 70 return sizeof(disk_cache::IndexHeader) + table_size; 71 } 72 73 // ------------------------------------------------------------------------ 74 75 // Returns a fully qualified name from path and name, using a given name prefix 76 // and index number. For instance, if the arguments are "/foo", "bar" and 5, it 77 // will return "/foo/old_bar_005". 78 FilePath GetPrefixedName(const FilePath& path, const std::string& name, 79 int index) { 80 std::string tmp = base::StringPrintf("%s%s_%03d", "old_", 81 name.c_str(), index); 82 return path.AppendASCII(tmp); 83 } 84 85 // This is a simple Task to cleanup old caches. 86 class CleanupTask : public Task { 87 public: 88 CleanupTask(const FilePath& path, const std::string& name) 89 : path_(path), name_(name) {} 90 91 virtual void Run(); 92 93 private: 94 FilePath path_; 95 std::string name_; 96 DISALLOW_COPY_AND_ASSIGN(CleanupTask); 97 }; 98 99 void CleanupTask::Run() { 100 for (int i = 0; i < kMaxOldFolders; i++) { 101 FilePath to_delete = GetPrefixedName(path_, name_, i); 102 disk_cache::DeleteCache(to_delete, true); 103 } 104 } 105 106 // Returns a full path to rename the current cache, in order to delete it. path 107 // is the current folder location, and name is the current folder name. 108 FilePath GetTempCacheName(const FilePath& path, const std::string& name) { 109 // We'll attempt to have up to kMaxOldFolders folders for deletion. 110 for (int i = 0; i < kMaxOldFolders; i++) { 111 FilePath to_delete = GetPrefixedName(path, name, i); 112 if (!file_util::PathExists(to_delete)) 113 return to_delete; 114 } 115 return FilePath(); 116 } 117 118 // Moves the cache files to a new folder and creates a task to delete them. 119 bool DelayedCacheCleanup(const FilePath& full_path) { 120 // GetTempCacheName() and MoveCache() use synchronous file 121 // operations. 122 base::ThreadRestrictions::ScopedAllowIO allow_io; 123 124 FilePath current_path = full_path.StripTrailingSeparators(); 125 126 FilePath path = current_path.DirName(); 127 FilePath name = current_path.BaseName(); 128 #if defined(OS_POSIX) 129 std::string name_str = name.value(); 130 #elif defined(OS_WIN) 131 // We created this file so it should only contain ASCII. 132 std::string name_str = WideToASCII(name.value()); 133 #endif 134 135 FilePath to_delete = GetTempCacheName(path, name_str); 136 if (to_delete.empty()) { 137 LOG(ERROR) << "Unable to get another cache folder"; 138 return false; 139 } 140 141 if (!disk_cache::MoveCache(full_path, to_delete)) { 142 LOG(ERROR) << "Unable to move cache folder"; 143 return false; 144 } 145 146 base::WorkerPool::PostTask(FROM_HERE, new CleanupTask(path, name_str), true); 147 return true; 148 } 149 150 // Sets group for the current experiment. Returns false if the files should be 151 // discarded. 152 bool InitExperiment(disk_cache::IndexHeader* header) { 153 if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 || 154 header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) { 155 // Discard current cache. 156 return false; 157 } 158 159 // See if we already defined the group for this profile. 160 if (header->experiment >= disk_cache::EXPERIMENT_DELETED_LIST_OUT) 161 return true; 162 163 // The experiment is closed. 164 header->experiment = disk_cache::EXPERIMENT_DELETED_LIST_OUT; 165 return true; 166 } 167 168 // Initializes the field trial structures to allow performance measurements 169 // for the current cache configuration. 170 void SetFieldTrialInfo(int size_group) { 171 static bool first = true; 172 if (!first) 173 return; 174 175 // Field trials involve static objects so we have to do this only once. 176 first = false; 177 std::string group1 = base::StringPrintf("CacheSizeGroup_%d", size_group); 178 int totalProbability = 10; 179 scoped_refptr<base::FieldTrial> trial1( 180 new base::FieldTrial("CacheSize", totalProbability, group1, 2011, 6, 30)); 181 trial1->AppendGroup(group1, totalProbability); 182 } 183 184 // ------------------------------------------------------------------------ 185 186 // This class takes care of building an instance of the backend. 187 class CacheCreator { 188 public: 189 CacheCreator(const FilePath& path, bool force, int max_bytes, 190 net::CacheType type, uint32 flags, 191 base::MessageLoopProxy* thread, net::NetLog* net_log, 192 disk_cache::Backend** backend, 193 net::CompletionCallback* callback) 194 : path_(path), force_(force), retry_(false), max_bytes_(max_bytes), 195 type_(type), flags_(flags), thread_(thread), backend_(backend), 196 callback_(callback), cache_(NULL), net_log_(net_log), 197 ALLOW_THIS_IN_INITIALIZER_LIST( 198 my_callback_(this, &CacheCreator::OnIOComplete)) { 199 } 200 ~CacheCreator() {} 201 202 // Creates the backend. 203 int Run(); 204 205 // Callback implementation. 206 void OnIOComplete(int result); 207 208 private: 209 void DoCallback(int result); 210 211 const FilePath& path_; 212 bool force_; 213 bool retry_; 214 int max_bytes_; 215 net::CacheType type_; 216 uint32 flags_; 217 scoped_refptr<base::MessageLoopProxy> thread_; 218 disk_cache::Backend** backend_; 219 net::CompletionCallback* callback_; 220 disk_cache::BackendImpl* cache_; 221 net::NetLog* net_log_; 222 net::CompletionCallbackImpl<CacheCreator> my_callback_; 223 224 DISALLOW_COPY_AND_ASSIGN(CacheCreator); 225 }; 226 227 int CacheCreator::Run() { 228 cache_ = new disk_cache::BackendImpl(path_, thread_, net_log_); 229 cache_->SetMaxSize(max_bytes_); 230 cache_->SetType(type_); 231 cache_->SetFlags(flags_); 232 int rv = cache_->Init(&my_callback_); 233 DCHECK_EQ(net::ERR_IO_PENDING, rv); 234 return rv; 235 } 236 237 void CacheCreator::OnIOComplete(int result) { 238 if (result == net::OK || !force_ || retry_) 239 return DoCallback(result); 240 241 // This is a failure and we are supposed to try again, so delete the object, 242 // delete all the files, and try again. 243 retry_ = true; 244 delete cache_; 245 cache_ = NULL; 246 if (!DelayedCacheCleanup(path_)) 247 return DoCallback(result); 248 249 // The worker thread will start deleting files soon, but the original folder 250 // is not there anymore... let's create a new set of files. 251 int rv = Run(); 252 DCHECK_EQ(net::ERR_IO_PENDING, rv); 253 } 254 255 void CacheCreator::DoCallback(int result) { 256 DCHECK_NE(net::ERR_IO_PENDING, result); 257 if (result == net::OK) { 258 *backend_ = cache_; 259 } else { 260 LOG(ERROR) << "Unable to create cache"; 261 *backend_ = NULL; 262 delete cache_; 263 } 264 callback_->Run(result); 265 delete this; 266 } 267 268 // ------------------------------------------------------------------------ 269 270 // A task to perform final cleanup on the background thread. 271 class FinalCleanup : public Task { 272 public: 273 explicit FinalCleanup(disk_cache::BackendImpl* backend) : backend_(backend) {} 274 ~FinalCleanup() {} 275 276 virtual void Run(); 277 private: 278 disk_cache::BackendImpl* backend_; 279 DISALLOW_EVIL_CONSTRUCTORS(FinalCleanup); 280 }; 281 282 void FinalCleanup::Run() { 283 backend_->CleanupCache(); 284 } 285 286 } // namespace 287 288 // ------------------------------------------------------------------------ 289 290 namespace disk_cache { 291 292 int CreateCacheBackend(net::CacheType type, const FilePath& path, int max_bytes, 293 bool force, base::MessageLoopProxy* thread, 294 net::NetLog* net_log, Backend** backend, 295 CompletionCallback* callback) { 296 DCHECK(callback); 297 if (type == net::MEMORY_CACHE) { 298 *backend = MemBackendImpl::CreateBackend(max_bytes, net_log); 299 return *backend ? net::OK : net::ERR_FAILED; 300 } 301 DCHECK(thread); 302 303 return BackendImpl::CreateBackend(path, force, max_bytes, type, kNone, thread, 304 net_log, backend, callback); 305 } 306 307 // Returns the preferred maximum number of bytes for the cache given the 308 // number of available bytes. 309 int PreferedCacheSize(int64 available) { 310 // Return 80% of the available space if there is not enough space to use 311 // kDefaultCacheSize. 312 if (available < kDefaultCacheSize * 10 / 8) 313 return static_cast<int32>(available * 8 / 10); 314 315 // Return kDefaultCacheSize if it uses 80% to 10% of the available space. 316 if (available < kDefaultCacheSize * 10) 317 return kDefaultCacheSize; 318 319 // Return 10% of the available space if the target size 320 // (2.5 * kDefaultCacheSize) is more than 10%. 321 if (available < static_cast<int64>(kDefaultCacheSize) * 25) 322 return static_cast<int32>(available / 10); 323 324 // Return the target size (2.5 * kDefaultCacheSize) if it uses 10% to 1% 325 // of the available space. 326 if (available < static_cast<int64>(kDefaultCacheSize) * 250) 327 return kDefaultCacheSize * 5 / 2; 328 329 // Return 1% of the available space if it does not exceed kint32max. 330 if (available < static_cast<int64>(kint32max) * 100) 331 return static_cast<int32>(available / 100); 332 333 return kint32max; 334 } 335 336 // ------------------------------------------------------------------------ 337 338 BackendImpl::BackendImpl(const FilePath& path, 339 base::MessageLoopProxy* cache_thread, 340 net::NetLog* net_log) 341 : ALLOW_THIS_IN_INITIALIZER_LIST(background_queue_(this, cache_thread)), 342 path_(path), 343 block_files_(path), 344 mask_(0), 345 max_size_(0), 346 io_delay_(0), 347 cache_type_(net::DISK_CACHE), 348 uma_report_(0), 349 user_flags_(0), 350 init_(false), 351 restarted_(false), 352 unit_test_(false), 353 read_only_(false), 354 disabled_(false), 355 new_eviction_(false), 356 first_timer_(true), 357 net_log_(net_log), 358 done_(true, false), 359 ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)), 360 ALLOW_THIS_IN_INITIALIZER_LIST(ptr_factory_(this)) { 361 } 362 363 BackendImpl::BackendImpl(const FilePath& path, 364 uint32 mask, 365 base::MessageLoopProxy* cache_thread, 366 net::NetLog* net_log) 367 : ALLOW_THIS_IN_INITIALIZER_LIST(background_queue_(this, cache_thread)), 368 path_(path), 369 block_files_(path), 370 mask_(mask), 371 max_size_(0), 372 io_delay_(0), 373 cache_type_(net::DISK_CACHE), 374 uma_report_(0), 375 user_flags_(kMask), 376 init_(false), 377 restarted_(false), 378 unit_test_(false), 379 read_only_(false), 380 disabled_(false), 381 new_eviction_(false), 382 first_timer_(true), 383 net_log_(net_log), 384 done_(true, false), 385 ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)), 386 ALLOW_THIS_IN_INITIALIZER_LIST(ptr_factory_(this)) { 387 } 388 389 BackendImpl::~BackendImpl() { 390 background_queue_.WaitForPendingIO(); 391 392 if (background_queue_.BackgroundIsCurrentThread()) { 393 // Unit tests may use the same thread for everything. 394 CleanupCache(); 395 } else { 396 background_queue_.background_thread()->PostTask(FROM_HERE, 397 new FinalCleanup(this)); 398 done_.Wait(); 399 } 400 } 401 402 // If the initialization of the cache fails, and force is true, we will discard 403 // the whole cache and create a new one. In order to process a potentially large 404 // number of files, we'll rename the cache folder to old_ + original_name + 405 // number, (located on the same parent folder), and spawn a worker thread to 406 // delete all the files on all the stale cache folders. The whole process can 407 // still fail if we are not able to rename the cache folder (for instance due to 408 // a sharing violation), and in that case a cache for this profile (on the 409 // desired path) cannot be created. 410 // 411 // Static. 412 int BackendImpl::CreateBackend(const FilePath& full_path, bool force, 413 int max_bytes, net::CacheType type, 414 uint32 flags, base::MessageLoopProxy* thread, 415 net::NetLog* net_log, Backend** backend, 416 CompletionCallback* callback) { 417 DCHECK(callback); 418 CacheCreator* creator = new CacheCreator(full_path, force, max_bytes, type, 419 flags, thread, net_log, backend, 420 callback); 421 // This object will self-destroy when finished. 422 return creator->Run(); 423 } 424 425 int BackendImpl::Init(CompletionCallback* callback) { 426 background_queue_.Init(callback); 427 return net::ERR_IO_PENDING; 428 } 429 430 int BackendImpl::SyncInit() { 431 DCHECK(!init_); 432 if (init_) 433 return net::ERR_FAILED; 434 435 bool create_files = false; 436 if (!InitBackingStore(&create_files)) { 437 ReportError(ERR_STORAGE_ERROR); 438 return net::ERR_FAILED; 439 } 440 441 num_refs_ = num_pending_io_ = max_refs_ = 0; 442 entry_count_ = byte_count_ = 0; 443 444 if (!restarted_) { 445 buffer_bytes_ = 0; 446 trace_object_ = TraceObject::GetTraceObject(); 447 // Create a recurrent timer of 30 secs. 448 int timer_delay = unit_test_ ? 1000 : 30000; 449 timer_.Start(TimeDelta::FromMilliseconds(timer_delay), this, 450 &BackendImpl::OnStatsTimer); 451 } 452 453 init_ = true; 454 455 if (data_->header.experiment != NO_EXPERIMENT && 456 cache_type_ != net::DISK_CACHE) { 457 // No experiment for other caches. 458 return net::ERR_FAILED; 459 } 460 461 if (!(user_flags_ & disk_cache::kNoRandom)) { 462 // The unit test controls directly what to test. 463 new_eviction_ = (cache_type_ == net::DISK_CACHE); 464 } 465 466 if (!CheckIndex()) { 467 ReportError(ERR_INIT_FAILED); 468 return net::ERR_FAILED; 469 } 470 471 if (!(user_flags_ & disk_cache::kNoRandom) && 472 cache_type_ == net::DISK_CACHE && 473 !InitExperiment(&data_->header)) 474 return net::ERR_FAILED; 475 476 // We don't care if the value overflows. The only thing we care about is that 477 // the id cannot be zero, because that value is used as "not dirty". 478 // Increasing the value once per second gives us many years before we start 479 // having collisions. 480 data_->header.this_id++; 481 if (!data_->header.this_id) 482 data_->header.this_id++; 483 484 if (data_->header.crash) { 485 ReportError(ERR_PREVIOUS_CRASH); 486 } else { 487 ReportError(0); 488 data_->header.crash = 1; 489 } 490 491 if (!block_files_.Init(create_files)) 492 return net::ERR_FAILED; 493 494 // We want to minimize the changes to cache for an AppCache. 495 if (cache_type() == net::APP_CACHE) { 496 DCHECK(!new_eviction_); 497 read_only_ = true; 498 } 499 500 // Setup load-time data only for the main cache. 501 if (cache_type() == net::DISK_CACHE) 502 SetFieldTrialInfo(GetSizeGroup()); 503 504 eviction_.Init(this); 505 506 // stats_ and rankings_ may end up calling back to us so we better be enabled. 507 disabled_ = false; 508 if (!stats_.Init(this, &data_->header.stats)) 509 return net::ERR_FAILED; 510 511 disabled_ = !rankings_.Init(this, new_eviction_); 512 513 return disabled_ ? net::ERR_FAILED : net::OK; 514 } 515 516 void BackendImpl::CleanupCache() { 517 Trace("Backend Cleanup"); 518 eviction_.Stop(); 519 timer_.Stop(); 520 521 if (init_) { 522 stats_.Store(); 523 if (data_) 524 data_->header.crash = 0; 525 526 File::WaitForPendingIO(&num_pending_io_); 527 if (user_flags_ & kNoRandom) { 528 // This is a net_unittest, verify that we are not 'leaking' entries. 529 DCHECK(!num_refs_); 530 } 531 } 532 block_files_.CloseFiles(); 533 factory_.RevokeAll(); 534 ptr_factory_.InvalidateWeakPtrs(); 535 done_.Signal(); 536 } 537 538 // ------------------------------------------------------------------------ 539 540 int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry, 541 CompletionCallback* callback) { 542 DCHECK(callback); 543 background_queue_.OpenPrevEntry(iter, prev_entry, callback); 544 return net::ERR_IO_PENDING; 545 } 546 547 int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) { 548 DCHECK(entry); 549 *entry = OpenEntryImpl(key); 550 return (*entry) ? net::OK : net::ERR_FAILED; 551 } 552 553 int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) { 554 DCHECK(entry); 555 *entry = CreateEntryImpl(key); 556 return (*entry) ? net::OK : net::ERR_FAILED; 557 } 558 559 int BackendImpl::SyncDoomEntry(const std::string& key) { 560 if (disabled_) 561 return net::ERR_FAILED; 562 563 EntryImpl* entry = OpenEntryImpl(key); 564 if (!entry) 565 return net::ERR_FAILED; 566 567 entry->DoomImpl(); 568 entry->Release(); 569 return net::OK; 570 } 571 572 int BackendImpl::SyncDoomAllEntries() { 573 // This is not really an error, but it is an interesting condition. 574 ReportError(ERR_CACHE_DOOMED); 575 stats_.OnEvent(Stats::DOOM_CACHE); 576 if (!num_refs_) { 577 RestartCache(false); 578 return disabled_ ? net::ERR_FAILED : net::OK; 579 } else { 580 if (disabled_) 581 return net::ERR_FAILED; 582 583 eviction_.TrimCache(true); 584 return net::OK; 585 } 586 } 587 588 int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time, 589 const base::Time end_time) { 590 DCHECK_NE(net::APP_CACHE, cache_type_); 591 if (end_time.is_null()) 592 return SyncDoomEntriesSince(initial_time); 593 594 DCHECK(end_time >= initial_time); 595 596 if (disabled_) 597 return net::ERR_FAILED; 598 599 EntryImpl* node; 600 void* iter = NULL; 601 EntryImpl* next = OpenNextEntryImpl(&iter); 602 if (!next) 603 return net::OK; 604 605 while (next) { 606 node = next; 607 next = OpenNextEntryImpl(&iter); 608 609 if (node->GetLastUsed() >= initial_time && 610 node->GetLastUsed() < end_time) { 611 node->DoomImpl(); 612 } else if (node->GetLastUsed() < initial_time) { 613 if (next) 614 next->Release(); 615 next = NULL; 616 SyncEndEnumeration(iter); 617 } 618 619 node->Release(); 620 } 621 622 return net::OK; 623 } 624 625 // We use OpenNextEntryImpl to retrieve elements from the cache, until we get 626 // entries that are too old. 627 int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) { 628 DCHECK_NE(net::APP_CACHE, cache_type_); 629 if (disabled_) 630 return net::ERR_FAILED; 631 632 stats_.OnEvent(Stats::DOOM_RECENT); 633 for (;;) { 634 void* iter = NULL; 635 EntryImpl* entry = OpenNextEntryImpl(&iter); 636 if (!entry) 637 return net::OK; 638 639 if (initial_time > entry->GetLastUsed()) { 640 entry->Release(); 641 SyncEndEnumeration(iter); 642 return net::OK; 643 } 644 645 entry->DoomImpl(); 646 entry->Release(); 647 SyncEndEnumeration(iter); // Dooming the entry invalidates the iterator. 648 } 649 } 650 651 int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) { 652 *next_entry = OpenNextEntryImpl(iter); 653 return (*next_entry) ? net::OK : net::ERR_FAILED; 654 } 655 656 int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) { 657 *prev_entry = OpenPrevEntryImpl(iter); 658 return (*prev_entry) ? net::OK : net::ERR_FAILED; 659 } 660 661 void BackendImpl::SyncEndEnumeration(void* iter) { 662 scoped_ptr<Rankings::Iterator> iterator( 663 reinterpret_cast<Rankings::Iterator*>(iter)); 664 } 665 666 EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) { 667 if (disabled_) 668 return NULL; 669 670 TimeTicks start = TimeTicks::Now(); 671 uint32 hash = Hash(key); 672 Trace("Open hash 0x%x", hash); 673 674 bool error; 675 EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error); 676 if (!cache_entry) { 677 stats_.OnEvent(Stats::OPEN_MISS); 678 return NULL; 679 } 680 681 if (ENTRY_NORMAL != cache_entry->entry()->Data()->state) { 682 // The entry was already evicted. 683 cache_entry->Release(); 684 stats_.OnEvent(Stats::OPEN_MISS); 685 return NULL; 686 } 687 688 eviction_.OnOpenEntry(cache_entry); 689 entry_count_++; 690 691 CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start); 692 stats_.OnEvent(Stats::OPEN_HIT); 693 SIMPLE_STATS_COUNTER("disk_cache.hit"); 694 return cache_entry; 695 } 696 697 EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) { 698 if (disabled_ || key.empty()) 699 return NULL; 700 701 TimeTicks start = TimeTicks::Now(); 702 uint32 hash = Hash(key); 703 Trace("Create hash 0x%x", hash); 704 705 scoped_refptr<EntryImpl> parent; 706 Addr entry_address(data_->table[hash & mask_]); 707 if (entry_address.is_initialized()) { 708 // We have an entry already. It could be the one we are looking for, or just 709 // a hash conflict. 710 bool error; 711 EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error); 712 if (old_entry) 713 return ResurrectEntry(old_entry); 714 715 EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error); 716 DCHECK(!error); 717 if (parent_entry) { 718 parent.swap(&parent_entry); 719 } else if (data_->table[hash & mask_]) { 720 // We should have corrected the problem. 721 NOTREACHED(); 722 return NULL; 723 } 724 } 725 726 // The general flow is to allocate disk space and initialize the entry data, 727 // followed by saving that to disk, then linking the entry though the index 728 // and finally through the lists. If there is a crash in this process, we may 729 // end up with: 730 // a. Used, unreferenced empty blocks on disk (basically just garbage). 731 // b. Used, unreferenced but meaningful data on disk (more garbage). 732 // c. A fully formed entry, reachable only through the index. 733 // d. A fully formed entry, also reachable through the lists, but still dirty. 734 // 735 // Anything after (b) can be automatically cleaned up. We may consider saving 736 // the current operation (as we do while manipulating the lists) so that we 737 // can detect and cleanup (a) and (b). 738 739 int num_blocks = EntryImpl::NumBlocksForEntry(key.size()); 740 if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) { 741 LOG(ERROR) << "Create entry failed " << key.c_str(); 742 stats_.OnEvent(Stats::CREATE_ERROR); 743 return NULL; 744 } 745 746 Addr node_address(0); 747 if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) { 748 block_files_.DeleteBlock(entry_address, false); 749 LOG(ERROR) << "Create entry failed " << key.c_str(); 750 stats_.OnEvent(Stats::CREATE_ERROR); 751 return NULL; 752 } 753 754 scoped_refptr<EntryImpl> cache_entry( 755 new EntryImpl(this, entry_address, false)); 756 IncreaseNumRefs(); 757 758 if (!cache_entry->CreateEntry(node_address, key, hash)) { 759 block_files_.DeleteBlock(entry_address, false); 760 block_files_.DeleteBlock(node_address, false); 761 LOG(ERROR) << "Create entry failed " << key.c_str(); 762 stats_.OnEvent(Stats::CREATE_ERROR); 763 return NULL; 764 } 765 766 cache_entry->BeginLogging(net_log_, true); 767 768 // We are not failing the operation; let's add this to the map. 769 open_entries_[entry_address.value()] = cache_entry; 770 771 // Save the entry. 772 block_files_.GetFile(entry_address)->Store(cache_entry->entry()); 773 block_files_.GetFile(node_address)->Store(cache_entry->rankings()); 774 IncreaseNumEntries(); 775 entry_count_++; 776 777 // Link this entry through the index. 778 if (parent.get()) { 779 parent->SetNextAddress(entry_address); 780 } else { 781 data_->table[hash & mask_] = entry_address.value(); 782 } 783 784 // Link this entry through the lists. 785 eviction_.OnCreateEntry(cache_entry); 786 787 CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start); 788 stats_.OnEvent(Stats::CREATE_HIT); 789 SIMPLE_STATS_COUNTER("disk_cache.miss"); 790 Trace("create entry hit "); 791 return cache_entry.release(); 792 } 793 794 EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) { 795 return OpenFollowingEntry(true, iter); 796 } 797 798 EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) { 799 return OpenFollowingEntry(false, iter); 800 } 801 802 bool BackendImpl::SetMaxSize(int max_bytes) { 803 COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); 804 if (max_bytes < 0) 805 return false; 806 807 // Zero size means use the default. 808 if (!max_bytes) 809 return true; 810 811 // Avoid a DCHECK later on. 812 if (max_bytes >= kint32max - kint32max / 10) 813 max_bytes = kint32max - kint32max / 10 - 1; 814 815 user_flags_ |= kMaxSize; 816 max_size_ = max_bytes; 817 return true; 818 } 819 820 void BackendImpl::SetType(net::CacheType type) { 821 DCHECK(type != net::MEMORY_CACHE); 822 cache_type_ = type; 823 } 824 825 FilePath BackendImpl::GetFileName(Addr address) const { 826 if (!address.is_separate_file() || !address.is_initialized()) { 827 NOTREACHED(); 828 return FilePath(); 829 } 830 831 std::string tmp = base::StringPrintf("f_%06x", address.FileNumber()); 832 return path_.AppendASCII(tmp); 833 } 834 835 MappedFile* BackendImpl::File(Addr address) { 836 if (disabled_) 837 return NULL; 838 return block_files_.GetFile(address); 839 } 840 841 bool BackendImpl::CreateExternalFile(Addr* address) { 842 int file_number = data_->header.last_file + 1; 843 Addr file_address(0); 844 bool success = false; 845 for (int i = 0; i < 0x0fffffff; i++, file_number++) { 846 if (!file_address.SetFileNumber(file_number)) { 847 file_number = 1; 848 continue; 849 } 850 FilePath name = GetFileName(file_address); 851 int flags = base::PLATFORM_FILE_READ | 852 base::PLATFORM_FILE_WRITE | 853 base::PLATFORM_FILE_CREATE | 854 base::PLATFORM_FILE_EXCLUSIVE_WRITE; 855 base::PlatformFileError error; 856 scoped_refptr<disk_cache::File> file(new disk_cache::File( 857 base::CreatePlatformFile(name, flags, NULL, &error))); 858 if (!file->IsValid()) { 859 if (error != base::PLATFORM_FILE_ERROR_EXISTS) 860 return false; 861 continue; 862 } 863 864 success = true; 865 break; 866 } 867 868 DCHECK(success); 869 if (!success) 870 return false; 871 872 data_->header.last_file = file_number; 873 address->set_value(file_address.value()); 874 return true; 875 } 876 877 bool BackendImpl::CreateBlock(FileType block_type, int block_count, 878 Addr* block_address) { 879 return block_files_.CreateBlock(block_type, block_count, block_address); 880 } 881 882 void BackendImpl::DeleteBlock(Addr block_address, bool deep) { 883 block_files_.DeleteBlock(block_address, deep); 884 } 885 886 LruData* BackendImpl::GetLruData() { 887 return &data_->header.lru; 888 } 889 890 void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) { 891 if (!read_only_) { 892 eviction_.UpdateRank(entry, modified); 893 } 894 } 895 896 void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) { 897 Addr address(rankings->Data()->contents); 898 EntryImpl* cache_entry = NULL; 899 if (NewEntry(address, &cache_entry)) 900 return; 901 902 uint32 hash = cache_entry->GetHash(); 903 cache_entry->Release(); 904 905 // Anything on the table means that this entry is there. 906 if (data_->table[hash & mask_]) 907 return; 908 909 data_->table[hash & mask_] = address.value(); 910 } 911 912 void BackendImpl::InternalDoomEntry(EntryImpl* entry) { 913 uint32 hash = entry->GetHash(); 914 std::string key = entry->GetKey(); 915 Addr entry_addr = entry->entry()->address(); 916 bool error; 917 EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error); 918 CacheAddr child(entry->GetNextAddress()); 919 920 Trace("Doom entry 0x%p", entry); 921 922 if (!entry->doomed()) { 923 // We may have doomed this entry from within MatchEntry. 924 eviction_.OnDoomEntry(entry); 925 entry->InternalDoom(); 926 if (!new_eviction_) { 927 DecreaseNumEntries(); 928 } 929 stats_.OnEvent(Stats::DOOM_ENTRY); 930 } 931 932 if (parent_entry) { 933 parent_entry->SetNextAddress(Addr(child)); 934 parent_entry->Release(); 935 } else if (!error) { 936 data_->table[hash & mask_] = child; 937 } 938 } 939 940 // An entry may be linked on the DELETED list for a while after being doomed. 941 // This function is called when we want to remove it. 942 void BackendImpl::RemoveEntry(EntryImpl* entry) { 943 if (!new_eviction_) 944 return; 945 946 DCHECK(ENTRY_NORMAL != entry->entry()->Data()->state); 947 948 Trace("Remove entry 0x%p", entry); 949 eviction_.OnDestroyEntry(entry); 950 DecreaseNumEntries(); 951 } 952 953 void BackendImpl::OnEntryDestroyBegin(Addr address) { 954 EntriesMap::iterator it = open_entries_.find(address.value()); 955 if (it != open_entries_.end()) 956 open_entries_.erase(it); 957 } 958 959 void BackendImpl::OnEntryDestroyEnd() { 960 DecreaseNumRefs(); 961 if (data_->header.num_bytes > max_size_ && !read_only_) 962 eviction_.TrimCache(false); 963 } 964 965 EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const { 966 DCHECK(rankings->HasData()); 967 EntriesMap::const_iterator it = 968 open_entries_.find(rankings->Data()->contents); 969 if (it != open_entries_.end()) { 970 // We have this entry in memory. 971 return it->second; 972 } 973 974 return NULL; 975 } 976 977 int32 BackendImpl::GetCurrentEntryId() const { 978 return data_->header.this_id; 979 } 980 981 int BackendImpl::MaxFileSize() const { 982 return max_size_ / 8; 983 } 984 985 void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) { 986 if (disabled_ || old_size == new_size) 987 return; 988 if (old_size > new_size) 989 SubstractStorageSize(old_size - new_size); 990 else 991 AddStorageSize(new_size - old_size); 992 993 // Update the usage statistics. 994 stats_.ModifyStorageStats(old_size, new_size); 995 } 996 997 void BackendImpl::TooMuchStorageRequested(int32 size) { 998 stats_.ModifyStorageStats(0, size); 999 } 1000 1001 bool BackendImpl::IsAllocAllowed(int current_size, int new_size) { 1002 DCHECK_GT(new_size, current_size); 1003 if (user_flags_ & kNoBuffering) 1004 return false; 1005 1006 int to_add = new_size - current_size; 1007 if (buffer_bytes_ + to_add > MaxBuffersSize()) 1008 return false; 1009 1010 buffer_bytes_ += to_add; 1011 CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024); 1012 return true; 1013 } 1014 1015 void BackendImpl::BufferDeleted(int size) { 1016 buffer_bytes_ -= size; 1017 DCHECK_GE(size, 0); 1018 } 1019 1020 bool BackendImpl::IsLoaded() const { 1021 CACHE_UMA(COUNTS, "PendingIO", GetSizeGroup(), num_pending_io_); 1022 if (user_flags_ & kNoLoadProtection) 1023 return false; 1024 1025 return num_pending_io_ > 5; 1026 } 1027 1028 std::string BackendImpl::HistogramName(const char* name, int experiment) const { 1029 if (!experiment) 1030 return base::StringPrintf("DiskCache.%d.%s", cache_type_, name); 1031 return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_, 1032 name, experiment); 1033 } 1034 1035 base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() { 1036 return ptr_factory_.GetWeakPtr(); 1037 } 1038 1039 int BackendImpl::GetSizeGroup() const { 1040 if (disabled_) 1041 return 0; 1042 1043 // We want to report times grouped by the current cache size (50 MB groups). 1044 int group = data_->header.num_bytes / (50 * 1024 * 1024); 1045 if (group > 6) 1046 group = 6; // Limit the number of groups, just in case. 1047 return group; 1048 } 1049 1050 // We want to remove biases from some histograms so we only send data once per 1051 // week. 1052 bool BackendImpl::ShouldReportAgain() { 1053 if (uma_report_) 1054 return uma_report_ == 2; 1055 1056 uma_report_++; 1057 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); 1058 Time last_time = Time::FromInternalValue(last_report); 1059 if (!last_report || (Time::Now() - last_time).InDays() >= 7) { 1060 stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue()); 1061 uma_report_++; 1062 return true; 1063 } 1064 return false; 1065 } 1066 1067 void BackendImpl::FirstEviction() { 1068 DCHECK(data_->header.create_time); 1069 if (!GetEntryCount()) 1070 return; // This is just for unit tests. 1071 1072 Time create_time = Time::FromInternalValue(data_->header.create_time); 1073 CACHE_UMA(AGE, "FillupAge", 0, create_time); 1074 1075 int64 use_time = stats_.GetCounter(Stats::TIMER); 1076 CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120)); 1077 CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio()); 1078 1079 if (!use_time) 1080 use_time = 1; 1081 CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0, 1082 static_cast<int>(data_->header.num_entries / use_time)); 1083 CACHE_UMA(COUNTS, "FirstByteIORate", 0, 1084 static_cast<int>((data_->header.num_bytes / 1024) / use_time)); 1085 1086 int avg_size = data_->header.num_bytes / GetEntryCount(); 1087 CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size); 1088 1089 int large_entries_bytes = stats_.GetLargeEntriesSize(); 1090 int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes; 1091 CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio); 1092 1093 if (new_eviction_) { 1094 CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio()); 1095 CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0, 1096 data_->header.lru.sizes[0] * 100 / data_->header.num_entries); 1097 CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0, 1098 data_->header.lru.sizes[1] * 100 / data_->header.num_entries); 1099 CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0, 1100 data_->header.lru.sizes[2] * 100 / data_->header.num_entries); 1101 } 1102 1103 stats_.ResetRatios(); 1104 } 1105 1106 void BackendImpl::CriticalError(int error) { 1107 LOG(ERROR) << "Critical error found " << error; 1108 if (disabled_) 1109 return; 1110 1111 stats_.OnEvent(Stats::FATAL_ERROR); 1112 LogStats(); 1113 ReportError(error); 1114 1115 // Setting the index table length to an invalid value will force re-creation 1116 // of the cache files. 1117 data_->header.table_len = 1; 1118 disabled_ = true; 1119 1120 if (!num_refs_) 1121 MessageLoop::current()->PostTask(FROM_HERE, 1122 factory_.NewRunnableMethod(&BackendImpl::RestartCache, true)); 1123 } 1124 1125 void BackendImpl::ReportError(int error) { 1126 // We transmit positive numbers, instead of direct error codes. 1127 DCHECK_LE(error, 0); 1128 CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1); 1129 } 1130 1131 void BackendImpl::OnEvent(Stats::Counters an_event) { 1132 stats_.OnEvent(an_event); 1133 } 1134 1135 void BackendImpl::OnRead(int32 bytes) { 1136 DCHECK_GE(bytes, 0); 1137 byte_count_ += bytes; 1138 if (byte_count_ < 0) 1139 byte_count_ = kint32max; 1140 } 1141 1142 void BackendImpl::OnWrite(int32 bytes) { 1143 // We use the same implementation as OnRead... just log the number of bytes. 1144 OnRead(bytes); 1145 } 1146 1147 void BackendImpl::OnStatsTimer() { 1148 stats_.OnEvent(Stats::TIMER); 1149 int64 time = stats_.GetCounter(Stats::TIMER); 1150 int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES); 1151 1152 // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding 1153 // the bias towards 0. 1154 if (num_refs_ && (current != num_refs_)) { 1155 int64 diff = (num_refs_ - current) / 50; 1156 if (!diff) 1157 diff = num_refs_ > current ? 1 : -1; 1158 current = current + diff; 1159 stats_.SetCounter(Stats::OPEN_ENTRIES, current); 1160 stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_); 1161 } 1162 1163 CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_); 1164 1165 CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_); 1166 CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024); 1167 entry_count_ = 0; 1168 byte_count_ = 0; 1169 1170 if (!data_) 1171 first_timer_ = false; 1172 if (first_timer_) { 1173 first_timer_ = false; 1174 if (ShouldReportAgain()) 1175 ReportStats(); 1176 } 1177 1178 // Save stats to disk at 5 min intervals. 1179 if (time % 10 == 0) 1180 stats_.Store(); 1181 } 1182 1183 void BackendImpl::IncrementIoCount() { 1184 num_pending_io_++; 1185 } 1186 1187 void BackendImpl::DecrementIoCount() { 1188 num_pending_io_--; 1189 } 1190 1191 void BackendImpl::SetUnitTestMode() { 1192 user_flags_ |= kUnitTestMode; 1193 unit_test_ = true; 1194 } 1195 1196 void BackendImpl::SetUpgradeMode() { 1197 user_flags_ |= kUpgradeMode; 1198 read_only_ = true; 1199 } 1200 1201 void BackendImpl::SetNewEviction() { 1202 user_flags_ |= kNewEviction; 1203 new_eviction_ = true; 1204 } 1205 1206 void BackendImpl::SetFlags(uint32 flags) { 1207 user_flags_ |= flags; 1208 } 1209 1210 void BackendImpl::ClearRefCountForTest() { 1211 num_refs_ = 0; 1212 } 1213 1214 int BackendImpl::FlushQueueForTest(CompletionCallback* callback) { 1215 background_queue_.FlushQueue(callback); 1216 return net::ERR_IO_PENDING; 1217 } 1218 1219 int BackendImpl::RunTaskForTest(Task* task, CompletionCallback* callback) { 1220 background_queue_.RunTask(task, callback); 1221 return net::ERR_IO_PENDING; 1222 } 1223 1224 void BackendImpl::TrimForTest(bool empty) { 1225 eviction_.SetTestMode(); 1226 eviction_.TrimCache(empty); 1227 } 1228 1229 void BackendImpl::TrimDeletedListForTest(bool empty) { 1230 eviction_.SetTestMode(); 1231 eviction_.TrimDeletedList(empty); 1232 } 1233 1234 int BackendImpl::SelfCheck() { 1235 if (!init_) { 1236 LOG(ERROR) << "Init failed"; 1237 return ERR_INIT_FAILED; 1238 } 1239 1240 int num_entries = rankings_.SelfCheck(); 1241 if (num_entries < 0) { 1242 LOG(ERROR) << "Invalid rankings list, error " << num_entries; 1243 return num_entries; 1244 } 1245 1246 if (num_entries != data_->header.num_entries) { 1247 LOG(ERROR) << "Number of entries mismatch"; 1248 return ERR_NUM_ENTRIES_MISMATCH; 1249 } 1250 1251 return CheckAllEntries(); 1252 } 1253 1254 // ------------------------------------------------------------------------ 1255 1256 int32 BackendImpl::GetEntryCount() const { 1257 if (!index_ || disabled_) 1258 return 0; 1259 // num_entries includes entries already evicted. 1260 int32 not_deleted = data_->header.num_entries - 1261 data_->header.lru.sizes[Rankings::DELETED]; 1262 1263 if (not_deleted < 0) { 1264 NOTREACHED(); 1265 not_deleted = 0; 1266 } 1267 1268 return not_deleted; 1269 } 1270 1271 int BackendImpl::OpenEntry(const std::string& key, Entry** entry, 1272 CompletionCallback* callback) { 1273 DCHECK(callback); 1274 background_queue_.OpenEntry(key, entry, callback); 1275 return net::ERR_IO_PENDING; 1276 } 1277 1278 int BackendImpl::CreateEntry(const std::string& key, Entry** entry, 1279 CompletionCallback* callback) { 1280 DCHECK(callback); 1281 background_queue_.CreateEntry(key, entry, callback); 1282 return net::ERR_IO_PENDING; 1283 } 1284 1285 int BackendImpl::DoomEntry(const std::string& key, 1286 CompletionCallback* callback) { 1287 DCHECK(callback); 1288 background_queue_.DoomEntry(key, callback); 1289 return net::ERR_IO_PENDING; 1290 } 1291 1292 int BackendImpl::DoomAllEntries(CompletionCallback* callback) { 1293 DCHECK(callback); 1294 background_queue_.DoomAllEntries(callback); 1295 return net::ERR_IO_PENDING; 1296 } 1297 1298 int BackendImpl::DoomEntriesBetween(const base::Time initial_time, 1299 const base::Time end_time, 1300 CompletionCallback* callback) { 1301 DCHECK(callback); 1302 background_queue_.DoomEntriesBetween(initial_time, end_time, callback); 1303 return net::ERR_IO_PENDING; 1304 } 1305 1306 int BackendImpl::DoomEntriesSince(const base::Time initial_time, 1307 CompletionCallback* callback) { 1308 DCHECK(callback); 1309 background_queue_.DoomEntriesSince(initial_time, callback); 1310 return net::ERR_IO_PENDING; 1311 } 1312 1313 int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry, 1314 CompletionCallback* callback) { 1315 DCHECK(callback); 1316 background_queue_.OpenNextEntry(iter, next_entry, callback); 1317 return net::ERR_IO_PENDING; 1318 } 1319 1320 void BackendImpl::EndEnumeration(void** iter) { 1321 background_queue_.EndEnumeration(*iter); 1322 *iter = NULL; 1323 } 1324 1325 void BackendImpl::GetStats(StatsItems* stats) { 1326 if (disabled_) 1327 return; 1328 1329 std::pair<std::string, std::string> item; 1330 1331 item.first = "Entries"; 1332 item.second = base::StringPrintf("%d", data_->header.num_entries); 1333 stats->push_back(item); 1334 1335 item.first = "Pending IO"; 1336 item.second = base::StringPrintf("%d", num_pending_io_); 1337 stats->push_back(item); 1338 1339 item.first = "Max size"; 1340 item.second = base::StringPrintf("%d", max_size_); 1341 stats->push_back(item); 1342 1343 item.first = "Current size"; 1344 item.second = base::StringPrintf("%d", data_->header.num_bytes); 1345 stats->push_back(item); 1346 1347 stats_.GetItems(stats); 1348 } 1349 1350 // ------------------------------------------------------------------------ 1351 1352 // We just created a new file so we're going to write the header and set the 1353 // file length to include the hash table (zero filled). 1354 bool BackendImpl::CreateBackingStore(disk_cache::File* file) { 1355 AdjustMaxCacheSize(0); 1356 1357 IndexHeader header; 1358 header.table_len = DesiredIndexTableLen(max_size_); 1359 1360 // We need file version 2.1 for the new eviction algorithm. 1361 if (new_eviction_) 1362 header.version = 0x20001; 1363 1364 header.create_time = Time::Now().ToInternalValue(); 1365 1366 if (!file->Write(&header, sizeof(header), 0)) 1367 return false; 1368 1369 return file->SetLength(GetIndexSize(header.table_len)); 1370 } 1371 1372 bool BackendImpl::InitBackingStore(bool* file_created) { 1373 file_util::CreateDirectory(path_); 1374 1375 FilePath index_name = path_.AppendASCII(kIndexName); 1376 1377 int flags = base::PLATFORM_FILE_READ | 1378 base::PLATFORM_FILE_WRITE | 1379 base::PLATFORM_FILE_OPEN_ALWAYS | 1380 base::PLATFORM_FILE_EXCLUSIVE_WRITE; 1381 scoped_refptr<disk_cache::File> file(new disk_cache::File( 1382 base::CreatePlatformFile(index_name, flags, file_created, NULL))); 1383 1384 if (!file->IsValid()) 1385 return false; 1386 1387 bool ret = true; 1388 if (*file_created) 1389 ret = CreateBackingStore(file); 1390 1391 file = NULL; 1392 if (!ret) 1393 return false; 1394 1395 index_ = new MappedFile(); 1396 data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0)); 1397 if (!data_) { 1398 LOG(ERROR) << "Unable to map Index file"; 1399 return false; 1400 } 1401 1402 if (index_->GetLength() < sizeof(Index)) { 1403 // We verify this again on CheckIndex() but it's easier to make sure now 1404 // that the header is there. 1405 LOG(ERROR) << "Corrupt Index file"; 1406 return false; 1407 } 1408 1409 return true; 1410 } 1411 1412 // The maximum cache size will be either set explicitly by the caller, or 1413 // calculated by this code. 1414 void BackendImpl::AdjustMaxCacheSize(int table_len) { 1415 if (max_size_) 1416 return; 1417 1418 // If table_len is provided, the index file exists. 1419 DCHECK(!table_len || data_->header.magic); 1420 1421 // The user is not setting the size, let's figure it out. 1422 #ifdef ANDROID 1423 int64 available = 10 * 1024 * 1024; // 10 MB 1424 #else 1425 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_); 1426 #endif 1427 if (available < 0) { 1428 max_size_ = kDefaultCacheSize; 1429 return; 1430 } 1431 1432 if (table_len) 1433 available += data_->header.num_bytes; 1434 1435 max_size_ = PreferedCacheSize(available); 1436 1437 // Let's not use more than the default size while we tune-up the performance 1438 // of bigger caches. TODO(rvargas): remove this limit. 1439 if (max_size_ > kDefaultCacheSize * 4) 1440 max_size_ = kDefaultCacheSize * 4; 1441 1442 if (!table_len) 1443 return; 1444 1445 // If we already have a table, adjust the size to it. 1446 int current_max_size = MaxStorageSizeForTable(table_len); 1447 if (max_size_ > current_max_size) 1448 max_size_= current_max_size; 1449 } 1450 1451 void BackendImpl::RestartCache(bool failure) { 1452 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); 1453 int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE); 1454 int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT); 1455 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); 1456 1457 PrepareForRestart(); 1458 if (failure) { 1459 DCHECK(!num_refs_); 1460 DCHECK(!open_entries_.size()); 1461 DelayedCacheCleanup(path_); 1462 } else { 1463 DeleteCache(path_, false); 1464 } 1465 1466 // Don't call Init() if directed by the unit test: we are simulating a failure 1467 // trying to re-enable the cache. 1468 if (unit_test_) 1469 init_ = true; // Let the destructor do proper cleanup. 1470 else if (SyncInit() == net::OK) { 1471 stats_.SetCounter(Stats::FATAL_ERROR, errors); 1472 stats_.SetCounter(Stats::DOOM_CACHE, full_dooms); 1473 stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms); 1474 stats_.SetCounter(Stats::LAST_REPORT, last_report); 1475 } 1476 } 1477 1478 void BackendImpl::PrepareForRestart() { 1479 // Reset the mask_ if it was not given by the user. 1480 if (!(user_flags_ & kMask)) 1481 mask_ = 0; 1482 1483 if (!(user_flags_ & kNewEviction)) 1484 new_eviction_ = false; 1485 1486 disabled_ = true; 1487 data_->header.crash = 0; 1488 index_ = NULL; 1489 data_ = NULL; 1490 block_files_.CloseFiles(); 1491 rankings_.Reset(); 1492 init_ = false; 1493 restarted_ = true; 1494 } 1495 1496 int BackendImpl::NewEntry(Addr address, EntryImpl** entry) { 1497 EntriesMap::iterator it = open_entries_.find(address.value()); 1498 if (it != open_entries_.end()) { 1499 // Easy job. This entry is already in memory. 1500 EntryImpl* this_entry = it->second; 1501 this_entry->AddRef(); 1502 *entry = this_entry; 1503 return 0; 1504 } 1505 1506 scoped_refptr<EntryImpl> cache_entry( 1507 new EntryImpl(this, address, read_only_)); 1508 IncreaseNumRefs(); 1509 *entry = NULL; 1510 1511 if (!address.is_initialized() || address.is_separate_file() || 1512 address.file_type() != BLOCK_256) { 1513 LOG(WARNING) << "Wrong entry address."; 1514 return ERR_INVALID_ADDRESS; 1515 } 1516 1517 TimeTicks start = TimeTicks::Now(); 1518 if (!cache_entry->entry()->Load()) 1519 return ERR_READ_FAILURE; 1520 1521 if (IsLoaded()) { 1522 CACHE_UMA(AGE_MS, "LoadTime", GetSizeGroup(), start); 1523 } 1524 1525 if (!cache_entry->SanityCheck()) { 1526 LOG(WARNING) << "Messed up entry found."; 1527 return ERR_INVALID_ENTRY; 1528 } 1529 1530 if (!cache_entry->LoadNodeAddress()) 1531 return ERR_READ_FAILURE; 1532 1533 // Prevent overwriting the dirty flag on the destructor. 1534 cache_entry->SetDirtyFlag(GetCurrentEntryId()); 1535 1536 if (!rankings_.SanityCheck(cache_entry->rankings(), false)) { 1537 cache_entry->SetDirtyFlag(0); 1538 // Don't remove this from the list (it is not linked properly). Instead, 1539 // break the link back to the entry because it is going away, and leave the 1540 // rankings node to be deleted if we find it through a list. 1541 rankings_.SetContents(cache_entry->rankings(), 0); 1542 } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) { 1543 cache_entry->SetDirtyFlag(0); 1544 rankings_.SetContents(cache_entry->rankings(), address.value()); 1545 } 1546 1547 if (!cache_entry->DataSanityCheck()) { 1548 LOG(WARNING) << "Messed up entry found."; 1549 cache_entry->SetDirtyFlag(0); 1550 cache_entry->FixForDelete(); 1551 } 1552 1553 if (cache_entry->dirty()) { 1554 Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()), 1555 address.value()); 1556 } 1557 1558 open_entries_[address.value()] = cache_entry; 1559 1560 cache_entry->BeginLogging(net_log_, false); 1561 cache_entry.swap(entry); 1562 return 0; 1563 } 1564 1565 EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash, 1566 bool find_parent, Addr entry_addr, 1567 bool* match_error) { 1568 Addr address(data_->table[hash & mask_]); 1569 scoped_refptr<EntryImpl> cache_entry, parent_entry; 1570 EntryImpl* tmp = NULL; 1571 bool found = false; 1572 std::set<CacheAddr> visited; 1573 *match_error = false; 1574 1575 for (;;) { 1576 if (disabled_) 1577 break; 1578 1579 if (visited.find(address.value()) != visited.end()) { 1580 // It's possible for a buggy version of the code to write a loop. Just 1581 // break it. 1582 Trace("Hash collision loop 0x%x", address.value()); 1583 address.set_value(0); 1584 parent_entry->SetNextAddress(address); 1585 } 1586 visited.insert(address.value()); 1587 1588 if (!address.is_initialized()) { 1589 if (find_parent) 1590 found = true; 1591 break; 1592 } 1593 1594 int error = NewEntry(address, &tmp); 1595 cache_entry.swap(&tmp); 1596 1597 if (error || cache_entry->dirty()) { 1598 // This entry is dirty on disk (it was not properly closed): we cannot 1599 // trust it. 1600 Addr child(0); 1601 if (!error) 1602 child.set_value(cache_entry->GetNextAddress()); 1603 1604 if (parent_entry) { 1605 parent_entry->SetNextAddress(child); 1606 parent_entry = NULL; 1607 } else { 1608 data_->table[hash & mask_] = child.value(); 1609 } 1610 1611 Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(), 1612 address.value()); 1613 1614 if (!error) { 1615 // It is important to call DestroyInvalidEntry after removing this 1616 // entry from the table. 1617 DestroyInvalidEntry(cache_entry); 1618 cache_entry = NULL; 1619 } else { 1620 Trace("NewEntry failed on MatchEntry 0x%x", address.value()); 1621 } 1622 1623 // Restart the search. 1624 address.set_value(data_->table[hash & mask_]); 1625 visited.clear(); 1626 continue; 1627 } 1628 1629 DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_); 1630 if (cache_entry->IsSameEntry(key, hash)) { 1631 if (!cache_entry->Update()) 1632 cache_entry = NULL; 1633 found = true; 1634 if (find_parent && entry_addr.value() != address.value()) { 1635 Trace("Entry not on the index 0x%x", address.value()); 1636 *match_error = true; 1637 parent_entry = NULL; 1638 } 1639 break; 1640 } 1641 if (!cache_entry->Update()) 1642 cache_entry = NULL; 1643 parent_entry = cache_entry; 1644 cache_entry = NULL; 1645 if (!parent_entry) 1646 break; 1647 1648 address.set_value(parent_entry->GetNextAddress()); 1649 } 1650 1651 if (parent_entry && (!find_parent || !found)) 1652 parent_entry = NULL; 1653 1654 if (find_parent && entry_addr.is_initialized() && !cache_entry) { 1655 *match_error = true; 1656 parent_entry = NULL; 1657 } 1658 1659 if (cache_entry && (find_parent || !found)) 1660 cache_entry = NULL; 1661 1662 find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp); 1663 return tmp; 1664 } 1665 1666 // This is the actual implementation for OpenNextEntry and OpenPrevEntry. 1667 EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) { 1668 if (disabled_) 1669 return NULL; 1670 1671 DCHECK(iter); 1672 1673 const int kListsToSearch = 3; 1674 scoped_refptr<EntryImpl> entries[kListsToSearch]; 1675 scoped_ptr<Rankings::Iterator> iterator( 1676 reinterpret_cast<Rankings::Iterator*>(*iter)); 1677 *iter = NULL; 1678 1679 if (!iterator.get()) { 1680 iterator.reset(new Rankings::Iterator(&rankings_)); 1681 bool ret = false; 1682 1683 // Get an entry from each list. 1684 for (int i = 0; i < kListsToSearch; i++) { 1685 EntryImpl* temp = NULL; 1686 ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i), 1687 &iterator->nodes[i], &temp); 1688 entries[i].swap(&temp); // The entry was already addref'd. 1689 } 1690 if (!ret) 1691 return NULL; 1692 } else { 1693 // Get the next entry from the last list, and the actual entries for the 1694 // elements on the other lists. 1695 for (int i = 0; i < kListsToSearch; i++) { 1696 EntryImpl* temp = NULL; 1697 if (iterator->list == i) { 1698 OpenFollowingEntryFromList(forward, iterator->list, 1699 &iterator->nodes[i], &temp); 1700 } else { 1701 temp = GetEnumeratedEntry(iterator->nodes[i], 1702 static_cast<Rankings::List>(i)); 1703 } 1704 1705 entries[i].swap(&temp); // The entry was already addref'd. 1706 } 1707 } 1708 1709 int newest = -1; 1710 int oldest = -1; 1711 Time access_times[kListsToSearch]; 1712 for (int i = 0; i < kListsToSearch; i++) { 1713 if (entries[i].get()) { 1714 access_times[i] = entries[i]->GetLastUsed(); 1715 if (newest < 0) { 1716 DCHECK_LT(oldest, 0); 1717 newest = oldest = i; 1718 continue; 1719 } 1720 if (access_times[i] > access_times[newest]) 1721 newest = i; 1722 if (access_times[i] < access_times[oldest]) 1723 oldest = i; 1724 } 1725 } 1726 1727 if (newest < 0 || oldest < 0) 1728 return NULL; 1729 1730 EntryImpl* next_entry; 1731 if (forward) { 1732 next_entry = entries[newest].release(); 1733 iterator->list = static_cast<Rankings::List>(newest); 1734 } else { 1735 next_entry = entries[oldest].release(); 1736 iterator->list = static_cast<Rankings::List>(oldest); 1737 } 1738 1739 *iter = iterator.release(); 1740 return next_entry; 1741 } 1742 1743 bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list, 1744 CacheRankingsBlock** from_entry, 1745 EntryImpl** next_entry) { 1746 if (disabled_) 1747 return false; 1748 1749 if (!new_eviction_ && Rankings::NO_USE != list) 1750 return false; 1751 1752 Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry); 1753 CacheRankingsBlock* next_block = forward ? 1754 rankings_.GetNext(rankings.get(), list) : 1755 rankings_.GetPrev(rankings.get(), list); 1756 Rankings::ScopedRankingsBlock next(&rankings_, next_block); 1757 *from_entry = NULL; 1758 1759 *next_entry = GetEnumeratedEntry(next.get(), list); 1760 if (!*next_entry) 1761 return false; 1762 1763 *from_entry = next.release(); 1764 return true; 1765 } 1766 1767 EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next, 1768 Rankings::List list) { 1769 if (!next || disabled_) 1770 return NULL; 1771 1772 EntryImpl* entry; 1773 int rv = NewEntry(Addr(next->Data()->contents), &entry); 1774 if (rv) { 1775 rankings_.Remove(next, list, false); 1776 if (rv == ERR_INVALID_ADDRESS) { 1777 // There is nothing linked from the index. Delete the rankings node. 1778 DeleteBlock(next->address(), true); 1779 } 1780 return NULL; 1781 } 1782 1783 if (entry->dirty()) { 1784 // We cannot trust this entry. 1785 InternalDoomEntry(entry); 1786 entry->Release(); 1787 return NULL; 1788 } 1789 1790 if (!entry->Update()) { 1791 entry->Release(); 1792 return NULL; 1793 } 1794 1795 // Note that it is unfortunate (but possible) for this entry to be clean, but 1796 // not actually the real entry. In other words, we could have lost this entry 1797 // from the index, and it could have been replaced with a newer one. It's not 1798 // worth checking that this entry is "the real one", so we just return it and 1799 // let the enumeration continue; this entry will be evicted at some point, and 1800 // the regular path will work with the real entry. With time, this problem 1801 // will disasappear because this scenario is just a bug. 1802 1803 // Make sure that we save the key for later. 1804 entry->GetKey(); 1805 1806 return entry; 1807 } 1808 1809 EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) { 1810 if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) { 1811 deleted_entry->Release(); 1812 stats_.OnEvent(Stats::CREATE_MISS); 1813 Trace("create entry miss "); 1814 return NULL; 1815 } 1816 1817 // We are attempting to create an entry and found out that the entry was 1818 // previously deleted. 1819 1820 eviction_.OnCreateEntry(deleted_entry); 1821 entry_count_++; 1822 1823 stats_.OnEvent(Stats::RESURRECT_HIT); 1824 Trace("Resurrect entry hit "); 1825 return deleted_entry; 1826 } 1827 1828 void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) { 1829 LOG(WARNING) << "Destroying invalid entry."; 1830 Trace("Destroying invalid entry 0x%p", entry); 1831 1832 entry->SetPointerForInvalidEntry(GetCurrentEntryId()); 1833 1834 eviction_.OnDoomEntry(entry); 1835 entry->InternalDoom(); 1836 1837 if (!new_eviction_) 1838 DecreaseNumEntries(); 1839 stats_.OnEvent(Stats::INVALID_ENTRY); 1840 } 1841 1842 void BackendImpl::AddStorageSize(int32 bytes) { 1843 data_->header.num_bytes += bytes; 1844 DCHECK_GE(data_->header.num_bytes, 0); 1845 } 1846 1847 void BackendImpl::SubstractStorageSize(int32 bytes) { 1848 data_->header.num_bytes -= bytes; 1849 DCHECK_GE(data_->header.num_bytes, 0); 1850 } 1851 1852 void BackendImpl::IncreaseNumRefs() { 1853 num_refs_++; 1854 if (max_refs_ < num_refs_) 1855 max_refs_ = num_refs_; 1856 } 1857 1858 void BackendImpl::DecreaseNumRefs() { 1859 DCHECK(num_refs_); 1860 num_refs_--; 1861 1862 if (!num_refs_ && disabled_) 1863 MessageLoop::current()->PostTask(FROM_HERE, 1864 factory_.NewRunnableMethod(&BackendImpl::RestartCache, true)); 1865 } 1866 1867 void BackendImpl::IncreaseNumEntries() { 1868 data_->header.num_entries++; 1869 DCHECK_GT(data_->header.num_entries, 0); 1870 } 1871 1872 void BackendImpl::DecreaseNumEntries() { 1873 data_->header.num_entries--; 1874 if (data_->header.num_entries < 0) { 1875 NOTREACHED(); 1876 data_->header.num_entries = 0; 1877 } 1878 } 1879 1880 void BackendImpl::LogStats() { 1881 StatsItems stats; 1882 GetStats(&stats); 1883 1884 for (size_t index = 0; index < stats.size(); index++) 1885 VLOG(1) << stats[index].first << ": " << stats[index].second; 1886 } 1887 1888 void BackendImpl::ReportStats() { 1889 CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries); 1890 1891 int current_size = data_->header.num_bytes / (1024 * 1024); 1892 int max_size = max_size_ / (1024 * 1024); 1893 CACHE_UMA(COUNTS_10000, "Size2", 0, current_size); 1894 CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size); 1895 if (!max_size) 1896 max_size++; 1897 CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size); 1898 1899 CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0, 1900 static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES))); 1901 CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0, 1902 static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES))); 1903 stats_.SetCounter(Stats::MAX_ENTRIES, 0); 1904 1905 CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0, 1906 static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR))); 1907 CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0, 1908 static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE))); 1909 CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0, 1910 static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT))); 1911 stats_.SetCounter(Stats::FATAL_ERROR, 0); 1912 stats_.SetCounter(Stats::DOOM_CACHE, 0); 1913 stats_.SetCounter(Stats::DOOM_RECENT, 0); 1914 1915 int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120; 1916 if (!data_->header.create_time || !data_->header.lru.filled) { 1917 int cause = data_->header.create_time ? 0 : 1; 1918 if (!data_->header.lru.filled) 1919 cause |= 2; 1920 CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause); 1921 CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours)); 1922 return; 1923 } 1924 1925 // This is an up to date client that will report FirstEviction() data. After 1926 // that event, start reporting this: 1927 1928 CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours)); 1929 1930 int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120; 1931 stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER)); 1932 1933 // We may see users with no use_hours at this point if this is the first time 1934 // we are running this code. 1935 if (use_hours) 1936 use_hours = total_hours - use_hours; 1937 1938 if (!use_hours || !GetEntryCount() || !data_->header.num_bytes) 1939 return; 1940 1941 CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours)); 1942 CACHE_UMA(PERCENTAGE, "HitRatio", data_->header.experiment, 1943 stats_.GetHitRatio()); 1944 1945 int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours; 1946 CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate)); 1947 1948 int avg_size = data_->header.num_bytes / GetEntryCount(); 1949 CACHE_UMA(COUNTS, "EntrySize", 0, avg_size); 1950 CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries); 1951 1952 CACHE_UMA(PERCENTAGE, "IndexLoad", 0, 1953 data_->header.num_entries * 100 / (mask_ + 1)); 1954 1955 int large_entries_bytes = stats_.GetLargeEntriesSize(); 1956 int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes; 1957 CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio); 1958 1959 if (new_eviction_) { 1960 CACHE_UMA(PERCENTAGE, "ResurrectRatio", data_->header.experiment, 1961 stats_.GetResurrectRatio()); 1962 CACHE_UMA(PERCENTAGE, "NoUseRatio", 0, 1963 data_->header.lru.sizes[0] * 100 / data_->header.num_entries); 1964 CACHE_UMA(PERCENTAGE, "LowUseRatio", 0, 1965 data_->header.lru.sizes[1] * 100 / data_->header.num_entries); 1966 CACHE_UMA(PERCENTAGE, "HighUseRatio", 0, 1967 data_->header.lru.sizes[2] * 100 / data_->header.num_entries); 1968 CACHE_UMA(PERCENTAGE, "DeletedRatio", data_->header.experiment, 1969 data_->header.lru.sizes[4] * 100 / data_->header.num_entries); 1970 } 1971 1972 stats_.ResetRatios(); 1973 stats_.SetCounter(Stats::TRIM_ENTRY, 0); 1974 1975 if (cache_type_ == net::DISK_CACHE) 1976 block_files_.ReportStats(); 1977 } 1978 1979 void BackendImpl::UpgradeTo2_1() { 1980 // 2.1 is basically the same as 2.0, except that new fields are actually 1981 // updated by the new eviction algorithm. 1982 DCHECK(0x20000 == data_->header.version); 1983 data_->header.version = 0x20001; 1984 data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries; 1985 } 1986 1987 bool BackendImpl::CheckIndex() { 1988 DCHECK(data_); 1989 1990 size_t current_size = index_->GetLength(); 1991 if (current_size < sizeof(Index)) { 1992 LOG(ERROR) << "Corrupt Index file"; 1993 return false; 1994 } 1995 1996 if (new_eviction_) { 1997 // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1. 1998 if (kIndexMagic != data_->header.magic || 1999 kCurrentVersion >> 16 != data_->header.version >> 16) { 2000 LOG(ERROR) << "Invalid file version or magic"; 2001 return false; 2002 } 2003 if (kCurrentVersion == data_->header.version) { 2004 // We need file version 2.1 for the new eviction algorithm. 2005 UpgradeTo2_1(); 2006 } 2007 } else { 2008 if (kIndexMagic != data_->header.magic || 2009 kCurrentVersion != data_->header.version) { 2010 LOG(ERROR) << "Invalid file version or magic"; 2011 return false; 2012 } 2013 } 2014 2015 if (!data_->header.table_len) { 2016 LOG(ERROR) << "Invalid table size"; 2017 return false; 2018 } 2019 2020 if (current_size < GetIndexSize(data_->header.table_len) || 2021 data_->header.table_len & (kBaseTableLen - 1)) { 2022 LOG(ERROR) << "Corrupt Index file"; 2023 return false; 2024 } 2025 2026 AdjustMaxCacheSize(data_->header.table_len); 2027 2028 if (data_->header.num_bytes < 0 || 2029 (max_size_ < kint32max - kDefaultCacheSize && 2030 data_->header.num_bytes > max_size_ + kDefaultCacheSize)) { 2031 LOG(ERROR) << "Invalid cache (current) size"; 2032 return false; 2033 } 2034 2035 if (data_->header.num_entries < 0) { 2036 LOG(ERROR) << "Invalid number of entries"; 2037 return false; 2038 } 2039 2040 if (!mask_) 2041 mask_ = data_->header.table_len - 1; 2042 2043 // Load the table into memory with a single read. 2044 scoped_array<char> buf(new char[current_size]); 2045 return index_->Read(buf.get(), current_size, 0); 2046 } 2047 2048 int BackendImpl::CheckAllEntries() { 2049 int num_dirty = 0; 2050 int num_entries = 0; 2051 DCHECK(mask_ < kuint32max); 2052 for (int i = 0; i <= static_cast<int>(mask_); i++) { 2053 Addr address(data_->table[i]); 2054 if (!address.is_initialized()) 2055 continue; 2056 for (;;) { 2057 EntryImpl* tmp; 2058 int ret = NewEntry(address, &tmp); 2059 if (ret) 2060 return ret; 2061 scoped_refptr<EntryImpl> cache_entry; 2062 cache_entry.swap(&tmp); 2063 2064 if (cache_entry->dirty()) 2065 num_dirty++; 2066 else if (CheckEntry(cache_entry.get())) 2067 num_entries++; 2068 else 2069 return ERR_INVALID_ENTRY; 2070 2071 address.set_value(cache_entry->GetNextAddress()); 2072 if (!address.is_initialized()) 2073 break; 2074 } 2075 } 2076 2077 Trace("CheckAllEntries End"); 2078 if (num_entries + num_dirty != data_->header.num_entries) { 2079 LOG(ERROR) << "Number of entries mismatch"; 2080 return ERR_NUM_ENTRIES_MISMATCH; 2081 } 2082 2083 return num_dirty; 2084 } 2085 2086 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) { 2087 bool ok = block_files_.IsValid(cache_entry->entry()->address()); 2088 ok = ok && block_files_.IsValid(cache_entry->rankings()->address()); 2089 EntryStore* data = cache_entry->entry()->Data(); 2090 for (size_t i = 0; i < arraysize(data->data_addr); i++) { 2091 if (data->data_addr[i]) { 2092 Addr address(data->data_addr[i]); 2093 if (address.is_block_file()) 2094 ok = ok && block_files_.IsValid(address); 2095 } 2096 } 2097 2098 RankingsNode* rankings = cache_entry->rankings()->Data(); 2099 return ok && !rankings->dummy; 2100 } 2101 2102 int BackendImpl::MaxBuffersSize() { 2103 static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory(); 2104 static bool done = false; 2105 2106 if (!done) { 2107 const int kMaxBuffersSize = 30 * 1024 * 1024; 2108 2109 // We want to use up to 2% of the computer's memory. 2110 total_memory = total_memory * 2 / 100; 2111 if (total_memory > kMaxBuffersSize || total_memory <= 0) 2112 total_memory = kMaxBuffersSize; 2113 2114 done = true; 2115 } 2116 2117 return static_cast<int>(total_memory); 2118 } 2119 2120 } // namespace disk_cache 2121