// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/basictypes.h"
#include "base/file_util.h"
#include "base/metrics/field_trial.h"
#include "base/port.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
#include "net/base/cache_type.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/experiments.h"
#include "net/disk_cache/histogram_macros.h"
#include "net/disk_cache/mapped_file.h"
#include "net/disk_cache/mem_backend_impl.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/disk_cache/tracing_cache_backend.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_WIN)
#include "base/win/scoped_handle.h"
#endif

using base::Time;

namespace {

// Key of the single entry written by CreateExistingEntryCache(); tests that
// reopen the cache look this key up to verify the on-disk state survived.
const char kExistingEntryKey[] = "existing entry key";

// Initializes a blockfile backend rooted at |cache_path| and creates one
// entry (keyed by kExistingEntryKey) in it, so that a test can later reopen
// a cache that is known to contain data. Returns a NULL scoped_ptr if either
// backend initialization or entry creation fails.
scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
    const base::Thread& cache_thread,
    base::FilePath& cache_path) {
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path, cache_thread.message_loop_proxy(), NULL));
  int rv = cache->Init(cb.callback());
  if (cb.GetResult(rv) != net::OK)
    return scoped_ptr<disk_cache::BackendImpl>();

  disk_cache::Entry* entry = NULL;
  rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
  if (cb.GetResult(rv) != net::OK)
    return scoped_ptr<disk_cache::BackendImpl>();
  // The entry only needs to exist on disk; drop the in-memory reference.
  entry->Close();

  return cache.Pass();
}

}  // namespace

// Tests that can run with different types of caches.
class DiskCacheBackendTest : public DiskCacheTestWithCache {
 protected:
  // Some utility methods:

  // Perform IO operations on the cache until there is pending IO.
  int GeneratePendingIO(net::TestCompletionCallback* cb);

  // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
  // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
  // There are 4 entries after doomed_start and 2 after doomed_end.
  void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);

  // Creates random entries and records their keys in |key_pool|.
  bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
  // Iterates the backend, matching opened entries against |keys_to_match|.
  bool EnumerateAndMatchKeys(int max_to_open,
                             void** iter,
                             std::set<std::string>* keys_to_match,
                             size_t* count);

  // Actual tests:
  void BackendBasics();
  void BackendKeying();
  void BackendShutdownWithPendingFileIO(bool fast);
  void BackendShutdownWithPendingIO(bool fast);
  void BackendShutdownWithPendingCreate(bool fast);
  void BackendSetSize();
  void BackendLoad();
  void BackendChain();
  void BackendValidEntry();
  void BackendInvalidEntry();
  void BackendInvalidEntryRead();
  void BackendInvalidEntryWithLoad();
  void BackendTrimInvalidEntry();
  void BackendTrimInvalidEntry2();
  void BackendEnumerations();
  void BackendEnumerations2();
  void BackendInvalidEntryEnumeration();
  void BackendFixEnumerators();
  void BackendDoomRecent();
  void BackendDoomBetween();
  void BackendTransaction(const std::string& name, int num_entries, bool load);
  void BackendRecoverInsert();
  void BackendRecoverRemove();
  void BackendRecoverWithEviction();
  void BackendInvalidEntry2();
  void BackendInvalidEntry3();
  void BackendInvalidEntry7();
  void BackendInvalidEntry8();
  void BackendInvalidEntry9(bool eviction);
  void BackendInvalidEntry10(bool eviction);
  void BackendInvalidEntry11(bool eviction);
  void BackendTrimInvalidEntry12();
  void BackendDoomAll();
  void BackendDoomAll2();
  void BackendInvalidRankings();
  void BackendInvalidRankings2();
  void BackendDisable();
  void BackendDisable2();
  void BackendDisable3();
  void BackendDisable4();
  void TracingBackendBasics();
};

// Issues writes against one entry until the backend reports ERR_IO_PENDING.
// Returns the last write result: ERR_IO_PENDING on success at generating
// pending IO, kSize (or ERR_FAILED) if every write completed synchronously.
// Requires the test to run the cache on the current thread.
int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
  if (!use_current_thread_) {
    ADD_FAILURE();
    return net::ERR_FAILED;
  }

  disk_cache::Entry* entry;
  int rv = cache_->CreateEntry("some key", &entry, cb->callback());
  if (cb->GetResult(rv) != net::OK)
    return net::ERR_CACHE_CREATE_FAILURE;

  const int kSize = 25000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
    // We are using the current thread as the cache thread because we want to
    // be able to call directly this method to make sure that the OS (instead
    // of us switching thread) is returning IO pending.
    if (!simple_cache_mode_) {
      rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
          0, i, buffer.get(), kSize, cb->callback(), false);
    } else {
      rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
    }

    if (rv == net::ERR_IO_PENDING)
      break;
    if (rv != kSize)
      rv = net::ERR_FAILED;
  }

  // Don't call Close() to avoid going through the queue or we'll deadlock
  // waiting for the operation to finish.
  if (!simple_cache_mode_)
    static_cast<disk_cache::EntryImpl*>(entry)->Release();
  else
    entry->Close();

  return rv;
}

void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
                                           base::Time* doomed_end) {
  InitCache();

  const int kSize = 50;
  // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
  const int kOffset = 10 + 1024 * 1024;

  disk_cache::Entry* entry0 = NULL;
  disk_cache::Entry* entry1 = NULL;
  disk_cache::Entry* entry2 = NULL;

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
  ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
  entry0->Close();

  // AddDelay() separates the timestamps so the doom boundaries are distinct.
  FlushQueueForTest();
  AddDelay();
  if (doomed_start)
    *doomed_start = base::Time::Now();

  // Order in rankings list:
  // first_part1, first_part2, second_part1, second_part2
  ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
  ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
  entry1->Close();

  ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
  ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
  entry2->Close();

  FlushQueueForTest();
  AddDelay();
  if (doomed_end)
    *doomed_end = base::Time::Now();

  // Order in rankings list:
  // third_part1, fourth_part1, third_part2, fourth_part2
  disk_cache::Entry* entry3 = NULL;
  disk_cache::Entry* entry4 = NULL;
  ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
  ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
  ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
  entry3->Close();
  entry4->Close();

  FlushQueueForTest();
  AddDelay();
}

// Creates entries based on random keys. Stores these keys in |key_pool|.
// Returns false if creation fails or the backend's entry count disagrees
// with the number of keys recorded.
bool DiskCacheBackendTest::CreateSetOfRandomEntries(
    std::set<std::string>* key_pool) {
  const int kNumEntries = 10;

  for (int i = 0; i < kNumEntries; ++i) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    if (CreateEntry(key, &entry) != net::OK)
      return false;
    key_pool->insert(key);
    entry->Close();
  }
  return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
}

// Performs iteration over the backend and checks that the keys of entries
// opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
// will be opened, if it is non-negative. Otherwise, iteration will continue
// until OpenNextEntry stops returning net::OK.
255 bool DiskCacheBackendTest::EnumerateAndMatchKeys( 256 int max_to_open, 257 void** iter, 258 std::set<std::string>* keys_to_match, 259 size_t* count) { 260 disk_cache::Entry* entry; 261 262 while (OpenNextEntry(iter, &entry) == net::OK) { 263 if (!entry) 264 return false; 265 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey())); 266 entry->Close(); 267 ++(*count); 268 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open) 269 break; 270 }; 271 272 return true; 273 } 274 275 void DiskCacheBackendTest::BackendBasics() { 276 InitCache(); 277 disk_cache::Entry *entry1 = NULL, *entry2 = NULL; 278 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1)); 279 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1)); 280 ASSERT_TRUE(NULL != entry1); 281 entry1->Close(); 282 entry1 = NULL; 283 284 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1)); 285 ASSERT_TRUE(NULL != entry1); 286 entry1->Close(); 287 entry1 = NULL; 288 289 EXPECT_NE(net::OK, CreateEntry("the first key", &entry1)); 290 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1)); 291 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2)); 292 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2)); 293 ASSERT_TRUE(NULL != entry1); 294 ASSERT_TRUE(NULL != entry2); 295 EXPECT_EQ(2, cache_->GetEntryCount()); 296 297 disk_cache::Entry* entry3 = NULL; 298 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3)); 299 ASSERT_TRUE(NULL != entry3); 300 EXPECT_TRUE(entry2 == entry3); 301 EXPECT_EQ(2, cache_->GetEntryCount()); 302 303 EXPECT_EQ(net::OK, DoomEntry("some other key")); 304 EXPECT_EQ(1, cache_->GetEntryCount()); 305 entry1->Close(); 306 entry2->Close(); 307 entry3->Close(); 308 309 EXPECT_EQ(net::OK, DoomEntry("the first key")); 310 EXPECT_EQ(0, cache_->GetEntryCount()); 311 312 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1)); 313 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2)); 314 entry1->Doom(); 315 entry1->Close(); 316 EXPECT_EQ(net::OK, DoomEntry("some 
other key")); 317 EXPECT_EQ(0, cache_->GetEntryCount()); 318 entry2->Close(); 319 } 320 321 TEST_F(DiskCacheBackendTest, Basics) { 322 BackendBasics(); 323 } 324 325 TEST_F(DiskCacheBackendTest, NewEvictionBasics) { 326 SetNewEviction(); 327 BackendBasics(); 328 } 329 330 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) { 331 SetMemoryOnlyMode(); 332 BackendBasics(); 333 } 334 335 TEST_F(DiskCacheBackendTest, AppCacheBasics) { 336 SetCacheType(net::APP_CACHE); 337 BackendBasics(); 338 } 339 340 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) { 341 SetCacheType(net::SHADER_CACHE); 342 BackendBasics(); 343 } 344 345 void DiskCacheBackendTest::BackendKeying() { 346 InitCache(); 347 const char* kName1 = "the first key"; 348 const char* kName2 = "the first Key"; 349 disk_cache::Entry *entry1, *entry2; 350 ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1)); 351 352 ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2)); 353 EXPECT_TRUE(entry1 != entry2) << "Case sensitive"; 354 entry2->Close(); 355 356 char buffer[30]; 357 base::strlcpy(buffer, kName1, arraysize(buffer)); 358 ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2)); 359 EXPECT_TRUE(entry1 == entry2); 360 entry2->Close(); 361 362 base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1); 363 ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2)); 364 EXPECT_TRUE(entry1 == entry2); 365 entry2->Close(); 366 367 base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3); 368 ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2)); 369 EXPECT_TRUE(entry1 == entry2); 370 entry2->Close(); 371 372 // Now verify long keys. 
373 char buffer2[20000]; 374 memset(buffer2, 's', sizeof(buffer2)); 375 buffer2[1023] = '\0'; 376 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file"; 377 entry2->Close(); 378 379 buffer2[1023] = 'g'; 380 buffer2[19999] = '\0'; 381 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file"; 382 entry2->Close(); 383 entry1->Close(); 384 } 385 386 TEST_F(DiskCacheBackendTest, Keying) { 387 BackendKeying(); 388 } 389 390 TEST_F(DiskCacheBackendTest, NewEvictionKeying) { 391 SetNewEviction(); 392 BackendKeying(); 393 } 394 395 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) { 396 SetMemoryOnlyMode(); 397 BackendKeying(); 398 } 399 400 TEST_F(DiskCacheBackendTest, AppCacheKeying) { 401 SetCacheType(net::APP_CACHE); 402 BackendKeying(); 403 } 404 405 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) { 406 SetCacheType(net::SHADER_CACHE); 407 BackendKeying(); 408 } 409 410 TEST_F(DiskCacheTest, CreateBackend) { 411 net::TestCompletionCallback cb; 412 413 { 414 ASSERT_TRUE(CleanupCacheDir()); 415 base::Thread cache_thread("CacheThread"); 416 ASSERT_TRUE(cache_thread.StartWithOptions( 417 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); 418 419 // Test the private factory method(s). 420 scoped_ptr<disk_cache::Backend> cache; 421 cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL); 422 ASSERT_TRUE(cache.get()); 423 cache.reset(); 424 425 // Now test the public API. 
426 int rv = 427 disk_cache::CreateCacheBackend(net::DISK_CACHE, 428 net::CACHE_BACKEND_DEFAULT, 429 cache_path_, 430 0, 431 false, 432 cache_thread.message_loop_proxy().get(), 433 NULL, 434 &cache, 435 cb.callback()); 436 ASSERT_EQ(net::OK, cb.GetResult(rv)); 437 ASSERT_TRUE(cache.get()); 438 cache.reset(); 439 440 rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE, 441 net::CACHE_BACKEND_DEFAULT, 442 base::FilePath(), 0, 443 false, NULL, NULL, &cache, 444 cb.callback()); 445 ASSERT_EQ(net::OK, cb.GetResult(rv)); 446 ASSERT_TRUE(cache.get()); 447 cache.reset(); 448 } 449 450 base::MessageLoop::current()->RunUntilIdle(); 451 } 452 453 // Tests that |BackendImpl| fails to initialize with a missing file. 454 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) { 455 ASSERT_TRUE(CopyTestCache("bad_entry")); 456 base::FilePath filename = cache_path_.AppendASCII("data_1"); 457 base::DeleteFile(filename, false); 458 base::Thread cache_thread("CacheThread"); 459 ASSERT_TRUE(cache_thread.StartWithOptions( 460 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); 461 net::TestCompletionCallback cb; 462 463 bool prev = base::ThreadRestrictions::SetIOAllowed(false); 464 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl( 465 cache_path_, cache_thread.message_loop_proxy().get(), NULL)); 466 int rv = cache->Init(cb.callback()); 467 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv)); 468 base::ThreadRestrictions::SetIOAllowed(prev); 469 470 cache.reset(); 471 DisableIntegrityCheck(); 472 } 473 474 TEST_F(DiskCacheBackendTest, ExternalFiles) { 475 InitCache(); 476 // First, let's create a file on the folder. 477 base::FilePath filename = cache_path_.AppendASCII("f_000001"); 478 479 const int kSize = 50; 480 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); 481 CacheTestFillBuffer(buffer1->data(), kSize, false); 482 ASSERT_EQ(kSize, file_util::WriteFile(filename, buffer1->data(), kSize)); 483 484 // Now let's create a file with the cache. 
485 disk_cache::Entry* entry; 486 ASSERT_EQ(net::OK, CreateEntry("key", &entry)); 487 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false)); 488 entry->Close(); 489 490 // And verify that the first file is still there. 491 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize)); 492 ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize)); 493 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize)); 494 } 495 496 // Tests that we deal with file-level pending operations at destruction time. 497 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) { 498 ASSERT_TRUE(CleanupCacheDir()); 499 uint32 flags = disk_cache::kNoBuffering; 500 if (!fast) 501 flags |= disk_cache::kNoRandom; 502 503 UseCurrentThread(); 504 CreateBackend(flags, NULL); 505 506 net::TestCompletionCallback cb; 507 int rv = GeneratePendingIO(&cb); 508 509 // The cache destructor will see one pending operation here. 510 cache_.reset(); 511 512 if (rv == net::ERR_IO_PENDING) { 513 if (fast || simple_cache_mode_) 514 EXPECT_FALSE(cb.have_result()); 515 else 516 EXPECT_TRUE(cb.have_result()); 517 } 518 519 base::MessageLoop::current()->RunUntilIdle(); 520 521 #if !defined(OS_IOS) 522 // Wait for the actual operation to complete, or we'll keep a file handle that 523 // may cause issues later. Note that on iOS systems even though this test 524 // uses a single thread, the actual IO is posted to a worker thread and the 525 // cache destructor breaks the link to reach cb when the operation completes. 526 rv = cb.GetResult(rv); 527 #endif 528 } 529 530 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) { 531 BackendShutdownWithPendingFileIO(false); 532 } 533 534 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer 535 // builds because they contain a lot of intentional memory leaks. 536 // The wrapper scripts used to run tests under Valgrind Memcheck and 537 // Heapchecker will also disable these tests under those tools. 
// See:
// tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
// tools/heapcheck/net_unittests.gtest-heapcheck.txt
#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingFileIO(true);
}
#endif

// See crbug.com/330074
#if !defined(OS_IOS)
// Tests that one cache instance is not affected by another one going away.
TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
  // The extra backend lives in its own temp dir so the two caches never
  // share files.
  base::ScopedTempDir store;
  ASSERT_TRUE(store.CreateUniqueTempDir());

  net::TestCompletionCallback cb;
  scoped_ptr<disk_cache::Backend> extra_cache;
  int rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, store.path(), 0,
      false, base::MessageLoopProxy::current().get(), NULL,
      &extra_cache, cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  ASSERT_TRUE(extra_cache.get() != NULL);

  ASSERT_TRUE(CleanupCacheDir());
  SetNewEviction();  // Match the expected behavior for integrity verification.
  UseCurrentThread();

  CreateBackend(disk_cache::kNoBuffering, NULL);
  rv = GeneratePendingIO(&cb);

  // cache_ has a pending operation, and extra_cache will go away.
  extra_cache.reset();

  if (rv == net::ERR_IO_PENDING)
    EXPECT_FALSE(cb.have_result());

  base::MessageLoop::current()->RunUntilIdle();

  // Wait for the actual operation to complete, or we'll keep a file handle that
  // may cause issues later.
  rv = cb.GetResult(rv);
}
#endif

// Tests that we deal with background-thread pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
  net::TestCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());
    base::Thread cache_thread("CacheThread");
    ASSERT_TRUE(cache_thread.StartWithOptions(
        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

    uint32 flags = disk_cache::kNoBuffering;
    if (!fast)
      flags |= disk_cache::kNoRandom;

    CreateBackend(flags, &cache_thread);

    disk_cache::Entry* entry;
    int rv = cache_->CreateEntry("some key", &entry, cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));

    entry->Close();

    // The cache destructor will see one pending operation here.
    cache_.reset();
  }

  base::MessageLoop::current()->RunUntilIdle();
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
  BackendShutdownWithPendingIO(false);
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingIO(true);
}
#endif

// Tests that we deal with create-type pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
  net::TestCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());
    base::Thread cache_thread("CacheThread");
    ASSERT_TRUE(cache_thread.StartWithOptions(
        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

    disk_cache::BackendFlags flags =
        fast ? disk_cache::kNone : disk_cache::kNoRandom;
    CreateBackend(flags, &cache_thread);

    // The create is expected to still be in flight when the cache is torn
    // down; its callback must never fire.
    disk_cache::Entry* entry;
    int rv = cache_->CreateEntry("some key", &entry, cb.callback());
    ASSERT_EQ(net::ERR_IO_PENDING, rv);

    cache_.reset();
    EXPECT_FALSE(cb.have_result());
  }

  base::MessageLoop::current()->RunUntilIdle();
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
  BackendShutdownWithPendingCreate(false);
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking an entry from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingCreate(true);
}
#endif

// A blockfile backend must refuse to initialize over a truncated index file.
TEST_F(DiskCacheTest, TruncatedIndex) {
  ASSERT_TRUE(CleanupCacheDir());
  base::FilePath index = cache_path_.AppendASCII("index");
  ASSERT_EQ(5, file_util::WriteFile(index, "hello", 5));

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::Backend> backend;
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     cache_path_,
                                     0,
                                     false,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &backend,
                                     cb.callback());
  ASSERT_NE(net::OK, cb.GetResult(rv));

  ASSERT_FALSE(backend);
}

// Verifies max-size enforcement: oversized writes fail, raising the limit
// admits them, and shrinking the limit triggers eviction of older entries.
void DiskCacheBackendTest::BackendSetSize() {
  const int cache_size = 0x10000;  // 64 kB
  SetMaxSize(cache_size);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
  memset(buffer->data(), 0, cache_size);
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
      << "normal file";

  EXPECT_EQ(net::ERR_FAILED,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
      << "file size above the limit";

  // By doubling the total size, we make this file cacheable.
  SetMaxSize(cache_size * 2);
  EXPECT_EQ(cache_size / 5,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));

  // Let's fill up the cache!
  SetMaxSize(cache_size * 10);
  EXPECT_EQ(cache_size * 3 / 4,
            WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
  entry->Close();
  FlushQueueForTest();

  SetMaxSize(cache_size);

  // The cache is 95% full.

  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));

  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
  EXPECT_EQ(cache_size / 10,
            WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
  entry2->Close();  // This will trigger the cache trim.

  // The oldest entry should have been evicted by the trim.
  EXPECT_NE(net::OK, OpenEntry(first, &entry2));

  FlushQueueForTest();  // Make sure that we are done trimming the cache.
  FlushQueueForTest();  // We may have posted two tasks to evict stuff.

  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, SetSize) {
  BackendSetSize();
}

TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
  SetNewEviction();
  BackendSetSize();
}

TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
  SetMemoryOnlyMode();
  BackendSetSize();
}

// Creates 100 entries, re-opens them in random order, and dooms them all.
void DiskCacheBackendTest::BackendLoad() {
  InitCache();
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  disk_cache::Entry* entries[100];
  for (int i = 0; i < 100; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(100, cache_->GetEntryCount());

  // Shuffle so the access order differs from the creation order.
  for (int i = 0; i < 100; i++) {
    int source1 = rand() % 100;
    int source2 = rand() % 100;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  for (int i = 0; i < 100; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
    EXPECT_TRUE(entry == entries[i]);
    entry->Close();
    entries[i]->Doom();
    entries[i]->Close();
  }
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, Load) {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
  SetNewEviction();
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
  SetMaxSize(0x100000);
  SetMemoryOnlyMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, AppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
  SetCacheType(net::SHADER_CACHE);
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

// Tests the chaining of an entry to the current head.
void DiskCacheBackendTest::BackendChain() {
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, Chain) {
  BackendChain();
}

TEST_F(DiskCacheBackendTest, NewEvictionChain) {
  SetNewEviction();
  BackendChain();
}

TEST_F(DiskCacheBackendTest, AppCacheChain) {
  SetCacheType(net::APP_CACHE);
  BackendChain();
}

TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
  SetCacheType(net::SHADER_CACHE);
  BackendChain();
}

TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
  SetNewEviction();
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(name, &entry));
    entry->Close();
    if (i < 90) {
      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
      ASSERT_EQ(net::OK, OpenEntry(name, &entry));
      entry->Close();
    }
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double check that we still have the list tails.
  ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
  entry->Close();
}

// Before looking for invalid entries, let's check a valid entry.
void DiskCacheBackendTest::BackendValidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
  // Closing before the crash means the entry was committed cleanly.
  entry->Close();
  SimulateCrash();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_STREQ(buffer1->data(), buffer2->data());
}

TEST_F(DiskCacheBackendTest, ValidEntry) {
  BackendValidEntry();
}

TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
  SetNewEviction();
  BackendValidEntry();
}

// The same logic of the previous test (ValidEntry), but this time force the
// entry to be invalid, simulating a crash in the middle.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  // Note: no Close() before the crash, so the entry is left dirty on disk.
  SimulateCrash();

  // The dirty entry must be dropped when the cache restarts.
  EXPECT_NE(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(0, cache_->GetEntryCount());
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntry) {
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
  SetNewEviction();
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntry();
}

// Almost the same test, but this time crash the cache after reading an entry.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryRead() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  // Re-open and read so the entry is open (dirty) at crash time.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  SimulateCrash();

  if (type_ == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(1, cache_->GetEntryCount());
    entry->Close();
  } else {
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
  SetNewEviction();
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
// Creates many entries under a tiny index (lots of hash chaining), leaves
// half of them open across a crash, and verifies that exactly the closed
// half survives.
void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  // Shuffle the array so the closed/open split below is random.
  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  // Close only the first half; the rest stay open (dirty) at crash time.
  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();

  // Entries left open must be gone...
  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  // ...while properly closed entries must still open.
  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
  SetNewEviction();
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
// Verifies that a corrupt (crashed-on) entry is evicted when the cache is
// trimmed down below its size.
void DiskCacheBackendTest::BackendTrimInvalidEntry() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash.
  SimulateCrash();

  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache_->GetEntryCount());
  // Shrink the budget so closing |entry| triggers eviction.
  SetMaxSize(kSize);
  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20 ms, we have one entry in the
  // cache; if it took more than that, we posted a task and we'll delete the
  // second entry too.
  base::MessageLoop::current()->RunUntilIdle();

  // This may be not thread-safe in general, but for now it's OK so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_END();

  // The corrupt entry must be gone either way.
  EXPECT_NE(net::OK, OpenEntry(first, &entry));
}

// We'll be leaking memory from this test.
1144 TEST_F(DiskCacheBackendTest, TrimInvalidEntry) { 1145 BackendTrimInvalidEntry(); 1146 } 1147 1148 // We'll be leaking memory from this test. 1149 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) { 1150 SetNewEviction(); 1151 BackendTrimInvalidEntry(); 1152 } 1153 1154 // We'll be leaking memory from this test. 1155 void DiskCacheBackendTest::BackendTrimInvalidEntry2() { 1156 SetMask(0xf); // 16-entry table. 1157 1158 const int kSize = 0x3000; // 12 kB 1159 SetMaxSize(kSize * 40); 1160 InitCache(); 1161 1162 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); 1163 memset(buffer->data(), 0, kSize); 1164 disk_cache::Entry* entry; 1165 1166 // Writing 32 entries to this cache chains most of them. 1167 for (int i = 0; i < 32; i++) { 1168 std::string key(base::StringPrintf("some key %d", i)); 1169 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); 1170 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false)); 1171 entry->Close(); 1172 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); 1173 // Note that we are not closing the entries. 1174 } 1175 1176 // Simulate a crash. 1177 SimulateCrash(); 1178 1179 ASSERT_EQ(net::OK, CreateEntry("Something else", &entry)); 1180 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false)); 1181 1182 FlushQueueForTest(); 1183 EXPECT_EQ(33, cache_->GetEntryCount()); 1184 SetMaxSize(kSize); 1185 1186 // For the new eviction code, all corrupt entries are on the second list so 1187 // they are not going away that easy. 1188 if (new_eviction_) { 1189 EXPECT_EQ(net::OK, DoomAllEntries()); 1190 } 1191 1192 entry->Close(); // Trim the cache. 1193 FlushQueueForTest(); 1194 1195 // We may abort the eviction before cleaning up everything. 1196 base::MessageLoop::current()->RunUntilIdle(); 1197 FlushQueueForTest(); 1198 // If it's not clear enough: we may still have eviction tasks running at this 1199 // time, so the number of entries is changing while we read it. 
1200 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN(); 1201 EXPECT_GE(30, cache_->GetEntryCount()); 1202 ANNOTATE_IGNORE_READS_AND_WRITES_END(); 1203 } 1204 1205 // We'll be leaking memory from this test. 1206 TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) { 1207 BackendTrimInvalidEntry2(); 1208 } 1209 1210 // We'll be leaking memory from this test. 1211 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) { 1212 SetNewEviction(); 1213 BackendTrimInvalidEntry2(); 1214 } 1215 #endif // !defined(LEAK_SANITIZER) 1216 1217 void DiskCacheBackendTest::BackendEnumerations() { 1218 InitCache(); 1219 Time initial = Time::Now(); 1220 1221 const int kNumEntries = 100; 1222 for (int i = 0; i < kNumEntries; i++) { 1223 std::string key = GenerateKey(true); 1224 disk_cache::Entry* entry; 1225 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); 1226 entry->Close(); 1227 } 1228 EXPECT_EQ(kNumEntries, cache_->GetEntryCount()); 1229 Time final = Time::Now(); 1230 1231 disk_cache::Entry* entry; 1232 void* iter = NULL; 1233 int count = 0; 1234 Time last_modified[kNumEntries]; 1235 Time last_used[kNumEntries]; 1236 while (OpenNextEntry(&iter, &entry) == net::OK) { 1237 ASSERT_TRUE(NULL != entry); 1238 if (count < kNumEntries) { 1239 last_modified[count] = entry->GetLastModified(); 1240 last_used[count] = entry->GetLastUsed(); 1241 EXPECT_TRUE(initial <= last_modified[count]); 1242 EXPECT_TRUE(final >= last_modified[count]); 1243 } 1244 1245 entry->Close(); 1246 count++; 1247 }; 1248 EXPECT_EQ(kNumEntries, count); 1249 1250 iter = NULL; 1251 count = 0; 1252 // The previous enumeration should not have changed the timestamps. 
1253 while (OpenNextEntry(&iter, &entry) == net::OK) { 1254 ASSERT_TRUE(NULL != entry); 1255 if (count < kNumEntries) { 1256 EXPECT_TRUE(last_modified[count] == entry->GetLastModified()); 1257 EXPECT_TRUE(last_used[count] == entry->GetLastUsed()); 1258 } 1259 entry->Close(); 1260 count++; 1261 }; 1262 EXPECT_EQ(kNumEntries, count); 1263 } 1264 1265 TEST_F(DiskCacheBackendTest, Enumerations) { 1266 BackendEnumerations(); 1267 } 1268 1269 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) { 1270 SetNewEviction(); 1271 BackendEnumerations(); 1272 } 1273 1274 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) { 1275 SetMemoryOnlyMode(); 1276 BackendEnumerations(); 1277 } 1278 1279 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) { 1280 SetCacheType(net::SHADER_CACHE); 1281 BackendEnumerations(); 1282 } 1283 1284 TEST_F(DiskCacheBackendTest, AppCacheEnumerations) { 1285 SetCacheType(net::APP_CACHE); 1286 BackendEnumerations(); 1287 } 1288 1289 // Verifies enumerations while entries are open. 1290 void DiskCacheBackendTest::BackendEnumerations2() { 1291 InitCache(); 1292 const std::string first("first"); 1293 const std::string second("second"); 1294 disk_cache::Entry *entry1, *entry2; 1295 ASSERT_EQ(net::OK, CreateEntry(first, &entry1)); 1296 entry1->Close(); 1297 ASSERT_EQ(net::OK, CreateEntry(second, &entry2)); 1298 entry2->Close(); 1299 FlushQueueForTest(); 1300 1301 // Make sure that the timestamp is not the same. 1302 AddDelay(); 1303 ASSERT_EQ(net::OK, OpenEntry(second, &entry1)); 1304 void* iter = NULL; 1305 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2)); 1306 EXPECT_EQ(entry2->GetKey(), second); 1307 1308 // Two entries and the iterator pointing at "first". 1309 entry1->Close(); 1310 entry2->Close(); 1311 1312 // The iterator should still be valid, so we should not crash. 
1313 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2)); 1314 EXPECT_EQ(entry2->GetKey(), first); 1315 entry2->Close(); 1316 cache_->EndEnumeration(&iter); 1317 1318 // Modify the oldest entry and get the newest element. 1319 ASSERT_EQ(net::OK, OpenEntry(first, &entry1)); 1320 EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false)); 1321 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2)); 1322 if (type_ == net::APP_CACHE) { 1323 // The list is not updated. 1324 EXPECT_EQ(entry2->GetKey(), second); 1325 } else { 1326 EXPECT_EQ(entry2->GetKey(), first); 1327 } 1328 1329 entry1->Close(); 1330 entry2->Close(); 1331 cache_->EndEnumeration(&iter); 1332 } 1333 1334 TEST_F(DiskCacheBackendTest, Enumerations2) { 1335 BackendEnumerations2(); 1336 } 1337 1338 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) { 1339 SetNewEviction(); 1340 BackendEnumerations2(); 1341 } 1342 1343 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) { 1344 SetMemoryOnlyMode(); 1345 BackendEnumerations2(); 1346 } 1347 1348 TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) { 1349 SetCacheType(net::APP_CACHE); 1350 BackendEnumerations2(); 1351 } 1352 1353 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) { 1354 SetCacheType(net::SHADER_CACHE); 1355 BackendEnumerations2(); 1356 } 1357 1358 // Verify that ReadData calls do not update the LRU cache 1359 // when using the SHADER_CACHE type. 
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));

  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  entry2->Close();

  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();

  // Read from the last item in the LRU.
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
  entry1->Close();

  // If the read had touched the LRU, |first| would now be the newest entry;
  // for SHADER_CACHE the enumeration must still return |second| first.
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), second);
  entry2->Close();
  cache_->EndEnumeration(&iter);
}

#if !defined(LEAK_SANITIZER)
// Verify handling of invalid entries while doing enumerations.
// We'll be leaking memory from this test.
1396 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() { 1397 InitCache(); 1398 1399 std::string key("Some key"); 1400 disk_cache::Entry *entry, *entry1, *entry2; 1401 ASSERT_EQ(net::OK, CreateEntry(key, &entry1)); 1402 1403 const int kSize = 50; 1404 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); 1405 memset(buffer1->data(), 0, kSize); 1406 base::strlcpy(buffer1->data(), "And the data to save", kSize); 1407 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false)); 1408 entry1->Close(); 1409 ASSERT_EQ(net::OK, OpenEntry(key, &entry1)); 1410 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize)); 1411 1412 std::string key2("Another key"); 1413 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2)); 1414 entry2->Close(); 1415 ASSERT_EQ(2, cache_->GetEntryCount()); 1416 1417 SimulateCrash(); 1418 1419 void* iter = NULL; 1420 int count = 0; 1421 while (OpenNextEntry(&iter, &entry) == net::OK) { 1422 ASSERT_TRUE(NULL != entry); 1423 EXPECT_EQ(key2, entry->GetKey()); 1424 entry->Close(); 1425 count++; 1426 }; 1427 EXPECT_EQ(1, count); 1428 EXPECT_EQ(1, cache_->GetEntryCount()); 1429 } 1430 1431 // We'll be leaking memory from this test. 1432 TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) { 1433 BackendInvalidEntryEnumeration(); 1434 } 1435 1436 // We'll be leaking memory from this test. 1437 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) { 1438 SetNewEviction(); 1439 BackendInvalidEntryEnumeration(); 1440 } 1441 #endif // !defined(LEAK_SANITIZER) 1442 1443 // Tests that if for some reason entries are modified close to existing cache 1444 // iterators, we don't generate fatal errors or reset the cache. 
void DiskCacheBackendTest::BackendFixEnumerators() {
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  disk_cache::Entry *entry1, *entry2;
  void* iter1 = NULL;
  void* iter2 = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  // Let's go to the middle of the list.
  for (int i = 0; i < kNumEntries / 2; i++) {
    if (entry1)
      entry1->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
    ASSERT_TRUE(NULL != entry1);

    ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
    ASSERT_TRUE(NULL != entry2);
    entry2->Close();
  }

  // Messing up with entry1 will modify entry2->next.
  entry1->Doom();
  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);

  // The link entry2->entry1 should be broken.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
  entry1->Close();
  entry2->Close();

  // And the second iterator should keep working.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);
  entry2->Close();

  cache_->EndEnumeration(&iter1);
  cache_->EndEnumeration(&iter2);
}

TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();
}

TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  SetNewEviction();
  BackendFixEnumerators();
}

// Creates four entries with a timestamp gap in the middle and verifies that
// DoomEntriesSince only removes entries created after the given time.
void DiskCacheBackendTest::BackendDoomRecent() {
  InitCache();

  disk_cache::Entry *entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  // Nothing was created after |final|, so this is a no-op.
  EXPECT_EQ(net::OK, DoomEntriesSince(final));
  ASSERT_EQ(4, cache_->GetEntryCount());

  // "third" and "fourth" were created after |middle|.
  EXPECT_EQ(net::OK, DoomEntriesSince(middle));
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("second", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  SetNewEviction();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  base::Time start;
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  base::Time start;
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. That's why the expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

// Creates four entries across three time windows and verifies that
// DoomEntriesBetween removes exactly the entries whose last-used time falls
// inside the given [start, end) window.
void DiskCacheBackendTest::BackendDoomBetween() {
  InitCache();

  disk_cache::Entry *entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_start = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_end = Time::Now();
  AddDelay();

  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  // Kills "second" and "third" only.
  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
  entry->Close();

  // Now the window also covers "fourth"; only "first" remains.
  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
  ASSERT_EQ(1, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("first", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  SetNewEviction();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  // BackendImpl counts sparse child entries, hence the larger totals than
  // the MemoryOnly variant above.
  EXPECT_EQ(9, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
}

// Loads the pre-recorded test cache |name|, verifies that recovery leaves
// |num_entries| usable entries (minus the known dirty ones), then checks
// on-disk integrity.  |load| selects the tiny-index/heavy-load variant.
// Sets |success_| so callers can attach the failing cache name to asserts.
void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries, bool load) {
  success_ = false;
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  uint32 mask;
  if (load) {
    mask = 0xf;
    SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    SetMaxSize(0);
  }
  SetMask(mask);

  InitCache();
  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  // "the first key" is the entry that was mid-transaction; it must be gone.
  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  cache_.reset();
  cache_impl_ = NULL;

  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
  success_ = true;
}

// Exercises recovery from caches captured in the middle of an insert.
void DiskCacheBackendTest::BackendRecoverInsert() {
  // Tests with an empty cache.
  BackendTransaction("insert_empty1", 0, false);
  ASSERT_TRUE(success_) << "insert_empty1";
  BackendTransaction("insert_empty2", 0, false);
  ASSERT_TRUE(success_) << "insert_empty2";
  BackendTransaction("insert_empty3", 0, false);
  ASSERT_TRUE(success_) << "insert_empty3";

  // Tests with one entry on the cache.
  BackendTransaction("insert_one1", 1, false);
  ASSERT_TRUE(success_) << "insert_one1";
  BackendTransaction("insert_one2", 1, false);
  ASSERT_TRUE(success_) << "insert_one2";
  BackendTransaction("insert_one3", 1, false);
  ASSERT_TRUE(success_) << "insert_one3";

  // Tests with one hundred entries on the cache, tiny index.
  BackendTransaction("insert_load1", 100, true);
  ASSERT_TRUE(success_) << "insert_load1";
  BackendTransaction("insert_load2", 100, true);
  ASSERT_TRUE(success_) << "insert_load2";
}

TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  SetNewEviction();
  BackendRecoverInsert();
}

// Exercises recovery from caches captured in the middle of a removal.
void DiskCacheBackendTest::BackendRecoverRemove() {
  // Removing the only element.
  BackendTransaction("remove_one1", 0, false);
  ASSERT_TRUE(success_) << "remove_one1";
  BackendTransaction("remove_one2", 0, false);
  ASSERT_TRUE(success_) << "remove_one2";
  BackendTransaction("remove_one3", 0, false);
  ASSERT_TRUE(success_) << "remove_one3";

  // Removing the head.
  BackendTransaction("remove_head1", 1, false);
  ASSERT_TRUE(success_) << "remove_head1";
  BackendTransaction("remove_head2", 1, false);
  ASSERT_TRUE(success_) << "remove_head2";
  BackendTransaction("remove_head3", 1, false);
  ASSERT_TRUE(success_) << "remove_head3";

  // Removing the tail.
  BackendTransaction("remove_tail1", 1, false);
  ASSERT_TRUE(success_) << "remove_tail1";
  BackendTransaction("remove_tail2", 1, false);
  ASSERT_TRUE(success_) << "remove_tail2";
  BackendTransaction("remove_tail3", 1, false);
  ASSERT_TRUE(success_) << "remove_tail3";

  // Removing with one hundred entries on the cache, tiny index.
  BackendTransaction("remove_load1", 100, true);
  ASSERT_TRUE(success_) << "remove_load1";
  BackendTransaction("remove_load2", 100, true);
  ASSERT_TRUE(success_) << "remove_load2";
  BackendTransaction("remove_load3", 100, true);
  ASSERT_TRUE(success_) << "remove_load3";

  // This case cannot be reverted.
  BackendTransaction("remove_one4", 0, false);
  ASSERT_TRUE(success_) << "remove_one4";
  BackendTransaction("remove_head4", 1, false);
  ASSERT_TRUE(success_) << "remove_head4";
}

TEST_F(DiskCacheBackendTest, RecoverRemove) {
  BackendRecoverRemove();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverRemove) {
  SetNewEviction();
  BackendRecoverRemove();
}

// Recovery of a cache small enough that initialization must also evict.
void DiskCacheBackendTest::BackendRecoverWithEviction() {
  success_ = false;
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  SetMask(0xf);
  SetMaxSize(0x1000);

  // We should not crash here.
  InitCache();
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
  BackendRecoverWithEviction();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  SetNewEviction();
  BackendRecoverWithEviction();
}

// Tests that the |BackendImpl| fails to start with the wrong cache version.
TEST_F(DiskCacheTest, WrongVersion) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path_, cache_thread.message_loop_proxy().get(), NULL));
  int rv = cache->Init(cb.callback());
  ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
}

// Entropy provider that returns a fixed value (0.5) so field trials created
// in these tests deterministically land in a chosen group.
class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
 public:
  virtual ~BadEntropyProvider() {}

  virtual double GetEntropyForTrial(const std::string& trial_name,
                                    uint32 randomization_seed) const OVERRIDE {
    return 0.5;
  }
};

// Tests that the disk cache successfully joins the control group, dropping the
// existing cache in favour of a new empty cache.
TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  // Seed the cache directory with one pre-existing entry.
  scoped_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_thread, cache_path_);
  ASSERT_TRUE(cache.get());
  cache.reset();

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");
  net::TestCompletionCallback cb;
  scoped_ptr<disk_cache::Backend> base_cache;
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     cache_path_,
                                     0,
                                     true,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &base_cache,
                                     cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  // Joining the control group drops the pre-existing cache: it starts empty.
  EXPECT_EQ(0, base_cache->GetEntryCount());
}

// Tests that the disk cache can restart in the control group preserving
// existing entries.
TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  scoped_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_thread, cache_path_);
  ASSERT_TRUE(cache.get());

  net::TestCompletionCallback cb;

  // Re-initialize the backend repeatedly; the seeded entry must survive
  // every restart.
  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    cache.reset(new disk_cache::BackendImpl(
        cache_path_, cache_thread.message_loop_proxy(), NULL));
    int rv = cache->Init(cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_EQ(1, cache->GetEntryCount());

    disk_cache::Entry* entry = NULL;
    rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
    EXPECT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_TRUE(entry);
    entry->Close();
  }
}

// Tests that the disk cache can leave the control group preserving existing
// entries.
TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  {
    // Instantiate the SimpleCacheTrial, forcing this run into the
    // ExperimentControl group.
    base::FieldTrialList field_trial_list(new BadEntropyProvider());
    base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                           "ExperimentControl");

    scoped_ptr<disk_cache::BackendImpl> cache =
        CreateExistingEntryCache(cache_thread, cache_path_);
    ASSERT_TRUE(cache.get());
  }

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentNo group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
  net::TestCompletionCallback cb;

  // The entry created while in the control group must stay readable across
  // repeated restarts outside the control group.
  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
        cache_path_, cache_thread.message_loop_proxy(), NULL));
    int rv = cache->Init(cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_EQ(1, cache->GetEntryCount());

    disk_cache::Entry* entry = NULL;
    rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
    EXPECT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_TRUE(entry);
    entry->Close();
  }
}

// Tests that the cache is properly restarted on recovery error.
TEST_F(DiskCacheBackendTest, DeleteOld) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  SetNewEviction();
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  net::TestCompletionCallback cb;
  // The restart must happen on the cache thread; the calling thread is not
  // allowed to do IO while the backend is being created.
  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  base::FilePath path(cache_path_);
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     path,
                                     0,
                                     true,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &cache_,
                                     cb.callback());
  path.clear();  // Make sure path was captured by the previous call.
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);
  cache_.reset();
  EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
}

// We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidEntry2() {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  // The corrupt entry must fail to open without taking down the backend.
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  entry1->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry2) {
  BackendInvalidEntry2();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
  SetNewEviction();
  BackendInvalidEntry2();
}

// Tests that we don't crash or hang when enumerating this cache.
void DiskCacheBackendTest::BackendInvalidEntry3() {
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Walking the whole enumeration is the test: it must terminate without
  // crashing on the dirty entries the caller copied into the cache dir.
  disk_cache::Entry* entry;
  void* iter = NULL;
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    entry->Close();
  }
}

TEST_F(DiskCacheBackendTest, InvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  BackendInvalidEntry3();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  BackendInvalidEntry3();
  DisableIntegrityCheck();
}

// Test that we handle a dirty entry on the LRU list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  TrimForTest(false);
}

// Test that we handle a dirty entry on the deleted list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  TrimDeletedListForTest(false);
}

TEST_F(DiskCacheBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.

  TrimForTest(false);
  // The cache should be clean (as detected by CheckCacheIntegrity).
}

// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry1) {
  ASSERT_TRUE(CopyTestCache("list_loop2"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // The second entry points at itself, and the first entry is not accessible
  // through the index, but it is at the head of the LRU.

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  entry->Close();

  TrimForTest(false);
  TrimForTest(false);
  ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
  entry->Close();
  EXPECT_EQ(1, cache_->GetEntryCount());
}

// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry2) {
  ASSERT_TRUE(CopyTestCache("list_loop3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a wide loop of 5 entries.

  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();

  // The second entry is dirty, but removing it should not corrupt the list.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));

  // This should not delete the cache.
  entry->Doom();
  FlushQueueForTest();
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
  entry->Close();
}

// Tests handling of corrupt entries by keeping the rankings node around, with
// a fatal failure.
void DiskCacheBackendTest::BackendInvalidEntry7() {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry: zero out the rankings node's next pointer.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->rankings()->Data()->next = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should delete the cache. The list still has a corrupt node.
  void* iter = NULL;
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, InvalidEntry7) {
  BackendInvalidEntry7();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
  SetNewEviction();
  BackendInvalidEntry7();
}

// Tests handling of corrupt entries by keeping the rankings node around, with
// a non fatal failure.
void DiskCacheBackendTest::BackendInvalidEntry8() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry: zero out the rankings node's contents pointer.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->rankings()->Data()->contents = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should not delete the cache.
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  entry->Close();
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, InvalidEntry8) {
  BackendInvalidEntry8();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
  SetNewEviction();
  BackendInvalidEntry8();
}

// Tests handling of corrupt entries detected by enumerations. Note that these
// tests (xx9 to xx11) are basically just going through slightly different
// codepaths so they are tightly coupled with the code, but that is better than
// not testing error handling code.
void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry: write a bogus state value to its on-disk record.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  if (eviction) {
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    void* iter = NULL;
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

    // Now a full iteration will work, and return one entry.
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

    // This should detect what's left of the bad entry.
    EXPECT_NE(net::OK, OpenEntry(second, &entry));
    EXPECT_EQ(2, cache_->GetEntryCount());
  }
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry9) {
  BackendInvalidEntry9(false);
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
  BackendInvalidEntry9(true);
}

TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(true);
}

// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  // Re-open and touch |first| so it moves to a different eviction list.
  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third -> second (bad).
  // List 1: first.

  if (eviction) {
    // Detection order: second -> first -> third.
    TrimForTest(false);
    EXPECT_EQ(3, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second -> first.
    // We should detect the problem through the list, but we should not delete
    // the entry.
    void* iter = NULL;
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    EXPECT_EQ(first, entry->GetKey());
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  }
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry10) {
  BackendInvalidEntry10(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
  BackendInvalidEntry10(true);
}

// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  // Touch both entries so they land on the second eviction list.
  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));

  // Corrupt this entry: write a bogus state value to its on-disk record.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third.
  // List 1: second (bad) -> first.

  if (eviction) {
    // Detection order: third -> first -> second.
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second.
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    void* iter = NULL;
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

    // Now a full iteration will work, and return two entries.
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  }
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry11) {
  BackendInvalidEntry11(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
  BackendInvalidEntry11(true);
}

// Tests handling of corrupt entries in the middle of a long eviction run.
void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  // Empty the cache in one pass; only the still-open entry survives.
  TrimForTest(true);
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry->Close();
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
  BackendTrimInvalidEntry12();
}

TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
  SetNewEviction();
  BackendTrimInvalidEntry12();
}

// We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidRankings2() {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  // The entry with broken rankings fails to open; the other still works.
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
  entry2->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidRankings2) {
  BackendInvalidRankings2();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
  SetNewEviction();
  BackendInvalidRankings2();
}

// If the LRU is corrupt, we delete the cache.
// Callers must have initialized the cache from the "bad_rankings" fixture.
void DiskCacheBackendTest::BackendInvalidRankings() {
  disk_cache::Entry* entry;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  entry->Close();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // Hitting the corrupt LRU node fails the enumeration and restarts the cache.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  FlushQueueForTest();  // Allow the restart to finish.
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}

// If the LRU is corrupt and we have open entries, we disable the cache.
// Callers must have initialized the cache from the "bad_rankings" fixture.
void DiskCacheBackendTest::BackendDisable() {
  disk_cache::Entry *entry1, *entry2;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));

  // The corrupt LRU is detected while |entry1| is still open, so the backend
  // is disabled (not restarted): no counts, no new entries.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(0, cache_->GetEntryCount());
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));

  entry1->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, DisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}

// This is another type of corruption on the LRU; disable the cache.
2568 void DiskCacheBackendTest::BackendDisable2() { 2569 EXPECT_EQ(8, cache_->GetEntryCount()); 2570 2571 disk_cache::Entry* entry; 2572 void* iter = NULL; 2573 int count = 0; 2574 while (OpenNextEntry(&iter, &entry) == net::OK) { 2575 ASSERT_TRUE(NULL != entry); 2576 entry->Close(); 2577 count++; 2578 ASSERT_LT(count, 9); 2579 }; 2580 2581 FlushQueueForTest(); 2582 EXPECT_EQ(0, cache_->GetEntryCount()); 2583 } 2584 2585 TEST_F(DiskCacheBackendTest, DisableSuccess2) { 2586 ASSERT_TRUE(CopyTestCache("list_loop")); 2587 DisableFirstCleanup(); 2588 InitCache(); 2589 BackendDisable2(); 2590 } 2591 2592 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) { 2593 ASSERT_TRUE(CopyTestCache("list_loop")); 2594 DisableFirstCleanup(); 2595 SetNewEviction(); 2596 InitCache(); 2597 BackendDisable2(); 2598 } 2599 2600 TEST_F(DiskCacheBackendTest, DisableFailure2) { 2601 ASSERT_TRUE(CopyTestCache("list_loop")); 2602 DisableFirstCleanup(); 2603 InitCache(); 2604 SetTestMode(); // Fail cache reinitialization. 2605 BackendDisable2(); 2606 } 2607 2608 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) { 2609 ASSERT_TRUE(CopyTestCache("list_loop")); 2610 DisableFirstCleanup(); 2611 SetNewEviction(); 2612 InitCache(); 2613 SetTestMode(); // Fail cache reinitialization. 2614 BackendDisable2(); 2615 } 2616 2617 // If the index size changes when we disable the cache, we should not crash. 
void DiskCacheBackendTest::BackendDisable3() {
  disk_cache::Entry *entry1, *entry2;
  void* iter = NULL;
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
  entry1->Close();

  // Enumeration hits the corruption and the cache restarts (possibly with a
  // different index size); further operations must still work.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
  FlushQueueForTest();

  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
  entry2->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDisable3();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDisable3();
}

// If we disable the cache, already open entries should work as far as possible.
void DiskCacheBackendTest::BackendDisable4() {
  disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));

  // Build one short (block-file) key and one long (external-file) key.
  char key2[2000];
  char key3[20000];
  CacheTestFillBuffer(key2, sizeof(key2), true);
  CacheTestFillBuffer(key3, sizeof(key3), true);
  key2[sizeof(key2) - 1] = '\0';
  key3[sizeof(key3) - 1] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
  ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));

  const int kBufSize = 20000;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
  memset(buf->data(), 0, kBufSize);
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));

  // This line should disable the cache but not delete it.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
  EXPECT_EQ(0, cache_->GetEntryCount());

  EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));

  // Entries that were already open keep working for reads and writes.
  EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));

  EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));

  std::string key = entry2->GetKey();
  EXPECT_EQ(sizeof(key2) - 1, key.size());
  key = entry3->GetKey();
  EXPECT_EQ(sizeof(key3) - 1, key.size());

  entry1->Close();
  entry2->Close();
  entry3->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable4();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable4();
}

TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
  MessageLoopHelper helper;

  ASSERT_TRUE(CleanupCacheDir());
  scoped_ptr<disk_cache::BackendImpl> cache;
  cache.reset(new disk_cache::BackendImpl(
      cache_path_, base::MessageLoopProxy::current().get(), NULL));
  ASSERT_TRUE(NULL != cache.get());
  cache->SetUnitTestMode();
  ASSERT_EQ(net::OK, cache->SyncInit());

  // Wait for a callback that never comes... about 2 secs :). The message loop
  // has to run to allow invocation of the usage timer.
  helper.WaitUntilCacheIoFinished(1);
}

TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
  InitCache();
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  entry->Close();
  FlushQueueForTest();

  disk_cache::StatsItems stats;
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  disk_cache::StatsItems::value_type hits("Create hit", "0x1");
  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));

  cache_.reset();

  // Now open the cache and verify that the stats are still there.
  DisableFirstCleanup();
  InitCache();
  EXPECT_EQ(1, cache_->GetEntryCount());

  stats.clear();
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
}

// Dooming all entries must work with open entries, with a doomed-but-open
// entry, and with all references released.
void DiskCacheBackendTest::BackendDoomAll() {
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
  entry1->Close();
  entry2->Close();

  ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // We should stop posting tasks at some point (if we post any).
  base::MessageLoop::current()->RunUntilIdle();

  disk_cache::Entry *entry3, *entry4;
  EXPECT_NE(net::OK, OpenEntry("third", &entry3));
  ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));

  EXPECT_EQ(net::OK, DoomAllEntries());
  ASSERT_EQ(0, cache_->GetEntryCount());

  entry1->Close();
  entry2->Close();
  entry3->Doom();  // The entry should be already doomed, but this must work.
  entry3->Close();
  entry4->Close();

  // Now try with all references released.
  ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
  entry1->Close();
  entry2->Close();

  ASSERT_EQ(2, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // Dooming an already-empty cache must also succeed.
  EXPECT_EQ(net::OK, DoomAllEntries());
}

TEST_F(DiskCacheBackendTest, DoomAll) {
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
  SetNewEviction();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
  SetMemoryOnlyMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomAll();
}

// If the index size changes when we doom the cache, we should not crash.
void DiskCacheBackendTest::BackendDoomAll2() {
  EXPECT_EQ(2, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
  entry->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDoomAll2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDoomAll2();
}

// We should be able to create the same entry on multiple simultaneous instances
// of the cache.
TEST_F(DiskCacheTest, MultipleInstances) {
  base::ScopedTempDir store1, store2;
  ASSERT_TRUE(store1.CreateUniqueTempDir());
  ASSERT_TRUE(store2.CreateUniqueTempDir());

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  const int kNumberOfCaches = 2;
  scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];

  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_DEFAULT,
                                     store1.path(),
                                     0,
                                     false,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &cache[0],
                                     cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
                                      net::CACHE_BACKEND_DEFAULT,
                                      store2.path(),
                                      0,
                                      false,
                                      cache_thread.message_loop_proxy().get(),
                                      NULL,
                                      &cache[1],
                                      cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));

  ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);

  std::string key("the first key");
  disk_cache::Entry* entry;
  // The same key must be creatable independently in each instance.
  for (int i = 0; i < kNumberOfCaches; i++) {
    rv = cache[i]->CreateEntry(key, &entry, cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    entry->Close();
  }
}

// Test the six regions of the curve that determines the max cache size.
TEST_F(DiskCacheTest, AutomaticMaxSize) {
  using disk_cache::kDefaultCacheSize;
  int64 large_size = kDefaultCacheSize;

  // Region 1: expected = available * 0.8
  EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
            disk_cache::PreferredCacheSize(large_size - 1));
  EXPECT_EQ(kDefaultCacheSize * 8 / 10,
            disk_cache::PreferredCacheSize(large_size));
  EXPECT_EQ(kDefaultCacheSize - 1,
            disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));

  // Region 2: expected = default_size
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 / 8));
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 - 1));

  // Region 3: expected = available * 0.1
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10));
  EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
            disk_cache::PreferredCacheSize(large_size * 25 - 1));

  // Region 4: expected = default_size * 2.5
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 25));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100 - 1));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250 - 1));

  // Region 5: expected = available * 0.1
  int64 largest_size = kDefaultCacheSize * 4;
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250));
  EXPECT_EQ(largest_size - 1,
            disk_cache::PreferredCacheSize(largest_size * 100 - 1));

  // Region 6: expected = largest possible size
  EXPECT_EQ(largest_size,
            disk_cache::PreferredCacheSize(largest_size * 100));
  EXPECT_EQ(largest_size,
            disk_cache::PreferredCacheSize(largest_size * 10000));
}

// Tests that we can "migrate" a running instance from one experiment group to
// another.
TEST_F(DiskCacheBackendTest, Histograms) {
  InitCache();
  disk_cache::BackendImpl* backend_ = cache_impl_;  // Needed by the macro.

  for (int i = 1; i < 3; i++) {
    CACHE_UMA(HOURS, "FillupTime", i, 28);
  }
}

// Make sure that we keep the total memory used by the internal buffers under
// control.
TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, true);

  for (int i = 0; i < 10; i++) {
    SCOPED_TRACE(i);
    // Allocate 2MB for this entry.
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize,
              WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
    EXPECT_EQ(kSize,
              WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));

    // Delete one of the buffers and truncate the other.
    EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
    EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));

    // Delete the second buffer, writing 10 bytes to disk.
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  }

  entry->Close();
  EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
}

// This test assumes at least 150MB of system memory.
TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
  InitCache();

  const int kOneMB = 1024 * 1024;
  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());

  cache_impl_->BufferDeleted(kOneMB);
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  // Check the upper limit.
  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));

  for (int i = 0; i < 30; i++)
    cache_impl_->IsAllocAllowed(0, kOneMB);  // Ignore the result.

  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
}

// Tests that sharing of external files works and we are able to delete the
// files when we need to.
TEST_F(DiskCacheBackendTest, FileSharing) {
  InitCache();

  disk_cache::Addr address(0x80000001);
  ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
  base::FilePath name = cache_impl_->GetFileName(address);

  scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
  file->Init(name);

#if defined(OS_WIN)
  // On Windows, the file must be open with delete-sharing for the deletion
  // below to succeed while it is in use.
  DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
  DWORD access = GENERIC_READ | GENERIC_WRITE;
  base::win::ScopedHandle file2(CreateFile(
      name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
  EXPECT_FALSE(file2.IsValid());

  sharing |= FILE_SHARE_DELETE;
  file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
                       OPEN_EXISTING, 0, NULL));
  EXPECT_TRUE(file2.IsValid());
#endif

  EXPECT_TRUE(base::DeleteFile(name, false));

  // We should be able to use the file.
  const int kSize = 200;
  char buffer1[kSize];
  char buffer2[kSize];
  memset(buffer1, 't', kSize);
  memset(buffer2, 0, kSize);
  EXPECT_TRUE(file->Write(buffer1, kSize, 0));
  EXPECT_TRUE(file->Read(buffer2, kSize, 0));
  EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));

  EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
}

TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }

  // Ping the oldest entry.
  cache_->OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }

  // Ping the oldest entry.
  cache_->OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
  entry->Close();
}

// Basic open/create operations must work through the tracing wrapper backend.
void DiskCacheBackendTest::TracingBackendBasics() {
  InitCache();
  cache_.reset(new disk_cache::TracingCacheBackend(cache_.Pass()));
  cache_impl_ = NULL;
  EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
  if (!simple_cache_mode_) {
    EXPECT_EQ(0, cache_->GetEntryCount());
  }

  net::TestCompletionCallback cb;
  disk_cache::Entry* entry = NULL;
  EXPECT_NE(net::OK, OpenEntry("key", &entry));
  EXPECT_TRUE(NULL == entry);

  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  EXPECT_TRUE(NULL != entry);

  disk_cache::Entry* same_entry = NULL;
  ASSERT_EQ(net::OK, OpenEntry("key", &same_entry));
  EXPECT_TRUE(NULL != same_entry);

  if (!simple_cache_mode_) {
    EXPECT_EQ(1, cache_->GetEntryCount());
  }
  entry->Close();
  entry = NULL;
  same_entry->Close();
  same_entry = NULL;
}

TEST_F(DiskCacheBackendTest, TracingBackendBasics) {
  TracingBackendBasics();
}

// The Simple Cache backend requires a few guarantees from the filesystem like
// atomic renaming of recently open files. Those guarantees are not provided in
// general on Windows.
3149 #if defined(OS_POSIX) 3150 3151 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) { 3152 SetCacheType(net::APP_CACHE); 3153 SetSimpleCacheMode(); 3154 BackendShutdownWithPendingCreate(false); 3155 } 3156 3157 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) { 3158 SetCacheType(net::APP_CACHE); 3159 SetSimpleCacheMode(); 3160 BackendShutdownWithPendingFileIO(false); 3161 } 3162 3163 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) { 3164 SetSimpleCacheMode(); 3165 BackendBasics(); 3166 } 3167 3168 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) { 3169 SetCacheType(net::APP_CACHE); 3170 SetSimpleCacheMode(); 3171 BackendBasics(); 3172 } 3173 3174 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) { 3175 SetSimpleCacheMode(); 3176 BackendKeying(); 3177 } 3178 3179 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) { 3180 SetSimpleCacheMode(); 3181 SetCacheType(net::APP_CACHE); 3182 BackendKeying(); 3183 } 3184 3185 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) { 3186 SetSimpleCacheMode(); 3187 BackendSetSize(); 3188 } 3189 3190 // MacOS has a default open file limit of 256 files, which is incompatible with 3191 // this simple cache test. 
// Expands to a DISABLED_ test name on MacOS, where the default 256 open-file
// limit makes these heavy-load tests infeasible; elsewhere it is a no-op.
#if defined(OS_MACOSX)
#define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
#else
#define SIMPLE_MAYBE_MACOS(TestName) TestName
#endif

TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
  // Small max size forces eviction churn during the load test.
  SetMaxSize(0x100000);
  SetSimpleCacheMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
  SetSimpleCacheMode();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, SimpleDoomBetween) {
  SetSimpleCacheMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheTracingBackendBasics) {
  SetSimpleCacheMode();
  TracingBackendBasics();
  // TODO(pasko): implement integrity checking on the Simple Backend.
  DisableIntegrityCheck();
}

// Checks that opening an entry whose backing file was deleted out from under
// the cache fails, and that the failure cleans up the entry's other files.
TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
  SetSimpleCacheMode();
  InitCache();

  const char* key = "the first key";
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  ASSERT_TRUE(entry != NULL);
  entry->Close();
  entry = NULL;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ASSERT_TRUE(entry != NULL);
  entry->Close();
  entry = NULL;

  // Delete one of the files in the entry.
  base::FilePath to_delete_file = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(to_delete_file));
  EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));

  // Failing to open the entry should delete the rest of these files.
  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));

  // Confirm the rest of the files are gone.
  for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
    base::FilePath should_be_gone_file(cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
    EXPECT_FALSE(base::PathExists(should_be_gone_file));
  }
}

// Checks that an entry whose on-disk header has been corrupted cannot be
// reopened.
TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
  SetSimpleCacheMode();
  InitCache();

  const char* key = "the first key";
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  disk_cache::Entry* null = NULL;
  ASSERT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ASSERT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // Write an invalid header for stream 0 and stream 1.
  // NOTE(review): file index 0 presumably holds both streams in the simple
  // entry format — confirm against simple_entry_format.h.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));

  disk_cache::SimpleFileHeader header;
  // Deliberately bogus magic so the header validation rejects the file.
  header.initial_magic_number = GG_UINT64_C(0xbadf00d);
  EXPECT_EQ(
      implicit_cast<int>(sizeof(header)),
      file_util::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
                           sizeof(header)));
  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
}

// Tests that the Simple Cache Backend fails to initialize with non-matching
// file structure on disk.
3310 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) { 3311 // Create a cache structure with the |BackendImpl|. 3312 InitCache(); 3313 disk_cache::Entry* entry; 3314 const int kSize = 50; 3315 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); 3316 CacheTestFillBuffer(buffer->data(), kSize, false); 3317 ASSERT_EQ(net::OK, CreateEntry("key", &entry)); 3318 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false)); 3319 entry->Close(); 3320 cache_.reset(); 3321 3322 // Check that the |SimpleBackendImpl| does not favor this structure. 3323 base::Thread cache_thread("CacheThread"); 3324 ASSERT_TRUE(cache_thread.StartWithOptions( 3325 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); 3326 disk_cache::SimpleBackendImpl* simple_cache = 3327 new disk_cache::SimpleBackendImpl(cache_path_, 3328 0, 3329 net::DISK_CACHE, 3330 cache_thread.message_loop_proxy().get(), 3331 NULL); 3332 net::TestCompletionCallback cb; 3333 int rv = simple_cache->Init(cb.callback()); 3334 EXPECT_NE(net::OK, cb.GetResult(rv)); 3335 delete simple_cache; 3336 DisableIntegrityCheck(); 3337 } 3338 3339 // Tests that the |BackendImpl| refuses to initialize on top of the files 3340 // generated by the Simple Cache Backend. 3341 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) { 3342 // Create a cache structure with the |SimpleBackendImpl|. 3343 SetSimpleCacheMode(); 3344 InitCache(); 3345 disk_cache::Entry* entry; 3346 const int kSize = 50; 3347 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); 3348 CacheTestFillBuffer(buffer->data(), kSize, false); 3349 ASSERT_EQ(net::OK, CreateEntry("key", &entry)); 3350 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false)); 3351 entry->Close(); 3352 cache_.reset(); 3353 3354 // Check that the |BackendImpl| does not favor this structure. 
3355 base::Thread cache_thread("CacheThread"); 3356 ASSERT_TRUE(cache_thread.StartWithOptions( 3357 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); 3358 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl( 3359 cache_path_, base::MessageLoopProxy::current().get(), NULL); 3360 cache->SetUnitTestMode(); 3361 net::TestCompletionCallback cb; 3362 int rv = cache->Init(cb.callback()); 3363 EXPECT_NE(net::OK, cb.GetResult(rv)); 3364 delete cache; 3365 DisableIntegrityCheck(); 3366 } 3367 3368 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) { 3369 SetSimpleCacheMode(); 3370 BackendFixEnumerators(); 3371 } 3372 3373 // Tests basic functionality of the SimpleBackend implementation of the 3374 // enumeration API. 3375 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) { 3376 SetSimpleCacheMode(); 3377 InitCache(); 3378 std::set<std::string> key_pool; 3379 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool)); 3380 3381 // Check that enumeration returns all entries. 3382 std::set<std::string> keys_to_match(key_pool); 3383 void* iter = NULL; 3384 size_t count = 0; 3385 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); 3386 cache_->EndEnumeration(&iter); 3387 EXPECT_EQ(key_pool.size(), count); 3388 EXPECT_TRUE(keys_to_match.empty()); 3389 3390 // Check that opening entries does not affect enumeration. 
3391 keys_to_match = key_pool; 3392 iter = NULL; 3393 count = 0; 3394 disk_cache::Entry* entry_opened_before; 3395 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before)); 3396 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2, 3397 &iter, 3398 &keys_to_match, 3399 &count)); 3400 3401 disk_cache::Entry* entry_opened_middle; 3402 ASSERT_EQ(net::OK, 3403 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle)); 3404 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); 3405 cache_->EndEnumeration(&iter); 3406 entry_opened_before->Close(); 3407 entry_opened_middle->Close(); 3408 3409 EXPECT_EQ(key_pool.size(), count); 3410 EXPECT_TRUE(keys_to_match.empty()); 3411 } 3412 3413 // Tests that the enumerations are not affected by dooming an entry in the 3414 // middle. 3415 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) { 3416 SetSimpleCacheMode(); 3417 InitCache(); 3418 std::set<std::string> key_pool; 3419 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool)); 3420 3421 // Check that enumeration returns all entries but the doomed one. 3422 std::set<std::string> keys_to_match(key_pool); 3423 void* iter = NULL; 3424 size_t count = 0; 3425 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2, 3426 &iter, 3427 &keys_to_match, 3428 &count)); 3429 3430 std::string key_to_delete = *(keys_to_match.begin()); 3431 DoomEntry(key_to_delete); 3432 keys_to_match.erase(key_to_delete); 3433 key_pool.erase(key_to_delete); 3434 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); 3435 cache_->EndEnumeration(&iter); 3436 3437 EXPECT_EQ(key_pool.size(), count); 3438 EXPECT_TRUE(keys_to_match.empty()); 3439 } 3440 3441 // Tests that enumerations are not affected by corrupt files. 3442 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) { 3443 SetSimpleCacheMode(); 3444 InitCache(); 3445 std::set<std::string> key_pool; 3446 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool)); 3447 3448 // Create a corrupt entry. 
The write/read sequence ensures that the entry will 3449 // have been created before corrupting the platform files, in the case of 3450 // optimistic operations. 3451 const std::string key = "the key"; 3452 disk_cache::Entry* corrupted_entry; 3453 3454 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry)); 3455 ASSERT_TRUE(corrupted_entry); 3456 const int kSize = 50; 3457 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); 3458 CacheTestFillBuffer(buffer->data(), kSize, false); 3459 ASSERT_EQ(kSize, 3460 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false)); 3461 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize)); 3462 corrupted_entry->Close(); 3463 3464 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests( 3465 key, cache_path_)); 3466 EXPECT_EQ(key_pool.size() + 1, 3467 implicit_cast<size_t>(cache_->GetEntryCount())); 3468 3469 // Check that enumeration returns all entries but the corrupt one. 3470 std::set<std::string> keys_to_match(key_pool); 3471 void* iter = NULL; 3472 size_t count = 0; 3473 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); 3474 cache_->EndEnumeration(&iter); 3475 3476 EXPECT_EQ(key_pool.size(), count); 3477 EXPECT_TRUE(keys_to_match.empty()); 3478 } 3479 3480 #endif // defined(OS_POSIX) 3481