// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/entry_impl.h"

#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/bitmap.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/histogram_macros.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/sparse_control.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes into |buf| starting at the given |offset|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepares this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;
  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};

bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
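  // An empty buffer can only serve a write past that range if |len| itself
  // fits under kMaxBufferSize; otherwise the buffer has to cover everything
  // from offset_ to the end of the write, with a little slack (6/5 of
  // kMaxBufferSize) allowed before we give up and go to disk.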
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file so let's fill the first part with 0.
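    // (PreRead() only sends us here when the read starts at or past the end
    // of the file, so the gap up to offset_ reads back as zeros.)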
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_)
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
      backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
      dirty_(false) {
  entry_.LazyInit(backend->File(address), address);
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

uint32 EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set at entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
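  // For an internal key the terminator is stored inline, so key[key_len] must
  // already be 0; anything else means the record is corrupt.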
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImpl::net_log() const {
  return net_log_;
}

// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_)
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_)
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

void EntryImpl::Close() {
  if (background_queue_)
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
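  // (key_ is set by CreateEntry() for new entries, and cached below the first
  // time the key is read from disk.)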
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                            const CompletionCallback& callback) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                         const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  if (net_log_.IsLoggingAllEvents()) {
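    // Record the parameters of this write; the matching end event is emitted
    // below once a synchronous result is known.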
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                               const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 const CompletionCallback& callback) {
  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_)
    background_queue_->CancelSparseIO(this);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

// ------------------------------------------------------------------------

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that
// any unused block is filled with zeros), and to simplify partial reads and
// writes of an entry (we don't have to worry about returning data that belongs
// to a previous cache entry just because a range was never fully written).
EntryImpl::~EntryImpl() {
  if (!backend_) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        if (!(ret = Flush(index, 0)))
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

int EntryImpl::InternalReadData(int index, int offset,
                                IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_)
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
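    // The in-memory buffer can satisfy at least the start of this read;
    // Read() returns how many bytes it actually copied, zero-filling any gap
    // before the buffer's start.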
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::InternalWriteData(int index, int offset,
                                 IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_)
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // Each term is checked separately because offset + buf_len could overflow
  // and wrap to a negative number.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
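    // PrepareTarget() already made sure the buffer can absorb this write; the
    // data reaches disk later, through Flush() or when the entry is closed.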
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_)
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_);
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index])
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_)
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't yet know the final data size), and for some of the data
// that ends up in external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about
// what is the most recent version of some part of a file, we'll flush the
// buffer and reuse it for the new data. Keep in mind that the normal use
// pattern is quite simple (write sequentially from the beginning), so we
// optimize for handling that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
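      // (CopyToLocalBuffer() reads the current contents of this stream into
      // the new buffer.)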
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}

// We get to this function with some data already stored. If there is a
// truncation that results in data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
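  // Report the stream as empty to the backend and keep the full length as
  // unreported until the data is flushed back to disk.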
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
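  // (Callers treat a non-NULL sparse_ as meaning that sparse IO is
  // initialized.)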
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_);
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_)
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", 0, start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", 0, start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImpl::Log(const char* msg) {
  int dirty = 0;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache