// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/appcache/appcache_response.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/pickle.h"
#include "base/strings/string_util.h"
#include "content/browser/appcache/appcache_storage.h"
#include "net/base/completion_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"

namespace content {

namespace {

// Disk cache entry data indices. Each response occupies one disk cache entry
// with two data streams: the serialized net::HttpResponseInfo (headers) at
// kResponseInfoIndex and the response body at kResponseContentIndex.
enum {
  kResponseInfoIndex,
  kResponseContentIndex
};

// An IOBuffer that wraps a pickle's data. Ownership of the
// pickle is transferred to the WrappedPickleIOBuffer object.
class WrappedPickleIOBuffer : public net::WrappedIOBuffer {
 public:
  explicit WrappedPickleIOBuffer(const Pickle* pickle)
      : net::WrappedIOBuffer(reinterpret_cast<const char*>(pickle->data())),
        pickle_(pickle) {
    DCHECK(pickle->data());
  }

 private:
  virtual ~WrappedPickleIOBuffer() {}

  scoped_ptr<const Pickle> pickle_;
};

}  // anon namespace


// AppCacheResponseInfo ----------------------------------------------

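// An AppCacheResponseInfo registers itself with the storage working set on
// construction and removes itself on destruction, keeping the parsed
// response headers available for reuse while the object is alive.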
AppCacheResponseInfo::AppCacheResponseInfo(
    AppCacheStorage* storage, const GURL& manifest_url,
    int64 response_id, net::HttpResponseInfo* http_info,
    int64 response_data_size)
    : manifest_url_(manifest_url), response_id_(response_id),
      http_response_info_(http_info), response_data_size_(response_data_size),
      storage_(storage) {
  DCHECK(http_info);
  DCHECK(response_id != kAppCacheNoResponseId);
  storage_->working_set()->AddResponseInfo(this);
}

AppCacheResponseInfo::~AppCacheResponseInfo() {
  storage_->working_set()->RemoveResponseInfo(this);
}

// HttpResponseInfoIOBuffer ------------------------------------------

HttpResponseInfoIOBuffer::HttpResponseInfoIOBuffer()
    : response_data_size(kUnkownResponseDataSize) {}

HttpResponseInfoIOBuffer::HttpResponseInfoIOBuffer(net::HttpResponseInfo* info)
    : http_info(info), response_data_size(kUnkownResponseDataSize) {}

HttpResponseInfoIOBuffer::~HttpResponseInfoIOBuffer() {}

// AppCacheResponseIO ----------------------------------------------

AppCacheResponseIO::AppCacheResponseIO(
    int64 response_id, int64 group_id, AppCacheDiskCacheInterface* disk_cache)
    : response_id_(response_id),
      group_id_(group_id),
      disk_cache_(disk_cache),
      entry_(NULL),
      buffer_len_(0),
      weak_factory_(this) {
}

AppCacheResponseIO::~AppCacheResponseIO() {
  if (entry_)
    entry_->Close();
}

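// Used when the underlying disk cache operation completes synchronously:
// posting a task keeps the completion callback asynchronous in all cases.
// The weak pointer cancels delivery if this object is destroyed first.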
void AppCacheResponseIO::ScheduleIOCompletionCallback(int result) {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&AppCacheResponseIO::OnIOComplete,
                            weak_factory_.GetWeakPtr(), result));
}

void AppCacheResponseIO::InvokeUserCompletionCallback(int result) {
  // Clear the user callback and buffers prior to invoking the callback
  // so the caller can schedule additional operations in the callback.
  buffer_ = NULL;
  info_buffer_ = NULL;
  net::CompletionCallback cb = callback_;
  callback_.Reset();
  cb.Run(result);
}

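// ReadRaw() and WriteRaw() start an operation against the open disk cache
// entry. A synchronous result is rerouted through
// ScheduleIOCompletionCallback(), so completion is always reported
// asynchronously, whether or not the entry finished the operation inline.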
void AppCacheResponseIO::ReadRaw(int index, int offset,
                                 net::IOBuffer* buf, int buf_len) {
  DCHECK(entry_);
  int rv = entry_->Read(
      index, offset, buf, buf_len,
      base::Bind(&AppCacheResponseIO::OnRawIOComplete,
                 weak_factory_.GetWeakPtr()));
  if (rv != net::ERR_IO_PENDING)
    ScheduleIOCompletionCallback(rv);
}

void AppCacheResponseIO::WriteRaw(int index, int offset,
                                  net::IOBuffer* buf, int buf_len) {
  DCHECK(entry_);
  int rv = entry_->Write(
      index, offset, buf, buf_len,
      base::Bind(&AppCacheResponseIO::OnRawIOComplete,
                 weak_factory_.GetWeakPtr()));
  if (rv != net::ERR_IO_PENDING)
    ScheduleIOCompletionCallback(rv);
}

void AppCacheResponseIO::OnRawIOComplete(int result) {
  DCHECK_NE(net::ERR_IO_PENDING, result);
  OnIOComplete(result);
}


// AppCacheResponseReader ----------------------------------------------

AppCacheResponseReader::AppCacheResponseReader(
    int64 response_id, int64 group_id, AppCacheDiskCacheInterface* disk_cache)
    : AppCacheResponseIO(response_id, group_id, disk_cache),
      range_offset_(0),
      range_length_(kint32max),
      read_position_(0),
      weak_factory_(this) {
}

AppCacheResponseReader::~AppCacheResponseReader() {
}

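// ReadInfo() and ReadData() stash the caller's buffer and callback, then
// lazily open the disk cache entry; the matching ContinueRead*() method
// issues the actual read once the entry is available, or fails with
// net::ERR_CACHE_MISS if it is not.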
void AppCacheResponseReader::ReadInfo(HttpResponseInfoIOBuffer* info_buf,
                                      const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  DCHECK(!IsReadPending());
  DCHECK(info_buf);
  DCHECK(!info_buf->http_info.get());
  DCHECK(!buffer_.get());
  DCHECK(!info_buffer_.get());

  info_buffer_ = info_buf;
  callback_ = callback;  // cleared on completion
  OpenEntryIfNeededAndContinue();
}

void AppCacheResponseReader::ContinueReadInfo() {
  if (!entry_) {
    ScheduleIOCompletionCallback(net::ERR_CACHE_MISS);
    return;
  }

  int size = entry_->GetSize(kResponseInfoIndex);
  if (size <= 0) {
    ScheduleIOCompletionCallback(net::ERR_CACHE_MISS);
    return;
  }

  buffer_ = new net::IOBuffer(size);
  ReadRaw(kResponseInfoIndex, 0, buffer_.get(), size);
}

void AppCacheResponseReader::ReadData(net::IOBuffer* buf, int buf_len,
                                      const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  DCHECK(!IsReadPending());
  DCHECK(buf);
  DCHECK(buf_len >= 0);
  DCHECK(!buffer_.get());
  DCHECK(!info_buffer_.get());

  buffer_ = buf;
  buffer_len_ = buf_len;
  callback_ = callback;  // cleared on completion
  OpenEntryIfNeededAndContinue();
}

void AppCacheResponseReader::ContinueReadData() {
  if (!entry_) {
    ScheduleIOCompletionCallback(net::ERR_CACHE_MISS);
    return;
  }

  if (read_position_ + buffer_len_ > range_length_) {
    // TODO(michaeln): What about integer overflows?
    DCHECK(range_length_ >= read_position_);
    buffer_len_ = range_length_ - read_position_;
  }
  ReadRaw(kResponseContentIndex,
          range_offset_ + read_position_,
          buffer_.get(),
          buffer_len_);
}

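// Restricts subsequent ReadData() calls to the byte range
// [offset, offset + length) of the stored response body. Must be set before
// any data has been read.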
void AppCacheResponseReader::SetReadRange(int offset, int length) {
  DCHECK(!IsReadPending() && !read_position_);
  range_offset_ = offset;
  range_length_ = length;
}

void AppCacheResponseReader::OnIOComplete(int result) {
  if (result >= 0) {
    if (info_buffer_.get()) {
      // Deserialize the http info structure, ensuring we got headers.
      Pickle pickle(buffer_->data(), result);
      scoped_ptr<net::HttpResponseInfo> info(new net::HttpResponseInfo);
      bool response_truncated = false;
      if (!info->InitFromPickle(pickle, &response_truncated) ||
          !info->headers.get()) {
        InvokeUserCompletionCallback(net::ERR_FAILED);
        return;
      }
      DCHECK(!response_truncated);
      info_buffer_->http_info.reset(info.release());

      // Also return the size of the response body.
      DCHECK(entry_);
      info_buffer_->response_data_size =
          entry_->GetSize(kResponseContentIndex);
    } else {
      read_position_ += result;
    }
  }
  InvokeUserCompletionCallback(result);
}

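// Lazily opens the disk cache entry on first use. The Entry* out-param is
// heap allocated and owned by the bound callback (base::Owned) so it remains
// valid across an asynchronous open; a synchronous result is handled by
// calling OnOpenEntryComplete() directly.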
void AppCacheResponseReader::OpenEntryIfNeededAndContinue() {
  int rv;
  AppCacheDiskCacheInterface::Entry** entry_ptr = NULL;
  if (entry_) {
    rv = net::OK;
  } else if (!disk_cache_) {
    rv = net::ERR_FAILED;
  } else {
    entry_ptr = new AppCacheDiskCacheInterface::Entry*;
    open_callback_ =
        base::Bind(&AppCacheResponseReader::OnOpenEntryComplete,
                   weak_factory_.GetWeakPtr(), base::Owned(entry_ptr));
    rv = disk_cache_->OpenEntry(response_id_, entry_ptr, open_callback_);
  }

  if (rv != net::ERR_IO_PENDING)
    OnOpenEntryComplete(entry_ptr, rv);
}

void AppCacheResponseReader::OnOpenEntryComplete(
    AppCacheDiskCacheInterface::Entry** entry, int rv) {
  DCHECK(info_buffer_.get() || buffer_.get());

  if (!open_callback_.is_null()) {
    if (rv == net::OK) {
      DCHECK(entry);
      entry_ = *entry;
    }
    open_callback_.Reset();
  }

  if (info_buffer_.get())
    ContinueReadInfo();
  else
    ContinueReadData();
}

// AppCacheResponseWriter ----------------------------------------------

AppCacheResponseWriter::AppCacheResponseWriter(
    int64 response_id, int64 group_id, AppCacheDiskCacheInterface* disk_cache)
    : AppCacheResponseIO(response_id, group_id, disk_cache),
      info_size_(0),
      write_position_(0),
      write_amount_(0),
      creation_phase_(INITIAL_ATTEMPT),
      weak_factory_(this) {
}

AppCacheResponseWriter::~AppCacheResponseWriter() {
}

void AppCacheResponseWriter::WriteInfo(
    HttpResponseInfoIOBuffer* info_buf,
    const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  DCHECK(!IsWritePending());
  DCHECK(info_buf);
  DCHECK(info_buf->http_info.get());
  DCHECK(!buffer_.get());
  DCHECK(!info_buffer_.get());
  DCHECK(info_buf->http_info->headers.get());

  info_buffer_ = info_buf;
  callback_ = callback;  // cleared on completion
  CreateEntryIfNeededAndContinue();
}

void AppCacheResponseWriter::ContinueWriteInfo() {
  if (!entry_) {
    ScheduleIOCompletionCallback(net::ERR_FAILED);
    return;
  }

  const bool kSkipTransientHeaders = true;
  const bool kTruncated = false;
  Pickle* pickle = new Pickle;
  info_buffer_->http_info->Persist(pickle, kSkipTransientHeaders, kTruncated);
  write_amount_ = static_cast<int>(pickle->size());
  buffer_ = new WrappedPickleIOBuffer(pickle);  // takes ownership of pickle
  WriteRaw(kResponseInfoIndex, 0, buffer_.get(), write_amount_);
}

void AppCacheResponseWriter::WriteData(
    net::IOBuffer* buf, int buf_len, const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  DCHECK(!IsWritePending());
  DCHECK(buf);
  DCHECK(buf_len >= 0);
  DCHECK(!buffer_.get());
  DCHECK(!info_buffer_.get());

  buffer_ = buf;
  write_amount_ = buf_len;
  callback_ = callback;  // cleared on completion
  CreateEntryIfNeededAndContinue();
}

void AppCacheResponseWriter::ContinueWriteData() {
  if (!entry_) {
    ScheduleIOCompletionCallback(net::ERR_FAILED);
    return;
  }
  WriteRaw(
      kResponseContentIndex, write_position_, buffer_.get(), write_amount_);
}

void AppCacheResponseWriter::OnIOComplete(int result) {
  if (result >= 0) {
    DCHECK(write_amount_ == result);
    if (!info_buffer_.get())
      write_position_ += result;
    else
      info_size_ = result;
  }
  InvokeUserCompletionCallback(result);
}

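// Entry creation proceeds in up to three phases: INITIAL_ATTEMPT tries to
// create a fresh entry; if that fails (presumably because an entry with this
// response id already exists), DOOM_EXISTING dooms the old entry, and
// SECOND_ATTEMPT retries the create. See OnCreateEntryComplete().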
void AppCacheResponseWriter::CreateEntryIfNeededAndContinue() {
  int rv;
  AppCacheDiskCacheInterface::Entry** entry_ptr = NULL;
  if (entry_) {
    creation_phase_ = NO_ATTEMPT;
    rv = net::OK;
  } else if (!disk_cache_) {
    creation_phase_ = NO_ATTEMPT;
    rv = net::ERR_FAILED;
  } else {
    creation_phase_ = INITIAL_ATTEMPT;
    entry_ptr = new AppCacheDiskCacheInterface::Entry*;
    create_callback_ =
        base::Bind(&AppCacheResponseWriter::OnCreateEntryComplete,
                   weak_factory_.GetWeakPtr(), base::Owned(entry_ptr));
    rv = disk_cache_->CreateEntry(response_id_, entry_ptr, create_callback_);
  }
  if (rv != net::ERR_IO_PENDING)
    OnCreateEntryComplete(entry_ptr, rv);
}

void AppCacheResponseWriter::OnCreateEntryComplete(
    AppCacheDiskCacheInterface::Entry** entry, int rv) {
  DCHECK(info_buffer_.get() || buffer_.get());

  if (creation_phase_ == INITIAL_ATTEMPT) {
    if (rv != net::OK) {
      // The initial create failed, presumably because an entry already
      // exists for this response id. Doom the existing entry and retry.
      creation_phase_ = DOOM_EXISTING;
      rv = disk_cache_->DoomEntry(response_id_, create_callback_);
      if (rv != net::ERR_IO_PENDING)
        OnCreateEntryComplete(NULL, rv);
      return;
    }
  } else if (creation_phase_ == DOOM_EXISTING) {
    creation_phase_ = SECOND_ATTEMPT;
    AppCacheDiskCacheInterface::Entry** entry_ptr =
        new AppCacheDiskCacheInterface::Entry*;
    create_callback_ =
        base::Bind(&AppCacheResponseWriter::OnCreateEntryComplete,
                   weak_factory_.GetWeakPtr(), base::Owned(entry_ptr));
    rv = disk_cache_->CreateEntry(response_id_, entry_ptr, create_callback_);
    if (rv != net::ERR_IO_PENDING)
      OnCreateEntryComplete(entry_ptr, rv);
    return;
  }

  if (!create_callback_.is_null()) {
    if (rv == net::OK)
      entry_ = *entry;

    create_callback_.Reset();
  }

  if (info_buffer_.get())
    ContinueWriteInfo();
  else
    ContinueWriteData();
}

}  // namespace content