Home | History | Annotate | Download | only in url_request
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "net/url_request/url_request_job.h"
      6 
      7 #include "base/bind.h"
      8 #include "base/compiler_specific.h"
      9 #include "base/message_loop/message_loop.h"
     10 #include "base/power_monitor/power_monitor.h"
     11 #include "base/strings/string_number_conversions.h"
     12 #include "base/strings/string_util.h"
     13 #include "net/base/auth.h"
     14 #include "net/base/host_port_pair.h"
     15 #include "net/base/io_buffer.h"
     16 #include "net/base/load_states.h"
     17 #include "net/base/net_errors.h"
     18 #include "net/base/network_delegate.h"
     19 #include "net/http/http_response_headers.h"
     20 #include "net/url_request/url_request.h"
     21 
     22 namespace net {
     23 
// Binds the job to |request| (which may later be detached via
// DetachRequest()) and registers as a power observer so the job can cancel
// itself on system suspend (see OnSuspend()).  |network_delegate| may be
// NULL; it is only consulted when raw bytes are read.
URLRequestJob::URLRequestJob(URLRequest* request,
                             NetworkDelegate* network_delegate)
    : request_(request),
      done_(false),
      prefilter_bytes_read_(0),
      postfilter_bytes_read_(0),
      filter_input_byte_count_(0),
      filter_needs_more_output_space_(false),
      filtered_read_buffer_len_(0),
      has_handled_response_(false),
      expected_content_size_(-1),          // -1 == size unknown.
      deferred_redirect_status_code_(-1),  // -1 == no redirect deferred.
      network_delegate_(network_delegate),
      weak_factory_(this) {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->AddObserver(this);
}
     42 
// Default implementation ignores the upload stream; jobs that send a request
// body are expected to override this.  |upload| is not owned here.
void URLRequestJob::SetUpload(UploadDataStream* upload) {
}
     45 
// Default implementation ignores extra request headers; jobs that actually
// send headers over the wire are expected to override this.
void URLRequestJob::SetExtraRequestHeaders(const HttpRequestHeaders& headers) {
}
     48 
// Default implementation ignores priority changes; jobs backed by a
// prioritized transport are expected to override this.
void URLRequestJob::SetPriority(RequestPriority priority) {
}
     51 
// Stops the job: invalidates outstanding weak pointers (cancelling e.g. a
// posted CompleteNotifyDone callback) and reports cancellation to the
// request if it is still attached.
void URLRequestJob::Kill() {
  weak_factory_.InvalidateWeakPtrs();
  // Make sure the request is notified that we are done.  We assume that the
  // request took care of setting its error status before calling Kill.
  if (request_)
    NotifyCanceled();
}
     59 
// Severs the link back to the URLRequest.  After this, the request_
// null-checks throughout this class make notifications no-ops.
// OnDetachRequest() lets the concrete job react to the detach.
void URLRequestJob::DetachRequest() {
  request_ = NULL;
  OnDetachRequest();
}
     64 
     65 // This function calls ReadData to get stream data. If a filter exists, passes
     66 // the data to the attached filter. Then returns the output from filter back to
     67 // the caller.
     68 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
     69   bool rv = false;
     70 
     71   DCHECK_LT(buf_size, 1000000);  // Sanity check.
     72   DCHECK(buf);
     73   DCHECK(bytes_read);
     74   DCHECK(filtered_read_buffer_.get() == NULL);
     75   DCHECK_EQ(0, filtered_read_buffer_len_);
     76 
     77   *bytes_read = 0;
     78 
     79   // Skip Filter if not present.
     80   if (!filter_.get()) {
     81     rv = ReadRawDataHelper(buf, buf_size, bytes_read);
     82   } else {
     83     // Save the caller's buffers while we do IO
     84     // in the filter's buffers.
     85     filtered_read_buffer_ = buf;
     86     filtered_read_buffer_len_ = buf_size;
     87 
     88     if (ReadFilteredData(bytes_read)) {
     89       rv = true;   // We have data to return.
     90 
     91       // It is fine to call DoneReading even if ReadFilteredData receives 0
     92       // bytes from the net, but we avoid making that call if we know for
     93       // sure that's the case (ReadRawDataHelper path).
     94       if (*bytes_read == 0)
     95         DoneReading();
     96     } else {
     97       rv = false;  // Error, or a new IO is pending.
     98     }
     99   }
    100   if (rv && *bytes_read == 0)
    101     NotifyDone(URLRequestStatus());
    102   return rv;
    103 }
    104 
// Default implementation: jobs with no cache have nothing to stop.
void URLRequestJob::StopCaching() {
  // Nothing to do here.
}
    108 
// Default implementation: reports that no full request headers are
// available.  Returns true only in overrides that actually sent headers.
bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
  // Most job types don't send request headers.
  return false;
}
    113 
// Default implementation: the job reports itself as idle.
LoadState URLRequestJob::GetLoadState() const {
  return LOAD_STATE_IDLE;
}
    117 
// Default implementation: no upload in progress, so a default-constructed
// (empty) progress value is returned.
UploadProgress URLRequestJob::GetUploadProgress() const {
  return UploadProgress();
}
    121 
// Default implementation: charset is unknown; |charset| is left untouched.
bool URLRequestJob::GetCharset(std::string* charset) {
  return false;
}
    125 
// Default implementation: leaves |info| untouched.  Subclasses with real
// response metadata (headers, timestamps) are expected to override this.
void URLRequestJob::GetResponseInfo(HttpResponseInfo* info) {
}
    128 
// Default implementation: leaves |load_timing_info| untouched.
void URLRequestJob::GetLoadTimingInfo(LoadTimingInfo* load_timing_info) const {
  // Only certain request types return more than just request start times.
}
    132 
// Default implementation: no response cookies; |cookies| is left untouched.
bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
  return false;
}
    136 
// Default implementation: no content filter, so raw bytes pass through to
// the caller unmodified (see the !filter_ path in Read()).  Overrides return
// a heap-allocated Filter that NotifyHeadersComplete() takes ownership of.
Filter* URLRequestJob::SetupFilter() const {
  return NULL;
}
    140 
    141 bool URLRequestJob::IsRedirectResponse(GURL* location,
    142                                        int* http_status_code) {
    143   // For non-HTTP jobs, headers will be null.
    144   HttpResponseHeaders* headers = request_->response_headers();
    145   if (!headers)
    146     return false;
    147 
    148   std::string value;
    149   if (!headers->IsRedirect(&value))
    150     return false;
    151 
    152   *location = request_->url().Resolve(value);
    153   *http_status_code = headers->response_code();
    154   return true;
    155 }
    156 
// Default implementation: every redirect target is considered safe.
// Overrides can veto redirects to schemes they cannot handle safely.
bool URLRequestJob::IsSafeRedirect(const GURL& location) {
  return true;
}
    160 
// Default implementation: the job never requires authentication.  Jobs that
// return true must also implement the auth methods below.
bool URLRequestJob::NeedsAuth() {
  return false;
}
    164 
// Supplies the pending auth challenge.  Reached only when NeedsAuth()
// returned true, so hitting this base version is a contract violation.
void URLRequestJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* auth_info) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
    171 
// Supplies credentials for a pending auth challenge.  Reached only when
// NeedsAuth() returned true, so hitting this base version is a contract
// violation.
void URLRequestJob::SetAuth(const AuthCredentials& credentials) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
    177 
// Declines a pending auth challenge.  Reached only when NeedsAuth() returned
// true, so hitting this base version is a contract violation.
void URLRequestJob::CancelAuth() {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
    183 
// Resumes the request with the chosen client certificate.  Only jobs that
// can raise a certificate request should be asked to continue with one.
void URLRequestJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  // The derived class should implement this!
  NOTREACHED();
}
    189 
// Resumes the request past a recoverable error (e.g. an SSL error the user
// chose to ignore).  The base job never generates such errors, so reaching
// this is a contract violation.
void URLRequestJob::ContinueDespiteLastError() {
  // Implementations should know how to recover from errors they generate.
  // If this code was reached, we are trying to recover from an error that
  // we don't know how to recover from.
  NOTREACHED();
}
    196 
    197 void URLRequestJob::FollowDeferredRedirect() {
    198   DCHECK(deferred_redirect_status_code_ != -1);
    199 
    200   // NOTE: deferred_redirect_url_ may be invalid, and attempting to redirect to
    201   // such an URL will fail inside FollowRedirect.  The DCHECK above asserts
    202   // that we called OnReceivedRedirect.
    203 
    204   // It is also possible that FollowRedirect will drop the last reference to
    205   // this job, so we need to reset our members before calling it.
    206 
    207   GURL redirect_url = deferred_redirect_url_;
    208   int redirect_status_code = deferred_redirect_status_code_;
    209 
    210   deferred_redirect_url_ = GURL();
    211   deferred_redirect_status_code_ = -1;
    212 
    213   FollowRedirect(redirect_url, redirect_status_code);
    214 }
    215 
// Default implementation: MIME type unknown; |mime_type| is left untouched.
bool URLRequestJob::GetMimeType(std::string* mime_type) const {
  return false;
}
    219 
// Default implementation: -1 signals that no HTTP-style response code is
// available for this job type.
int URLRequestJob::GetResponseCode() const {
  return -1;
}
    223 
// Default implementation: no remote endpoint, so an empty HostPortPair is
// returned.
HostPortPair URLRequestJob::GetSocketAddress() const {
  return HostPortPair();
}
    227 
// PowerObserver hook: the system is suspending, so abort the job rather
// than leave network IO dangling across the suspend.
void URLRequestJob::OnSuspend() {
  Kill();
}
    231 
// Default implementation: no cleanup needed when the owning URLRequest is
// being destroyed.  Subclasses with per-request state may override.
void URLRequestJob::NotifyURLRequestDestroyed() {
}
    234 
// Unregisters the power observer added in the constructor; everything else
// is cleaned up by member destructors.
URLRequestJob::~URLRequestJob() {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->RemoveObserver(this);
}
    240 
    241 void URLRequestJob::NotifyCertificateRequested(
    242     SSLCertRequestInfo* cert_request_info) {
    243   if (!request_)
    244     return;  // The request was destroyed, so there is no more work to do.
    245 
    246   request_->NotifyCertificateRequested(cert_request_info);
    247 }
    248 
    249 void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info,
    250                                               bool fatal) {
    251   if (!request_)
    252     return;  // The request was destroyed, so there is no more work to do.
    253 
    254   request_->NotifySSLCertificateError(ssl_info, fatal);
    255 }
    256 
    257 bool URLRequestJob::CanGetCookies(const CookieList& cookie_list) const {
    258   if (!request_)
    259     return false;  // The request was destroyed, so there is no more work to do.
    260 
    261   return request_->CanGetCookies(cookie_list);
    262 }
    263 
    264 bool URLRequestJob::CanSetCookie(const std::string& cookie_line,
    265                                  CookieOptions* options) const {
    266   if (!request_)
    267     return false;  // The request was destroyed, so there is no more work to do.
    268 
    269   return request_->CanSetCookie(cookie_line, options);
    270 }
    271 
    272 bool URLRequestJob::CanEnablePrivacyMode() const {
    273   if (!request_)
    274     return false;  // The request was destroyed, so there is no more work to do.
    275 
    276   return request_->CanEnablePrivacyMode();
    277 }
    278 
// Called by the concrete job once response headers (or their non-HTTP
// equivalent) are available.  Handles redirects and auth challenges here;
// otherwise installs the optional content filter, records the expected
// content size, and tells the URLRequest that the response has started.
void URLRequestJob::NotifyHeadersComplete() {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // Only the first headers-complete notification is acted upon.
  if (has_handled_response_)
    return;

  DCHECK(!request_->status().is_io_pending());

  // Initialize to the current time, and let the subclass optionally override
  // the time stamps if it has that information.  The default request_time is
  // set by URLRequest before it calls our Start method.
  request_->response_info_.response_time = base::Time::Now();
  GetResponseInfo(&request_->response_info_);

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (request_)
    request_->OnHeadersComplete();

  GURL new_location;
  int http_status_code;
  if (IsRedirectResponse(&new_location, &http_status_code)) {
    const GURL& url = request_->url();

    // Move the reference fragment of the old location to the new one if the
    // new one has none. This duplicates mozilla's behavior.
    if (url.is_valid() && url.has_ref() && !new_location.has_ref()) {
      GURL::Replacements replacements;
      // Reference the |ref| directly out of the original URL to avoid a
      // malloc.
      replacements.SetRef(url.spec().data(),
                          url.parsed_for_possibly_invalid_spec().ref);
      new_location = new_location.ReplaceComponents(replacements);
    }

    // Redirect response bodies are not read. Notify the transaction
    // so it does not treat being stopped as an error.
    DoneReading();

    bool defer_redirect = false;
    request_->NotifyReceivedRedirect(new_location, &defer_redirect);

    // Ensure that the request wasn't detached or destroyed in
    // NotifyReceivedRedirect
    if (!request_ || !request_->has_delegate())
      return;

    // If we were not cancelled, then maybe follow the redirect.
    if (request_->status().is_success()) {
      if (defer_redirect) {
        // Delegate asked to postpone; FollowDeferredRedirect() resumes later
        // from these saved values.
        deferred_redirect_url_ = new_location;
        deferred_redirect_status_code_ = http_status_code;
      } else {
        FollowRedirect(new_location, http_status_code);
      }
      return;
    }
  } else if (NeedsAuth()) {
    scoped_refptr<AuthChallengeInfo> auth_info;
    GetAuthChallengeInfo(&auth_info);
    // Need to check for a NULL auth_info because the server may have failed
    // to send a challenge with the 401 response.
    if (auth_info.get()) {
      request_->NotifyAuthRequired(auth_info.get());
      // Wait for SetAuth or CancelAuth to be called.
      return;
    }
  }

  has_handled_response_ = true;
  if (request_->status().is_success())
    filter_.reset(SetupFilter());

  // Without a filter, Content-Length (when present) gives the expected size
  // directly; with a filter the decoded size cannot be known up front.
  if (!filter_.get()) {
    std::string content_length;
    request_->GetResponseHeaderByName("content-length", &content_length);
    if (!content_length.empty())
      base::StringToInt64(content_length, &expected_content_size_);
  }

  request_->NotifyResponseStarted();
}
    367 
// Called by the concrete job when an asynchronous raw read finishes.
// |bytes_read| is > 0 for data and 0 for EOF; errors are carried in the
// request's status rather than in this argument.  Filtered jobs push the
// new raw bytes through the filter before notifying the request.
void URLRequestJob::NotifyReadComplete(int bytes_read) {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
  // unit_tests have been fixed to not trip this.
  //DCHECK(!request_->status().is_io_pending());

  // The headers should be complete before reads complete
  DCHECK(has_handled_response_);

  // Account for the raw bytes and release raw_read_buffer_.
  OnRawReadComplete(bytes_read);

  // Don't notify if we had an error.
  if (!request_->status().is_success())
    return;

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (filter_.get()) {
    // Tell the filter that it has more data
    FilteredDataRead(bytes_read);

    // Filter the data.
    int filter_bytes_read = 0;
    if (ReadFilteredData(&filter_bytes_read)) {
      // A successful zero-byte filtered read marks end of stream.
      if (!filter_bytes_read)
        DoneReading();
      request_->NotifyReadCompleted(filter_bytes_read);
    }
  } else {
    request_->NotifyReadCompleted(bytes_read);
  }
  DVLOG(1) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
}
    412 
    413 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
    414   DCHECK(!has_handled_response_);
    415   has_handled_response_ = true;
    416   if (request_) {
    417     request_->set_status(status);
    418     request_->NotifyResponseStarted();
    419   }
    420 }
    421 
// Marks the job as finished with |status| and schedules CompleteNotifyDone()
// via the message loop.  Deferring the delegate notification prevents
// re-entering the delegate when completion happens inside a synchronous
// call.  Idempotent: subsequent calls are ignored (with a DCHECK).
void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
  DCHECK(!done_) << "Job sending done notification twice";
  if (done_)
    return;
  done_ = true;

  // Unless there was an error, we should have at least tried to handle
  // the response before getting here.
  DCHECK(has_handled_response_ || !status.is_success());

  // As with NotifyReadComplete, we need to take care to notice if we were
  // destroyed during a delegate callback.
  if (request_) {
    request_->set_is_pending(false);
    // With async IO, it's quite possible to have a few outstanding
    // requests.  We could receive a request to Cancel, followed shortly
    // by a successful IO.  For tracking the status(), once there is
    // an error, we do not change the status back to success.  To
    // enforce this, only set the status if the job is so far
    // successful.
    if (request_->status().is_success()) {
      if (status.status() == URLRequestStatus::FAILED) {
        request_->net_log().AddEventWithNetErrorCode(NetLog::TYPE_FAILED,
                                                     status.error());
      }
      request_->set_status(status);
    }
  }

  // Complete this notification later.  This prevents us from re-entering the
  // delegate if we're done because of a synchronous call.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestJob::CompleteNotifyDone,
                 weak_factory_.GetWeakPtr()));
}
    458 
    459 void URLRequestJob::CompleteNotifyDone() {
    460   // Check if we should notify the delegate that we're done because of an error.
    461   if (request_ &&
    462       !request_->status().is_success() &&
    463       request_->has_delegate()) {
    464     // We report the error differently depending on whether we've called
    465     // OnResponseStarted yet.
    466     if (has_handled_response_) {
    467       // We signal the error by calling OnReadComplete with a bytes_read of -1.
    468       request_->NotifyReadCompleted(-1);
    469     } else {
    470       has_handled_response_ = true;
    471       request_->NotifyResponseStarted();
    472     }
    473   }
    474 }
    475 
    476 void URLRequestJob::NotifyCanceled() {
    477   if (!done_) {
    478     NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED, ERR_ABORTED));
    479   }
    480 }
    481 
// Asks the URLRequest to restart this job from scratch, unless the request
// has already been cancelled.  Only valid before the response has been
// handed to the delegate.
void URLRequestJob::NotifyRestartRequired() {
  DCHECK(!has_handled_response_);
  if (GetStatus().status() != URLRequestStatus::CANCELED)
    request_->Restart();
}
    487 
// Forwards delegate-call bookkeeping to the request.  NOTE: no null check —
// callers must only invoke this while the request is attached.
void URLRequestJob::OnCallToDelegate() {
  request_->OnCallToDelegate();
}
    491 
// Forwards delegate-call-complete bookkeeping to the request.  NOTE: no null
// check — callers must only invoke this while the request is attached.
void URLRequestJob::OnCallToDelegateComplete() {
  request_->OnCallToDelegateComplete();
}
    495 
// Default raw-read implementation: synchronous, immediate EOF (zero bytes,
// success).  Jobs that actually produce data are expected to override this.
bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
                                int *bytes_read) {
  DCHECK(bytes_read);
  *bytes_read = 0;
  return true;
}
    502 
// Hook invoked when the response body will not be read (further): at EOF of
// a filtered stream or when a redirect body is skipped.  Default is a no-op.
void URLRequestJob::DoneReading() {
  // Do nothing.
}
    506 
// Tells the filter that |bytes_read| new bytes are available in its input
// (stream) buffer.  Requires an installed filter.
void URLRequestJob::FilteredDataRead(int bytes_read) {
  DCHECK(filter_.get());  // don't add data if there is no filter
  filter_->FlushStreamBuffer(bytes_read);
}
    511 
// Pulls raw data through |filter_| into the caller's saved buffer
// (|filtered_read_buffer_|).  Returns true with *bytes_read > 0 for data,
// true with 0 for end of stream, false when IO is pending or on error.
// May recurse when the filter needs more input before it can produce output
// (e.g. an incomplete gzip header).  On success the saved caller buffer is
// released.
bool URLRequestJob::ReadFilteredData(int* bytes_read) {
  DCHECK(filter_.get());  // don't add data if there is no filter
  DCHECK(filtered_read_buffer_.get() !=
         NULL);                             // we need to have a buffer to fill
  DCHECK_GT(filtered_read_buffer_len_, 0);  // sanity check
  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // sanity check
  DCHECK(raw_read_buffer_.get() ==
         NULL);  // there should be no raw read buffer yet

  bool rv = false;
  *bytes_read = 0;

  // A finished job reports EOF (success with zero bytes).
  if (is_done())
    return true;

  if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
    // We don't have any raw data to work with, so
    // read from the socket.
    int filtered_data_read;
    if (ReadRawDataForFilter(&filtered_data_read)) {
      if (filtered_data_read > 0) {
        filter_->FlushStreamBuffer(filtered_data_read);  // Give data to filter.
      } else {
        return true;  // EOF
      }
    } else {
      return false;  // IO Pending (or error)
    }
  }

  if ((filter_->stream_data_len() || filter_needs_more_output_space_)
      && !is_done()) {
    // Get filtered data.
    int filtered_data_len = filtered_read_buffer_len_;
    Filter::FilterStatus status;
    int output_buffer_size = filtered_data_len;
    status = filter_->ReadData(filtered_read_buffer_->data(),
                               &filtered_data_len);

    if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
      // filter_needs_more_output_space_ was mistaken... there are no more bytes
      // and we should have at least tried to fill up the filter's input buffer.
      // Correct the state, and try again.
      filter_needs_more_output_space_ = false;
      return ReadFilteredData(bytes_read);
    }

    switch (status) {
      case Filter::FILTER_DONE: {
        // The filter has produced its final output.
        filter_needs_more_output_space_ = false;
        *bytes_read = filtered_data_len;
        postfilter_bytes_read_ += filtered_data_len;
        rv = true;
        break;
      }
      case Filter::FILTER_NEED_MORE_DATA: {
        // A full output buffer suggests the filter stopped for lack of output
        // space rather than input, so remember to drain it next time.
        filter_needs_more_output_space_ =
            (filtered_data_len == output_buffer_size);
        // We have finished filtering all data currently in the buffer.
        // There might be some space left in the output buffer. One can
        // consider reading more data from the stream to feed the filter
        // and filling up the output buffer. This leads to more complicated
        // buffer management and data notification mechanisms.
        // We can revisit this issue if there is a real perf need.
        if (filtered_data_len > 0) {
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
        } else {
          // Read again since we haven't received enough data yet (e.g., we may
          // not have a complete gzip header yet)
          rv = ReadFilteredData(bytes_read);
        }
        break;
      }
      case Filter::FILTER_OK: {
        filter_needs_more_output_space_ =
            (filtered_data_len == output_buffer_size);
        *bytes_read = filtered_data_len;
        postfilter_bytes_read_ += filtered_data_len;
        rv = true;
        break;
      }
      case Filter::FILTER_ERROR: {
        DVLOG(1) << __FUNCTION__ << "() "
                 << "\"" << (request_ ? request_->url().spec() : "???") << "\""
                 << " Filter Error";
        filter_needs_more_output_space_ = false;
        NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                   ERR_CONTENT_DECODING_FAILED));
        rv = false;
        break;
      }
      default: {
        NOTREACHED();
        filter_needs_more_output_space_ = false;
        rv = false;
        break;
      }
    }
    DVLOG(2) << __FUNCTION__ << "() "
             << "\"" << (request_ ? request_->url().spec() : "???") << "\""
             << " rv = " << rv
             << " post bytes read = " << filtered_data_len
             << " pre total = " << prefilter_bytes_read_
             << " post total = "
             << postfilter_bytes_read_;
    // If logging all bytes is enabled, log the filtered bytes read.
    if (rv && request() && request()->net_log().IsLoggingBytes() &&
        filtered_data_len > 0) {
      request()->net_log().AddByteTransferEvent(
          NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ,
          filtered_data_len, filtered_read_buffer_->data());
    }
  } else {
    // we are done, or there is no data left.
    rv = true;
  }

  if (rv) {
    // When we successfully finished a read, we no longer need to
    // save the caller's buffers. Release our reference.
    filtered_read_buffer_ = NULL;
    filtered_read_buffer_len_ = 0;
  }
  return rv;
}
    639 
    640 const URLRequestStatus URLRequestJob::GetStatus() {
    641   if (request_)
    642     return request_->status();
    643   // If the request is gone, we must be cancelled.
    644   return URLRequestStatus(URLRequestStatus::CANCELED,
    645                           ERR_ABORTED);
    646 }
    647 
// Copies |status| onto the owning request; a no-op once the request has been
// detached or destroyed.
void URLRequestJob::SetStatus(const URLRequestStatus &status) {
  if (request_)
    request_->set_status(status);
}
    652 
    653 bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
    654   bool rv = false;
    655 
    656   DCHECK(bytes_read);
    657   DCHECK(filter_.get());
    658 
    659   *bytes_read = 0;
    660 
    661   // Get more pre-filtered data if needed.
    662   // TODO(mbelshe): is it possible that the filter needs *MORE* data
    663   //    when there is some data already in the buffer?
    664   if (!filter_->stream_data_len() && !is_done()) {
    665     IOBuffer* stream_buffer = filter_->stream_buffer();
    666     int stream_buffer_size = filter_->stream_buffer_size();
    667     rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
    668   }
    669   return rv;
    670 }
    671 
// Wraps ReadRawData(): stashes |buf| in |raw_read_buffer_| so an
// asynchronous completion (NotifyReadComplete) can account for the bytes,
// and performs that accounting immediately when the read finishes
// synchronously (success or failure).
bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
                                      int* bytes_read) {
  DCHECK(!request_->status().is_io_pending());
  DCHECK(raw_read_buffer_.get() == NULL);

  // Keep a pointer to the read buffer, so we have access to it in the
  // OnRawReadComplete() callback in the event that the read completes
  // asynchronously.
  raw_read_buffer_ = buf;
  bool rv = ReadRawData(buf, buf_size, bytes_read);

  // io_pending means the read will finish later via NotifyReadComplete.
  if (!request_->status().is_io_pending()) {
    // If |filter_| is NULL, and logging all bytes is enabled, log the raw
    // bytes read.
    if (!filter_.get() && request() && request()->net_log().IsLoggingBytes() &&
        *bytes_read > 0) {
      request()->net_log().AddByteTransferEvent(
          NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
          *bytes_read, raw_read_buffer_->data());
    }

    // If the read completes synchronously, either success or failure,
    // invoke the OnRawReadComplete callback so we can account for the
    // completed read.
    OnRawReadComplete(*bytes_read);
  }
  return rv;
}
    700 
    701 void URLRequestJob::FollowRedirect(const GURL& location, int http_status_code) {
    702   int rv = request_->Redirect(location, http_status_code);
    703   if (rv != OK)
    704     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
    705 }
    706 
    707 void URLRequestJob::OnRawReadComplete(int bytes_read) {
    708   DCHECK(raw_read_buffer_.get());
    709   if (bytes_read > 0) {
    710     RecordBytesRead(bytes_read);
    711   }
    712   raw_read_buffer_ = NULL;
    713 }
    714 
// Updates the byte counters for |bytes_read| freshly-read raw bytes and
// tells the network delegate.  Without a filter, raw bytes are also the
// post-filter bytes; with one, postfilter_bytes_read_ is updated in
// ReadFilteredData() instead.
void URLRequestJob::RecordBytesRead(int bytes_read) {
  filter_input_byte_count_ += bytes_read;
  prefilter_bytes_read_ += bytes_read;
  if (!filter_.get())
    postfilter_bytes_read_ += bytes_read;
  DVLOG(2) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
  if (network_delegate_)
    network_delegate_->NotifyRawBytesRead(*request_, bytes_read);
}
    729 
    730 bool URLRequestJob::FilterHasData() {
    731     return filter_.get() && filter_->stream_data_len();
    732 }
    733 
// Stats hook invoked from RecordBytesRead(); the base job records nothing.
void URLRequestJob::UpdatePacketReadTimes() {
}
    736 
    737 }  // namespace net
    738