// net/url_request/url_request_job.cc
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "net/url_request/url_request_job.h"
      6 
      7 #include "base/bind.h"
      8 #include "base/compiler_specific.h"
      9 #include "base/message_loop/message_loop.h"
     10 #include "base/power_monitor/power_monitor.h"
     11 #include "base/strings/string_number_conversions.h"
     12 #include "base/strings/string_util.h"
     13 #include "net/base/auth.h"
     14 #include "net/base/host_port_pair.h"
     15 #include "net/base/io_buffer.h"
     16 #include "net/base/load_states.h"
     17 #include "net/base/net_errors.h"
     18 #include "net/base/network_delegate.h"
     19 #include "net/http/http_response_headers.h"
     20 #include "net/url_request/url_request.h"
     21 
     22 namespace net {
     23 
     24 URLRequestJob::URLRequestJob(URLRequest* request,
     25                              NetworkDelegate* network_delegate)
     26     : request_(request),
     27       done_(false),
     28       prefilter_bytes_read_(0),
     29       postfilter_bytes_read_(0),
     30       filter_input_byte_count_(0),
     31       filter_needs_more_output_space_(false),
     32       filtered_read_buffer_len_(0),
     33       has_handled_response_(false),
     34       expected_content_size_(-1),
     35       deferred_redirect_status_code_(-1),
     36       network_delegate_(network_delegate),
     37       weak_factory_(this) {
     38   base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
     39   if (power_monitor)
     40     power_monitor->AddObserver(this);
     41 }
     42 
     43 void URLRequestJob::SetUpload(UploadDataStream* upload) {
     44 }
     45 
     46 void URLRequestJob::SetExtraRequestHeaders(const HttpRequestHeaders& headers) {
     47 }
     48 
     49 void URLRequestJob::SetPriority(RequestPriority priority) {
     50 }
     51 
     52 void URLRequestJob::Kill() {
     53   weak_factory_.InvalidateWeakPtrs();
     54   // Make sure the request is notified that we are done.  We assume that the
     55   // request took care of setting its error status before calling Kill.
     56   if (request_)
     57     NotifyCanceled();
     58 }
     59 
     60 void URLRequestJob::DetachRequest() {
     61   request_ = NULL;
     62   OnDetachRequest();
     63 }
     64 
// This function calls ReadData to get stream data. If a filter exists, passes
// the data to the attached filter. Then returns the output from filter back to
// the caller.
// Returns true with *bytes_read > 0 when data is available synchronously,
// true with *bytes_read == 0 on EOF (NotifyDone is issued here with a
// success status), and false on error or when the read is pending.
bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
  bool rv = false;

  DCHECK_LT(buf_size, 1000000);  // Sanity check.
  DCHECK(buf);
  DCHECK(bytes_read);
  // Any previous filtered read must have completed and released its buffer.
  DCHECK(filtered_read_buffer_.get() == NULL);
  DCHECK_EQ(0, filtered_read_buffer_len_);

  *bytes_read = 0;

  // Skip Filter if not present.
  if (!filter_.get()) {
    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
  } else {
    // Save the caller's buffers while we do IO
    // in the filter's buffers.
    filtered_read_buffer_ = buf;
    filtered_read_buffer_len_ = buf_size;

    if (ReadFilteredData(bytes_read)) {
      rv = true;   // We have data to return.

      // It is fine to call DoneReading even if ReadFilteredData receives 0
      // bytes from the net, but we avoid making that call if we know for
      // sure that's the case (ReadRawDataHelper path).
      if (*bytes_read == 0)
        DoneReading();
    } else {
      rv = false;  // Error, or a new IO is pending.
    }
  }
  // A successful zero-byte read is EOF: finish the job with a success status.
  if (rv && *bytes_read == 0)
    NotifyDone(URLRequestStatus());
  return rv;
}
    104 
    105 void URLRequestJob::StopCaching() {
    106   // Nothing to do here.
    107 }
    108 
    109 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
    110   // Most job types don't send request headers.
    111   return false;
    112 }
    113 
    114 LoadState URLRequestJob::GetLoadState() const {
    115   return LOAD_STATE_IDLE;
    116 }
    117 
    118 UploadProgress URLRequestJob::GetUploadProgress() const {
    119   return UploadProgress();
    120 }
    121 
    122 bool URLRequestJob::GetCharset(std::string* charset) {
    123   return false;
    124 }
    125 
    126 void URLRequestJob::GetResponseInfo(HttpResponseInfo* info) {
    127 }
    128 
    129 void URLRequestJob::GetLoadTimingInfo(LoadTimingInfo* load_timing_info) const {
    130   // Only certain request types return more than just request start times.
    131 }
    132 
    133 bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
    134   return false;
    135 }
    136 
    137 Filter* URLRequestJob::SetupFilter() const {
    138   return NULL;
    139 }
    140 
    141 bool URLRequestJob::IsRedirectResponse(GURL* location,
    142                                        int* http_status_code) {
    143   // For non-HTTP jobs, headers will be null.
    144   HttpResponseHeaders* headers = request_->response_headers();
    145   if (!headers)
    146     return false;
    147 
    148   std::string value;
    149   if (!headers->IsRedirect(&value))
    150     return false;
    151 
    152   *location = request_->url().Resolve(value);
    153   *http_status_code = headers->response_code();
    154   return true;
    155 }
    156 
    157 bool URLRequestJob::IsSafeRedirect(const GURL& location) {
    158   return true;
    159 }
    160 
    161 bool URLRequestJob::NeedsAuth() {
    162   return false;
    163 }
    164 
    165 void URLRequestJob::GetAuthChallengeInfo(
    166     scoped_refptr<AuthChallengeInfo>* auth_info) {
    167   // This will only be called if NeedsAuth() returns true, in which
    168   // case the derived class should implement this!
    169   NOTREACHED();
    170 }
    171 
    172 void URLRequestJob::SetAuth(const AuthCredentials& credentials) {
    173   // This will only be called if NeedsAuth() returns true, in which
    174   // case the derived class should implement this!
    175   NOTREACHED();
    176 }
    177 
    178 void URLRequestJob::CancelAuth() {
    179   // This will only be called if NeedsAuth() returns true, in which
    180   // case the derived class should implement this!
    181   NOTREACHED();
    182 }
    183 
    184 void URLRequestJob::ContinueWithCertificate(
    185     X509Certificate* client_cert) {
    186   // The derived class should implement this!
    187   NOTREACHED();
    188 }
    189 
    190 void URLRequestJob::ContinueDespiteLastError() {
    191   // Implementations should know how to recover from errors they generate.
    192   // If this code was reached, we are trying to recover from an error that
    193   // we don't know how to recover from.
    194   NOTREACHED();
    195 }
    196 
// Resumes a redirect that the delegate previously deferred in
// NotifyHeadersComplete (the defer_redirect path).
void URLRequestJob::FollowDeferredRedirect() {
  // A status code of -1 means no redirect was ever deferred.
  DCHECK(deferred_redirect_status_code_ != -1);

  // NOTE: deferred_redirect_url_ may be invalid, and attempting to redirect to
  // such an URL will fail inside FollowRedirect.  The DCHECK above asserts
  // that we called OnReceivedRedirect.

  // It is also possible that FollowRedirect will drop the last reference to
  // this job, so we need to reset our members before calling it.

  SetUnblockedOnDelegate();

  // Copy the deferred state to locals, then clear the members BEFORE
  // FollowRedirect, which may delete |this|.
  GURL redirect_url = deferred_redirect_url_;
  int redirect_status_code = deferred_redirect_status_code_;

  deferred_redirect_url_ = GURL();
  deferred_redirect_status_code_ = -1;

  FollowRedirect(redirect_url, redirect_status_code);
}
    217 
    218 bool URLRequestJob::GetMimeType(std::string* mime_type) const {
    219   return false;
    220 }
    221 
    222 int URLRequestJob::GetResponseCode() const {
    223   return -1;
    224 }
    225 
    226 HostPortPair URLRequestJob::GetSocketAddress() const {
    227   return HostPortPair();
    228 }
    229 
    230 void URLRequestJob::OnSuspend() {
    231   Kill();
    232 }
    233 
    234 void URLRequestJob::NotifyURLRequestDestroyed() {
    235 }
    236 
    237 URLRequestJob::~URLRequestJob() {
    238   base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
    239   if (power_monitor)
    240     power_monitor->RemoveObserver(this);
    241 }
    242 
    243 void URLRequestJob::NotifyCertificateRequested(
    244     SSLCertRequestInfo* cert_request_info) {
    245   if (!request_)
    246     return;  // The request was destroyed, so there is no more work to do.
    247 
    248   request_->NotifyCertificateRequested(cert_request_info);
    249 }
    250 
    251 void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info,
    252                                               bool fatal) {
    253   if (!request_)
    254     return;  // The request was destroyed, so there is no more work to do.
    255 
    256   request_->NotifySSLCertificateError(ssl_info, fatal);
    257 }
    258 
    259 bool URLRequestJob::CanGetCookies(const CookieList& cookie_list) const {
    260   if (!request_)
    261     return false;  // The request was destroyed, so there is no more work to do.
    262 
    263   return request_->CanGetCookies(cookie_list);
    264 }
    265 
    266 bool URLRequestJob::CanSetCookie(const std::string& cookie_line,
    267                                  CookieOptions* options) const {
    268   if (!request_)
    269     return false;  // The request was destroyed, so there is no more work to do.
    270 
    271   return request_->CanSetCookie(cookie_line, options);
    272 }
    273 
    274 bool URLRequestJob::CanEnablePrivacyMode() const {
    275   if (!request_)
    276     return false;  // The request was destroyed, so there is no more work to do.
    277 
    278   return request_->CanEnablePrivacyMode();
    279 }
    280 
// Called by the subclass once response headers (or their non-HTTP
// equivalent) are ready.  Handles redirects and auth challenges before
// announcing the response to the URLRequest's delegate.
void URLRequestJob::NotifyHeadersComplete() {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // Only the first headers-complete notification is delivered.
  if (has_handled_response_)
    return;

  DCHECK(!request_->status().is_io_pending());

  // Initialize to the current time, and let the subclass optionally override
  // the time stamps if it has that information.  The default request_time is
  // set by URLRequest before it calls our Start method.
  request_->response_info_.response_time = base::Time::Now();
  GetResponseInfo(&request_->response_info_);

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (request_)
    request_->OnHeadersComplete();

  GURL new_location;
  int http_status_code;
  if (IsRedirectResponse(&new_location, &http_status_code)) {
    // NOTE(review): |request_| is dereferenced here without a re-check after
    // OnHeadersComplete above -- presumably OnHeadersComplete cannot detach
    // the request; confirm before relying on it.
    const GURL& url = request_->url();

    // Move the reference fragment of the old location to the new one if the
    // new one has none. This duplicates mozilla's behavior.
    if (url.is_valid() && url.has_ref() && !new_location.has_ref()) {
      GURL::Replacements replacements;
      // Reference the |ref| directly out of the original URL to avoid a
      // malloc.
      replacements.SetRef(url.spec().data(),
                          url.parsed_for_possibly_invalid_spec().ref);
      new_location = new_location.ReplaceComponents(replacements);
    }

    bool defer_redirect = false;
    request_->NotifyReceivedRedirect(new_location, &defer_redirect);

    // Ensure that the request wasn't detached or destroyed in
    // NotifyReceivedRedirect
    if (!request_ || !request_->has_delegate())
      return;

    // If we were not cancelled, then maybe follow the redirect.
    if (request_->status().is_success()) {
      if (defer_redirect) {
        // Stash the redirect; FollowDeferredRedirect will resume it later.
        deferred_redirect_url_ = new_location;
        deferred_redirect_status_code_ = http_status_code;
        SetBlockedOnDelegate();
      } else {
        FollowRedirect(new_location, http_status_code);
      }
      return;
    }
  } else if (NeedsAuth()) {
    scoped_refptr<AuthChallengeInfo> auth_info;
    GetAuthChallengeInfo(&auth_info);
    // Need to check for a NULL auth_info because the server may have failed
    // to send a challenge with the 401 response.
    if (auth_info.get()) {
      request_->NotifyAuthRequired(auth_info.get());
      // Wait for SetAuth or CancelAuth to be called.
      return;
    }
  }

  has_handled_response_ = true;
  if (request_->status().is_success())
    filter_.reset(SetupFilter());

  // Without a filter the Content-Length header (when present) gives the
  // expected size; with a filter the decoded size is unknown.
  if (!filter_.get()) {
    std::string content_length;
    request_->GetResponseHeaderByName("content-length", &content_length);
    if (!content_length.empty())
      base::StringToInt64(content_length, &expected_content_size_);
  }

  request_->NotifyResponseStarted();
}
    366 
// Called (synchronously or asynchronously) once a raw read has finished.
// Accounts for the bytes, optionally runs them through the filter, and
// forwards the completion to the URLRequest's delegate.
void URLRequestJob::NotifyReadComplete(int bytes_read) {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
  // unit_tests have been fixed to not trip this.
  //DCHECK(!request_->status().is_io_pending());

  // The headers should be complete before reads complete
  DCHECK(has_handled_response_);

  // Update the byte counters and release |raw_read_buffer_|.
  OnRawReadComplete(bytes_read);

  // Don't notify if we had an error.
  if (!request_->status().is_success())
    return;

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (filter_.get()) {
    // Tell the filter that it has more data
    FilteredDataRead(bytes_read);

    // Filter the data.
    int filter_bytes_read = 0;
    if (ReadFilteredData(&filter_bytes_read)) {
      // Zero filtered bytes on success means EOF; let the job finalize.
      if (!filter_bytes_read)
        DoneReading();
      request_->NotifyReadCompleted(filter_bytes_read);
    }
  } else {
    request_->NotifyReadCompleted(bytes_read);
  }
  DVLOG(1) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
}
    411 
    412 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
    413   DCHECK(!has_handled_response_);
    414   has_handled_response_ = true;
    415   if (request_) {
    416     request_->set_status(status);
    417     request_->NotifyResponseStarted();
    418   }
    419 }
    420 
// Marks the job done exactly once, propagates the final status to the
// request (never overwriting an earlier failure), and schedules the
// delegate notification asynchronously to avoid re-entering the delegate.
void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
  DCHECK(!done_) << "Job sending done notification twice";
  if (done_)
    return;
  done_ = true;

  // Unless there was an error, we should have at least tried to handle
  // the response before getting here.
  DCHECK(has_handled_response_ || !status.is_success());

  // As with NotifyReadComplete, we need to take care to notice if we were
  // destroyed during a delegate callback.
  if (request_) {
    request_->set_is_pending(false);
    // With async IO, it's quite possible to have a few outstanding
    // requests.  We could receive a request to Cancel, followed shortly
    // by a successful IO.  For tracking the status(), once there is
    // an error, we do not change the status back to success.  To
    // enforce this, only set the status if the job is so far
    // successful.
    if (request_->status().is_success()) {
      if (status.status() == URLRequestStatus::FAILED) {
        request_->net_log().AddEventWithNetErrorCode(NetLog::TYPE_FAILED,
                                                     status.error());
      }
      request_->set_status(status);
    }
  }

  // Complete this notification later.  This prevents us from re-entering the
  // delegate if we're done because of a synchronous call.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestJob::CompleteNotifyDone,
                 weak_factory_.GetWeakPtr()));
}
    457 
    458 void URLRequestJob::CompleteNotifyDone() {
    459   // Check if we should notify the delegate that we're done because of an error.
    460   if (request_ &&
    461       !request_->status().is_success() &&
    462       request_->has_delegate()) {
    463     // We report the error differently depending on whether we've called
    464     // OnResponseStarted yet.
    465     if (has_handled_response_) {
    466       // We signal the error by calling OnReadComplete with a bytes_read of -1.
    467       request_->NotifyReadCompleted(-1);
    468     } else {
    469       has_handled_response_ = true;
    470       request_->NotifyResponseStarted();
    471     }
    472   }
    473 }
    474 
    475 void URLRequestJob::NotifyCanceled() {
    476   if (!done_) {
    477     NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED, ERR_ABORTED));
    478   }
    479 }
    480 
    481 void URLRequestJob::NotifyRestartRequired() {
    482   DCHECK(!has_handled_response_);
    483   if (GetStatus().status() != URLRequestStatus::CANCELED)
    484     request_->Restart();
    485 }
    486 
    487 void URLRequestJob::SetBlockedOnDelegate() {
    488   request_->SetBlockedOnDelegate();
    489 }
    490 
    491 void URLRequestJob::SetUnblockedOnDelegate() {
    492   request_->SetUnblockedOnDelegate();
    493 }
    494 
    495 bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
    496                                 int *bytes_read) {
    497   DCHECK(bytes_read);
    498   *bytes_read = 0;
    499   return true;
    500 }
    501 
    502 void URLRequestJob::DoneReading() {
    503   // Do nothing.
    504 }
    505 
    506 void URLRequestJob::FilteredDataRead(int bytes_read) {
    507   DCHECK(filter_.get());  // don't add data if there is no filter
    508   filter_->FlushStreamBuffer(bytes_read);
    509 }
    510 
// Pulls raw data through the attached filter into |filtered_read_buffer_|.
// Returns true when output (or EOF) is available synchronously, false on
// error or when the underlying read is pending.  May recurse while the
// filter needs more input before it can produce any output.
bool URLRequestJob::ReadFilteredData(int* bytes_read) {
  DCHECK(filter_.get());  // don't add data if there is no filter
  DCHECK(filtered_read_buffer_.get() !=
         NULL);                             // we need to have a buffer to fill
  DCHECK_GT(filtered_read_buffer_len_, 0);  // sanity check
  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // sanity check
  DCHECK(raw_read_buffer_.get() ==
         NULL);  // there should be no raw read buffer yet

  bool rv = false;
  *bytes_read = 0;

  // A finished job reports EOF immediately.
  if (is_done())
    return true;

  if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
    // We don't have any raw data to work with, so
    // read from the socket.
    // NOTE: despite its name, |filtered_data_read| holds the raw
    // (pre-filter) byte count produced by ReadRawDataForFilter.
    int filtered_data_read;
    if (ReadRawDataForFilter(&filtered_data_read)) {
      if (filtered_data_read > 0) {
        filter_->FlushStreamBuffer(filtered_data_read);  // Give data to filter.
      } else {
        return true;  // EOF
      }
    } else {
      return false;  // IO Pending (or error)
    }
  }

  if ((filter_->stream_data_len() || filter_needs_more_output_space_)
      && !is_done()) {
    // Get filtered data.
    int filtered_data_len = filtered_read_buffer_len_;
    Filter::FilterStatus status;
    int output_buffer_size = filtered_data_len;
    status = filter_->ReadData(filtered_read_buffer_->data(),
                               &filtered_data_len);

    if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
      // filter_needs_more_output_space_ was mistaken... there are no more bytes
      // and we should have at least tried to fill up the filter's input buffer.
      // Correct the state, and try again.
      filter_needs_more_output_space_ = false;
      return ReadFilteredData(bytes_read);
    }

    switch (status) {
      case Filter::FILTER_DONE: {
        filter_needs_more_output_space_ = false;
        *bytes_read = filtered_data_len;
        postfilter_bytes_read_ += filtered_data_len;
        rv = true;
        break;
      }
      case Filter::FILTER_NEED_MORE_DATA: {
        // A completely-full output buffer may mean the filter is still
        // holding converted data; remember to drain it on the next call.
        filter_needs_more_output_space_ =
            (filtered_data_len == output_buffer_size);
        // We have finished filtering all data currently in the buffer.
        // There might be some space left in the output buffer. One can
        // consider reading more data from the stream to feed the filter
        // and filling up the output buffer. This leads to more complicated
        // buffer management and data notification mechanisms.
        // We can revisit this issue if there is a real perf need.
        if (filtered_data_len > 0) {
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
        } else {
          // Read again since we haven't received enough data yet (e.g., we may
          // not have a complete gzip header yet)
          rv = ReadFilteredData(bytes_read);
        }
        break;
      }
      case Filter::FILTER_OK: {
        filter_needs_more_output_space_ =
            (filtered_data_len == output_buffer_size);
        *bytes_read = filtered_data_len;
        postfilter_bytes_read_ += filtered_data_len;
        rv = true;
        break;
      }
      case Filter::FILTER_ERROR: {
        DVLOG(1) << __FUNCTION__ << "() "
                 << "\"" << (request_ ? request_->url().spec() : "???") << "\""
                 << " Filter Error";
        filter_needs_more_output_space_ = false;
        NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                   ERR_CONTENT_DECODING_FAILED));
        rv = false;
        break;
      }
      default: {
        NOTREACHED();
        filter_needs_more_output_space_ = false;
        rv = false;
        break;
      }
    }
    DVLOG(2) << __FUNCTION__ << "() "
             << "\"" << (request_ ? request_->url().spec() : "???") << "\""
             << " rv = " << rv
             << " post bytes read = " << filtered_data_len
             << " pre total = " << prefilter_bytes_read_
             << " post total = "
             << postfilter_bytes_read_;
    // If logging all bytes is enabled, log the filtered bytes read.
    if (rv && request() && request()->net_log().IsLoggingBytes() &&
        filtered_data_len > 0) {
      request()->net_log().AddByteTransferEvent(
          NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ,
          filtered_data_len, filtered_read_buffer_->data());
    }
  } else {
    // we are done, or there is no data left.
    rv = true;
  }

  if (rv) {
    // When we successfully finished a read, we no longer need to
    // save the caller's buffers. Release our reference.
    filtered_read_buffer_ = NULL;
    filtered_read_buffer_len_ = 0;
  }
  return rv;
}
    638 
    639 const URLRequestStatus URLRequestJob::GetStatus() {
    640   if (request_)
    641     return request_->status();
    642   // If the request is gone, we must be cancelled.
    643   return URLRequestStatus(URLRequestStatus::CANCELED,
    644                           ERR_ABORTED);
    645 }
    646 
    647 void URLRequestJob::SetStatus(const URLRequestStatus &status) {
    648   if (request_)
    649     request_->set_status(status);
    650 }
    651 
    652 bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
    653   bool rv = false;
    654 
    655   DCHECK(bytes_read);
    656   DCHECK(filter_.get());
    657 
    658   *bytes_read = 0;
    659 
    660   // Get more pre-filtered data if needed.
    661   // TODO(mbelshe): is it possible that the filter needs *MORE* data
    662   //    when there is some data already in the buffer?
    663   if (!filter_->stream_data_len() && !is_done()) {
    664     IOBuffer* stream_buffer = filter_->stream_buffer();
    665     int stream_buffer_size = filter_->stream_buffer_size();
    666     rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
    667   }
    668   return rv;
    669 }
    670 
// Wraps ReadRawData: records |raw_read_buffer_| for use by the
// asynchronous completion path, and on synchronous completion (success or
// failure) performs the byte accounting itself.
bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
                                      int* bytes_read) {
  DCHECK(!request_->status().is_io_pending());
  DCHECK(raw_read_buffer_.get() == NULL);

  // Keep a pointer to the read buffer, so we have access to it in the
  // OnRawReadComplete() callback in the event that the read completes
  // asynchronously.
  raw_read_buffer_ = buf;
  bool rv = ReadRawData(buf, buf_size, bytes_read);

  // io_pending means the read will finish later via NotifyReadComplete;
  // otherwise it finished right here.
  if (!request_->status().is_io_pending()) {
    // If |filter_| is NULL, and logging all bytes is enabled, log the raw
    // bytes read.
    if (!filter_.get() && request() && request()->net_log().IsLoggingBytes() &&
        *bytes_read > 0) {
      request()->net_log().AddByteTransferEvent(
          NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
          *bytes_read, raw_read_buffer_->data());
    }

    // If the read completes synchronously, either success or failure,
    // invoke the OnRawReadComplete callback so we can account for the
    // completed read.
    OnRawReadComplete(*bytes_read);
  }
  return rv;
}
    699 
    700 void URLRequestJob::FollowRedirect(const GURL& location, int http_status_code) {
    701   int rv = request_->Redirect(location, http_status_code);
    702   if (rv != OK)
    703     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
    704 }
    705 
    706 void URLRequestJob::OnRawReadComplete(int bytes_read) {
    707   DCHECK(raw_read_buffer_.get());
    708   if (bytes_read > 0) {
    709     RecordBytesRead(bytes_read);
    710   }
    711   raw_read_buffer_ = NULL;
    712 }
    713 
    714 void URLRequestJob::RecordBytesRead(int bytes_read) {
    715   filter_input_byte_count_ += bytes_read;
    716   prefilter_bytes_read_ += bytes_read;
    717   if (!filter_.get())
    718     postfilter_bytes_read_ += bytes_read;
    719   DVLOG(2) << __FUNCTION__ << "() "
    720            << "\"" << (request_ ? request_->url().spec() : "???") << "\""
    721            << " pre bytes read = " << bytes_read
    722            << " pre total = " << prefilter_bytes_read_
    723            << " post total = " << postfilter_bytes_read_;
    724   UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
    725   if (network_delegate_)
    726     network_delegate_->NotifyRawBytesRead(*request_, bytes_read);
    727 }
    728 
    729 bool URLRequestJob::FilterHasData() {
    730     return filter_.get() && filter_->stream_data_len();
    731 }
    732 
    733 void URLRequestJob::UpdatePacketReadTimes() {
    734 }
    735 
    736 }  // namespace net
    737