      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "net/url_request/url_request_http_job.h"
      6 
      7 #include "base/base_switches.h"
      8 #include "base/bind.h"
      9 #include "base/bind_helpers.h"
     10 #include "base/command_line.h"
     11 #include "base/compiler_specific.h"
     12 #include "base/file_version_info.h"
     13 #include "base/message_loop/message_loop.h"
     14 #include "base/metrics/field_trial.h"
     15 #include "base/metrics/histogram.h"
     16 #include "base/rand_util.h"
     17 #include "base/strings/string_util.h"
     18 #include "base/time/time.h"
     19 #include "net/base/filter.h"
     20 #include "net/base/host_port_pair.h"
     21 #include "net/base/load_flags.h"
     22 #include "net/base/mime_util.h"
     23 #include "net/base/net_errors.h"
     24 #include "net/base/net_util.h"
     25 #include "net/base/network_delegate.h"
     26 #include "net/base/sdch_manager.h"
     27 #include "net/cert/cert_status_flags.h"
     28 #include "net/cookies/cookie_monster.h"
     29 #include "net/http/http_network_session.h"
     30 #include "net/http/http_request_headers.h"
     31 #include "net/http/http_response_headers.h"
     32 #include "net/http/http_response_info.h"
     33 #include "net/http/http_status_code.h"
     34 #include "net/http/http_transaction.h"
     35 #include "net/http/http_transaction_delegate.h"
     36 #include "net/http/http_transaction_factory.h"
     37 #include "net/http/http_util.h"
     38 #include "net/ssl/ssl_cert_request_info.h"
     39 #include "net/ssl/ssl_config_service.h"
     40 #include "net/url_request/fraudulent_certificate_reporter.h"
     41 #include "net/url_request/http_user_agent_settings.h"
     42 #include "net/url_request/url_request.h"
     43 #include "net/url_request/url_request_context.h"
     44 #include "net/url_request/url_request_error_job.h"
     45 #include "net/url_request/url_request_job_factory.h"
     46 #include "net/url_request/url_request_redirect_job.h"
     47 #include "net/url_request/url_request_throttler_header_adapter.h"
     48 #include "net/url_request/url_request_throttler_manager.h"
     49 
     50 static const char kAvailDictionaryHeader[] = "Avail-Dictionary";
     51 
     52 namespace net {
     53 
     54 class URLRequestHttpJob::HttpFilterContext : public FilterContext {
     55  public:
     56   explicit HttpFilterContext(URLRequestHttpJob* job);
     57   virtual ~HttpFilterContext();
     58 
     59   // FilterContext implementation.
     60   virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
     61   virtual bool GetURL(GURL* gurl) const OVERRIDE;
     62   virtual base::Time GetRequestTime() const OVERRIDE;
     63   virtual bool IsCachedContent() const OVERRIDE;
     64   virtual bool IsDownload() const OVERRIDE;
     65   virtual bool IsSdchResponse() const OVERRIDE;
     66   virtual int64 GetByteReadCount() const OVERRIDE;
     67   virtual int GetResponseCode() const OVERRIDE;
     68   virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;
     69 
      70   // Allows the filter context to be reset for a response we assumed was SDCH
      71   // encoded, when an explicit HTTP header (X-Sdch-Encode: 0) says otherwise.
     72   void ResetSdchResponseToFalse();
     73 
     74  private:
     75   URLRequestHttpJob* job_;
     76 
     77   DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
     78 };
     79 
     80 class URLRequestHttpJob::HttpTransactionDelegateImpl
     81     : public HttpTransactionDelegate {
     82  public:
     83   HttpTransactionDelegateImpl(URLRequest* request,
     84                               NetworkDelegate* network_delegate)
     85       : request_(request),
     86         network_delegate_(network_delegate),
     87         state_(NONE_ACTIVE) {}
     88   virtual ~HttpTransactionDelegateImpl() { OnDetachRequest(); }
     89   void OnDetachRequest() {
     90     if (!IsRequestAndDelegateActive())
     91       return;
     92     NotifyStateChange(NetworkDelegate::REQUEST_WAIT_STATE_RESET);
     93     state_ = NONE_ACTIVE;
     94     request_ = NULL;
     95   }
     96   virtual void OnCacheActionStart() OVERRIDE {
     97     HandleStateChange(NONE_ACTIVE,
     98                       CACHE_ACTIVE,
     99                       NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START);
    100   }
    101   virtual void OnCacheActionFinish() OVERRIDE {
    102     HandleStateChange(CACHE_ACTIVE,
    103                       NONE_ACTIVE,
    104                       NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH);
    105   }
    106   virtual void OnNetworkActionStart() OVERRIDE {
    107     HandleStateChange(NONE_ACTIVE,
    108                       NETWORK_ACTIVE,
    109                       NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START);
    110   }
    111   virtual void OnNetworkActionFinish() OVERRIDE {
    112     HandleStateChange(NETWORK_ACTIVE,
    113                       NONE_ACTIVE,
    114                       NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH);
    115   }
    116 
    117  private:
    118   enum State {
    119     NONE_ACTIVE,
    120     CACHE_ACTIVE,
    121     NETWORK_ACTIVE
    122   };
    123 
    124   // Returns true if this object still has an active request and network
    125   // delegate.
    126   bool IsRequestAndDelegateActive() const {
    127     return request_ && network_delegate_;
    128   }
    129 
    130   // Notifies the |network_delegate_| object of a change in the state of the
    131   // |request_| to the state given by the |request_wait_state| argument.
    132   void NotifyStateChange(NetworkDelegate::RequestWaitState request_wait_state) {
    133     network_delegate_->NotifyRequestWaitStateChange(*request_,
    134                                                     request_wait_state);
    135   }
    136 
     137   // Checks that the request and delegate are still active, changes |state_| from
     138   // |expected_state| to |next_state|, and then notifies the network delegate of
     139   // the |request_wait_state| change.
    140   void HandleStateChange(State expected_state,
    141                          State next_state,
    142                          NetworkDelegate::RequestWaitState request_wait_state) {
    143     if (!IsRequestAndDelegateActive())
    144       return;
    145     DCHECK_EQ(expected_state, state_);
    146     state_ = next_state;
    147     NotifyStateChange(request_wait_state);
    148   }
    149 
    150   URLRequest* request_;
    151   NetworkDelegate* network_delegate_;
    152   // Internal state tracking, for sanity checking.
    153   State state_;
    154 
    155   DISALLOW_COPY_AND_ASSIGN(HttpTransactionDelegateImpl);
    156 };
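// Illustrative summary of the delegate above (only the states and
// NetworkDelegate notifications that appear in this class, with the
// REQUEST_WAIT_STATE_* values abbreviated):
//
//   NONE_ACTIVE    --OnCacheActionStart()-->    CACHE_ACTIVE    (CACHE_START)
//   CACHE_ACTIVE   --OnCacheActionFinish()-->   NONE_ACTIVE     (CACHE_FINISH)
//   NONE_ACTIVE    --OnNetworkActionStart()-->  NETWORK_ACTIVE  (NETWORK_START)
//   NETWORK_ACTIVE --OnNetworkActionFinish()--> NONE_ACTIVE     (NETWORK_FINISH)
//
// OnDetachRequest() forces the state back to NONE_ACTIVE, reports
// REQUEST_WAIT_STATE_RESET, and clears |request_|, after which every
// subsequent notification becomes a no-op.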
    157 
    158 URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    159     : job_(job) {
    160   DCHECK(job_);
    161 }
    162 
    163 URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
    164 }
    165 
    166 bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    167     std::string* mime_type) const {
    168   return job_->GetMimeType(mime_type);
    169 }
    170 
    171 bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
    172   if (!job_->request())
    173     return false;
    174   *gurl = job_->request()->url();
    175   return true;
    176 }
    177 
    178 base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
    179   return job_->request() ? job_->request()->request_time() : base::Time();
    180 }
    181 
    182 bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
    183   return job_->is_cached_content_;
    184 }
    185 
    186 bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
    187   return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
    188 }
    189 
    190 void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
    191   DCHECK(job_->sdch_dictionary_advertised_);
    192   job_->sdch_dictionary_advertised_ = false;
    193 }
    194 
    195 bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
    196   return job_->sdch_dictionary_advertised_;
    197 }
    198 
    199 int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
    200   return job_->filter_input_byte_count();
    201 }
    202 
    203 int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
    204   return job_->GetResponseCode();
    205 }
    206 
    207 void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    208     StatisticSelector statistic) const {
    209   job_->RecordPacketStats(statistic);
    210 }
    211 
    212 // TODO(darin): make sure the port blocking code is not lost
    213 // static
    214 URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
    215                                           NetworkDelegate* network_delegate,
    216                                           const std::string& scheme) {
    217   DCHECK(scheme == "http" || scheme == "https");
    218 
    219   if (!request->context()->http_transaction_factory()) {
    220     NOTREACHED() << "requires a valid context";
    221     return new URLRequestErrorJob(
    222         request, network_delegate, ERR_INVALID_ARGUMENT);
    223   }
    224 
    225   GURL redirect_url;
    226   if (request->GetHSTSRedirect(&redirect_url)) {
    227     return new URLRequestRedirectJob(
    228         request, network_delegate, redirect_url,
    229         // Use status code 307 to preserve the method, so POST requests work.
    230         URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
    231   }
    232   return new URLRequestHttpJob(request,
    233                                network_delegate,
    234                                request->context()->http_user_agent_settings());
    235 }
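// For example, an http:// request to a host with an HSTS entry never reaches
// URLRequestHttpJob at all: GetHSTSRedirect() supplies the https:// URL and the
// synthesized 307 preserves the method and body of a POST. (Which hosts qualify
// depends on whatever the TransportSecurityState holds at the time.)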
    236 
    237 URLRequestHttpJob::URLRequestHttpJob(
    238     URLRequest* request,
    239     NetworkDelegate* network_delegate,
    240     const HttpUserAgentSettings* http_user_agent_settings)
    241     : URLRequestJob(request, network_delegate),
    242       priority_(DEFAULT_PRIORITY),
    243       response_info_(NULL),
    244       response_cookies_save_index_(0),
    245       proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
    246       server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
    247       start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
    248                                  base::Unretained(this))),
    249       notify_before_headers_sent_callback_(
    250           base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
    251                      base::Unretained(this))),
    252       read_in_progress_(false),
    253       throttling_entry_(NULL),
    254       sdch_dictionary_advertised_(false),
    255       sdch_test_activated_(false),
    256       sdch_test_control_(false),
    257       is_cached_content_(false),
    258       request_creation_time_(),
    259       packet_timing_enabled_(false),
    260       done_(false),
    261       bytes_observed_in_packets_(0),
    262       request_time_snapshot_(),
    263       final_packet_time_(),
    264       filter_context_(new HttpFilterContext(this)),
    265       weak_factory_(this),
    266       on_headers_received_callback_(
    267           base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
    268                      base::Unretained(this))),
    269       awaiting_callback_(false),
    270       http_transaction_delegate_(
    271           new HttpTransactionDelegateImpl(request, network_delegate)),
    272       http_user_agent_settings_(http_user_agent_settings) {
    273   URLRequestThrottlerManager* manager = request->context()->throttler_manager();
    274   if (manager)
    275     throttling_entry_ = manager->RegisterRequestUrl(request->url());
    276 
    277   ResetTimer();
    278 }
    279 
    280 URLRequestHttpJob::~URLRequestHttpJob() {
    281   CHECK(!awaiting_callback_);
    282 
    283   DCHECK(!sdch_test_control_ || !sdch_test_activated_);
    284   if (!is_cached_content_) {
    285     if (sdch_test_control_)
    286       RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    287     if (sdch_test_activated_)
    288       RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
    289   }
    290   // Make sure SDCH filters are told to emit histogram data while
    291   // filter_context_ is still alive.
    292   DestroyFilters();
    293 
    294   if (sdch_dictionary_url_.is_valid()) {
    295     // Prior to reaching the destructor, request_ has been set to a NULL
    296     // pointer, so request_->url() is no longer valid in the destructor, and we
    297     // use an alternate copy |request_info_.url|.
    298     SdchManager* manager = SdchManager::Global();
     299     // To be extra safe, since this runs at a "different time" from when we
     300     // decided to fetch the dictionary, validate that an SdchManager is still
     301     // available. At shutdown, care is taken not to delete this globally useful
     302     // instance "too soon," so this check is purely defensive coding: if the
     303     // system is shutting down and the manager was deleted ahead of time, we
     304     // simply skip the dictionary fetch.
    305     if (manager)  // Defensive programming.
    306       manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
    307   }
    308   DoneWithRequest(ABORTED);
    309 }
    310 
    311 void URLRequestHttpJob::SetPriority(RequestPriority priority) {
    312   priority_ = priority;
    313   if (transaction_)
    314     transaction_->SetPriority(priority_);
    315 }
    316 
    317 void URLRequestHttpJob::Start() {
    318   DCHECK(!transaction_.get());
    319 
    320   // URLRequest::SetReferrer ensures that we do not send username and password
    321   // fields in the referrer.
    322   GURL referrer(request_->referrer());
    323 
    324   request_info_.url = request_->url();
    325   request_info_.method = request_->method();
    326   request_info_.load_flags = request_->load_flags();
    327   request_info_.request_id = request_->identifier();
     328   // Enable privacy mode if cookie settings or load flags tell us not to send
     329   // or save cookies.
    330   bool enable_privacy_mode =
    331       (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
    332       (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
    333       CanEnablePrivacyMode();
    334   // Privacy mode could still be disabled in OnCookiesLoaded if we are going
    335   // to send previously saved cookies.
    336   request_info_.privacy_mode = enable_privacy_mode ?
    337       kPrivacyModeEnabled : kPrivacyModeDisabled;
    338 
     339   // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
     340   // from overriding headers that are controlled using other means. Otherwise a
     341   // plugin could set a referrer even though sending the referrer is inhibited.
    342   request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);
    343 
    344   // Our consumer should have made sure that this is a safe referrer.  See for
    345   // instance WebCore::FrameLoader::HideReferrer.
    346   if (referrer.is_valid()) {
    347     request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
    348                                           referrer.spec());
    349   }
    350 
    351   request_info_.extra_headers.SetHeaderIfMissing(
    352       HttpRequestHeaders::kUserAgent,
    353       http_user_agent_settings_ ?
    354           http_user_agent_settings_->GetUserAgent(request_->url()) :
    355           EmptyString());
    356 
    357   AddExtraHeaders();
    358   AddCookieHeaderAndStart();
    359 }
    360 
    361 void URLRequestHttpJob::Kill() {
    362   http_transaction_delegate_->OnDetachRequest();
    363 
    364   if (!transaction_.get())
    365     return;
    366 
    367   weak_factory_.InvalidateWeakPtrs();
    368   DestroyTransaction();
    369   URLRequestJob::Kill();
    370 }
    371 
    372 void URLRequestHttpJob::NotifyHeadersComplete() {
    373   DCHECK(!response_info_);
    374 
    375   response_info_ = transaction_->GetResponseInfo();
    376 
     377   // Save this boolean, as we'll need it at destruction time, and filters may
     378   // also need it.
    379   is_cached_content_ = response_info_->was_cached;
    380 
    381   if (!is_cached_content_ && throttling_entry_.get()) {
    382     URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    383     throttling_entry_->UpdateWithResponse(request_info_.url.host(),
    384                                           &response_adapter);
    385   }
    386 
    387   // The ordering of these calls is not important.
    388   ProcessStrictTransportSecurityHeader();
    389   ProcessPublicKeyPinsHeader();
    390 
    391   if (SdchManager::Global() &&
    392       SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    393     const std::string name = "Get-Dictionary";
    394     std::string url_text;
    395     void* iter = NULL;
     396     // TODO(jar): We should not fetch dictionaries the first time they are
     397     // seen, but rather wait until we can justify their usefulness.
     398     // For now, we only fetch the first dictionary, which at least requires
     399     // multiple suggestions before we get additional ones for this site.
     400     // Eventually we should wait until a dictionary is requested several times
     401     // before we even download it (so that we don't waste memory or bandwidth).
    402     if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
    403       // request_->url() won't be valid in the destructor, so we use an
    404       // alternate copy.
    405       DCHECK_EQ(request_->url(), request_info_.url);
    406       // Resolve suggested URL relative to request url.
    407       sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    408     }
    409   }
    410 
    411   // The HTTP transaction may be restarted several times for the purposes
    412   // of sending authorization information. Each time it restarts, we get
    413   // notified of the headers completion so that we can update the cookie store.
    414   if (transaction_->IsReadyToRestartForAuth()) {
    415     DCHECK(!response_info_->auth_challenge.get());
    416     // TODO(battre): This breaks the webrequest API for
    417     // URLRequestTestHTTP.BasicAuthWithCookies
    418     // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    419     // occurs.
    420     RestartTransactionWithAuth(AuthCredentials());
    421     return;
    422   }
    423 
    424   URLRequestJob::NotifyHeadersComplete();
    425 }
    426 
    427 void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
    428   DoneWithRequest(FINISHED);
    429   URLRequestJob::NotifyDone(status);
    430 }
    431 
    432 void URLRequestHttpJob::DestroyTransaction() {
    433   DCHECK(transaction_.get());
    434 
    435   DoneWithRequest(ABORTED);
    436   transaction_.reset();
    437   response_info_ = NULL;
    438   receive_headers_end_ = base::TimeTicks();
    439 }
    440 
    441 void URLRequestHttpJob::StartTransaction() {
    442   if (network_delegate()) {
    443     int rv = network_delegate()->NotifyBeforeSendHeaders(
    444         request_, notify_before_headers_sent_callback_,
    445         &request_info_.extra_headers);
    446     // If an extension blocks the request, we rely on the callback to
    447     // MaybeStartTransactionInternal().
    448     if (rv == ERR_IO_PENDING) {
    449       SetBlockedOnDelegate();
    450       return;
    451     }
    452     MaybeStartTransactionInternal(rv);
    453     return;
    454   }
    455   StartTransactionInternal();
    456 }
    457 
    458 void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
    459   SetUnblockedOnDelegate();
    460 
    461   // Check that there are no callbacks to already canceled requests.
    462   DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
    463 
    464   MaybeStartTransactionInternal(result);
    465 }
    466 
    467 void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
    468   if (result == OK) {
    469     StartTransactionInternal();
    470   } else {
    471     std::string source("delegate");
    472     request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
    473                                  NetLog::StringCallback("source", &source));
    474     NotifyCanceled();
    475     NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    476   }
    477 }
    478 
    479 void URLRequestHttpJob::StartTransactionInternal() {
    480   // NOTE: This method assumes that request_info_ is already setup properly.
    481 
    482   // If we already have a transaction, then we should restart the transaction
    483   // with auth provided by auth_credentials_.
    484 
    485   int rv;
    486 
    487   if (network_delegate()) {
    488     network_delegate()->NotifySendHeaders(
    489         request_, request_info_.extra_headers);
    490   }
    491 
    492   if (transaction_.get()) {
    493     rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    494     auth_credentials_ = AuthCredentials();
    495   } else {
    496     DCHECK(request_->context()->http_transaction_factory());
    497 
    498     rv = request_->context()->http_transaction_factory()->CreateTransaction(
    499         priority_, &transaction_, http_transaction_delegate_.get());
    500     if (rv == OK) {
    501       if (!throttling_entry_.get() ||
    502           !throttling_entry_->ShouldRejectRequest(*request_)) {
    503         rv = transaction_->Start(
    504             &request_info_, start_callback_, request_->net_log());
    505         start_time_ = base::TimeTicks::Now();
    506       } else {
    507         // Special error code for the exponential back-off module.
    508         rv = ERR_TEMPORARILY_THROTTLED;
    509       }
    510     }
    511   }
    512 
    513   if (rv == ERR_IO_PENDING)
    514     return;
    515 
    516   // The transaction started synchronously, but we need to notify the
    517   // URLRequest delegate via the message loop.
    518   base::MessageLoop::current()->PostTask(
    519       FROM_HERE,
    520       base::Bind(&URLRequestHttpJob::OnStartCompleted,
    521                  weak_factory_.GetWeakPtr(), rv));
    522 }
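// Rough control flow for starting a transaction, as implemented above:
//
//   StartTransaction()
//     -> NetworkDelegate::NotifyBeforeSendHeaders()
//          ERR_IO_PENDING -> blocked on delegate until
//                            NotifyBeforeSendHeadersCallback() runs
//     -> MaybeStartTransactionInternal(result)
//          OK   -> StartTransactionInternal()
//          else -> log TYPE_CANCELLED and NotifyStartError()
//   StartTransactionInternal()
//     -> existing transaction_: RestartWithAuth(auth_credentials_, ...)
//     -> otherwise: CreateTransaction(), then either transaction_->Start() or
//        ERR_TEMPORARILY_THROTTLED if the throttler rejects the request
//     -> a synchronous result is still delivered through OnStartCompleted()
//        via a posted task, so the URLRequest delegate is always notified
//        asynchronously.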
    523 
    524 void URLRequestHttpJob::AddExtraHeaders() {
     525   // Supply the Accept-Encoding field only if it is not already provided.
     526   // Callers should provide it themselves IF the content is known to have
     527   // restrictions on potential encodings, such as streaming multimedia.
     528   // For details see bug 47381.
    529   // TODO(jar, enal): jpeg files etc. should set up a request header if
    530   // possible. Right now it is done only by buffered_resource_loader and
    531   // simple_data_source.
    532   if (!request_info_.extra_headers.HasHeader(
    533       HttpRequestHeaders::kAcceptEncoding)) {
    534     bool advertise_sdch = SdchManager::Global() &&
    535         SdchManager::Global()->IsInSupportedDomain(request_->url());
    536     std::string avail_dictionaries;
    537     if (advertise_sdch) {
    538       SdchManager::Global()->GetAvailDictionaryList(request_->url(),
    539                                                     &avail_dictionaries);
    540 
    541       // The AllowLatencyExperiment() is only true if we've successfully done a
    542       // full SDCH compression recently in this browser session for this host.
    543       // Note that for this path, there might be no applicable dictionaries,
    544       // and hence we can't participate in the experiment.
    545       if (!avail_dictionaries.empty() &&
    546           SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
    547         // We are participating in the test (or control), and hence we'll
    548         // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
    549         // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
    550         packet_timing_enabled_ = true;
    551         if (base::RandDouble() < .01) {
    552           sdch_test_control_ = true;  // 1% probability.
    553           advertise_sdch = false;
    554         } else {
    555           sdch_test_activated_ = true;
    556         }
    557       }
    558     }
    559 
    560     // Supply Accept-Encoding headers first so that it is more likely that they
    561     // will be in the first transmitted packet.  This can sometimes make it
    562     // easier to filter and analyze the streams to assure that a proxy has not
    563     // damaged these headers.  Some proxies deliberately corrupt Accept-Encoding
    564     // headers.
    565     if (!advertise_sdch) {
    566       // Tell the server what compression formats we support (other than SDCH).
    567       request_info_.extra_headers.SetHeader(
    568           HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    569     } else {
    570       // Include SDCH in acceptable list.
    571       request_info_.extra_headers.SetHeader(
    572           HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
    573       if (!avail_dictionaries.empty()) {
    574         request_info_.extra_headers.SetHeader(
    575             kAvailDictionaryHeader,
    576             avail_dictionaries);
    577         sdch_dictionary_advertised_ = true;
    578         // Since we're tagging this transaction as advertising a dictionary,
    579         // we'll definitely employ an SDCH filter (or tentative sdch filter)
    580         // when we get a response.  When done, we'll record histograms via
    581         // SDCH_DECODE or SDCH_PASSTHROUGH.  Hence we need to record packet
    582         // arrival times.
    583         packet_timing_enabled_ = true;
    584       }
    585     }
    586   }
    587 
    588   if (http_user_agent_settings_) {
    589     // Only add default Accept-Language if the request didn't have it
    590     // specified.
    591     std::string accept_language =
    592         http_user_agent_settings_->GetAcceptLanguage();
    593     if (!accept_language.empty()) {
    594       request_info_.extra_headers.SetHeaderIfMissing(
    595           HttpRequestHeaders::kAcceptLanguage,
    596           accept_language);
    597     }
    598   }
    599 }
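// Illustrative outcome of AddExtraHeaders() (the dictionary hash and the
// Accept-Language value are hypothetical; only the Accept-Encoding strings are
// taken verbatim from the code above):
//
//   Accept-Encoding: gzip,deflate,sdch
//   Avail-Dictionary: <hash of a previously stored dictionary>
//   Accept-Language: en-US,en
//
// A request to a host outside the supported SDCH domain advertises only
// "gzip,deflate"; a supported host with no stored dictionaries still
// advertises sdch but omits the Avail-Dictionary header.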
    600 
    601 void URLRequestHttpJob::AddCookieHeaderAndStart() {
    602   // No matter what, we want to report our status as IO pending since we will
    603   // be notifying our consumer asynchronously via OnStartCompleted.
    604   SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
    605 
    606   // If the request was destroyed, then there is no more work to do.
    607   if (!request_)
    608     return;
    609 
    610   CookieStore* cookie_store = request_->context()->cookie_store();
    611   if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    612     net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
    613     if (cookie_monster) {
    614       cookie_monster->GetAllCookiesForURLAsync(
    615           request_->url(),
    616           base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
    617                      weak_factory_.GetWeakPtr()));
    618     } else {
    619       CheckCookiePolicyAndLoad(CookieList());
    620     }
    621   } else {
    622     DoStartTransaction();
    623   }
    624 }
    625 
    626 void URLRequestHttpJob::DoLoadCookies() {
    627   CookieOptions options;
    628   options.set_include_httponly();
    629   request_->context()->cookie_store()->GetCookiesWithOptionsAsync(
    630       request_->url(), options,
    631       base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
    632                  weak_factory_.GetWeakPtr()));
    633 }
    634 
    635 void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    636     const CookieList& cookie_list) {
    637   if (CanGetCookies(cookie_list))
    638     DoLoadCookies();
    639   else
    640     DoStartTransaction();
    641 }
    642 
    643 void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
    644   if (!cookie_line.empty()) {
    645     request_info_.extra_headers.SetHeader(
    646         HttpRequestHeaders::kCookie, cookie_line);
    647     // Disable privacy mode as we are sending cookies anyway.
    648     request_info_.privacy_mode = kPrivacyModeDisabled;
    649   }
    650   DoStartTransaction();
    651 }
    652 
    653 void URLRequestHttpJob::DoStartTransaction() {
    654   // We may have been canceled while retrieving cookies.
    655   if (GetStatus().is_success()) {
    656     StartTransaction();
    657   } else {
    658     NotifyCanceled();
    659   }
    660 }
    661 
    662 void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
    663   if (result != net::OK) {
    664     std::string source("delegate");
    665     request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
    666                                  NetLog::StringCallback("source", &source));
    667     NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    668     return;
    669   }
    670 
    671   DCHECK(transaction_.get());
    672 
    673   const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
    674   DCHECK(response_info);
    675 
    676   response_cookies_.clear();
    677   response_cookies_save_index_ = 0;
    678 
    679   FetchResponseCookies(&response_cookies_);
    680 
    681   if (!GetResponseHeaders()->GetDateValue(&response_date_))
    682     response_date_ = base::Time();
    683 
    684   // Now, loop over the response cookies, and attempt to persist each.
    685   SaveNextCookie();
    686 }
    687 
    688 // If the save occurs synchronously, SaveNextCookie will loop and save the next
    689 // cookie. If the save is deferred, the callback is responsible for continuing
    690 // to iterate through the cookies.
    691 // TODO(erikwright): Modify the CookieStore API to indicate via return value
    692 // whether it completed synchronously or asynchronously.
    693 // See http://crbug.com/131066.
    694 void URLRequestHttpJob::SaveNextCookie() {
    695   // No matter what, we want to report our status as IO pending since we will
    696   // be notifying our consumer asynchronously via OnStartCompleted.
    697   SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
    698 
    699   // Used to communicate with the callback. See the implementation of
    700   // OnCookieSaved.
    701   scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
    702   scoped_refptr<SharedBoolean> save_next_cookie_running =
    703       new SharedBoolean(true);
    704 
    705   if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
    706       request_->context()->cookie_store() &&
    707       response_cookies_.size() > 0) {
    708     CookieOptions options;
    709     options.set_include_httponly();
    710     options.set_server_time(response_date_);
    711 
    712     net::CookieStore::SetCookiesCallback callback(
    713         base::Bind(&URLRequestHttpJob::OnCookieSaved,
    714                    weak_factory_.GetWeakPtr(),
    715                    save_next_cookie_running,
    716                    callback_pending));
    717 
    718     // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    719     // synchronously.
    720     while (!callback_pending->data &&
    721            response_cookies_save_index_ < response_cookies_.size()) {
    722       if (CanSetCookie(
    723           response_cookies_[response_cookies_save_index_], &options)) {
    724         callback_pending->data = true;
    725         request_->context()->cookie_store()->SetCookieWithOptionsAsync(
    726             request_->url(), response_cookies_[response_cookies_save_index_],
    727             options, callback);
    728       }
    729       ++response_cookies_save_index_;
    730     }
    731   }
    732 
    733   save_next_cookie_running->data = false;
    734 
    735   if (!callback_pending->data) {
    736     response_cookies_.clear();
    737     response_cookies_save_index_ = 0;
    738     SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    739     NotifyHeadersComplete();
    740     return;
    741   }
    742 }
    743 
    744 // |save_next_cookie_running| is true when the callback is bound and set to
    745 // false when SaveNextCookie exits, allowing the callback to determine if the
    746 // save occurred synchronously or asynchronously.
    747 // |callback_pending| is false when the callback is invoked and will be set to
    748 // true by the callback, allowing SaveNextCookie to detect whether the save
    749 // occurred synchronously.
    750 // See SaveNextCookie() for more information.
    751 void URLRequestHttpJob::OnCookieSaved(
    752     scoped_refptr<SharedBoolean> save_next_cookie_running,
    753     scoped_refptr<SharedBoolean> callback_pending,
    754     bool cookie_status) {
    755   callback_pending->data = false;
    756 
    757   // If we were called synchronously, return.
    758   if (save_next_cookie_running->data) {
    759     return;
    760   }
    761 
    762   // We were called asynchronously, so trigger the next save.
    763   // We may have been canceled within OnSetCookie.
    764   if (GetStatus().is_success()) {
    765     SaveNextCookie();
    766   } else {
    767     NotifyCanceled();
    768   }
    769 }
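// Example of the two completion orders the SharedBooleans above distinguish:
//
//   Synchronous save:  SetCookieWithOptionsAsync() runs the callback before
//   SaveNextCookie() returns. |save_next_cookie_running| is still true, so
//   OnCookieSaved() only clears |callback_pending| and the while-loop in
//   SaveNextCookie() advances to the next cookie.
//
//   Asynchronous save: SaveNextCookie() returns with |callback_pending| still
//   set. When the cookie store later runs the callback,
//   |save_next_cookie_running| is false, so OnCookieSaved() calls
//   SaveNextCookie() again to continue the iteration (or NotifyCanceled() if
//   the request was canceled in the meantime).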
    770 
    771 void URLRequestHttpJob::FetchResponseCookies(
    772     std::vector<std::string>* cookies) {
    773   const std::string name = "Set-Cookie";
    774   std::string value;
    775 
    776   void* iter = NULL;
    777   HttpResponseHeaders* headers = GetResponseHeaders();
    778   while (headers->EnumerateHeader(&iter, name, &value)) {
    779     if (!value.empty())
    780       cookies->push_back(value);
    781   }
    782 }
    783 
    784 // NOTE: |ProcessStrictTransportSecurityHeader| and
    785 // |ProcessPublicKeyPinsHeader| have very similar structures, by design.
    786 void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
    787   DCHECK(response_info_);
    788   TransportSecurityState* security_state =
    789       request_->context()->transport_security_state();
    790   const SSLInfo& ssl_info = response_info_->ssl_info;
    791 
    792   // Only accept HSTS headers on HTTPS connections that have no
    793   // certificate errors.
    794   if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
    795       !security_state)
    796     return;
    797 
    798   // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
    799   //
    800   //   If a UA receives more than one STS header field in a HTTP response
    801   //   message over secure transport, then the UA MUST process only the
    802   //   first such header field.
    803   HttpResponseHeaders* headers = GetResponseHeaders();
    804   std::string value;
    805   if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
    806     security_state->AddHSTSHeader(request_info_.url.host(), value);
    807 }
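// Illustrative header consumed above (syntax per the STS draft / RFC 6797;
// the max-age value is an example):
//
//   Strict-Transport-Security: max-age=31536000; includeSubDomains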
    808 
    809 void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
    810   DCHECK(response_info_);
    811   TransportSecurityState* security_state =
    812       request_->context()->transport_security_state();
    813   const SSLInfo& ssl_info = response_info_->ssl_info;
    814 
    815   // Only accept HPKP headers on HTTPS connections that have no
    816   // certificate errors.
    817   if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
    818       !security_state)
    819     return;
    820 
    821   // http://tools.ietf.org/html/draft-ietf-websec-key-pinning:
    822   //
    823   //   If a UA receives more than one PKP header field in an HTTP
    824   //   response message over secure transport, then the UA MUST process
    825   //   only the first such header field.
    826   HttpResponseHeaders* headers = GetResponseHeaders();
    827   std::string value;
    828   if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value))
    829     security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info);
    830 }
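// Illustrative header consumed above (syntax per the key-pinning draft; the
// pin values are placeholders, not real fingerprints):
//
//   Public-Key-Pins: max-age=5184000;
//       pin-sha256="base64+primary+key+hash=";
//       pin-sha256="base64+backup+key+hash="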
    831 
    832 void URLRequestHttpJob::OnStartCompleted(int result) {
    833   RecordTimer();
    834 
    835   // If the request was destroyed, then there is no more work to do.
    836   if (!request_)
    837     return;
    838 
    839   // If the transaction was destroyed, then the job was cancelled, and
    840   // we can just ignore this notification.
    841   if (!transaction_.get())
    842     return;
    843 
    844   receive_headers_end_ = base::TimeTicks::Now();
    845 
    846   // Clear the IO_PENDING status
    847   SetStatus(URLRequestStatus());
    848 
    849   const URLRequestContext* context = request_->context();
    850 
    851   if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
    852       transaction_->GetResponseInfo() != NULL) {
    853     FraudulentCertificateReporter* reporter =
    854       context->fraudulent_certificate_reporter();
    855     if (reporter != NULL) {
    856       const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
    857       bool sni_available = SSLConfigService::IsSNIAvailable(
    858           context->ssl_config_service());
    859       const std::string& host = request_->url().host();
    860 
    861       reporter->SendReport(host, ssl_info, sni_available);
    862     }
    863   }
    864 
    865   if (result == OK) {
    866     scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    867     if (network_delegate()) {
    868       // Note that |this| may not be deleted until
    869       // |on_headers_received_callback_| or
    870       // |NetworkDelegate::URLRequestDestroyed()| has been called.
    871       int error = network_delegate()->NotifyHeadersReceived(
    872           request_,
    873           on_headers_received_callback_,
    874           headers.get(),
    875           &override_response_headers_);
    876       if (error != net::OK) {
    877         if (error == net::ERR_IO_PENDING) {
    878           awaiting_callback_ = true;
    879           SetBlockedOnDelegate();
    880         } else {
    881           std::string source("delegate");
    882           request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
    883                                        NetLog::StringCallback("source",
    884                                                               &source));
    885           NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
    886         }
    887         return;
    888       }
    889     }
    890 
    891     SaveCookiesAndNotifyHeadersComplete(net::OK);
    892   } else if (IsCertificateError(result)) {
    893     // We encountered an SSL certificate error.  Ask our delegate to decide
    894     // what we should do.
    895 
    896     TransportSecurityState::DomainState domain_state;
    897     const URLRequestContext* context = request_->context();
    898     const bool fatal = context->transport_security_state() &&
    899         context->transport_security_state()->GetDomainState(
    900             request_info_.url.host(),
    901             SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
    902             &domain_state) &&
    903         domain_state.ShouldSSLErrorsBeFatal();
    904     NotifySSLCertificateError(transaction_->GetResponseInfo()->ssl_info, fatal);
    905   } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    906     NotifyCertificateRequested(
    907         transaction_->GetResponseInfo()->cert_request_info.get());
    908   } else {
    909     NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    910   }
    911 }
    912 
    913 void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
    914   SetUnblockedOnDelegate();
    915   awaiting_callback_ = false;
    916 
    917   // Check that there are no callbacks to already canceled requests.
    918   DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
    919 
    920   SaveCookiesAndNotifyHeadersComplete(result);
    921 }
    922 
    923 void URLRequestHttpJob::OnReadCompleted(int result) {
    924   read_in_progress_ = false;
    925 
    926   if (ShouldFixMismatchedContentLength(result))
    927     result = OK;
    928 
    929   if (result == OK) {
    930     NotifyDone(URLRequestStatus());
    931   } else if (result < 0) {
    932     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
    933   } else {
    934     // Clear the IO_PENDING status
    935     SetStatus(URLRequestStatus());
    936   }
    937 
    938   NotifyReadComplete(result);
    939 }
    940 
    941 void URLRequestHttpJob::RestartTransactionWithAuth(
    942     const AuthCredentials& credentials) {
    943   auth_credentials_ = credentials;
    944 
    945   // These will be reset in OnStartCompleted.
    946   response_info_ = NULL;
    947   receive_headers_end_ = base::TimeTicks();
    948   response_cookies_.clear();
    949 
    950   ResetTimer();
    951 
    952   // Update the cookies, since the cookie store may have been updated from the
    953   // headers in the 401/407. Since cookies were already appended to
    954   // extra_headers, we need to strip them out before adding them again.
    955   request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);
    956 
    957   AddCookieHeaderAndStart();
    958 }
    959 
    960 void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
    961   DCHECK(!transaction_.get()) << "cannot change once started";
    962   request_info_.upload_data_stream = upload;
    963 }
    964 
    965 void URLRequestHttpJob::SetExtraRequestHeaders(
    966     const HttpRequestHeaders& headers) {
    967   DCHECK(!transaction_.get()) << "cannot change once started";
    968   request_info_.extra_headers.CopyFrom(headers);
    969 }
    970 
    971 LoadState URLRequestHttpJob::GetLoadState() const {
    972   return transaction_.get() ?
    973       transaction_->GetLoadState() : LOAD_STATE_IDLE;
    974 }
    975 
    976 UploadProgress URLRequestHttpJob::GetUploadProgress() const {
    977   return transaction_.get() ?
    978       transaction_->GetUploadProgress() : UploadProgress();
    979 }
    980 
    981 bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
    982   DCHECK(transaction_.get());
    983 
    984   if (!response_info_)
    985     return false;
    986 
    987   return GetResponseHeaders()->GetMimeType(mime_type);
    988 }
    989 
    990 bool URLRequestHttpJob::GetCharset(std::string* charset) {
    991   DCHECK(transaction_.get());
    992 
    993   if (!response_info_)
    994     return false;
    995 
    996   return GetResponseHeaders()->GetCharset(charset);
    997 }
    998 
    999 void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
   1000   DCHECK(request_);
   1001   DCHECK(transaction_.get());
   1002 
   1003   if (response_info_) {
   1004     *info = *response_info_;
   1005     if (override_response_headers_.get())
   1006       info->headers = override_response_headers_;
   1007   }
   1008 }
   1009 
   1010 void URLRequestHttpJob::GetLoadTimingInfo(
   1011     LoadTimingInfo* load_timing_info) const {
    1012   // If we haven't made it far enough to receive any headers, don't return
    1013   // anything.  This makes for more consistent behavior in the case of errors.
   1014   if (!transaction_ || receive_headers_end_.is_null())
   1015     return;
   1016   if (transaction_->GetLoadTimingInfo(load_timing_info))
   1017     load_timing_info->receive_headers_end = receive_headers_end_;
   1018 }
   1019 
   1020 bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
   1021   DCHECK(transaction_.get());
   1022 
   1023   if (!response_info_)
   1024     return false;
   1025 
   1026   // TODO(darin): Why are we extracting response cookies again?  Perhaps we
   1027   // should just leverage response_cookies_.
   1028 
   1029   cookies->clear();
   1030   FetchResponseCookies(cookies);
   1031   return true;
   1032 }
   1033 
   1034 int URLRequestHttpJob::GetResponseCode() const {
   1035   DCHECK(transaction_.get());
   1036 
   1037   if (!response_info_)
   1038     return -1;
   1039 
   1040   return GetResponseHeaders()->response_code();
   1041 }
   1042 
   1043 Filter* URLRequestHttpJob::SetupFilter() const {
   1044   DCHECK(transaction_.get());
   1045   if (!response_info_)
   1046     return NULL;
   1047 
   1048   std::vector<Filter::FilterType> encoding_types;
   1049   std::string encoding_type;
   1050   HttpResponseHeaders* headers = GetResponseHeaders();
   1051   void* iter = NULL;
   1052   while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
   1053     encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
   1054   }
   1055 
   1056   if (filter_context_->IsSdchResponse()) {
   1057     // We are wary of proxies that discard or damage SDCH encoding.  If a server
   1058     // explicitly states that this is not SDCH content, then we can correct our
   1059     // assumption that this is an SDCH response, and avoid the need to recover
   1060     // as though the content is corrupted (when we discover it is not SDCH
   1061     // encoded).
   1062     std::string sdch_response_status;
   1063     iter = NULL;
   1064     while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
   1065                                     &sdch_response_status)) {
   1066       if (sdch_response_status == "0") {
   1067         filter_context_->ResetSdchResponseToFalse();
   1068         break;
   1069       }
   1070     }
   1071   }
   1072 
   1073   // Even if encoding types are empty, there is a chance that we need to add
   1074   // some decoding, as some proxies strip encoding completely. In such cases,
   1075   // we may need to add (for example) SDCH filtering (when the context suggests
   1076   // it is appropriate).
   1077   Filter::FixupEncodingTypes(*filter_context_, &encoding_types);
   1078 
   1079   return !encoding_types.empty()
   1080       ? Filter::Factory(encoding_types, *filter_context_) : NULL;
   1081 }
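// Illustrative behaviour of SetupFilter(): a response carrying
// "Content-Encoding: sdch, gzip" produces an SDCH filter chained with a gzip
// filter, while a response whose Content-Encoding header was stripped by a
// proxy can still receive a tentative SDCH filter through
// Filter::FixupEncodingTypes() when this request advertised a dictionary.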
   1082 
   1083 bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
   1084   // HTTP is always safe.
   1085   // TODO(pauljensen): Remove once crbug.com/146591 is fixed.
   1086   if (location.is_valid() &&
   1087       (location.scheme() == "http" || location.scheme() == "https")) {
   1088     return true;
   1089   }
   1090   // Query URLRequestJobFactory as to whether |location| would be safe to
   1091   // redirect to.
   1092   return request_->context()->job_factory() &&
   1093       request_->context()->job_factory()->IsSafeRedirectTarget(location);
   1094 }
   1095 
   1096 bool URLRequestHttpJob::NeedsAuth() {
   1097   int code = GetResponseCode();
   1098   if (code == -1)
   1099     return false;
   1100 
   1101   // Check if we need either Proxy or WWW Authentication.  This could happen
   1102   // because we either provided no auth info, or provided incorrect info.
   1103   switch (code) {
   1104     case 407:
   1105       if (proxy_auth_state_ == AUTH_STATE_CANCELED)
   1106         return false;
   1107       proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
   1108       return true;
   1109     case 401:
   1110       if (server_auth_state_ == AUTH_STATE_CANCELED)
   1111         return false;
   1112       server_auth_state_ = AUTH_STATE_NEED_AUTH;
   1113       return true;
   1114   }
   1115   return false;
   1116 }
   1117 
   1118 void URLRequestHttpJob::GetAuthChallengeInfo(
   1119     scoped_refptr<AuthChallengeInfo>* result) {
   1120   DCHECK(transaction_.get());
   1121   DCHECK(response_info_);
   1122 
   1123   // sanity checks:
   1124   DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
   1125          server_auth_state_ == AUTH_STATE_NEED_AUTH);
   1126   DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
   1127          (GetResponseHeaders()->response_code() ==
   1128           HTTP_PROXY_AUTHENTICATION_REQUIRED));
   1129 
   1130   *result = response_info_->auth_challenge;
   1131 }
   1132 
   1133 void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
   1134   DCHECK(transaction_.get());
   1135 
   1136   // Proxy gets set first, then WWW.
   1137   if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
   1138     proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
   1139   } else {
   1140     DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
   1141     server_auth_state_ = AUTH_STATE_HAVE_AUTH;
   1142   }
   1143 
   1144   RestartTransactionWithAuth(credentials);
   1145 }
   1146 
   1147 void URLRequestHttpJob::CancelAuth() {
   1148   // Proxy gets set first, then WWW.
   1149   if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
   1150     proxy_auth_state_ = AUTH_STATE_CANCELED;
   1151   } else {
   1152     DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
   1153     server_auth_state_ = AUTH_STATE_CANCELED;
   1154   }
   1155 
   1156   // These will be reset in OnStartCompleted.
   1157   response_info_ = NULL;
   1158   receive_headers_end_ = base::TimeTicks::Now();
   1159   response_cookies_.clear();
   1160 
   1161   ResetTimer();
   1162 
   1163   // OK, let the consumer read the error page...
   1164   //
   1165   // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
   1166   // which will cause the consumer to receive OnResponseStarted instead of
   1167   // OnAuthRequired.
   1168   //
   1169   // We have to do this via InvokeLater to avoid "recursing" the consumer.
   1170   //
   1171   base::MessageLoop::current()->PostTask(
   1172       FROM_HERE,
   1173       base::Bind(&URLRequestHttpJob::OnStartCompleted,
   1174                  weak_factory_.GetWeakPtr(), OK));
   1175 }
   1176 
   1177 void URLRequestHttpJob::ContinueWithCertificate(
   1178     X509Certificate* client_cert) {
   1179   DCHECK(transaction_.get());
   1180 
   1181   DCHECK(!response_info_) << "should not have a response yet";
   1182   receive_headers_end_ = base::TimeTicks();
   1183 
   1184   ResetTimer();
   1185 
   1186   // No matter what, we want to report our status as IO pending since we will
   1187   // be notifying our consumer asynchronously via OnStartCompleted.
   1188   SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
   1189 
   1190   int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
   1191   if (rv == ERR_IO_PENDING)
   1192     return;
   1193 
   1194   // The transaction started synchronously, but we need to notify the
   1195   // URLRequest delegate via the message loop.
   1196   base::MessageLoop::current()->PostTask(
   1197       FROM_HERE,
   1198       base::Bind(&URLRequestHttpJob::OnStartCompleted,
   1199                  weak_factory_.GetWeakPtr(), rv));
   1200 }
   1201 
   1202 void URLRequestHttpJob::ContinueDespiteLastError() {
   1203   // If the transaction was destroyed, then the job was cancelled.
   1204   if (!transaction_.get())
   1205     return;
   1206 
   1207   DCHECK(!response_info_) << "should not have a response yet";
   1208   receive_headers_end_ = base::TimeTicks();
   1209 
   1210   ResetTimer();
   1211 
   1212   // No matter what, we want to report our status as IO pending since we will
   1213   // be notifying our consumer asynchronously via OnStartCompleted.
   1214   SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
   1215 
   1216   int rv = transaction_->RestartIgnoringLastError(start_callback_);
   1217   if (rv == ERR_IO_PENDING)
   1218     return;
   1219 
   1220   // The transaction started synchronously, but we need to notify the
   1221   // URLRequest delegate via the message loop.
   1222   base::MessageLoop::current()->PostTask(
   1223       FROM_HERE,
   1224       base::Bind(&URLRequestHttpJob::OnStartCompleted,
   1225                  weak_factory_.GetWeakPtr(), rv));
   1226 }
   1227 
   1228 bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
    1229   // Some servers send the body compressed, but specify the content length as
    1230   // the uncompressed size.  Although this violates the HTTP spec, we want to
    1231   // support it (as IE and Firefox do), but *only* for an exact match.
   1232   // See http://crbug.com/79694.
   1233   if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
   1234       rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
   1235     if (request_ && request_->response_headers()) {
   1236       int64 expected_length = request_->response_headers()->GetContentLength();
   1237       VLOG(1) << __FUNCTION__ << "() "
   1238               << "\"" << request_->url().spec() << "\""
   1239               << " content-length = " << expected_length
   1240               << " pre total = " << prefilter_bytes_read()
   1241               << " post total = " << postfilter_bytes_read();
   1242       if (postfilter_bytes_read() == expected_length) {
   1243         // Clear the error.
   1244         return true;
   1245       }
   1246     }
   1247   }
   1248   return false;
   1249 }
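// Worked example (byte counts are illustrative): a server gzips a 10000-byte
// body down to 3000 bytes but still sends "Content-Length: 10000". The read
// fails with ERR_CONTENT_LENGTH_MISMATCH, yet postfilter_bytes_read() == 10000
// matches the advertised length exactly, so the error is cleared and the
// response is treated as complete.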
   1250 
   1251 bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
   1252                                     int* bytes_read) {
   1253   DCHECK_NE(buf_size, 0);
   1254   DCHECK(bytes_read);
   1255   DCHECK(!read_in_progress_);
   1256 
   1257   int rv = transaction_->Read(
   1258       buf, buf_size,
   1259       base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));
   1260 
   1261   if (ShouldFixMismatchedContentLength(rv))
   1262     rv = 0;
   1263 
   1264   if (rv >= 0) {
   1265     *bytes_read = rv;
   1266     if (!rv)
   1267       DoneWithRequest(FINISHED);
   1268     return true;
   1269   }
   1270 
   1271   if (rv == ERR_IO_PENDING) {
   1272     read_in_progress_ = true;
   1273     SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
   1274   } else {
   1275     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
   1276   }
   1277 
   1278   return false;
   1279 }
   1280 
   1281 void URLRequestHttpJob::StopCaching() {
   1282   if (transaction_.get())
   1283     transaction_->StopCaching();
   1284 }
   1285 
   1286 bool URLRequestHttpJob::GetFullRequestHeaders(
   1287     HttpRequestHeaders* headers) const {
   1288   if (!transaction_)
   1289     return false;
   1290 
   1291   return transaction_->GetFullRequestHeaders(headers);
   1292 }
   1293 
   1294 void URLRequestHttpJob::DoneReading() {
   1295   if (transaction_.get())
   1296     transaction_->DoneReading();
   1297   DoneWithRequest(FINISHED);
   1298 }
   1299 
   1300 HostPortPair URLRequestHttpJob::GetSocketAddress() const {
   1301   return response_info_ ? response_info_->socket_address : HostPortPair();
   1302 }
   1303 
   1304 void URLRequestHttpJob::RecordTimer() {
   1305   if (request_creation_time_.is_null()) {
   1306     NOTREACHED()
   1307         << "The same transaction shouldn't start twice without new timing.";
   1308     return;
   1309   }
   1310 
   1311   base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
   1312   request_creation_time_ = base::Time();
   1313 
   1314   UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);
   1315 }
   1316 
   1317 void URLRequestHttpJob::ResetTimer() {
   1318   if (!request_creation_time_.is_null()) {
   1319     NOTREACHED()
   1320         << "The timer was reset before it was recorded.";
   1321     return;
   1322   }
   1323   request_creation_time_ = base::Time::Now();
   1324 }
   1325 
   1326 void URLRequestHttpJob::UpdatePacketReadTimes() {
   1327   if (!packet_timing_enabled_)
   1328     return;
   1329 
   1330   if (filter_input_byte_count() <= bytes_observed_in_packets_) {
   1331     DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
   1332     return;  // No new bytes have arrived.
   1333   }
   1334 
   1335   final_packet_time_ = base::Time::Now();
   1336   if (!bytes_observed_in_packets_)
   1337     request_time_snapshot_ = request_ ? request_->request_time() : base::Time();
   1338 
   1339   bytes_observed_in_packets_ = filter_input_byte_count();
   1340 }
   1341 
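        // Converts the values gathered by UpdatePacketReadTimes() into the Sdch3.*
        // histograms; a no-op when packet timing is disabled or no bytes were
        // observed.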
   1342 void URLRequestHttpJob::RecordPacketStats(
   1343     FilterContext::StatisticSelector statistic) const {
   1344   if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
   1345     return;
   1346 
   1347   base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
   1348   switch (statistic) {
   1349     case FilterContext::SDCH_DECODE: {
   1350       UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
   1351           static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
   1352       return;
   1353     }
   1354     case FilterContext::SDCH_PASSTHROUGH: {
   1355       // Despite advertising a dictionary, we handled non-sdch compressed
   1356       // content.
   1357       return;
   1358     }
   1359 
   1360     case FilterContext::SDCH_EXPERIMENT_DECODE: {
   1361       UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
   1362                                   duration,
   1363                                   base::TimeDelta::FromMilliseconds(20),
   1364                                   base::TimeDelta::FromMinutes(10), 100);
   1365       return;
   1366     }
   1367     case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
   1368       UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
   1369                                   duration,
   1370                                   base::TimeDelta::FromMilliseconds(20),
   1371                                   base::TimeDelta::FromMinutes(10), 100);
   1372       return;
   1373     }
   1374     default:
   1375       NOTREACHED();
   1376       return;
   1377   }
   1378 }
   1379 
   1380 // The common type of histogram we use for all compression-tracking histograms.
   1381 #define COMPRESSION_HISTOGRAM(name, sample) \
   1382     do { \
   1383       UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
   1384                                   500, 1000000, 100); \
   1385     } while (0)
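        // For example, COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", n)
        // expands to a single statement that records |n| in the custom-counts
        // histogram "Net.Compress.SSL.BytesBeforeCompression" (range 500 to
        // 1,000,000, 100 buckets); the do { } while (0) wrapper keeps the macro
        // safe to use as a statement in unbraced if/else branches.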
   1386 
   1387 void URLRequestHttpJob::RecordCompressionHistograms() {
   1388   DCHECK(request_);
   1389   if (!request_)
   1390     return;
   1391 
   1392   if (is_cached_content_ ||                // Don't record cached content
   1393       !GetStatus().is_success() ||         // Don't record failed content
   1394       !IsCompressibleContent() ||          // Only record compressible content
   1395       !prefilter_bytes_read())             // Zero-byte responses aren't useful.
   1396     return;
   1397 
   1398   // Miniature requests aren't really compressible.  Don't count them.
   1399   const int kMinSize = 16;
   1400   if (prefilter_bytes_read() < kMinSize)
   1401     return;
   1402 
   1403   // Only record for http or https URLs.
   1404   bool is_http = request_->url().SchemeIs("http");
   1405   bool is_https = request_->url().SchemeIs("https");
   1406   if (!is_http && !is_https)
   1407     return;
   1408 
   1409   int compressed_B = prefilter_bytes_read();
   1410   int decompressed_B = postfilter_bytes_read();
   1411   bool was_filtered = HasFilter();
   1412 
   1413   // We want to record how often downloaded resources are compressed.
   1414   // But, we recognize that different protocols may have different
   1415   // properties.  So, for each request, we'll put it into one of 3
   1416   // groups:
   1417   //      a) SSL resources
   1418   //         Proxies cannot tamper with compression headers over SSL.
   1419   //      b) Non-SSL, loaded-via-proxy resources
   1420   //         In this case, we know a proxy might have interfered.
   1421   //      c) Non-SSL, loaded-without-proxy resources
   1422   //         In this case, we know there was no explicit proxy.  However,
   1423   //         it is possible that a transparent proxy was still interfering.
   1424   //
   1425   // For each group, we record the same 3 histograms.
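          // With a filter attached these are BytesBeforeCompression and
          // BytesAfterCompression; without one, ShouldHaveBeenCompressed.  Each
          // name is prefixed with SSL., Proxy. or NoProxy. accordingly.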
   1426 
   1427   if (is_https) {
   1428     if (was_filtered) {
   1429       COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
   1430       COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
   1431     } else {
   1432       COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
   1433     }
   1434     return;
   1435   }
   1436 
   1437   if (request_->was_fetched_via_proxy()) {
   1438     if (was_filtered) {
   1439       COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
   1440       COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
   1441     } else {
   1442       COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
   1443     }
   1444     return;
   1445   }
   1446 
   1447   if (was_filtered) {
   1448     COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
   1449     COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
   1450   } else {
   1451     COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
   1452   }
   1453 }
   1454 
   1455 bool URLRequestHttpJob::IsCompressibleContent() const {
   1456   std::string mime_type;
   1457   return GetMimeType(&mime_type) &&
   1458       (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
   1459        IsSupportedNonImageMimeType(mime_type.c_str()));
   1460 }
   1461 
   1462 void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
   1463   if (start_time_.is_null())
   1464     return;
   1465 
   1466   base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
   1467   UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);
   1468 
   1469   if (reason == FINISHED) {
   1470     UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
   1471   } else {
   1472     UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
   1473   }
   1474 
   1475   if (response_info_) {
   1476     if (response_info_->was_cached) {
   1477       UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
   1478     } else {
   1479       UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
   1480     }
   1481   }
   1482 
   1483   start_time_ = base::TimeTicks();
   1484 }
   1485 
   1486 void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
   1487   if (done_)
   1488     return;
   1489   done_ = true;
   1490   RecordPerfHistograms(reason);
   1491   if (reason == FINISHED) {
   1492     request_->set_received_response_content_length(prefilter_bytes_read());
   1493     RecordCompressionHistograms();
   1494   }
   1495 }
   1496 
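        // Returns the headers the rest of the stack should see: any override
        // headers installed on this job take precedence over the transaction's
        // original response headers.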
   1497 HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
   1498   DCHECK(transaction_.get());
   1499   DCHECK(transaction_->GetResponseInfo());
   1500   return override_response_headers_.get() ?
   1501              override_response_headers_.get() :
   1502              transaction_->GetResponseInfo()->headers.get();
   1503 }
   1504 
   1505 void URLRequestHttpJob::NotifyURLRequestDestroyed() {
   1506   awaiting_callback_ = false;
   1507 }
   1508 
   1509 void URLRequestHttpJob::OnDetachRequest() {
   1510   http_transaction_delegate_->OnDetachRequest();
   1511 }
   1512 
   1513 }  // namespace net
   1514