      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "net/url_request/url_request_http_job.h"
      6 
      7 #include "base/base_switches.h"
      8 #include "base/bind.h"
      9 #include "base/bind_helpers.h"
     10 #include "base/command_line.h"
     11 #include "base/compiler_specific.h"
     12 #include "base/file_version_info.h"
     13 #include "base/message_loop/message_loop.h"
     14 #include "base/metrics/field_trial.h"
     15 #include "base/metrics/histogram.h"
     16 #include "base/rand_util.h"
     17 #include "base/strings/string_util.h"
     18 #include "base/time/time.h"
     19 #include "net/base/filter.h"
     20 #include "net/base/host_port_pair.h"
     21 #include "net/base/load_flags.h"
     22 #include "net/base/mime_util.h"
     23 #include "net/base/net_errors.h"
     24 #include "net/base/net_util.h"
     25 #include "net/base/network_delegate.h"
     26 #include "net/base/sdch_manager.h"
     27 #include "net/cert/cert_status_flags.h"
     28 #include "net/cookies/cookie_monster.h"
     29 #include "net/http/http_network_session.h"
     30 #include "net/http/http_request_headers.h"
     31 #include "net/http/http_response_headers.h"
     32 #include "net/http/http_response_info.h"
     33 #include "net/http/http_status_code.h"
     34 #include "net/http/http_transaction.h"
     35 #include "net/http/http_transaction_delegate.h"
     36 #include "net/http/http_transaction_factory.h"
     37 #include "net/http/http_util.h"
     38 #include "net/ssl/ssl_cert_request_info.h"
     39 #include "net/ssl/ssl_config_service.h"
     40 #include "net/url_request/fraudulent_certificate_reporter.h"
     41 #include "net/url_request/http_user_agent_settings.h"
     42 #include "net/url_request/url_request.h"
     43 #include "net/url_request/url_request_context.h"
     44 #include "net/url_request/url_request_error_job.h"
     45 #include "net/url_request/url_request_job_factory.h"
     46 #include "net/url_request/url_request_redirect_job.h"
     47 #include "net/url_request/url_request_throttler_header_adapter.h"
     48 #include "net/url_request/url_request_throttler_manager.h"
     49 #include "net/websockets/websocket_handshake_stream_base.h"
     50 
     51 static const char kAvailDictionaryHeader[] = "Avail-Dictionary";
     52 
     53 namespace net {
     54 
     55 class URLRequestHttpJob::HttpFilterContext : public FilterContext {
     56  public:
     57   explicit HttpFilterContext(URLRequestHttpJob* job);
     58   virtual ~HttpFilterContext();
     59 
     60   // FilterContext implementation.
     61   virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
     62   virtual bool GetURL(GURL* gurl) const OVERRIDE;
     63   virtual base::Time GetRequestTime() const OVERRIDE;
     64   virtual bool IsCachedContent() const OVERRIDE;
     65   virtual bool IsDownload() const OVERRIDE;
     66   virtual bool IsSdchResponse() const OVERRIDE;
     67   virtual int64 GetByteReadCount() const OVERRIDE;
     68   virtual int GetResponseCode() const OVERRIDE;
     69   virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;
     70 
     71   // Allows the filter context to stop treating a response as SDCH encoded
     72   // when an explicit HTTP header (e.g. "X-Sdch-Encode: 0") says otherwise.
     73   void ResetSdchResponseToFalse();
     74 
     75  private:
     76   URLRequestHttpJob* job_;
     77 
     78   DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
     79 };
     80 
     81 class URLRequestHttpJob::HttpTransactionDelegateImpl
     82     : public HttpTransactionDelegate {
     83  public:
     84   HttpTransactionDelegateImpl(URLRequest* request,
     85                               NetworkDelegate* network_delegate)
     86       : request_(request),
     87         network_delegate_(network_delegate),
     88         state_(NONE_ACTIVE) {}
     89   virtual ~HttpTransactionDelegateImpl() { OnDetachRequest(); }
     90   void OnDetachRequest() {
     91     if (!IsRequestAndDelegateActive())
     92       return;
     93     NotifyStateChange(NetworkDelegate::REQUEST_WAIT_STATE_RESET);
     94     state_ = NONE_ACTIVE;
     95     request_ = NULL;
     96   }
     97   virtual void OnCacheActionStart() OVERRIDE {
     98     HandleStateChange(NONE_ACTIVE,
     99                       CACHE_ACTIVE,
    100                       NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START);
    101   }
    102   virtual void OnCacheActionFinish() OVERRIDE {
    103     HandleStateChange(CACHE_ACTIVE,
    104                       NONE_ACTIVE,
    105                       NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH);
    106   }
    107   virtual void OnNetworkActionStart() OVERRIDE {
    108     HandleStateChange(NONE_ACTIVE,
    109                       NETWORK_ACTIVE,
    110                       NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START);
    111   }
    112   virtual void OnNetworkActionFinish() OVERRIDE {
    113     HandleStateChange(NETWORK_ACTIVE,
    114                       NONE_ACTIVE,
    115                       NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH);
    116   }
    117 
    118  private:
    119   enum State {
    120     NONE_ACTIVE,
    121     CACHE_ACTIVE,
    122     NETWORK_ACTIVE
    123   };
    124 
    125   // Returns true if this object still has an active request and network
    126   // delegate.
    127   bool IsRequestAndDelegateActive() const {
    128     return request_ && network_delegate_;
    129   }
    130 
    131   // Notifies the |network_delegate_| object of a change in the state of the
    132   // |request_| to the state given by the |request_wait_state| argument.
    133   void NotifyStateChange(NetworkDelegate::RequestWaitState request_wait_state) {
    134     network_delegate_->NotifyRequestWaitStateChange(*request_,
    135                                                     request_wait_state);
    136   }
    137 
    138   // Checks the request and delegate are still active, changes |state_| from
    139   // |expected_state| to |next_state|, and then notifies the network delegate of
    140   // the change to |request_wait_state|.
    141   void HandleStateChange(State expected_state,
    142                          State next_state,
    143                          NetworkDelegate::RequestWaitState request_wait_state) {
    144     if (!IsRequestAndDelegateActive())
    145       return;
    146     DCHECK_EQ(expected_state, state_);
    147     state_ = next_state;
    148     NotifyStateChange(request_wait_state);
    149   }
    150 
    151   URLRequest* request_;
    152   NetworkDelegate* network_delegate_;
    153   // Internal state tracking, for sanity checking.
    154   State state_;
    155 
    156   DISALLOW_COPY_AND_ASSIGN(HttpTransactionDelegateImpl);
    157 };
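        // Illustrative call sequence (assuming the transaction reports each phase;
        // the transitions below are exactly those encoded in HandleStateChange):
        //   OnCacheActionStart()    NONE_ACTIVE    -> CACHE_ACTIVE
        //   OnCacheActionFinish()   CACHE_ACTIVE   -> NONE_ACTIVE
        //   OnNetworkActionStart()  NONE_ACTIVE    -> NETWORK_ACTIVE
        //   OnNetworkActionFinish() NETWORK_ACTIVE -> NONE_ACTIVE
        // Each transition forwards the matching REQUEST_WAIT_STATE_* value to the
        // network delegate. OnDetachRequest() resets the state and clears
        // |request_|, after which every later notification becomes a no-op.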
    158 
    159 URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    160     : job_(job) {
    161   DCHECK(job_);
    162 }
    163 
    164 URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
    165 }
    166 
    167 bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    168     std::string* mime_type) const {
    169   return job_->GetMimeType(mime_type);
    170 }
    171 
    172 bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
    173   if (!job_->request())
    174     return false;
    175   *gurl = job_->request()->url();
    176   return true;
    177 }
    178 
    179 base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
    180   return job_->request() ? job_->request()->request_time() : base::Time();
    181 }
    182 
    183 bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
    184   return job_->is_cached_content_;
    185 }
    186 
    187 bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
    188   return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
    189 }
    190 
    191 void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
    192   DCHECK(job_->sdch_dictionary_advertised_);
    193   job_->sdch_dictionary_advertised_ = false;
    194 }
    195 
    196 bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
    197   return job_->sdch_dictionary_advertised_;
    198 }
    199 
    200 int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
    201   return job_->filter_input_byte_count();
    202 }
    203 
    204 int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
    205   return job_->GetResponseCode();
    206 }
    207 
    208 void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    209     StatisticSelector statistic) const {
    210   job_->RecordPacketStats(statistic);
    211 }
    212 
    213 // TODO(darin): make sure the port blocking code is not lost
    214 // static
    215 URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
    216                                           NetworkDelegate* network_delegate,
    217                                           const std::string& scheme) {
    218   DCHECK(scheme == "http" || scheme == "https" || scheme == "ws" ||
    219          scheme == "wss");
    220 
    221   if (!request->context()->http_transaction_factory()) {
    222     NOTREACHED() << "requires a valid context";
    223     return new URLRequestErrorJob(
    224         request, network_delegate, ERR_INVALID_ARGUMENT);
    225   }
    226 
    227   GURL redirect_url;
    228   if (request->GetHSTSRedirect(&redirect_url)) {
    229     return new URLRequestRedirectJob(
    230         request, network_delegate, redirect_url,
    231         // Use status code 307 to preserve the method, so POST requests work.
    232         URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
    233   }
    234   return new URLRequestHttpJob(request,
    235                                network_delegate,
    236                                request->context()->http_user_agent_settings());
    237 }
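        // Illustrative (hostname is an example only): an "http://example.com/"
        // request whose host has a known HSTS entry is answered above with a
        // synthetic 307 redirect to the "https://" form of the URL instead of a
        // network transaction; all other http/https/ws/wss requests fall through
        // to a regular URLRequestHttpJob.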
    238 
    239 URLRequestHttpJob::URLRequestHttpJob(
    240     URLRequest* request,
    241     NetworkDelegate* network_delegate,
    242     const HttpUserAgentSettings* http_user_agent_settings)
    243     : URLRequestJob(request, network_delegate),
    244       priority_(DEFAULT_PRIORITY),
    245       response_info_(NULL),
    246       response_cookies_save_index_(0),
    247       proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
    248       server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
    249       start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
    250                                  base::Unretained(this))),
    251       notify_before_headers_sent_callback_(
    252           base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
    253                      base::Unretained(this))),
    254       read_in_progress_(false),
    255       throttling_entry_(NULL),
    256       sdch_dictionary_advertised_(false),
    257       sdch_test_activated_(false),
    258       sdch_test_control_(false),
    259       is_cached_content_(false),
    260       request_creation_time_(),
    261       packet_timing_enabled_(false),
    262       done_(false),
    263       bytes_observed_in_packets_(0),
    264       request_time_snapshot_(),
    265       final_packet_time_(),
    266       filter_context_(new HttpFilterContext(this)),
    267       weak_factory_(this),
    268       on_headers_received_callback_(
    269           base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
    270                      base::Unretained(this))),
    271       awaiting_callback_(false),
    272       http_transaction_delegate_(
    273           new HttpTransactionDelegateImpl(request, network_delegate)),
    274       http_user_agent_settings_(http_user_agent_settings) {
    275   URLRequestThrottlerManager* manager = request->context()->throttler_manager();
    276   if (manager)
    277     throttling_entry_ = manager->RegisterRequestUrl(request->url());
    278 
    279   ResetTimer();
    280 }
    281 
    282 URLRequestHttpJob::~URLRequestHttpJob() {
    283   CHECK(!awaiting_callback_);
    284 
    285   DCHECK(!sdch_test_control_ || !sdch_test_activated_);
    286   if (!is_cached_content_) {
    287     if (sdch_test_control_)
    288       RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    289     if (sdch_test_activated_)
    290       RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
    291   }
    292   // Make sure SDCH filters are told to emit histogram data while
    293   // filter_context_ is still alive.
    294   DestroyFilters();
    295 
    296   if (sdch_dictionary_url_.is_valid()) {
    297     // Prior to reaching the destructor, request_ has been set to a NULL
    298     // pointer, so request_->url() is no longer valid in the destructor, and we
    299     // use an alternate copy |request_info_.url|.
    300     SdchManager* manager = SdchManager::Global();
    301     // To be extra safe, since this is a "different time" from when we decided
    302     // to get the dictionary, we'll validate that an SdchManager is available.
    303     // At shutdown time, care is taken to be sure that we don't delete this
    304     // globally useful instance "too soon," so this check is just defensive
    305     // coding to assure that IF the system is shutting down, we don't have any
    306     // problem if the manager was deleted ahead of time.
    307     if (manager)  // Defensive programming.
    308       manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
    309   }
    310   DoneWithRequest(ABORTED);
    311 }
    312 
    313 void URLRequestHttpJob::SetPriority(RequestPriority priority) {
    314   priority_ = priority;
    315   if (transaction_)
    316     transaction_->SetPriority(priority_);
    317 }
    318 
    319 void URLRequestHttpJob::Start() {
    320   DCHECK(!transaction_.get());
    321 
    322   // URLRequest::SetReferrer ensures that we do not send username and password
    323   // fields in the referrer.
    324   GURL referrer(request_->referrer());
    325 
    326   request_info_.url = request_->url();
    327   request_info_.method = request_->method();
    328   request_info_.load_flags = request_->load_flags();
    329   // Enable privacy mode if cookie settings or flags tell us not to send or
    330   // save cookies.
    331   bool enable_privacy_mode =
    332       (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
    333       (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
    334       CanEnablePrivacyMode();
    335   // Privacy mode could still be disabled in OnCookiesLoaded if we are going
    336   // to send previously saved cookies.
    337   request_info_.privacy_mode = enable_privacy_mode ?
    338       kPrivacyModeEnabled : kPrivacyModeDisabled;
    339 
    340   // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
    341   // from overriding headers that are controlled using other means. Otherwise a
    342   // plugin could set a referrer although sending the referrer is inhibited.
    343   request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);
    344 
    345   // Our consumer should have made sure that this is a safe referrer.  See for
    346   // instance WebCore::FrameLoader::HideReferrer.
    347   if (referrer.is_valid()) {
    348     request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
    349                                           referrer.spec());
    350   }
    351 
    352   request_info_.extra_headers.SetHeaderIfMissing(
    353       HttpRequestHeaders::kUserAgent,
    354       http_user_agent_settings_ ?
    355           http_user_agent_settings_->GetUserAgent(request_->url()) :
    356           std::string());
    357 
    358   AddExtraHeaders();
    359   AddCookieHeaderAndStart();
    360 }
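        // Illustrative summary of what Start() assembles (values are examples):
        //   Referer: <the sanitized referrer, only if valid>
        //   User-Agent: <from |http_user_agent_settings_|, unless already set>
        // Accept-Encoding and Accept-Language are filled in by AddExtraHeaders(),
        // and a Cookie header may be added later by AddCookieHeaderAndStart().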
    361 
    362 void URLRequestHttpJob::Kill() {
    363   http_transaction_delegate_->OnDetachRequest();
    364 
    365   if (!transaction_.get())
    366     return;
    367 
    368   weak_factory_.InvalidateWeakPtrs();
    369   DestroyTransaction();
    370   URLRequestJob::Kill();
    371 }
    372 
    373 void URLRequestHttpJob::NotifyHeadersComplete() {
    374   DCHECK(!response_info_);
    375 
    376   response_info_ = transaction_->GetResponseInfo();
    377 
    378   // Save this boolean, as we'll need it at destruction time, and filters may
    379   // also need it.
    380   is_cached_content_ = response_info_->was_cached;
    381 
    382   if (!is_cached_content_ && throttling_entry_.get()) {
    383     URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    384     throttling_entry_->UpdateWithResponse(request_info_.url.host(),
    385                                           &response_adapter);
    386   }
    387 
    388   // The ordering of these calls is not important.
    389   ProcessStrictTransportSecurityHeader();
    390   ProcessPublicKeyPinsHeader();
    391 
    392   if (SdchManager::Global() &&
    393       SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    394     const std::string name = "Get-Dictionary";
    395     std::string url_text;
    396     void* iter = NULL;
    397     // TODO(jar): We need to not fetch dictionaries the first time they are
    398     // seen, but rather wait until we can justify their usefulness.
    399     // For now, we fetch only the first suggested dictionary, so getting any
    400     // additional ones for this site will require further suggestions later.
    401     // Eventually we should wait until a dictionary is requested several times
    402     // before we even download it (so that we don't waste memory or bandwidth).
    403     if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
    404       // request_->url() won't be valid in the destructor, so we use an
    405       // alternate copy.
    406       DCHECK_EQ(request_->url(), request_info_.url);
    407       // Resolve suggested URL relative to request url.
    408       sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    409     }
    410   }
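          // Illustrative (dictionary path is an example only): a response on an
          // SDCH-supported domain carrying "Get-Dictionary: /dict/abc123" stores
          // request_info_.url.Resolve("/dict/abc123") in |sdch_dictionary_url_|;
          // the destructor later passes that URL to SdchManager::FetchDictionary().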
    411 
    412   // The HTTP transaction may be restarted several times for the purposes
    413   // of sending authorization information. Each time it restarts, we get
    414   // notified of the headers completion so that we can update the cookie store.
    415   if (transaction_->IsReadyToRestartForAuth()) {
    416     DCHECK(!response_info_->auth_challenge.get());
    417     // TODO(battre): This breaks the webrequest API for
    418     // URLRequestTestHTTP.BasicAuthWithCookies
    419     // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    420     // occurs.
    421     RestartTransactionWithAuth(AuthCredentials());
    422     return;
    423   }
    424 
    425   URLRequestJob::NotifyHeadersComplete();
    426 }
    427 
    428 void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
    429   DoneWithRequest(FINISHED);
    430   URLRequestJob::NotifyDone(status);
    431 }
    432 
    433 void URLRequestHttpJob::DestroyTransaction() {
    434   DCHECK(transaction_.get());
    435 
    436   DoneWithRequest(ABORTED);
    437   transaction_.reset();
    438   response_info_ = NULL;
    439   receive_headers_end_ = base::TimeTicks();
    440 }
    441 
    442 void URLRequestHttpJob::StartTransaction() {
    443   if (network_delegate()) {
    444     OnCallToDelegate();
    445     int rv = network_delegate()->NotifyBeforeSendHeaders(
    446         request_, notify_before_headers_sent_callback_,
    447         &request_info_.extra_headers);
    448     // If an extension blocks the request, we rely on the callback to
    449     // MaybeStartTransactionInternal().
    450     if (rv == ERR_IO_PENDING)
    451       return;
    452     MaybeStartTransactionInternal(rv);
    453     return;
    454   }
    455   StartTransactionInternal();
    456 }
    457 
    458 void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
    459   // Check that there are no callbacks to already canceled requests.
    460   DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
    461 
    462   MaybeStartTransactionInternal(result);
    463 }
    464 
    465 void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
    466   OnCallToDelegateComplete();
    467   if (result == OK) {
    468     StartTransactionInternal();
    469   } else {
    470     std::string source("delegate");
    471     request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
    472                                  NetLog::StringCallback("source", &source));
    473     NotifyCanceled();
    474     NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    475   }
    476 }
    477 
    478 void URLRequestHttpJob::StartTransactionInternal() {
    479   // NOTE: This method assumes that request_info_ is already setup properly.
    480 
    481   // If we already have a transaction, then we should restart the transaction
    482   // with auth provided by auth_credentials_.
    483 
    484   int rv;
    485 
    486   if (network_delegate()) {
    487     network_delegate()->NotifySendHeaders(
    488         request_, request_info_.extra_headers);
    489   }
    490 
    491   if (transaction_.get()) {
    492     rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    493     auth_credentials_ = AuthCredentials();
    494   } else {
    495     DCHECK(request_->context()->http_transaction_factory());
    496 
    497     rv = request_->context()->http_transaction_factory()->CreateTransaction(
    498         priority_, &transaction_, http_transaction_delegate_.get());
    499 
    500     if (rv == OK && request_info_.url.SchemeIsWSOrWSS()) {
    501       // TODO(ricea): Implement WebSocket throttling semantics as defined in
    502       // RFC6455 Section 4.1.
    503       base::SupportsUserData::Data* data = request_->GetUserData(
    504           WebSocketHandshakeStreamBase::CreateHelper::DataKey());
    505       if (data) {
    506         transaction_->SetWebSocketHandshakeStreamCreateHelper(
    507             static_cast<WebSocketHandshakeStreamBase::CreateHelper*>(data));
    508       } else {
    509         rv = ERR_DISALLOWED_URL_SCHEME;
    510       }
    511     }
    512 
    513     if (rv == OK) {
    514       if (!throttling_entry_.get() ||
    515           !throttling_entry_->ShouldRejectRequest(*request_)) {
    516         rv = transaction_->Start(
    517             &request_info_, start_callback_, request_->net_log());
    518         start_time_ = base::TimeTicks::Now();
    519       } else {
    520         // Special error code for the exponential back-off module.
    521         rv = ERR_TEMPORARILY_THROTTLED;
    522       }
    523     }
    524   }
    525 
    526   if (rv == ERR_IO_PENDING)
    527     return;
    528 
    529   // The transaction started synchronously, but we need to notify the
    530   // URLRequest delegate via the message loop.
    531   base::MessageLoop::current()->PostTask(
    532       FROM_HERE,
    533       base::Bind(&URLRequestHttpJob::OnStartCompleted,
    534                  weak_factory_.GetWeakPtr(), rv));
    535 }
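        // Illustrative: a ws:// or wss:// request is expected to carry a
        // WebSocketHandshakeStreamBase::CreateHelper in its user data (attached by
        // the WebSocket layer before the job starts); if it is missing, the code
        // above fails the request with ERR_DISALLOWED_URL_SCHEME so that ordinary
        // HTTP consumers cannot fetch WebSocket URLs.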
    536 
    537 void URLRequestHttpJob::AddExtraHeaders() {
    538   // Supply Accept-Encoding field only if it is not already provided.
    539   // It should be provided IF the content is known to have restrictions on
    540   // potential encoding, such as streaming multi-media.
    541   // For details see bug 47381.
    542   // TODO(jar, enal): jpeg files etc. should set up a request header if
    543   // possible. Right now it is done only by buffered_resource_loader and
    544   // simple_data_source.
    545   if (!request_info_.extra_headers.HasHeader(
    546       HttpRequestHeaders::kAcceptEncoding)) {
    547     bool advertise_sdch = SdchManager::Global() &&
    548         SdchManager::Global()->IsInSupportedDomain(request_->url());
    549     std::string avail_dictionaries;
    550     if (advertise_sdch) {
    551       SdchManager::Global()->GetAvailDictionaryList(request_->url(),
    552                                                     &avail_dictionaries);
    553 
    554       // The AllowLatencyExperiment() is only true if we've successfully done a
    555       // full SDCH compression recently in this browser session for this host.
    556       // Note that for this path, there might be no applicable dictionaries,
    557       // and hence we can't participate in the experiment.
    558       if (!avail_dictionaries.empty() &&
    559           SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
    560         // We are participating in the test (or control), and hence we'll
    561         // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
    562         // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
    563         packet_timing_enabled_ = true;
    564         if (base::RandDouble() < .01) {
    565           sdch_test_control_ = true;  // 1% probability.
    566           advertise_sdch = false;
    567         } else {
    568           sdch_test_activated_ = true;
    569         }
    570       }
    571     }
    572 
    573     // Supply Accept-Encoding headers first so that it is more likely that they
    574     // will be in the first transmitted packet.  This can sometimes make it
    575     // easier to filter and analyze the streams to assure that a proxy has not
    576     // damaged these headers.  Some proxies deliberately corrupt Accept-Encoding
    577     // headers.
    578     if (!advertise_sdch) {
    579       // Tell the server what compression formats we support (other than SDCH).
    580       request_info_.extra_headers.SetHeader(
    581           HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    582     } else {
    583       // Include SDCH in acceptable list.
    584       request_info_.extra_headers.SetHeader(
    585           HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
    586       if (!avail_dictionaries.empty()) {
    587         request_info_.extra_headers.SetHeader(
    588             kAvailDictionaryHeader,
    589             avail_dictionaries);
    590         sdch_dictionary_advertised_ = true;
    591         // Since we're tagging this transaction as advertising a dictionary,
    592         // we'll definitely employ an SDCH filter (or tentative sdch filter)
    593         // when we get a response.  When done, we'll record histograms via
    594         // SDCH_DECODE or SDCH_PASSTHROUGH.  Hence we need to record packet
    595         // arrival times.
    596         packet_timing_enabled_ = true;
    597       }
    598     }
    599   }
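          // Illustrative resulting headers (the dictionary hash is an example
          // only). With SDCH advertised:
          //   Accept-Encoding: gzip,deflate,sdch
          //   Avail-Dictionary: h48AXpzC
          // Without SDCH:
          //   Accept-Encoding: gzip,deflate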
    600 
    601   if (http_user_agent_settings_) {
    602     // Only add default Accept-Language if the request didn't have it
    603     // specified.
    604     std::string accept_language =
    605         http_user_agent_settings_->GetAcceptLanguage();
    606     if (!accept_language.empty()) {
    607       request_info_.extra_headers.SetHeaderIfMissing(
    608           HttpRequestHeaders::kAcceptLanguage,
    609           accept_language);
    610     }
    611   }
    612 }
    613 
    614 void URLRequestHttpJob::AddCookieHeaderAndStart() {
    615   // No matter what, we want to report our status as IO pending since we will
    616   // be notifying our consumer asynchronously via OnStartCompleted.
    617   SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
    618 
    619   // If the request was destroyed, then there is no more work to do.
    620   if (!request_)
    621     return;
    622 
    623   CookieStore* cookie_store = request_->context()->cookie_store();
    624   if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    625     net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
    626     if (cookie_monster) {
    627       cookie_monster->GetAllCookiesForURLAsync(
    628           request_->url(),
    629           base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
    630                      weak_factory_.GetWeakPtr()));
    631     } else {
    632       CheckCookiePolicyAndLoad(CookieList());
    633     }
    634   } else {
    635     DoStartTransaction();
    636   }
    637 }
    638 
    639 void URLRequestHttpJob::DoLoadCookies() {
    640   CookieOptions options;
    641   options.set_include_httponly();
    642   request_->context()->cookie_store()->GetCookiesWithOptionsAsync(
    643       request_->url(), options,
    644       base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
    645                  weak_factory_.GetWeakPtr()));
    646 }
    647 
    648 void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    649     const CookieList& cookie_list) {
    650   if (CanGetCookies(cookie_list))
    651     DoLoadCookies();
    652   else
    653     DoStartTransaction();
    654 }
    655 
    656 void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
    657   if (!cookie_line.empty()) {
    658     request_info_.extra_headers.SetHeader(
    659         HttpRequestHeaders::kCookie, cookie_line);
    660     // Disable privacy mode as we are sending cookies anyway.
    661     request_info_.privacy_mode = kPrivacyModeDisabled;
    662   }
    663   DoStartTransaction();
    664 }
    665 
    666 void URLRequestHttpJob::DoStartTransaction() {
    667   // We may have been canceled while retrieving cookies.
    668   if (GetStatus().is_success()) {
    669     StartTransaction();
    670   } else {
    671     NotifyCanceled();
    672   }
    673 }
    674 
    675 void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
    676   // End of the call started in OnStartCompleted.
    677   OnCallToDelegateComplete();
    678 
    679   if (result != net::OK) {
    680     std::string source("delegate");
    681     request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
    682                                  NetLog::StringCallback("source", &source));
    683     NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    684     return;
    685   }
    686 
    687   DCHECK(transaction_.get());
    688 
    689   const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
    690   DCHECK(response_info);
    691 
    692   response_cookies_.clear();
    693   response_cookies_save_index_ = 0;
    694 
    695   FetchResponseCookies(&response_cookies_);
    696 
    697   if (!GetResponseHeaders()->GetDateValue(&response_date_))
    698     response_date_ = base::Time();
    699 
    700   // Now, loop over the response cookies, and attempt to persist each.
    701   SaveNextCookie();
    702 }
    703 
    704 // If the save occurs synchronously, SaveNextCookie will loop and save the next
    705 // cookie. If the save is deferred, the callback is responsible for continuing
    706 // to iterate through the cookies.
    707 // TODO(erikwright): Modify the CookieStore API to indicate via return value
    708 // whether it completed synchronously or asynchronously.
    709 // See http://crbug.com/131066.
    710 void URLRequestHttpJob::SaveNextCookie() {
    711   // No matter what, we want to report our status as IO pending since we will
    712   // be notifying our consumer asynchronously via OnStartCompleted.
    713   SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
    714 
    715   // Used to communicate with the callback. See the implementation of
    716   // OnCookieSaved.
    717   scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
    718   scoped_refptr<SharedBoolean> save_next_cookie_running =
    719       new SharedBoolean(true);
    720 
    721   if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
    722       request_->context()->cookie_store() &&
    723       response_cookies_.size() > 0) {
    724     CookieOptions options;
    725     options.set_include_httponly();
    726     options.set_server_time(response_date_);
    727 
    728     net::CookieStore::SetCookiesCallback callback(
    729         base::Bind(&URLRequestHttpJob::OnCookieSaved,
    730                    weak_factory_.GetWeakPtr(),
    731                    save_next_cookie_running,
    732                    callback_pending));
    733 
    734     // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    735     // synchronously.
    736     while (!callback_pending->data &&
    737            response_cookies_save_index_ < response_cookies_.size()) {
    738       if (CanSetCookie(
    739           response_cookies_[response_cookies_save_index_], &options)) {
    740         callback_pending->data = true;
    741         request_->context()->cookie_store()->SetCookieWithOptionsAsync(
    742             request_->url(), response_cookies_[response_cookies_save_index_],
    743             options, callback);
    744       }
    745       ++response_cookies_save_index_;
    746     }
    747   }
    748 
    749   save_next_cookie_running->data = false;
    750 
    751   if (!callback_pending->data) {
    752     response_cookies_.clear();
    753     response_cookies_save_index_ = 0;
    754     SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    755     NotifyHeadersComplete();
    756     return;
    757   }
    758 }
    759 
    760 // |save_next_cookie_running| is true when the callback is bound and set to
    761 // false when SaveNextCookie exits, allowing the callback to determine if the
    762 // save occurred synchronously or asynchronously.
    763 // |callback_pending| is false when the callback is invoked and will be set to
    764 // true by the callback, allowing SaveNextCookie to detect whether the save
    765 // occurred synchronously.
    766 // See SaveNextCookie() for more information.
    767 void URLRequestHttpJob::OnCookieSaved(
    768     scoped_refptr<SharedBoolean> save_next_cookie_running,
    769     scoped_refptr<SharedBoolean> callback_pending,
    770     bool cookie_status) {
    771   callback_pending->data = false;
    772 
    773   // If we were called synchronously, return.
    774   if (save_next_cookie_running->data) {
    775     return;
    776   }
    777 
    778   // We were called asynchronously, so trigger the next save.
    779   // We may have been canceled within OnSetCookie.
    780   if (GetStatus().is_success()) {
    781     SaveNextCookie();
    782   } else {
    783     NotifyCanceled();
    784   }
    785 }
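        // Illustrative traces of the two paths through SaveNextCookie() and
        // OnCookieSaved():
        //  * Synchronous store: SetCookieWithOptionsAsync() invokes the callback
        //    before returning, so OnCookieSaved() runs while
        //    |save_next_cookie_running| is still true; it just clears
        //    |callback_pending| and the while-loop moves on to the next cookie.
        //  * Asynchronous store: SaveNextCookie() returns with |callback_pending|
        //    set; when the store later invokes OnCookieSaved(),
        //    |save_next_cookie_running| is already false, so OnCookieSaved() calls
        //    SaveNextCookie() again to resume iteration.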
    786 
    787 void URLRequestHttpJob::FetchResponseCookies(
    788     std::vector<std::string>* cookies) {
    789   const std::string name = "Set-Cookie";
    790   std::string value;
    791 
    792   void* iter = NULL;
    793   HttpResponseHeaders* headers = GetResponseHeaders();
    794   while (headers->EnumerateHeader(&iter, name, &value)) {
    795     if (!value.empty())
    796       cookies->push_back(value);
    797   }
    798 }
    799 
    800 // NOTE: |ProcessStrictTransportSecurityHeader| and
    801 // |ProcessPublicKeyPinsHeader| have very similar structures, by design.
    802 void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
    803   DCHECK(response_info_);
    804   TransportSecurityState* security_state =
    805       request_->context()->transport_security_state();
    806   const SSLInfo& ssl_info = response_info_->ssl_info;
    807 
    808   // Only accept HSTS headers on HTTPS connections that have no
    809   // certificate errors.
    810   if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
    811       !security_state)
    812     return;
    813 
    814   // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
    815   //
    816   //   If a UA receives more than one STS header field in a HTTP response
    817   //   message over secure transport, then the UA MUST process only the
    818   //   first such header field.
    819   HttpResponseHeaders* headers = GetResponseHeaders();
    820   std::string value;
    821   if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
    822     security_state->AddHSTSHeader(request_info_.url.host(), value);
    823 }
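        // Illustrative (header value is an example only): a response over
        // error-free HTTPS carrying
        //   Strict-Transport-Security: max-age=31536000; includeSubDomains
        // is handed to TransportSecurityState::AddHSTSHeader(); passing a NULL
        // iterator to EnumerateHeader() means only the first such header is used,
        // matching the "process only the first" requirement quoted above.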
    824 
    825 void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
    826   DCHECK(response_info_);
    827   TransportSecurityState* security_state =
    828       request_->context()->transport_security_state();
    829   const SSLInfo& ssl_info = response_info_->ssl_info;
    830 
    831   // Only accept HPKP headers on HTTPS connections that have no
    832   // certificate errors.
    833   if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
    834       !security_state)
    835     return;
    836 
    837   // http://tools.ietf.org/html/draft-ietf-websec-key-pinning:
    838   //
    839   //   If a UA receives more than one PKP header field in an HTTP
    840   //   response message over secure transport, then the UA MUST process
    841   //   only the first such header field.
    842   HttpResponseHeaders* headers = GetResponseHeaders();
    843   std::string value;
    844   if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value))
    845     security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info);
    846 }
    847 
    848 void URLRequestHttpJob::OnStartCompleted(int result) {
    849   RecordTimer();
    850 
    851   // If the request was destroyed, then there is no more work to do.
    852   if (!request_)
    853     return;
    854 
    855   // If the transaction was destroyed, then the job was cancelled, and
    856   // we can just ignore this notification.
    857   if (!transaction_.get())
    858     return;
    859 
    860   receive_headers_end_ = base::TimeTicks::Now();
    861 
    862   // Clear the IO_PENDING status
    863   SetStatus(URLRequestStatus());
    864 
    865   const URLRequestContext* context = request_->context();
    866 
    867   if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
    868       transaction_->GetResponseInfo() != NULL) {
    869     FraudulentCertificateReporter* reporter =
    870       context->fraudulent_certificate_reporter();
    871     if (reporter != NULL) {
    872       const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
    873       bool sni_available = SSLConfigService::IsSNIAvailable(
    874           context->ssl_config_service());
    875       const std::string& host = request_->url().host();
    876 
    877       reporter->SendReport(host, ssl_info, sni_available);
    878     }
    879   }
    880 
    881   if (result == OK) {
    882     scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    883     if (network_delegate()) {
    884       // Note that |this| may not be deleted until
    885       // |on_headers_received_callback_| or
    886       // |NetworkDelegate::URLRequestDestroyed()| has been called.
    887       OnCallToDelegate();
    888       int error = network_delegate()->NotifyHeadersReceived(
    889           request_,
    890           on_headers_received_callback_,
    891           headers.get(),
    892           &override_response_headers_);
    893       if (error != net::OK) {
    894         if (error == net::ERR_IO_PENDING) {
    895           awaiting_callback_ = true;
    896         } else {
    897           std::string source("delegate");
    898           request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
    899                                        NetLog::StringCallback("source",
    900                                                               &source));
    901           OnCallToDelegateComplete();
    902           NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
    903         }
    904         return;
    905       }
    906     }
    907 
    908     SaveCookiesAndNotifyHeadersComplete(net::OK);
    909   } else if (IsCertificateError(result)) {
    910     // We encountered an SSL certificate error.
    911     if (result == ERR_SSL_WEAK_SERVER_EPHEMERAL_DH_KEY ||
    912         result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN) {
    913       // These are hard failures. They're handled separately and don't have
    914       // the correct cert status, so set it here.
    915       SSLInfo info(transaction_->GetResponseInfo()->ssl_info);
    916       info.cert_status = MapNetErrorToCertStatus(result);
    917       NotifySSLCertificateError(info, true);
    918     } else {
    919       // Maybe overridable, maybe not. Ask the delegate to decide.
    920       TransportSecurityState::DomainState domain_state;
    921       const URLRequestContext* context = request_->context();
    922       const bool fatal = context->transport_security_state() &&
    923           context->transport_security_state()->GetDomainState(
    924               request_info_.url.host(),
    925               SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
    926               &domain_state) &&
    927           domain_state.ShouldSSLErrorsBeFatal();
    928       NotifySSLCertificateError(
    929           transaction_->GetResponseInfo()->ssl_info, fatal);
    930     }
    931   } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    932     NotifyCertificateRequested(
    933         transaction_->GetResponseInfo()->cert_request_info.get());
    934   } else {
    935     NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    936   }
    937 }
    938 
    939 void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
    940   awaiting_callback_ = false;
    941 
    942   // Check that there are no callbacks to already canceled requests.
    943   DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
    944 
    945   SaveCookiesAndNotifyHeadersComplete(result);
    946 }
    947 
    948 void URLRequestHttpJob::OnReadCompleted(int result) {
    949   read_in_progress_ = false;
    950 
    951   if (ShouldFixMismatchedContentLength(result))
    952     result = OK;
    953 
    954   if (result == OK) {
    955     NotifyDone(URLRequestStatus());
    956   } else if (result < 0) {
    957     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
    958   } else {
    959     // Clear the IO_PENDING status
    960     SetStatus(URLRequestStatus());
    961   }
    962 
    963   NotifyReadComplete(result);
    964 }
    965 
    966 void URLRequestHttpJob::RestartTransactionWithAuth(
    967     const AuthCredentials& credentials) {
    968   auth_credentials_ = credentials;
    969 
    970   // These will be reset in OnStartCompleted.
    971   response_info_ = NULL;
    972   receive_headers_end_ = base::TimeTicks();
    973   response_cookies_.clear();
    974 
    975   ResetTimer();
    976 
    977   // Update the cookies, since the cookie store may have been updated from the
    978   // headers in the 401/407. Since cookies were already appended to
    979   // extra_headers, we need to strip them out before adding them again.
    980   request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);
    981 
    982   AddCookieHeaderAndStart();
    983 }
    984 
    985 void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
    986   DCHECK(!transaction_.get()) << "cannot change once started";
    987   request_info_.upload_data_stream = upload;
    988 }
    989 
    990 void URLRequestHttpJob::SetExtraRequestHeaders(
    991     const HttpRequestHeaders& headers) {
    992   DCHECK(!transaction_.get()) << "cannot change once started";
    993   request_info_.extra_headers.CopyFrom(headers);
    994 }
    995 
    996 LoadState URLRequestHttpJob::GetLoadState() const {
    997   return transaction_.get() ?
    998       transaction_->GetLoadState() : LOAD_STATE_IDLE;
    999 }
   1000 
   1001 UploadProgress URLRequestHttpJob::GetUploadProgress() const {
   1002   return transaction_.get() ?
   1003       transaction_->GetUploadProgress() : UploadProgress();
   1004 }
   1005 
   1006 bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
   1007   DCHECK(transaction_.get());
   1008 
   1009   if (!response_info_)
   1010     return false;
   1011 
   1012   return GetResponseHeaders()->GetMimeType(mime_type);
   1013 }
   1014 
   1015 bool URLRequestHttpJob::GetCharset(std::string* charset) {
   1016   DCHECK(transaction_.get());
   1017 
   1018   if (!response_info_)
   1019     return false;
   1020 
   1021   return GetResponseHeaders()->GetCharset(charset);
   1022 }
   1023 
   1024 void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
   1025   DCHECK(request_);
   1026   DCHECK(transaction_.get());
   1027 
   1028   if (response_info_) {
   1029     *info = *response_info_;
   1030     if (override_response_headers_.get())
   1031       info->headers = override_response_headers_;
   1032   }
   1033 }
   1034 
   1035 void URLRequestHttpJob::GetLoadTimingInfo(
   1036     LoadTimingInfo* load_timing_info) const {
   1037   // If we haven't made it far enough to receive any headers, don't return
   1038   // anything.  This makes for more consistent behavior in the case of errors.
   1039   if (!transaction_ || receive_headers_end_.is_null())
   1040     return;
   1041   if (transaction_->GetLoadTimingInfo(load_timing_info))
   1042     load_timing_info->receive_headers_end = receive_headers_end_;
   1043 }
   1044 
   1045 bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
   1046   DCHECK(transaction_.get());
   1047 
   1048   if (!response_info_)
   1049     return false;
   1050 
   1051   // TODO(darin): Why are we extracting response cookies again?  Perhaps we
   1052   // should just leverage response_cookies_.
   1053 
   1054   cookies->clear();
   1055   FetchResponseCookies(cookies);
   1056   return true;
   1057 }
   1058 
   1059 int URLRequestHttpJob::GetResponseCode() const {
   1060   DCHECK(transaction_.get());
   1061 
   1062   if (!response_info_)
   1063     return -1;
   1064 
   1065   return GetResponseHeaders()->response_code();
   1066 }
   1067 
   1068 Filter* URLRequestHttpJob::SetupFilter() const {
   1069   DCHECK(transaction_.get());
   1070   if (!response_info_)
   1071     return NULL;
   1072 
   1073   std::vector<Filter::FilterType> encoding_types;
   1074   std::string encoding_type;
   1075   HttpResponseHeaders* headers = GetResponseHeaders();
   1076   void* iter = NULL;
   1077   while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
   1078     encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
   1079   }
   1080 
   1081   if (filter_context_->IsSdchResponse()) {
   1082     // We are wary of proxies that discard or damage SDCH encoding.  If a server
   1083     // explicitly states that this is not SDCH content, then we can correct our
   1084     // assumption that this is an SDCH response, and avoid the need to recover
   1085     // as though the content is corrupted (when we discover it is not SDCH
   1086     // encoded).
   1087     std::string sdch_response_status;
   1088     iter = NULL;
   1089     while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
   1090                                     &sdch_response_status)) {
   1091       if (sdch_response_status == "0") {
   1092         filter_context_->ResetSdchResponseToFalse();
   1093         break;
   1094       }
   1095     }
   1096   }
   1097 
   1098   // Even if encoding types are empty, there is a chance that we need to add
   1099   // some decoding, as some proxies strip encoding completely. In such cases,
   1100   // we may need to add (for example) SDCH filtering (when the context suggests
   1101   // it is appropriate).
   1102   Filter::FixupEncodingTypes(*filter_context_, &encoding_types);
   1103 
   1104   return !encoding_types.empty()
   1105       ? Filter::Factory(encoding_types, *filter_context_) : NULL;
   1106 }
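        // Illustrative: a response with "Content-Encoding: gzip" produces a single
        // gzip filter; if this job advertised an SDCH dictionary but a proxy
        // stripped or mangled the encoding headers, FixupEncodingTypes() can still
        // insert the SDCH (and gzip) decoding steps so the body is not mistaken
        // for corrupt data.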
   1107 
   1108 bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
   1109   // HTTP is always safe.
   1110   // TODO(pauljensen): Remove once crbug.com/146591 is fixed.
   1111   if (location.is_valid() &&
   1112       (location.scheme() == "http" || location.scheme() == "https")) {
   1113     return true;
   1114   }
   1115   // Query URLRequestJobFactory as to whether |location| would be safe to
   1116   // redirect to.
   1117   return request_->context()->job_factory() &&
   1118       request_->context()->job_factory()->IsSafeRedirectTarget(location);
   1119 }
   1120 
   1121 bool URLRequestHttpJob::NeedsAuth() {
   1122   int code = GetResponseCode();
   1123   if (code == -1)
   1124     return false;
   1125 
   1126   // Check if we need either Proxy or WWW Authentication.  This could happen
   1127   // because we either provided no auth info, or provided incorrect info.
   1128   switch (code) {
   1129     case 407:
   1130       if (proxy_auth_state_ == AUTH_STATE_CANCELED)
   1131         return false;
   1132       proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
   1133       return true;
   1134     case 401:
   1135       if (server_auth_state_ == AUTH_STATE_CANCELED)
   1136         return false;
   1137       server_auth_state_ = AUTH_STATE_NEED_AUTH;
   1138       return true;
   1139   }
   1140   return false;
   1141 }
   1142 
   1143 void URLRequestHttpJob::GetAuthChallengeInfo(
   1144     scoped_refptr<AuthChallengeInfo>* result) {
   1145   DCHECK(transaction_.get());
   1146   DCHECK(response_info_);
   1147 
   1148   // sanity checks:
   1149   DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
   1150          server_auth_state_ == AUTH_STATE_NEED_AUTH);
   1151   DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
   1152          (GetResponseHeaders()->response_code() ==
   1153           HTTP_PROXY_AUTHENTICATION_REQUIRED));
   1154 
   1155   *result = response_info_->auth_challenge;
   1156 }
   1157 
   1158 void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
   1159   DCHECK(transaction_.get());
   1160 
   1161   // Proxy gets set first, then WWW.
   1162   if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
   1163     proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
   1164   } else {
   1165     DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
   1166     server_auth_state_ = AUTH_STATE_HAVE_AUTH;
   1167   }
   1168 
   1169   RestartTransactionWithAuth(credentials);
   1170 }
   1171 
   1172 void URLRequestHttpJob::CancelAuth() {
   1173   // Proxy gets set first, then WWW.
   1174   if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
   1175     proxy_auth_state_ = AUTH_STATE_CANCELED;
   1176   } else {
   1177     DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
   1178     server_auth_state_ = AUTH_STATE_CANCELED;
   1179   }
   1180 
   1181   // These will be reset in OnStartCompleted.
   1182   response_info_ = NULL;
   1183   receive_headers_end_ = base::TimeTicks::Now();
   1184   response_cookies_.clear();
   1185 
   1186   ResetTimer();
   1187 
   1188   // OK, let the consumer read the error page...
   1189   //
   1190   // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
   1191   // which will cause the consumer to receive OnResponseStarted instead of
   1192   // OnAuthRequired.
   1193   //
   1194   // We have to do this via InvokeLater to avoid "recursing" the consumer.
   1195   //
   1196   base::MessageLoop::current()->PostTask(
   1197       FROM_HERE,
   1198       base::Bind(&URLRequestHttpJob::OnStartCompleted,
   1199                  weak_factory_.GetWeakPtr(), OK));
   1200 }
   1201 
   1202 void URLRequestHttpJob::ContinueWithCertificate(
   1203     X509Certificate* client_cert) {
   1204   DCHECK(transaction_.get());
   1205 
   1206   DCHECK(!response_info_) << "should not have a response yet";
   1207   receive_headers_end_ = base::TimeTicks();
   1208 
   1209   ResetTimer();
   1210 
   1211   // No matter what, we want to report our status as IO pending since we will
   1212   // be notifying our consumer asynchronously via OnStartCompleted.
   1213   SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
   1214 
   1215   int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
   1216   if (rv == ERR_IO_PENDING)
   1217     return;
   1218 
   1219   // The transaction started synchronously, but we need to notify the
   1220   // URLRequest delegate via the message loop.
   1221   base::MessageLoop::current()->PostTask(
   1222       FROM_HERE,
   1223       base::Bind(&URLRequestHttpJob::OnStartCompleted,
   1224                  weak_factory_.GetWeakPtr(), rv));
   1225 }
   1226 
   1227 void URLRequestHttpJob::ContinueDespiteLastError() {
   1228   // If the transaction was destroyed, then the job was cancelled.
   1229   if (!transaction_.get())
   1230     return;
   1231 
   1232   DCHECK(!response_info_) << "should not have a response yet";
   1233   receive_headers_end_ = base::TimeTicks();
   1234 
   1235   ResetTimer();
   1236 
   1237   // No matter what, we want to report our status as IO pending since we will
   1238   // be notifying our consumer asynchronously via OnStartCompleted.
   1239   SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
   1240 
   1241   int rv = transaction_->RestartIgnoringLastError(start_callback_);
   1242   if (rv == ERR_IO_PENDING)
   1243     return;
   1244 
   1245   // The transaction started synchronously, but we need to notify the
   1246   // URLRequest delegate via the message loop.
   1247   base::MessageLoop::current()->PostTask(
   1248       FROM_HERE,
   1249       base::Bind(&URLRequestHttpJob::OnStartCompleted,
   1250                  weak_factory_.GetWeakPtr(), rv));
   1251 }
   1252 
   1253 bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
   1254   // Some servers send the body compressed, but specify the content length as
   1255   // the uncompressed size.  Although this violates the HTTP spec, we want to
   1256   // support it (as IE and Firefox do), but *only* for an exact match.
   1257   // See http://crbug.com/79694.
   1258   if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
   1259       rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
   1260     if (request_ && request_->response_headers()) {
   1261       int64 expected_length = request_->response_headers()->GetContentLength();
   1262       VLOG(1) << __FUNCTION__ << "() "
   1263               << "\"" << request_->url().spec() << "\""
   1264               << " content-length = " << expected_length
   1265               << " pre total = " << prefilter_bytes_read()
   1266               << " post total = " << postfilter_bytes_read();
   1267       if (postfilter_bytes_read() == expected_length) {
   1268         // Clear the error.
   1269         return true;
   1270       }
   1271     }
   1272   }
   1273   return false;
   1274 }
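        // Illustrative (sizes are examples only): a server sends
        // "Content-Length: 1024" describing the *uncompressed* size while the body
        // is gzip-encoded, so the transaction reports ERR_CONTENT_LENGTH_MISMATCH;
        // because postfilter_bytes_read() equals the advertised 1024 bytes, the
        // error is suppressed and the read is treated as a normal end of stream.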
   1275 
   1276 bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
   1277                                     int* bytes_read) {
   1278   DCHECK_NE(buf_size, 0);
   1279   DCHECK(bytes_read);
   1280   DCHECK(!read_in_progress_);
   1281 
   1282   int rv = transaction_->Read(
   1283       buf, buf_size,
   1284       base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));
   1285 
   1286   if (ShouldFixMismatchedContentLength(rv))
   1287     rv = 0;
   1288 
   1289   if (rv >= 0) {
   1290     *bytes_read = rv;
   1291     if (!rv)
   1292       DoneWithRequest(FINISHED);
   1293     return true;
   1294   }
   1295 
   1296   if (rv == ERR_IO_PENDING) {
   1297     read_in_progress_ = true;
   1298     SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
   1299   } else {
   1300     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
   1301   }
   1302 
   1303   return false;
   1304 }
   1305 
   1306 void URLRequestHttpJob::StopCaching() {
   1307   if (transaction_.get())
   1308     transaction_->StopCaching();
   1309 }
   1310 
   1311 bool URLRequestHttpJob::GetFullRequestHeaders(
   1312     HttpRequestHeaders* headers) const {
   1313   if (!transaction_)
   1314     return false;
   1315 
   1316   return transaction_->GetFullRequestHeaders(headers);
   1317 }
   1318 
   1319 void URLRequestHttpJob::DoneReading() {
   1320   if (transaction_.get())
   1321     transaction_->DoneReading();
   1322   DoneWithRequest(FINISHED);
   1323 }
   1324 
   1325 HostPortPair URLRequestHttpJob::GetSocketAddress() const {
   1326   return response_info_ ? response_info_->socket_address : HostPortPair();
   1327 }
   1328 
   1329 void URLRequestHttpJob::RecordTimer() {
   1330   if (request_creation_time_.is_null()) {
   1331     NOTREACHED()
   1332         << "The same transaction shouldn't start twice without new timing.";
   1333     return;
   1334   }
   1335 
   1336   base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
   1337   request_creation_time_ = base::Time();
   1338 
   1339   UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);
   1340 }
   1341 
   1342 void URLRequestHttpJob::ResetTimer() {
   1343   if (!request_creation_time_.is_null()) {
   1344     NOTREACHED()
   1345         << "The timer was reset before it was recorded.";
   1346     return;
   1347   }
   1348   request_creation_time_ = base::Time::Now();
   1349 }
   1350 
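// Editor's note (not part of the original source): RecordTimer() and
// ResetTimer() enforce a record-once invariant around request_creation_time_:
// ResetTimer() arms the clock (the field must currently be null) and
// RecordTimer() emits Net.HttpTimeToFirstByte and disarms it, so a restarted
// transaction must re-arm before it can record again. Illustrative call
// sequence only:
//
//   ResetTimer();    // null  -> Now()  (armed)
//   RecordTimer();   // Now() -> null   (histogram emitted, disarmed)
//   RecordTimer();   // null          -> NOTREACHED: nothing armed to record
//   ResetTimer();
//   ResetTimer();    // already armed -> NOTREACHED: reset before recording
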
   1351 void URLRequestHttpJob::UpdatePacketReadTimes() {
   1352   if (!packet_timing_enabled_)
   1353     return;
   1354 
   1355   if (filter_input_byte_count() <= bytes_observed_in_packets_) {
   1356     DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
   1357     return;  // No new bytes have arrived.
   1358   }
   1359 
   1360   final_packet_time_ = base::Time::Now();
   1361   if (!bytes_observed_in_packets_)
   1362     request_time_snapshot_ = request_ ? request_->request_time() : base::Time();
   1363 
   1364   bytes_observed_in_packets_ = filter_input_byte_count();
   1365 }
   1366 
   1367 void URLRequestHttpJob::RecordPacketStats(
   1368     FilterContext::StatisticSelector statistic) const {
   1369   if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
   1370     return;
   1371 
   1372   base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
   1373   switch (statistic) {
   1374     case FilterContext::SDCH_DECODE: {
   1375       UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
   1376           static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
   1377       return;
   1378     }
   1379     case FilterContext::SDCH_PASSTHROUGH: {
   1380       // Despite advertising a dictionary, we handled non-SDCH compressed
   1381       // content, so there is nothing to record here.
   1382       return;
   1383     }
   1384 
   1385     case FilterContext::SDCH_EXPERIMENT_DECODE: {
   1386       UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
   1387                                   duration,
   1388                                   base::TimeDelta::FromMilliseconds(20),
   1389                                   base::TimeDelta::FromMinutes(10), 100);
   1390       return;
   1391     }
   1392     case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
   1393       UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
   1394                                   duration,
   1395                                   base::TimeDelta::FromMilliseconds(20),
   1396                                   base::TimeDelta::FromMinutes(10), 100);
   1397       return;
   1398     }
   1399     default:
   1400       NOTREACHED();
   1401       return;
   1402   }
   1403 }
   1404 
   1405 // The common histogram parameters used by all compression-tracking histograms.
   1406 #define COMPRESSION_HISTOGRAM(name, sample) \
   1407     do { \
   1408       UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
   1409                                   500, 1000000, 100); \
   1410     } while (0)
   1411 
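// Editor's note (not part of the original source): the macro above relies on
// string-literal concatenation, so every sample is recorded in a custom-counts
// histogram under the "Net.Compress." prefix with a 500..1,000,000 range and
// 100 buckets. For example, this use:
//
//   COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
//
// expands (ignoring the do/while wrapper) to:
//
//   UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress.SSL.BytesBeforeCompression",
//                               compressed_B, 500, 1000000, 100);
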
   1412 void URLRequestHttpJob::RecordCompressionHistograms() {
   1413   DCHECK(request_);
   1414   if (!request_)
   1415     return;
   1416 
   1417   if (is_cached_content_ ||                // Don't record cached content.
   1418       !GetStatus().is_success() ||         // Don't record failed content.
   1419       !IsCompressibleContent() ||          // Only record compressible content.
   1420       !prefilter_bytes_read())             // Zero-byte responses aren't useful.
   1421     return;
   1422 
   1423   // Miniature responses aren't really compressible.  Don't count them.
   1424   const int kMinSize = 16;
   1425   if (prefilter_bytes_read() < kMinSize)
   1426     return;
   1427 
   1428   // Only record for http or https URLs.
   1429   bool is_http = request_->url().SchemeIs("http");
   1430   bool is_https = request_->url().SchemeIs("https");
   1431   if (!is_http && !is_https)
   1432     return;
   1433 
   1434   int compressed_B = prefilter_bytes_read();
   1435   int decompressed_B = postfilter_bytes_read();
   1436   bool was_filtered = HasFilter();
   1437 
   1438   // We want to record how often downloaded resources are compressed.
   1439   // However, different protocols may have different properties, so we
   1440   // put each request into one of three groups (see the note after this
   1441   // function):
   1442   //      a) SSL resources
   1443   //         Proxies cannot tamper with compression headers over SSL.
   1444   //      b) Non-SSL, loaded-via-proxy resources
   1445   //         In this case, we know a proxy might have interfered.
   1446   //      c) Non-SSL, loaded-without-proxy resources
   1447   //         In this case, we know there was no explicit proxy.  However,
   1448   //         it is possible that a transparent proxy was still interfering.
   1449   //
   1450   // For each group, we record the same 3 histograms.
   1451 
   1452   if (is_https) {
   1453     if (was_filtered) {
   1454       COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
   1455       COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
   1456     } else {
   1457       COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
   1458     }
   1459     return;
   1460   }
   1461 
   1462   if (request_->was_fetched_via_proxy()) {
   1463     if (was_filtered) {
   1464       COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
   1465       COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
   1466     } else {
   1467       COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
   1468     }
   1469     return;
   1470   }
   1471 
   1472   if (was_filtered) {
   1473     COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
   1474     COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
   1475   } else {
   1476     COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
   1477   }
   1478 }
   1479 
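// Editor's sketch (not part of the original source): the branches above pick
// one of three groups and then record the same histograms within it, so the
// full set of names is the cross product of {SSL, Proxy, NoProxy} and
// {BytesBeforeCompression, BytesAfterCompression, ShouldHaveBeenCompressed},
// all under the "Net.Compress." prefix. The group choice, restated as a
// hypothetical helper for illustration only:
//
//   const char* CompressionGroup(bool is_https, bool fetched_via_proxy) {
//     if (is_https)
//       return "SSL";      // Proxies cannot alter headers inside TLS.
//     return fetched_via_proxy ? "Proxy" : "NoProxy";
//   }
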
   1480 bool URLRequestHttpJob::IsCompressibleContent() const {
   1481   std::string mime_type;
   1482   return GetMimeType(&mime_type) &&
   1483       (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
   1484        IsSupportedNonImageMimeType(mime_type.c_str()));
   1485 }
   1486 
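// Editor's note (not part of the original source): "compressible" above means
// a supported JavaScript MIME type or any supported non-image type, on the
// assumption that images are typically already compressed. Illustrative
// examples only:
//
//   // mime_type == "text/html"              -> compressible (non-image type)
//   // mime_type == "application/javascript" -> compressible (JavaScript type)
//   // mime_type == "image/png"              -> not compressible (image type)
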
   1487 void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
   1488   if (start_time_.is_null())
   1489     return;
   1490 
   1491   base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
   1492   UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);
   1493 
   1494   if (reason == FINISHED) {
   1495     UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
   1496   } else {
   1497     UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
   1498   }
   1499 
   1500   if (response_info_) {
   1501     if (response_info_->was_cached) {
   1502       UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
   1503     } else {
   1504       UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
   1505     }
   1506   }
   1507 
   1508   if ((request_info_.load_flags & LOAD_PREFETCH) && !request_->was_cached())
   1509     UMA_HISTOGRAM_COUNTS("Net.Prefetch.PrefilterBytesReadFromNetwork",
   1510                          prefilter_bytes_read());
   1511 
   1512   start_time_ = base::TimeTicks();
   1513 }
   1514 
   1515 void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
   1516   if (done_)
   1517     return;
   1518   done_ = true;
   1519   RecordPerfHistograms(reason);
   1520   if (reason == FINISHED) {
   1521     request_->set_received_response_content_length(prefilter_bytes_read());
   1522     RecordCompressionHistograms();
   1523   }
   1524 }
   1525 
   1526 HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
   1527   DCHECK(transaction_.get());
   1528   DCHECK(transaction_->GetResponseInfo());
   1529   return override_response_headers_.get() ?
   1530              override_response_headers_.get() :
   1531              transaction_->GetResponseInfo()->headers.get();
   1532 }
   1533 
   1534 void URLRequestHttpJob::NotifyURLRequestDestroyed() {
   1535   awaiting_callback_ = false;
   1536 }
   1537 
   1538 void URLRequestHttpJob::OnDetachRequest() {
   1539   http_transaction_delegate_->OnDetachRequest();
   1540 }
   1541 
   1542 }  // namespace net
   1543