// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_util.h"
#include "base/file_version_info.h"
#include "base/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/string_util.h"
#include "base/time.h"
#include "net/base/cert_status_flags.h"
#include "net/base/cookie_policy.h"
#include "net/base/cookie_store.h"
#include "net/base/filter.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/sdch_manager.h"
#include "net/base/ssl_cert_request_info.h"
#include "net/base/transport_security_state.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/url_request/https_prober.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_redirect_job.h"
#include "net/url_request/url_request_throttler_header_adapter.h"
#include "net/url_request/url_request_throttler_manager.h"

static const char kAvailDictionaryHeader[] = "Avail-Dictionary";

// When histogramming results related to SDCH and/or an SDCH latency test, the
// number of packets for which we need to record arrival times so as to
// calculate interpacket latencies.  We currently are only looking at the
// first few packets, as we're monitoring the impact of the initial TCP
// congestion window on stalling of transmissions.
static const size_t kSdchPacketHistogramCount = 5;

namespace net {

namespace {

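// Self-deleting delegate for HTTPSProber: on a successful probe it records
// the host in the TransportSecurityState as opportunistically HTTPS-capable,
// then destroys itself.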
class HTTPSProberDelegateImpl : public HTTPSProberDelegate {
 public:
  HTTPSProberDelegateImpl(const std::string& host, int max_age,
                          bool include_subdomains,
                          TransportSecurityState* sts)
      : host_(host),
        max_age_(max_age),
        include_subdomains_(include_subdomains),
        sts_(sts) { }

  virtual void ProbeComplete(bool result) {
    if (result) {
      base::Time current_time(base::Time::Now());
      base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age_);

      TransportSecurityState::DomainState domain_state;
      domain_state.expiry = current_time + max_age_delta;
      domain_state.mode =
          TransportSecurityState::DomainState::MODE_OPPORTUNISTIC;
      domain_state.include_subdomains = include_subdomains_;

      sts_->EnableHost(host_, domain_state);
    }

    delete this;
  }

 private:
  const std::string host_;
  const int max_age_;
  const bool include_subdomains_;
  scoped_refptr<TransportSecurityState> sts_;
};

}  // namespace

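// Adapter that exposes the pieces of this job that the Filter machinery
// needs (MIME type, URL, byte counts, response code, packet statistics)
// without giving filters a direct dependency on URLRequestHttpJob.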
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const;
  virtual bool GetURL(GURL* gurl) const;
  virtual base::Time GetRequestTime() const;
  virtual bool IsCachedContent() const;
  virtual bool IsDownload() const;
  virtual bool IsSdchResponse() const;
  virtual int64 GetByteReadCount() const;
  virtual int GetResponseCode() const;
  virtual void RecordPacketStats(StatisticSelector statistic) const;

 private:
  URLRequestHttpJob* job_;

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};

URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}

URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}

bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}

bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
  if (!job_->request())
    return false;
  *gurl = job_->request()->url();
  return true;
}

base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
  return job_->request() ? job_->request()->request_time() : base::Time();
}

bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}

bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}

bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}

int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}

int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}

void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}

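// Job factory for "http" and "https" URLs (presumably registered with the
// URLRequestJobManager). It rejects unsafe ports, upgrades plain HTTP to
// HTTPS for hosts with a strict transport security policy, and otherwise
// creates an ordinary URLRequestHttpJob.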
// TODO(darin): make sure the port blocking code is not lost
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  int port = request->url().IntPort();
  if (!IsPortAllowedByDefault(port) && !IsPortAllowedByOverride(port))
    return new URLRequestErrorJob(request, ERR_UNSAFE_PORT);

  if (!request->context() ||
      !request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(request, ERR_INVALID_ARGUMENT);
  }

  TransportSecurityState::DomainState domain_state;
  if (scheme == "http" &&
      request->context()->transport_security_state() &&
      request->context()->transport_security_state()->IsEnabledForHost(
          &domain_state,
          request->url().host(),
          request->context()->IsSNIAvailable())) {
    if (domain_state.mode ==
         TransportSecurityState::DomainState::MODE_STRICT) {
      DCHECK_EQ(request->url().scheme(), "http");
      url_canon::Replacements<char> replacements;
      static const char kNewScheme[] = "https";
      replacements.SetScheme(kNewScheme,
                             url_parse::Component(0, strlen(kNewScheme)));
      GURL new_location = request->url().ReplaceComponents(replacements);
      return new URLRequestRedirectJob(request, new_location);
    } else {
      // TODO(agl): implement opportunistic HTTPS upgrade.
    }
  }

  return new URLRequestHttpJob(request);
}

URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
    : URLRequestJob(request),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      ALLOW_THIS_IN_INITIALIZER_LIST(start_callback_(
          this, &URLRequestHttpJob::OnStartCompleted)),
      ALLOW_THIS_IN_INITIALIZER_LIST(read_callback_(
          this, &URLRequestHttpJob::OnReadCompleted)),
      read_in_progress_(false),
      transaction_(NULL),
      throttling_entry_(URLRequestThrottlerManager::GetInstance()->
          RegisterRequestUrl(request->url())),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      bytes_observed_in_packets_(0),
      packet_times_(),
      request_time_snapshot_(),
      final_packet_time_(),
      observed_packet_count_(0),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          filter_context_(new HttpFilterContext(this))),
      ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) {
  ResetTimer();
}

void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  if (!is_cached_content_) {
    URLRequestThrottlerHeaderAdapter response_adapter(
        response_info_->headers);
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  ProcessStrictTransportSecurityHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    static const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK_EQ(request_->url(), request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    RestartTransactionWithAuth(string16(), string16());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  RecordCompressionHistograms();
  URLRequestJob::NotifyDone(status);
}

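// Drops the transaction along with the pointers derived from it;
// |response_info_| points into the transaction, so it must be cleared at the
// same time.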
void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  transaction_.reset();
  response_info_ = NULL;
  context_ = NULL;
}

void URLRequestHttpJob::StartTransaction() {
  // NOTE: This method assumes that request_info_ is already set up properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by username_ and password_.

  int rv;

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(username_, password_, &start_callback_);
    username_.clear();
    password_.clear();
  } else {
    DCHECK(request_->context());
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        &transaction_);
    if (rv == OK) {
      if (!URLRequestThrottlerManager::GetInstance()->enforce_throttling() ||
          !throttling_entry_->IsDuringExponentialBackoff()) {
        rv = transaction_->Start(
            &request_info_, &start_callback_, request_->net_log());
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
      // Make sure the context is alive for the duration of the
      // transaction.
      context_ = request_->context();
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      method_factory_.NewRunnableMethod(
          &URLRequestHttpJob::OnStartCompleted, rv));
}

void URLRequestHttpJob::AddExtraHeaders() {
  // TODO(jar): Consider optimizing away SDCH advertising bytes when the URL is
  // probably an img or such (and SDCH encoding is not likely).
  bool advertise_sdch = SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url());
  std::string avail_dictionaries;
  if (advertise_sdch) {
    SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                  &avail_dictionaries);

    // The AllowLatencyExperiment() is only true if we've successfully done a
    // full SDCH compression recently in this browser session for this host.
    // Note that for this path, there might be no applicable dictionaries, and
    // hence we can't participate in the experiment.
    if (!avail_dictionaries.empty() &&
        SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
      // We are participating in the test (or control), and hence we'll
      // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
      // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
      packet_timing_enabled_ = true;
      if (base::RandDouble() < .01) {
        sdch_test_control_ = true;  // 1% probability.
        advertise_sdch = false;
      } else {
        sdch_test_activated_ = true;
      }
    }
  }

  // Supply Accept-Encoding headers first so that it is more likely that they
  // will be in the first transmitted packet.  This can sometimes make it easier
  // to filter and analyze the streams to assure that a proxy has not damaged
  // these headers.  Some proxies deliberately corrupt Accept-Encoding headers.
  if (!advertise_sdch) {
    // Tell the server what compression formats we support (other than SDCH).
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
  } else {
    // Include SDCH in acceptable list.
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
    if (!avail_dictionaries.empty()) {
      request_info_.extra_headers.SetHeader(
          kAvailDictionaryHeader,
          avail_dictionaries);
      sdch_dictionary_advertised_ = true;
      // Since we're tagging this transaction as advertising a dictionary,
      // we'll definitely employ an SDCH filter (or tentative SDCH filter)
      // when we get a response.  When done, we'll record histograms via
      // SDCH_DECODE or SDCH_PASSTHROUGH.  Hence we need to record packet
      // arrival times.
      packet_timing_enabled_ = true;
    }
  }

  URLRequestContext* context = request_->context();
  if (context) {
    // Only add default Accept-Language and Accept-Charset if the request
    // didn't have them specified.
    if (!context->accept_language().empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          context->accept_language());
    }
    if (!context->accept_charset().empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptCharset,
          context->accept_charset());
    }
  }
}

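// Consults the cookie policy before attaching the Cookie header. The policy
// decision is delivered synchronously via OnCanGetCookiesCompleted(), which
// then starts the transaction.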
void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int policy = OK;

  if (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) {
    policy = ERR_FAILED;
  } else if (request_->context()->cookie_policy()) {
    policy = request_->context()->cookie_policy()->CanGetCookies(
        request_->url(),
        request_->first_party_for_cookies());
  }

  OnCanGetCookiesCompleted(policy);
}

void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete() {
  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(response_info, &response_cookies_);

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}

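// Persists response cookies one at a time, consulting the cookie policy for
// each. Once every entry in |response_cookies_| has been visited, the pending
// state is cleared and headers are reported as complete.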
void URLRequestHttpJob::SaveNextCookie() {
  if (response_cookies_save_index_ == response_cookies_.size()) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int policy = OK;

  if (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) {
    policy = ERR_FAILED;
  } else if (request_->context()->cookie_policy()) {
    policy = request_->context()->cookie_policy()->CanSetCookie(
        request_->url(),
        request_->first_party_for_cookies(),
        response_cookies_[response_cookies_save_index_]);
  }

  OnCanSetCookieCompleted(policy);
}

void URLRequestHttpJob::FetchResponseCookies(
    const HttpResponseInfo* response_info,
    std::vector<std::string>* cookies) {
  std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  while (response_info->headers->EnumerateHeader(&iter, name, &value)) {
    if (!value.empty())
      cookies->push_back(value);
  }
}

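// Parses any Strict-Transport-Security response headers (plus the
// experimental opportunistic variant below) and records the resulting policy
// in the context's TransportSecurityState.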
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);

  URLRequestContext* ctx = request_->context();
  if (!ctx || !ctx->transport_security_state())
    return;

  const bool https = response_info_->ssl_info.is_valid();
  const bool valid_https =
      https && !IsCertStatusError(response_info_->ssl_info.cert_status);

  std::string name = "Strict-Transport-Security";
  std::string value;

  int max_age;
  bool include_subdomains;

  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, name, &value)) {
    const bool ok = TransportSecurityState::ParseHeader(
        value, &max_age, &include_subdomains);
    if (!ok)
      continue;
    // We will only accept strict mode if we saw the header from an HTTPS
    // connection with no certificate problems.
    if (!valid_https)
      continue;
    base::Time current_time(base::Time::Now());
    base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age);

    TransportSecurityState::DomainState domain_state;
    domain_state.expiry = current_time + max_age_delta;
    domain_state.mode = TransportSecurityState::DomainState::MODE_STRICT;
    domain_state.include_subdomains = include_subdomains;

    ctx->transport_security_state()->EnableHost(request_info_.url.host(),
                                                domain_state);
  }

  // TODO(agl): change this over when we have fixed things at the server end.
  // The string should be "Opportunistic-Transport-Security";
  name = "X-Bodge-Transport-Security";

  // The enumeration cursor was exhausted by the loop above; reset it so the
  // scan for the new header name starts from the first header.
  iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, name, &value)) {
    const bool ok = TransportSecurityState::ParseHeader(
        value, &max_age, &include_subdomains);
    if (!ok)
      continue;
    // If we saw an opportunistic request over HTTPS, then clearly we can make
    // HTTPS connections to the host so we should remember this.
    if (https) {
      base::Time current_time(base::Time::Now());
      base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age);

      TransportSecurityState::DomainState domain_state;
      domain_state.expiry = current_time + max_age_delta;
      domain_state.mode =
          TransportSecurityState::DomainState::MODE_SPDY_ONLY;
      domain_state.include_subdomains = include_subdomains;

      ctx->transport_security_state()->EnableHost(request_info_.url.host(),
                                                  domain_state);
      continue;
    }

    if (!request())
      break;

    // At this point, we have a request for opportunistic encryption over HTTP.
    // In this case we need to probe to check that we can make HTTPS
    // connections to that host.
    HTTPSProber* const prober = HTTPSProber::GetInstance();
    if (prober->HaveProbed(request_info_.url.host()) ||
        prober->InFlight(request_info_.url.host())) {
      continue;
    }

    HTTPSProberDelegateImpl* delegate =
        new HTTPSProberDelegateImpl(request_info_.url.host(), max_age,
                                    include_subdomains,
                                    ctx->transport_security_state());
    if (!prober->ProbeHost(request_info_.url.host(), request()->context(),
                           delegate)) {
      delete delegate;
    }
  }
}

void URLRequestHttpJob::OnCanGetCookiesCompleted(int policy) {
  // If the request was destroyed, then there is no more work to do.
  if (request_ && request_->delegate()) {
    if (request_->context()->cookie_store()) {
      if (policy == ERR_ACCESS_DENIED) {
        request_->delegate()->OnGetCookies(request_, true);
      } else if (policy == OK) {
        request_->delegate()->OnGetCookies(request_, false);
        CookieOptions options;
        options.set_include_httponly();
        std::string cookies =
            request_->context()->cookie_store()->GetCookiesWithOptions(
                request_->url(), options);
        if (!cookies.empty()) {
          request_info_.extra_headers.SetHeader(
              HttpRequestHeaders::kCookie, cookies);
        }
      }
    }
    // We may have been canceled within OnGetCookies.
    if (GetStatus().is_success()) {
      StartTransaction();
    } else {
      NotifyCanceled();
    }
  }
}

void URLRequestHttpJob::OnCanSetCookieCompleted(int policy) {
  // If the request was destroyed, then there is no more work to do.
  if (request_ && request_->delegate()) {
    if (request_->context()->cookie_store()) {
      if (policy == ERR_ACCESS_DENIED) {
        CookieOptions options;
        options.set_include_httponly();
        request_->delegate()->OnSetCookie(
            request_,
            response_cookies_[response_cookies_save_index_],
            options,
            true);
      } else if (policy == OK || policy == OK_FOR_SESSION_ONLY) {
        // OK to save the current response cookie now.
        CookieOptions options;
        options.set_include_httponly();
        if (policy == OK_FOR_SESSION_ONLY)
          options.set_force_session();
        request_->context()->cookie_store()->SetCookieWithOptions(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options);
        request_->delegate()->OnSetCookie(
            request_,
            response_cookies_[response_cookies_save_index_],
            options,
            false);
      }
    }
    response_cookies_save_index_++;
    // We may have been canceled within OnSetCookie.
    if (GetStatus().is_success()) {
      SaveNextCookie();
    } else {
      NotifyCanceled();
    }
  }
}

void URLRequestHttpJob::OnStartCompleted(int result) {
  RecordTimer();

  // If the request was destroyed, then there is no more work to do.
  if (!request_ || !request_->delegate())
    return;

  // If the transaction was destroyed, then the job was canceled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  // Take care of any mandates for public key pinning.
  // TODO(agl): we might have an issue here where a request for foo.example.com
  // merges into a SPDY connection to www.example.com, and gets a different
  // certificate.
  const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
  if (result == OK &&
      ssl_info.is_valid() &&
      context_->transport_security_state()) {
    TransportSecurityState::DomainState domain_state;
    if (context_->transport_security_state()->IsEnabledForHost(
            &domain_state,
            request_->url().host(),
            context_->IsSNIAvailable()) &&
        ssl_info.is_issued_by_known_root &&
        !domain_state.IsChainOfPublicKeysPermitted(
            ssl_info.public_key_hashes)) {
      result = ERR_CERT_INVALID;
    }
  }

  if (result == OK) {
    SaveCookiesAndNotifyHeadersComplete();
  } else if (ShouldTreatAsCertificateError(result)) {
    // We encountered an SSL certificate error.  Ask our delegate to decide
    // what we should do.
    // TODO(wtc): also pass ssl_info.cert_status, or just pass the whole
    // ssl_info.
    request_->delegate()->OnSSLCertificateError(
        request_, result, transaction_->GetResponseInfo()->ssl_info.cert);
  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    request_->delegate()->OnCertificateRequested(
        request_, transaction_->GetResponseInfo()->cert_request_info);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  if (result == 0) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

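// Returns true if |result| should be surfaced to the delegate as a
// recoverable certificate error. Hosts with a strict transport security
// policy get a hard failure instead, except for revocation-check failures,
// which are always treated as certificate errors.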
bool URLRequestHttpJob::ShouldTreatAsCertificateError(int result) {
  if (!IsCertificateError(result))
    return false;

  // Revocation check failures are always certificate errors, even if the host
  // is using Strict-Transport-Security.
  if (result == ERR_CERT_UNABLE_TO_CHECK_REVOCATION)
    return true;

  // Check whether our context is using Strict-Transport-Security.
  if (!context_->transport_security_state())
    return true;

  TransportSecurityState::DomainState domain_state;
  // TODO(agl): don't ignore opportunistic mode.
  const bool r = context_->transport_security_state()->IsEnabledForHost(
      &domain_state, request_info_.url.host(), context_->IsSNIAvailable());

  return !r || domain_state.mode ==
               TransportSecurityState::DomainState::MODE_OPPORTUNISTIC;
}

void URLRequestHttpJob::RestartTransactionWithAuth(
    const string16& username,
    const string16& password) {
  username_ = username;
  password_ = password;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(
      HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::SetUpload(UploadData* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // Ensure that we do not send username and password fields in the referrer.
  GURL referrer(request_->GetSanitizedReferrer());

  request_info_.url = request_->url();
  request_info_.referrer = referrer;
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  request_info_.priority = request_->priority();
  request_info_.request_id = request_->identifier();

  if (request_->context()) {
    request_info_.extra_headers.SetHeaderIfMissing(
        HttpRequestHeaders::kUserAgent,
        request_->context()->GetUserAgent(request_->url()));
  }

  AddExtraHeaders();

#ifdef ANDROID
  // Attribute network traffic to the UID of the caller
  request_info_.valid_uid = false;
  request_info_.calling_uid = 0;

  if (request_->context()) {
    uid_t uid;
    if (request_->context()->getUID(&uid)) {
      request_info_.valid_uid = true;
      request_info_.calling_uid = uid;
    }
  }
#endif

  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::Kill() {
  if (!transaction_.get())
    return;

  DestroyTransaction();
  URLRequestJob::Kill();
}

LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

uint64 URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ? transaction_->GetUploadProgress() : 0;
}

bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return response_info_->headers->GetMimeType(mime_type);
}

bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return response_info_->headers->GetCharset(charset);
}

void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
  DCHECK(request_);
  DCHECK(transaction_.get());

  if (response_info_)
    *info = *response_info_;
}

bool URLRequestHttpJob::GetResponseCookies(
    std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again?  Perhaps we
  // should just leverage response_cookies_.

  cookies->clear();
  FetchResponseCookies(response_info_, cookies);
  return true;
}

int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return response_info_->headers->response_code();
}

Filter* URLRequestHttpJob::SetupFilter() const {
  DCHECK(transaction_.get());
  if (!response_info_)
    return NULL;

  std::vector<Filter::FilterType> encoding_types;
  std::string encoding_type;
  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding",
                                                  &encoding_type)) {
    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);

  return !encoding_types.empty()
      ? Filter::Factory(encoding_types, *filter_context_) : NULL;
}

bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // We only allow redirects to certain "safe" protocols.  This does not
  // restrict redirects to externally handled protocols.  Our consumer would
  // need to take care of those.

  if (!URLRequest::IsHandledURL(location))
    return true;

  static const char* kSafeSchemes[] = {
    "http",
    "https",
    "ftp"
  };

  for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
    if (location.SchemeIs(kSafeSchemes[i]))
      return true;
  }

  return false;
}

bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication.  This could happen
  // because we either provided no auth info, or provided incorrect info.
  switch (code) {
    case 407:
      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
    case 401:
      if (server_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK(response_info_->headers->response_code() == 401 ||
         response_info_->headers->response_code() == 407);

  *result = response_info_->auth_challenge;
}

void URLRequestHttpJob::SetAuth(const string16& username,
                                const string16& password) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(username, password);
}

void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(
      FROM_HERE,
      method_factory_.NewRunnableMethod(
          &URLRequestHttpJob::OnStartCompleted, OK));
}

void URLRequestHttpJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, &start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      method_factory_.NewRunnableMethod(
          &URLRequestHttpJob::OnStartCompleted, rv));
}

void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was canceled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(&start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      method_factory_.NewRunnableMethod(
          &URLRequestHttpJob::OnStartCompleted, rv));
}

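// Reads response body into |buf|. Returns true with |*bytes_read| set when
// the read completes synchronously (0 meaning EOF). Returns false otherwise:
// with an IO_PENDING status if the result will arrive via OnReadCompleted(),
// or after NotifyDone() on a synchronous failure.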
bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(buf, buf_size, &read_callback_);
  if (rv >= 0) {
    *bytes_read = rv;
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}

void URLRequestHttpJob::StopCaching() {
  if (transaction_.get())
    transaction_->StopCaching();
}

HostPortPair URLRequestHttpJob::GetSocketAddress() const {
  return response_info_ ? response_info_->socket_address : HostPortPair();
}

URLRequestHttpJob::~URLRequestHttpJob() {
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
}

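// Records the time from ResetTimer() (job creation or transaction restart)
// until the transaction start completed, as a rough time-to-first-byte
// metric, sliced by the prefetch and prerender states.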
void URLRequestHttpJob::RecordTimer() {
  if (request_creation_time_.is_null()) {
    NOTREACHED()
        << "The same transaction shouldn't start twice without new timing.";
    return;
  }

  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
  request_creation_time_ = base::Time();

  static const bool use_prefetch_histogram =
      base::FieldTrialList::Find("Prefetch") &&
      !base::FieldTrialList::Find("Prefetch")->group_name().empty();

  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);
  if (use_prefetch_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "Prefetch"),
        to_start);
  }

  const bool is_prerender = !!(request_info_.load_flags & LOAD_PRERENDER);
  if (is_prerender) {
    UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte_Prerender",
                               to_start);
    if (use_prefetch_histogram) {
      UMA_HISTOGRAM_MEDIUM_TIMES(
          base::FieldTrial::MakeName("Net.HttpTimeToFirstByte_Prerender",
                                     "Prefetch"),
          to_start);
    }
  } else {
    UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte_NonPrerender",
                               to_start);
    if (use_prefetch_histogram) {
      UMA_HISTOGRAM_MEDIUM_TIMES(
          base::FieldTrial::MakeName("Net.HttpTimeToFirstByte_NonPrerender",
                                     "Prefetch"),
          to_start);
    }
  }
}

void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}

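// Approximates per-packet arrival times from the cumulative count of bytes
// fed to the filters, assuming roughly 1430-byte packet payloads. Only the
// first kSdchPacketHistogramCount packets are timestamped individually.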
void URLRequestHttpJob::UpdatePacketReadTimes() {
  if (!packet_timing_enabled_)
    return;

  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
    return;  // No new bytes have arrived.
  }

  if (!bytes_observed_in_packets_)
    request_time_snapshot_ = request_ ? request_->request_time() : base::Time();

  final_packet_time_ = base::Time::Now();
  const size_t kTypicalPacketSize = 1430;
  while (filter_input_byte_count() > bytes_observed_in_packets_) {
    ++observed_packet_count_;
    if (packet_times_.size() < kSdchPacketHistogramCount) {
      packet_times_.push_back(final_packet_time_);
      DCHECK_EQ(static_cast<size_t>(observed_packet_count_),
                packet_times_.size());
    }
    bytes_observed_in_packets_ += kTypicalPacketSize;
  }
  // Since packets may not be full, we'll remember the number of bytes we've
  // accounted for in packets thus far.
  bytes_observed_in_packets_ = filter_input_byte_count();
}

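// Emits the SDCH latency histograms for this job, using the packet timings
// gathered by UpdatePacketReadTimes().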
void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_Latency_F_a", duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Decode_Packets_b",
                               static_cast<int>(observed_packet_count_));
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      if (packet_times_.empty())
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_Last_a",
                                  final_packet_time_ - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);

      DCHECK_GT(kSdchPacketHistogramCount, 4u);
      if (packet_times_.size() <= 4)
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_2nd_c",
                                  packet_times_[1] - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_2nd_To_3rd_c",
                                  packet_times_[2] - packet_times_[1],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_3rd_To_4th_c",
                                  packet_times_[3] - packet_times_[2],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_4th_To_5th_c",
                                  packet_times_[4] - packet_times_[3],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_Latency_F_a",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Pass-through_Packets_b",
                               observed_packet_count_);
      if (packet_times_.empty())
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_Last_a",
                                  final_packet_time_ - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      DCHECK_GT(kSdchPacketHistogramCount, 4u);
      if (packet_times_.size() <= 4)
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_2nd_c",
                                  packet_times_[1] - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_2nd_To_3rd_c",
                                  packet_times_[2] - packet_times_[1],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_3rd_To_4th_c",
                                  packet_times_[3] - packet_times_[2],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_4th_To_5th_c",
                                  packet_times_[4] - packet_times_[3],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Decode",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      // We already provided interpacket histograms above in the SDCH_DECODE
      // case, so we don't need them here.
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      // Guard against an empty |packet_times_|, mirroring the other cases.
      if (packet_times_.empty())
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_Last_a",
                                  final_packet_time_ - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);

      DCHECK_GT(kSdchPacketHistogramCount, 4u);
      if (packet_times_.size() <= 4)
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_2nd_c",
                                  packet_times_[1] - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_2nd_To_3rd_c",
                                  packet_times_[2] - packet_times_[1],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_3rd_To_4th_c",
                                  packet_times_[3] - packet_times_[2],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_4th_To_5th_c",
                                  packet_times_[4] - packet_times_[3],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}

// The common type of histogram we use for all compression-tracking histograms.
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while (0)

void URLRequestHttpJob::RecordCompressionHistograms() {
  DCHECK(request_);
  if (!request_)
    return;

  if (is_cached_content_ ||          // Don't record cached content.
      !GetStatus().is_success() ||   // Don't record failed content.
      !IsCompressibleContent() ||    // Only record compressible content.
      !prefilter_bytes_read())       // Zero-byte responses aren't useful.
    return;

  // Miniature requests aren't really compressible.  Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties.  So, for each request, we'll put it into one of 3
  // groups:
  //      a) SSL resources
  //         Proxies cannot tamper with compression headers with SSL.
  //      b) Non-SSL, loaded-via-proxy resources
  //         In this case, we know a proxy might have interfered.
  //      c) Non-SSL, loaded-without-proxy resources
  //         In this case, we know there was no explicit proxy.  However,
  //         it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.

  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}

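// Text-like responses (javascript and other supported non-image MIME types)
// are the ones we would expect a well-behaved server to compress.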
bool URLRequestHttpJob::IsCompressibleContent() const {
  std::string mime_type;
  return GetMimeType(&mime_type) &&
      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
       IsSupportedNonImageMimeType(mime_type.c_str()));
}

}  // namespace net