1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #include "net/url_request/url_request_http_job.h" 6 7 #include "base/base_switches.h" 8 #include "base/bind.h" 9 #include "base/bind_helpers.h" 10 #include "base/command_line.h" 11 #include "base/compiler_specific.h" 12 #include "base/file_version_info.h" 13 #include "base/message_loop/message_loop.h" 14 #include "base/metrics/field_trial.h" 15 #include "base/metrics/histogram.h" 16 #include "base/rand_util.h" 17 #include "base/strings/string_util.h" 18 #include "base/time/time.h" 19 #include "net/base/host_port_pair.h" 20 #include "net/base/load_flags.h" 21 #include "net/base/mime_util.h" 22 #include "net/base/net_errors.h" 23 #include "net/base/net_util.h" 24 #include "net/base/network_delegate.h" 25 #include "net/base/sdch_manager.h" 26 #include "net/cert/cert_status_flags.h" 27 #include "net/cookies/cookie_store.h" 28 #include "net/http/http_content_disposition.h" 29 #include "net/http/http_network_session.h" 30 #include "net/http/http_request_headers.h" 31 #include "net/http/http_response_headers.h" 32 #include "net/http/http_response_info.h" 33 #include "net/http/http_status_code.h" 34 #include "net/http/http_transaction.h" 35 #include "net/http/http_transaction_factory.h" 36 #include "net/http/http_util.h" 37 #include "net/ssl/ssl_cert_request_info.h" 38 #include "net/ssl/ssl_config_service.h" 39 #include "net/url_request/fraudulent_certificate_reporter.h" 40 #include "net/url_request/http_user_agent_settings.h" 41 #include "net/url_request/url_request.h" 42 #include "net/url_request/url_request_context.h" 43 #include "net/url_request/url_request_error_job.h" 44 #include "net/url_request/url_request_job_factory.h" 45 #include "net/url_request/url_request_redirect_job.h" 46 #include "net/url_request/url_request_throttler_header_adapter.h" 47 #include 
"net/url_request/url_request_throttler_manager.h" 48 #include "net/websockets/websocket_handshake_stream_base.h" 49 50 static const char kAvailDictionaryHeader[] = "Avail-Dictionary"; 51 52 namespace net { 53 54 class URLRequestHttpJob::HttpFilterContext : public FilterContext { 55 public: 56 explicit HttpFilterContext(URLRequestHttpJob* job); 57 virtual ~HttpFilterContext(); 58 59 // FilterContext implementation. 60 virtual bool GetMimeType(std::string* mime_type) const OVERRIDE; 61 virtual bool GetURL(GURL* gurl) const OVERRIDE; 62 virtual bool GetContentDisposition(std::string* disposition) const OVERRIDE; 63 virtual base::Time GetRequestTime() const OVERRIDE; 64 virtual bool IsCachedContent() const OVERRIDE; 65 virtual bool IsDownload() const OVERRIDE; 66 virtual bool IsSdchResponse() const OVERRIDE; 67 virtual int64 GetByteReadCount() const OVERRIDE; 68 virtual int GetResponseCode() const OVERRIDE; 69 virtual const URLRequestContext* GetURLRequestContext() const OVERRIDE; 70 virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE; 71 72 // Method to allow us to reset filter context for a response that should have 73 // been SDCH encoded when there is an update due to an explicit HTTP header. 
74 void ResetSdchResponseToFalse(); 75 76 private: 77 URLRequestHttpJob* job_; 78 79 DISALLOW_COPY_AND_ASSIGN(HttpFilterContext); 80 }; 81 82 URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job) 83 : job_(job) { 84 DCHECK(job_); 85 } 86 87 URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() { 88 } 89 90 bool URLRequestHttpJob::HttpFilterContext::GetMimeType( 91 std::string* mime_type) const { 92 return job_->GetMimeType(mime_type); 93 } 94 95 bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const { 96 if (!job_->request()) 97 return false; 98 *gurl = job_->request()->url(); 99 return true; 100 } 101 102 bool URLRequestHttpJob::HttpFilterContext::GetContentDisposition( 103 std::string* disposition) const { 104 HttpResponseHeaders* headers = job_->GetResponseHeaders(); 105 void *iter = NULL; 106 return headers->EnumerateHeader(&iter, "Content-Disposition", disposition); 107 } 108 109 base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const { 110 return job_->request() ? 
job_->request()->request_time() : base::Time(); 111 } 112 113 bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const { 114 return job_->is_cached_content_; 115 } 116 117 bool URLRequestHttpJob::HttpFilterContext::IsDownload() const { 118 return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0; 119 } 120 121 void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() { 122 DCHECK(job_->sdch_dictionary_advertised_); 123 job_->sdch_dictionary_advertised_ = false; 124 } 125 126 bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const { 127 return job_->sdch_dictionary_advertised_; 128 } 129 130 int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const { 131 return job_->filter_input_byte_count(); 132 } 133 134 int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const { 135 return job_->GetResponseCode(); 136 } 137 138 const URLRequestContext* 139 URLRequestHttpJob::HttpFilterContext::GetURLRequestContext() const { 140 return job_->request() ? job_->request()->context() : NULL; 141 } 142 143 void URLRequestHttpJob::HttpFilterContext::RecordPacketStats( 144 StatisticSelector statistic) const { 145 job_->RecordPacketStats(statistic); 146 } 147 148 // TODO(darin): make sure the port blocking code is not lost 149 // static 150 URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request, 151 NetworkDelegate* network_delegate, 152 const std::string& scheme) { 153 DCHECK(scheme == "http" || scheme == "https" || scheme == "ws" || 154 scheme == "wss"); 155 156 if (!request->context()->http_transaction_factory()) { 157 NOTREACHED() << "requires a valid context"; 158 return new URLRequestErrorJob( 159 request, network_delegate, ERR_INVALID_ARGUMENT); 160 } 161 162 GURL redirect_url; 163 if (request->GetHSTSRedirect(&redirect_url)) { 164 return new URLRequestRedirectJob( 165 request, network_delegate, redirect_url, 166 // Use status code 307 to preserve the method, so POST requests work. 
167 URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT, "HSTS"); 168 } 169 return new URLRequestHttpJob(request, 170 network_delegate, 171 request->context()->http_user_agent_settings()); 172 } 173 174 URLRequestHttpJob::URLRequestHttpJob( 175 URLRequest* request, 176 NetworkDelegate* network_delegate, 177 const HttpUserAgentSettings* http_user_agent_settings) 178 : URLRequestJob(request, network_delegate), 179 priority_(DEFAULT_PRIORITY), 180 response_info_(NULL), 181 response_cookies_save_index_(0), 182 proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH), 183 server_auth_state_(AUTH_STATE_DONT_NEED_AUTH), 184 start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted, 185 base::Unretained(this))), 186 notify_before_headers_sent_callback_( 187 base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback, 188 base::Unretained(this))), 189 read_in_progress_(false), 190 throttling_entry_(NULL), 191 sdch_dictionary_advertised_(false), 192 sdch_test_activated_(false), 193 sdch_test_control_(false), 194 is_cached_content_(false), 195 request_creation_time_(), 196 packet_timing_enabled_(false), 197 done_(false), 198 bytes_observed_in_packets_(0), 199 request_time_snapshot_(), 200 final_packet_time_(), 201 filter_context_(new HttpFilterContext(this)), 202 on_headers_received_callback_( 203 base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback, 204 base::Unretained(this))), 205 awaiting_callback_(false), 206 http_user_agent_settings_(http_user_agent_settings), 207 weak_factory_(this) { 208 URLRequestThrottlerManager* manager = request->context()->throttler_manager(); 209 if (manager) 210 throttling_entry_ = manager->RegisterRequestUrl(request->url()); 211 212 ResetTimer(); 213 } 214 215 URLRequestHttpJob::~URLRequestHttpJob() { 216 CHECK(!awaiting_callback_); 217 218 DCHECK(!sdch_test_control_ || !sdch_test_activated_); 219 if (!is_cached_content_) { 220 if (sdch_test_control_) 221 RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK); 222 if 
(sdch_test_activated_) 223 RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE); 224 } 225 // Make sure SDCH filters are told to emit histogram data while 226 // filter_context_ is still alive. 227 DestroyFilters(); 228 229 DoneWithRequest(ABORTED); 230 } 231 232 void URLRequestHttpJob::SetPriority(RequestPriority priority) { 233 priority_ = priority; 234 if (transaction_) 235 transaction_->SetPriority(priority_); 236 } 237 238 void URLRequestHttpJob::Start() { 239 DCHECK(!transaction_.get()); 240 241 // URLRequest::SetReferrer ensures that we do not send username and password 242 // fields in the referrer. 243 GURL referrer(request_->referrer()); 244 245 request_info_.url = request_->url(); 246 request_info_.method = request_->method(); 247 request_info_.load_flags = request_->load_flags(); 248 // Enable privacy mode if cookie settings or flags tell us not send or 249 // save cookies. 250 bool enable_privacy_mode = 251 (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) || 252 (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) || 253 CanEnablePrivacyMode(); 254 // Privacy mode could still be disabled in OnCookiesLoaded if we are going 255 // to send previously saved cookies. 256 request_info_.privacy_mode = enable_privacy_mode ? 257 PRIVACY_MODE_ENABLED : PRIVACY_MODE_DISABLED; 258 259 // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins 260 // from overriding headers that are controlled using other means. Otherwise a 261 // plugin could set a referrer although sending the referrer is inhibited. 262 request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer); 263 264 // Our consumer should have made sure that this is a safe referrer. See for 265 // instance WebCore::FrameLoader::HideReferrer. 
266 if (referrer.is_valid()) { 267 request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer, 268 referrer.spec()); 269 } 270 271 request_info_.extra_headers.SetHeaderIfMissing( 272 HttpRequestHeaders::kUserAgent, 273 http_user_agent_settings_ ? 274 http_user_agent_settings_->GetUserAgent() : std::string()); 275 276 AddExtraHeaders(); 277 AddCookieHeaderAndStart(); 278 } 279 280 void URLRequestHttpJob::Kill() { 281 if (!transaction_.get()) 282 return; 283 284 weak_factory_.InvalidateWeakPtrs(); 285 DestroyTransaction(); 286 URLRequestJob::Kill(); 287 } 288 289 void URLRequestHttpJob::NotifyHeadersComplete() { 290 DCHECK(!response_info_); 291 292 response_info_ = transaction_->GetResponseInfo(); 293 294 // Save boolean, as we'll need this info at destruction time, and filters may 295 // also need this info. 296 is_cached_content_ = response_info_->was_cached; 297 298 if (!is_cached_content_ && throttling_entry_.get()) { 299 URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders()); 300 throttling_entry_->UpdateWithResponse(request_info_.url.host(), 301 &response_adapter); 302 } 303 304 // The ordering of these calls is not important. 305 ProcessStrictTransportSecurityHeader(); 306 ProcessPublicKeyPinsHeader(); 307 308 SdchManager* sdch_manager(request()->context()->sdch_manager()); 309 if (sdch_manager && sdch_manager->IsInSupportedDomain(request_->url())) { 310 const std::string name = "Get-Dictionary"; 311 std::string url_text; 312 void* iter = NULL; 313 // TODO(jar): We need to not fetch dictionaries the first time they are 314 // seen, but rather wait until we can justify their usefulness. 315 // For now, we will only fetch the first dictionary, which will at least 316 // require multiple suggestions before we get additional ones for this site. 317 // Eventually we should wait until a dictionary is requested several times 318 // before we even download it (so that we don't waste memory or bandwidth). 
319 if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) { 320 // Resolve suggested URL relative to request url. 321 GURL sdch_dictionary_url = request_->url().Resolve(url_text); 322 if (sdch_dictionary_url.is_valid()) { 323 sdch_manager->FetchDictionary(request_->url(), sdch_dictionary_url); 324 } 325 } 326 } 327 328 // The HTTP transaction may be restarted several times for the purposes 329 // of sending authorization information. Each time it restarts, we get 330 // notified of the headers completion so that we can update the cookie store. 331 if (transaction_->IsReadyToRestartForAuth()) { 332 DCHECK(!response_info_->auth_challenge.get()); 333 // TODO(battre): This breaks the webrequest API for 334 // URLRequestTestHTTP.BasicAuthWithCookies 335 // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders 336 // occurs. 337 RestartTransactionWithAuth(AuthCredentials()); 338 return; 339 } 340 341 URLRequestJob::NotifyHeadersComplete(); 342 } 343 344 void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) { 345 DoneWithRequest(FINISHED); 346 URLRequestJob::NotifyDone(status); 347 } 348 349 void URLRequestHttpJob::DestroyTransaction() { 350 DCHECK(transaction_.get()); 351 352 DoneWithRequest(ABORTED); 353 transaction_.reset(); 354 response_info_ = NULL; 355 receive_headers_end_ = base::TimeTicks(); 356 } 357 358 void URLRequestHttpJob::StartTransaction() { 359 if (network_delegate()) { 360 OnCallToDelegate(); 361 int rv = network_delegate()->NotifyBeforeSendHeaders( 362 request_, notify_before_headers_sent_callback_, 363 &request_info_.extra_headers); 364 // If an extension blocks the request, we rely on the callback to 365 // MaybeStartTransactionInternal(). 
366 if (rv == ERR_IO_PENDING) 367 return; 368 MaybeStartTransactionInternal(rv); 369 return; 370 } 371 StartTransactionInternal(); 372 } 373 374 void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) { 375 // Check that there are no callbacks to already canceled requests. 376 DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status()); 377 378 MaybeStartTransactionInternal(result); 379 } 380 381 void URLRequestHttpJob::MaybeStartTransactionInternal(int result) { 382 OnCallToDelegateComplete(); 383 if (result == OK) { 384 StartTransactionInternal(); 385 } else { 386 std::string source("delegate"); 387 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, 388 NetLog::StringCallback("source", &source)); 389 NotifyCanceled(); 390 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); 391 } 392 } 393 394 void URLRequestHttpJob::StartTransactionInternal() { 395 // NOTE: This method assumes that request_info_ is already setup properly. 396 397 // If we already have a transaction, then we should restart the transaction 398 // with auth provided by auth_credentials_. 399 400 int rv; 401 402 if (network_delegate()) { 403 network_delegate()->NotifySendHeaders( 404 request_, request_info_.extra_headers); 405 } 406 407 if (transaction_.get()) { 408 rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_); 409 auth_credentials_ = AuthCredentials(); 410 } else { 411 DCHECK(request_->context()->http_transaction_factory()); 412 413 rv = request_->context()->http_transaction_factory()->CreateTransaction( 414 priority_, &transaction_); 415 416 if (rv == OK && request_info_.url.SchemeIsWSOrWSS()) { 417 // TODO(ricea): Implement WebSocket throttling semantics as defined in 418 // RFC6455 Section 4.1. 
419 base::SupportsUserData::Data* data = request_->GetUserData( 420 WebSocketHandshakeStreamBase::CreateHelper::DataKey()); 421 if (data) { 422 transaction_->SetWebSocketHandshakeStreamCreateHelper( 423 static_cast<WebSocketHandshakeStreamBase::CreateHelper*>(data)); 424 } else { 425 rv = ERR_DISALLOWED_URL_SCHEME; 426 } 427 } 428 429 if (rv == OK) { 430 transaction_->SetBeforeNetworkStartCallback( 431 base::Bind(&URLRequestHttpJob::NotifyBeforeNetworkStart, 432 base::Unretained(this))); 433 434 if (!throttling_entry_.get() || 435 !throttling_entry_->ShouldRejectRequest(*request_)) { 436 rv = transaction_->Start( 437 &request_info_, start_callback_, request_->net_log()); 438 start_time_ = base::TimeTicks::Now(); 439 } else { 440 // Special error code for the exponential back-off module. 441 rv = ERR_TEMPORARILY_THROTTLED; 442 } 443 } 444 } 445 446 if (rv == ERR_IO_PENDING) 447 return; 448 449 // The transaction started synchronously, but we need to notify the 450 // URLRequest delegate via the message loop. 451 base::MessageLoop::current()->PostTask( 452 FROM_HERE, 453 base::Bind(&URLRequestHttpJob::OnStartCompleted, 454 weak_factory_.GetWeakPtr(), rv)); 455 } 456 457 void URLRequestHttpJob::AddExtraHeaders() { 458 SdchManager* sdch_manager = request()->context()->sdch_manager(); 459 460 // Supply Accept-Encoding field only if it is not already provided. 461 // It should be provided IF the content is known to have restrictions on 462 // potential encoding, such as streaming multi-media. 463 // For details see bug 47381. 464 // TODO(jar, enal): jpeg files etc. should set up a request header if 465 // possible. Right now it is done only by buffered_resource_loader and 466 // simple_data_source. 467 if (!request_info_.extra_headers.HasHeader( 468 HttpRequestHeaders::kAcceptEncoding)) { 469 bool advertise_sdch = sdch_manager && 470 // We don't support SDCH responses to POST as there is a possibility 471 // of having SDCH encoded responses returned (e.g. 
by the cache) 472 // which we cannot decode, and in those situations, we will need 473 // to retransmit the request without SDCH, which is illegal for a POST. 474 request()->method() != "POST" && 475 sdch_manager->IsInSupportedDomain(request_->url()); 476 std::string avail_dictionaries; 477 if (advertise_sdch) { 478 sdch_manager->GetAvailDictionaryList(request_->url(), 479 &avail_dictionaries); 480 481 // The AllowLatencyExperiment() is only true if we've successfully done a 482 // full SDCH compression recently in this browser session for this host. 483 // Note that for this path, there might be no applicable dictionaries, 484 // and hence we can't participate in the experiment. 485 if (!avail_dictionaries.empty() && 486 sdch_manager->AllowLatencyExperiment(request_->url())) { 487 // We are participating in the test (or control), and hence we'll 488 // eventually record statistics via either SDCH_EXPERIMENT_DECODE or 489 // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data. 490 packet_timing_enabled_ = true; 491 if (base::RandDouble() < .01) { 492 sdch_test_control_ = true; // 1% probability. 493 advertise_sdch = false; 494 } else { 495 sdch_test_activated_ = true; 496 } 497 } 498 } 499 500 // Supply Accept-Encoding headers first so that it is more likely that they 501 // will be in the first transmitted packet. This can sometimes make it 502 // easier to filter and analyze the streams to assure that a proxy has not 503 // damaged these headers. Some proxies deliberately corrupt Accept-Encoding 504 // headers. 505 if (!advertise_sdch) { 506 // Tell the server what compression formats we support (other than SDCH). 507 request_info_.extra_headers.SetHeader( 508 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate"); 509 } else { 510 // Include SDCH in acceptable list. 
511 request_info_.extra_headers.SetHeader( 512 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch"); 513 if (!avail_dictionaries.empty()) { 514 request_info_.extra_headers.SetHeader( 515 kAvailDictionaryHeader, 516 avail_dictionaries); 517 sdch_dictionary_advertised_ = true; 518 // Since we're tagging this transaction as advertising a dictionary, 519 // we'll definitely employ an SDCH filter (or tentative sdch filter) 520 // when we get a response. When done, we'll record histograms via 521 // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet 522 // arrival times. 523 packet_timing_enabled_ = true; 524 } 525 } 526 } 527 528 if (http_user_agent_settings_) { 529 // Only add default Accept-Language if the request didn't have it 530 // specified. 531 std::string accept_language = 532 http_user_agent_settings_->GetAcceptLanguage(); 533 if (!accept_language.empty()) { 534 request_info_.extra_headers.SetHeaderIfMissing( 535 HttpRequestHeaders::kAcceptLanguage, 536 accept_language); 537 } 538 } 539 } 540 541 void URLRequestHttpJob::AddCookieHeaderAndStart() { 542 // No matter what, we want to report our status as IO pending since we will 543 // be notifying our consumer asynchronously via OnStartCompleted. 544 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 545 546 // If the request was destroyed, then there is no more work to do. 
547 if (!request_) 548 return; 549 550 CookieStore* cookie_store = GetCookieStore(); 551 if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) { 552 cookie_store->GetAllCookiesForURLAsync( 553 request_->url(), 554 base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad, 555 weak_factory_.GetWeakPtr())); 556 } else { 557 DoStartTransaction(); 558 } 559 } 560 561 void URLRequestHttpJob::DoLoadCookies() { 562 CookieOptions options; 563 options.set_include_httponly(); 564 GetCookieStore()->GetCookiesWithOptionsAsync( 565 request_->url(), options, 566 base::Bind(&URLRequestHttpJob::OnCookiesLoaded, 567 weak_factory_.GetWeakPtr())); 568 } 569 570 void URLRequestHttpJob::CheckCookiePolicyAndLoad( 571 const CookieList& cookie_list) { 572 if (CanGetCookies(cookie_list)) 573 DoLoadCookies(); 574 else 575 DoStartTransaction(); 576 } 577 578 void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) { 579 if (!cookie_line.empty()) { 580 request_info_.extra_headers.SetHeader( 581 HttpRequestHeaders::kCookie, cookie_line); 582 // Disable privacy mode as we are sending cookies anyway. 583 request_info_.privacy_mode = PRIVACY_MODE_DISABLED; 584 } 585 DoStartTransaction(); 586 } 587 588 void URLRequestHttpJob::DoStartTransaction() { 589 // We may have been canceled while retrieving cookies. 590 if (GetStatus().is_success()) { 591 StartTransaction(); 592 } else { 593 NotifyCanceled(); 594 } 595 } 596 597 void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) { 598 // End of the call started in OnStartCompleted. 
599 OnCallToDelegateComplete(); 600 601 if (result != net::OK) { 602 std::string source("delegate"); 603 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, 604 NetLog::StringCallback("source", &source)); 605 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); 606 return; 607 } 608 609 DCHECK(transaction_.get()); 610 611 const HttpResponseInfo* response_info = transaction_->GetResponseInfo(); 612 DCHECK(response_info); 613 614 response_cookies_.clear(); 615 response_cookies_save_index_ = 0; 616 617 FetchResponseCookies(&response_cookies_); 618 619 if (!GetResponseHeaders()->GetDateValue(&response_date_)) 620 response_date_ = base::Time(); 621 622 // Now, loop over the response cookies, and attempt to persist each. 623 SaveNextCookie(); 624 } 625 626 // If the save occurs synchronously, SaveNextCookie will loop and save the next 627 // cookie. If the save is deferred, the callback is responsible for continuing 628 // to iterate through the cookies. 629 // TODO(erikwright): Modify the CookieStore API to indicate via return value 630 // whether it completed synchronously or asynchronously. 631 // See http://crbug.com/131066. 632 void URLRequestHttpJob::SaveNextCookie() { 633 // No matter what, we want to report our status as IO pending since we will 634 // be notifying our consumer asynchronously via OnStartCompleted. 635 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 636 637 // Used to communicate with the callback. See the implementation of 638 // OnCookieSaved. 
639 scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false); 640 scoped_refptr<SharedBoolean> save_next_cookie_running = 641 new SharedBoolean(true); 642 643 if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) && 644 GetCookieStore() && response_cookies_.size() > 0) { 645 CookieOptions options; 646 options.set_include_httponly(); 647 options.set_server_time(response_date_); 648 649 net::CookieStore::SetCookiesCallback callback( 650 base::Bind(&URLRequestHttpJob::OnCookieSaved, 651 weak_factory_.GetWeakPtr(), 652 save_next_cookie_running, 653 callback_pending)); 654 655 // Loop through the cookies as long as SetCookieWithOptionsAsync completes 656 // synchronously. 657 while (!callback_pending->data && 658 response_cookies_save_index_ < response_cookies_.size()) { 659 if (CanSetCookie( 660 response_cookies_[response_cookies_save_index_], &options)) { 661 callback_pending->data = true; 662 GetCookieStore()->SetCookieWithOptionsAsync( 663 request_->url(), response_cookies_[response_cookies_save_index_], 664 options, callback); 665 } 666 ++response_cookies_save_index_; 667 } 668 } 669 670 save_next_cookie_running->data = false; 671 672 if (!callback_pending->data) { 673 response_cookies_.clear(); 674 response_cookies_save_index_ = 0; 675 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status 676 NotifyHeadersComplete(); 677 return; 678 } 679 } 680 681 // |save_next_cookie_running| is true when the callback is bound and set to 682 // false when SaveNextCookie exits, allowing the callback to determine if the 683 // save occurred synchronously or asynchronously. 684 // |callback_pending| is false when the callback is invoked and will be set to 685 // true by the callback, allowing SaveNextCookie to detect whether the save 686 // occurred synchronously. 687 // See SaveNextCookie() for more information. 
688 void URLRequestHttpJob::OnCookieSaved( 689 scoped_refptr<SharedBoolean> save_next_cookie_running, 690 scoped_refptr<SharedBoolean> callback_pending, 691 bool cookie_status) { 692 callback_pending->data = false; 693 694 // If we were called synchronously, return. 695 if (save_next_cookie_running->data) { 696 return; 697 } 698 699 // We were called asynchronously, so trigger the next save. 700 // We may have been canceled within OnSetCookie. 701 if (GetStatus().is_success()) { 702 SaveNextCookie(); 703 } else { 704 NotifyCanceled(); 705 } 706 } 707 708 void URLRequestHttpJob::FetchResponseCookies( 709 std::vector<std::string>* cookies) { 710 const std::string name = "Set-Cookie"; 711 std::string value; 712 713 void* iter = NULL; 714 HttpResponseHeaders* headers = GetResponseHeaders(); 715 while (headers->EnumerateHeader(&iter, name, &value)) { 716 if (!value.empty()) 717 cookies->push_back(value); 718 } 719 } 720 721 // NOTE: |ProcessStrictTransportSecurityHeader| and 722 // |ProcessPublicKeyPinsHeader| have very similar structures, by design. 723 void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() { 724 DCHECK(response_info_); 725 TransportSecurityState* security_state = 726 request_->context()->transport_security_state(); 727 const SSLInfo& ssl_info = response_info_->ssl_info; 728 729 // Only accept HSTS headers on HTTPS connections that have no 730 // certificate errors. 731 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || 732 !security_state) 733 return; 734 735 // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec: 736 // 737 // If a UA receives more than one STS header field in a HTTP response 738 // message over secure transport, then the UA MUST process only the 739 // first such header field. 
740 HttpResponseHeaders* headers = GetResponseHeaders(); 741 std::string value; 742 if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value)) 743 security_state->AddHSTSHeader(request_info_.url.host(), value); 744 } 745 746 void URLRequestHttpJob::ProcessPublicKeyPinsHeader() { 747 DCHECK(response_info_); 748 TransportSecurityState* security_state = 749 request_->context()->transport_security_state(); 750 const SSLInfo& ssl_info = response_info_->ssl_info; 751 752 // Only accept HPKP headers on HTTPS connections that have no 753 // certificate errors. 754 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || 755 !security_state) 756 return; 757 758 // http://tools.ietf.org/html/draft-ietf-websec-key-pinning: 759 // 760 // If a UA receives more than one PKP header field in an HTTP 761 // response message over secure transport, then the UA MUST process 762 // only the first such header field. 763 HttpResponseHeaders* headers = GetResponseHeaders(); 764 std::string value; 765 if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value)) 766 security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info); 767 } 768 769 void URLRequestHttpJob::OnStartCompleted(int result) { 770 RecordTimer(); 771 772 // If the request was destroyed, then there is no more work to do. 773 if (!request_) 774 return; 775 776 // If the job is done (due to cancellation), can just ignore this 777 // notification. 
778 if (done_) 779 return; 780 781 receive_headers_end_ = base::TimeTicks::Now(); 782 783 // Clear the IO_PENDING status 784 SetStatus(URLRequestStatus()); 785 786 const URLRequestContext* context = request_->context(); 787 788 if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN && 789 transaction_->GetResponseInfo() != NULL) { 790 FraudulentCertificateReporter* reporter = 791 context->fraudulent_certificate_reporter(); 792 if (reporter != NULL) { 793 const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info; 794 bool sni_available = SSLConfigService::IsSNIAvailable( 795 context->ssl_config_service()); 796 const std::string& host = request_->url().host(); 797 798 reporter->SendReport(host, ssl_info, sni_available); 799 } 800 } 801 802 if (result == OK) { 803 if (transaction_ && transaction_->GetResponseInfo()) { 804 SetProxyServer(transaction_->GetResponseInfo()->proxy_server); 805 } 806 scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders(); 807 if (network_delegate()) { 808 // Note that |this| may not be deleted until 809 // |on_headers_received_callback_| or 810 // |NetworkDelegate::URLRequestDestroyed()| has been called. 811 OnCallToDelegate(); 812 allowed_unsafe_redirect_url_ = GURL(); 813 int error = network_delegate()->NotifyHeadersReceived( 814 request_, 815 on_headers_received_callback_, 816 headers.get(), 817 &override_response_headers_, 818 &allowed_unsafe_redirect_url_); 819 if (error != net::OK) { 820 if (error == net::ERR_IO_PENDING) { 821 awaiting_callback_ = true; 822 } else { 823 std::string source("delegate"); 824 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, 825 NetLog::StringCallback("source", 826 &source)); 827 OnCallToDelegateComplete(); 828 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error)); 829 } 830 return; 831 } 832 } 833 834 SaveCookiesAndNotifyHeadersComplete(net::OK); 835 } else if (IsCertificateError(result)) { 836 // We encountered an SSL certificate error. 
837 if (result == ERR_SSL_WEAK_SERVER_EPHEMERAL_DH_KEY || 838 result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN) { 839 // These are hard failures. They're handled separately and don't have 840 // the correct cert status, so set it here. 841 SSLInfo info(transaction_->GetResponseInfo()->ssl_info); 842 info.cert_status = MapNetErrorToCertStatus(result); 843 NotifySSLCertificateError(info, true); 844 } else { 845 // Maybe overridable, maybe not. Ask the delegate to decide. 846 const URLRequestContext* context = request_->context(); 847 TransportSecurityState* state = context->transport_security_state(); 848 const bool fatal = 849 state && 850 state->ShouldSSLErrorsBeFatal( 851 request_info_.url.host(), 852 SSLConfigService::IsSNIAvailable(context->ssl_config_service())); 853 NotifySSLCertificateError( 854 transaction_->GetResponseInfo()->ssl_info, fatal); 855 } 856 } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) { 857 NotifyCertificateRequested( 858 transaction_->GetResponseInfo()->cert_request_info.get()); 859 } else { 860 // Even on an error, there may be useful information in the response 861 // info (e.g. whether there's a cached copy). 862 if (transaction_.get()) 863 response_info_ = transaction_->GetResponseInfo(); 864 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); 865 } 866 } 867 868 void URLRequestHttpJob::OnHeadersReceivedCallback(int result) { 869 awaiting_callback_ = false; 870 871 // Check that there are no callbacks to already canceled requests. 
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}

// Invoked when an asynchronous HttpTransaction::Read() completes. |result|
// is a byte count (>= 0) or a net error code (< 0).
void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  // Forgive an exact-match content-length mismatch (see
  // ShouldFixMismatchedContentLength) by treating it as a clean EOF.
  if (ShouldFixMismatchedContentLength(result))
    result = OK;

  if (result == OK) {
    // EOF: the whole body has been read.
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

// Restarts the transaction with |credentials| in response to a 401/407
// challenge. Response state is cleared here and repopulated by
// OnStartCompleted.
void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

// Sets the request body. Must be called before the transaction starts.
void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}

// Replaces the extra request headers. Must be called before the transaction
// starts.
void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

UploadProgress URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ?
      transaction_->GetUploadProgress() : UploadProgress();
}

// Extracts the MIME type from the (possibly overridden) response headers.
// Returns false until headers have been received.
bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetMimeType(mime_type);
}

// Extracts the charset from the (possibly overridden) response headers.
// Returns false until headers have been received.
bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetCharset(charset);
}

// Copies the response info into |info|, substituting the delegate-supplied
// override headers when present. No-op until headers have been received.
void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
  DCHECK(request_);

  if (response_info_) {
    DCHECK(transaction_.get());

    *info = *response_info_;
    if (override_response_headers_.get())
      info->headers = override_response_headers_;
  }
}

void URLRequestHttpJob::GetLoadTimingInfo(
    LoadTimingInfo* load_timing_info) const {
  // If haven't made it far enough to receive any headers, don't return
  // anything. This makes for more consistent behavior in the case of errors.
  if (!transaction_ || receive_headers_end_.is_null())
    return;
  if (transaction_->GetLoadTimingInfo(load_timing_info))
    load_timing_info->receive_headers_end = receive_headers_end_;
}

// Re-parses Set-Cookie values out of the response headers into |cookies|.
// Returns false until headers have been received.
bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again? Perhaps we
  // should just leverage response_cookies_.
  cookies->clear();
  FetchResponseCookies(cookies);
  return true;
}

// Returns the HTTP status code of the response, or -1 if headers have not
// been received yet.
int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return GetResponseHeaders()->response_code();
}

// Builds the content-decoding filter chain from the response's
// Content-Encoding headers, with SDCH-specific correction logic. Returns
// NULL when no decoding filter is needed.
Filter* URLRequestHttpJob::SetupFilter() const {
  DCHECK(transaction_.get());
  if (!response_info_)
    return NULL;

  // Collect one filter type per Content-Encoding header value, in order.
  std::vector<Filter::FilterType> encoding_types;
  std::string encoding_type;
  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  if (filter_context_->IsSdchResponse()) {
    // We are wary of proxies that discard or damage SDCH encoding. If a server
    // explicitly states that this is not SDCH content, then we can correct our
    // assumption that this is an SDCH response, and avoid the need to recover
    // as though the content is corrupted (when we discover it is not SDCH
    // encoded).
    std::string sdch_response_status;
    iter = NULL;
    while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
                                    &sdch_response_status)) {
      if (sdch_response_status == "0") {
        filter_context_->ResetSdchResponseToFalse();
        break;
      }
    }
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);

  return !encoding_types.empty()
      ? Filter::Factory(encoding_types, *filter_context_) : NULL;
}

bool URLRequestHttpJob::CopyFragmentOnRedirect(const GURL& location) const {
  // Allow modification of reference fragments by default, unless
  // |allowed_unsafe_redirect_url_| is set and equal to the redirect URL.
  // When this is the case, we assume that the network delegate has set the
  // desired redirect URL (with or without fragment), so it must not be changed
  // any more.
  return !allowed_unsafe_redirect_url_.is_valid() ||
      allowed_unsafe_redirect_url_ != location;
}

bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // HTTP is always safe.
  // TODO(pauljensen): Remove once crbug.com/146591 is fixed.
  if (location.is_valid() &&
      (location.scheme() == "http" || location.scheme() == "https")) {
    return true;
  }
  // Delegates may mark a URL as safe for redirection.
  if (allowed_unsafe_redirect_url_.is_valid() &&
      allowed_unsafe_redirect_url_ == location) {
    return true;
  }
  // Query URLRequestJobFactory as to whether |location| would be safe to
  // redirect to.
  return request_->context()->job_factory() &&
      request_->context()->job_factory()->IsSafeRedirectTarget(location);
}

// Returns true when the response carries an authentication challenge (401 or
// 407) that has not yet been satisfied or canceled; flips the corresponding
// auth state to NEED_AUTH as a side effect.
bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication. This could happen
  // because we either provided no auth info, or provided incorrect info.
  switch (code) {
    case 407:  // Proxy authentication required.
      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
    case 401:  // Server (WWW) authentication required.
      if (server_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

// Copies the pending auth challenge out of the response info. Only valid
// while NeedsAuth() is true (enforced by the DCHECKs below).
void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
         (GetResponseHeaders()->response_code() ==
          HTTP_PROXY_AUTHENTICATION_REQUIRED));

  *result = response_info_->auth_challenge;
}

// Supplies credentials for the pending challenge and restarts the
// transaction with them.
void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(credentials);
}

// Declines the pending auth challenge; the consumer will then see the
// 401/407 response body as an ordinary response.
void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks::Now();
  response_cookies_.clear();

  ResetTimer();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), OK));
}

// Restarts the transaction with the client certificate chosen by the
// embedder. NOTE(review): |client_cert| presumably may be NULL to proceed
// without a certificate — confirm against HttpTransaction's
// RestartWithCertificate contract.
void URLRequestHttpJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";
  receive_headers_end_ = base::TimeTicks();

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

// Retries the request, ignoring the last error (e.g. an overridable SSL
// certificate error that the consumer chose to proceed through).
void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";
  receive_headers_end_ = base::TimeTicks();

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::ResumeNetworkStart() {
  DCHECK(transaction_.get());
  transaction_->ResumeNetworkStart();
}

// Returns true when |rv| is a content-length mismatch error that should be
// forgiven because the post-filter byte count exactly matches the advertised
// Content-Length.
bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
  // Some servers send the body compressed, but specify the content length as
  // the uncompressed size. Although this violates the HTTP spec we want to
  // support it (as IE and FireFox do), but *only* for an exact match.
  // See http://crbug.com/79694.
  if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
      rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
    if (request_ && request_->response_headers()) {
      int64 expected_length = request_->response_headers()->GetContentLength();
      VLOG(1) << __FUNCTION__ << "() "
              << "\"" << request_->url().spec() << "\""
              << " content-length = " << expected_length
              << " pre total = " << prefilter_bytes_read()
              << " post total = " << postfilter_bytes_read();
      if (postfilter_bytes_read() == expected_length) {
        // Clear the error.
        return true;
      }
    }
  }
  return false;
}

// Reads response body bytes into |buf|. Returns true with |*bytes_read| set
// on synchronous completion (0 meaning EOF); returns false with IO_PENDING
// status when the read will complete asynchronously via OnReadCompleted(),
// or false after NotifyDone() on a synchronous error.
bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(
      buf, buf_size,
      base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));

  // Treat a forgivable content-length mismatch as a clean EOF.
  if (ShouldFixMismatchedContentLength(rv))
    rv = 0;

  if (rv >= 0) {
    *bytes_read = rv;
    if (!rv)
      DoneWithRequest(FINISHED);
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}

void URLRequestHttpJob::StopCaching() {
  if (transaction_.get())
    transaction_->StopCaching();
}

// Copies the headers actually sent on the wire into |headers|. Returns false
// if no transaction exists yet.
bool URLRequestHttpJob::GetFullRequestHeaders(
    HttpRequestHeaders* headers) const {
  if (!transaction_)
    return false;

  return transaction_->GetFullRequestHeaders(headers);
}

int64 URLRequestHttpJob::GetTotalReceivedBytes() const {
  if (!transaction_)
    return 0;

  return transaction_->GetTotalReceivedBytes();
}

void URLRequestHttpJob::DoneReading() {
  if (transaction_) {
    transaction_->DoneReading();
  }
  DoneWithRequest(FINISHED);
}

// Finishes a redirect response, deciding whether the response may be cached.
void URLRequestHttpJob::DoneReadingRedirectResponse() {
  if (transaction_) {
    if (transaction_->GetResponseInfo()->headers->IsRedirect(NULL)) {
      // If the original headers indicate a redirect, go ahead and cache the
      // response, even if the |override_response_headers_| are a redirect to
      // another location.
      transaction_->DoneReading();
    } else {
      // Otherwise, |override_response_headers_| must be non-NULL and contain
      // bogus headers indicating a redirect.
      DCHECK(override_response_headers_);
      DCHECK(override_response_headers_->IsRedirect(NULL));
      transaction_->StopCaching();
    }
  }
  DoneWithRequest(FINISHED);
}

HostPortPair URLRequestHttpJob::GetSocketAddress() const {
  return response_info_ ? response_info_->socket_address : HostPortPair();
}

// Records time-to-first-byte UMA. Paired with ResetTimer(): each arm must be
// recorded exactly once, enforced by nulling request_creation_time_ below.
void URLRequestHttpJob::RecordTimer() {
  if (request_creation_time_.is_null()) {
    NOTREACHED()
        << "The same transaction shouldn't start twice without new timing.";
    return;
  }

  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
  // Null the creation time so a second RecordTimer() without an intervening
  // ResetTimer() trips the NOTREACHED above.
  request_creation_time_ = base::Time();

  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);
}

// Arms the timer read by RecordTimer(); called whenever the transaction is
// (re)started.
void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}

// Accumulates byte/time bookkeeping consumed by RecordPacketStats() below.
void URLRequestHttpJob::UpdatePacketReadTimes() {
  if (!packet_timing_enabled_)
    return;

  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
    return;  // No new bytes have arrived.
  }

  final_packet_time_ = base::Time::Now();
  // Snapshot the request time on the first observed bytes only.
  if (!bytes_observed_in_packets_)
    request_time_snapshot_ = request_ ? request_->request_time() : base::Time();

  bytes_observed_in_packets_ = filter_input_byte_count();
}

// Emits SDCH-related UMA based on the bookkeeping gathered by
// UpdatePacketReadTimes(). No-op unless packet timing is enabled and at
// least one packet was observed.
void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}

// The common type of histogram we use for all compression-tracking histograms.
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while (0)

// Records whether (and how well) the response body was compressed, bucketed
// by transport kind (SSL / via-proxy / direct).
void URLRequestHttpJob::RecordCompressionHistograms() {
  DCHECK(request_);
  if (!request_)
    return;

  if (is_cached_content_ ||          // Don't record cached content
      !GetStatus().is_success() ||   // Don't record failed content
      !IsCompressibleContent() ||    // Only record compressible content
      !prefilter_bytes_read())       // Zero-byte responses aren't useful.
    return;

  // Miniature requests aren't really compressible. Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties. So, for each request, we'll put it into one of 3
  // groups:
  //   a) SSL resources
  //      Proxies cannot tamper with compression headers with SSL.
  //   b) Non-SSL, loaded-via-proxy resources
  //      In this case, we know a proxy might have interfered.
  //   c) Non-SSL, loaded-without-proxy resources
  //      In this case, we know there was no explicit proxy. However,
  //      it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.

  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}

// A response counts as "compressible" when its MIME type is a supported
// javascript or non-image type.
bool URLRequestHttpJob::IsCompressibleContent() const {
  std::string mime_type;
  return GetMimeType(&mime_type) &&
      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
       IsSupportedNonImageMimeType(mime_type.c_str()));
}

// Records total-time UMA for the job; start_time_ is zeroed afterwards so
// repeated calls record nothing further.
void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  if (response_info_) {
    if (response_info_->was_cached) {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
    } else {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
    }
  }

  // Count network bytes for prefetch requests that were not cache hits.
  if (request_info_.load_flags & LOAD_PREFETCH && !request_->was_cached())
    UMA_HISTOGRAM_COUNTS("Net.Prefetch.PrefilterBytesReadFromNetwork",
                         prefilter_bytes_read());

  start_time_ = base::TimeTicks();
}

// End-of-job bookkeeping; guarded by |done_| so it runs at most once.
void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
  if (done_)
    return;
  done_ = true;
  RecordPerfHistograms(reason);
  if (reason == FINISHED) {
    request_->set_received_response_content_length(prefilter_bytes_read());
    RecordCompressionHistograms();
  }
}

// Returns the effective response headers: the delegate-supplied override
// headers when present, otherwise the transaction's own headers.
HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
  DCHECK(transaction_.get());
  DCHECK(transaction_->GetResponseInfo());
  return override_response_headers_.get() ?
      override_response_headers_.get() :
      transaction_->GetResponseInfo()->headers.get();
}

// The URLRequest is going away: drop the flag so no delegate callback is
// expected for it anymore.
void URLRequestHttpJob::NotifyURLRequestDestroyed() {
  awaiting_callback_ = false;
}

}  // namespace net