// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/socket/client_socket_pool_base.h"

#include "base/compiler_specific.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/stats_counters.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "base/values.h"
#include "net/base/net_errors.h"
#include "net/base/net_log.h"
#include "net/socket/client_socket_handle.h"

using base::TimeDelta;

namespace net {

namespace {

// Indicate whether we should enable idle socket cleanup timer. When timer is
// disabled, sockets are closed next time a socket request is made.
bool g_cleanup_timer_enabled = true;

// The timeout value, in seconds, used to clean up idle sockets that can't be
// reused.
//
// Note: It's important to close idle sockets that have received data as soon
// as possible because the received data may cause BSOD on Windows XP under
// some conditions. See http://crbug.com/4606.
const int kCleanupInterval = 10;  // DO NOT INCREASE THIS TIMEOUT.

// Indicate whether or not we should establish a new transport layer connection
// after a certain timeout has passed without receiving an ACK.
bool g_connect_backup_jobs_enabled = true;

}  // namespace

// A ConnectJob lives for the duration of a single connect attempt. It logs a
// SOCKET_POOL_CONNECT_JOB net-log event spanning its whole lifetime.
ConnectJob::ConnectJob(const std::string& group_name,
                       base::TimeDelta timeout_duration,
                       RequestPriority priority,
                       Delegate* delegate,
                       const BoundNetLog& net_log)
    : group_name_(group_name),
      timeout_duration_(timeout_duration),
      priority_(priority),
      delegate_(delegate),
      net_log_(net_log),
      idle_(true) {
  DCHECK(!group_name.empty());
  DCHECK(delegate);
  net_log.BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB,
                     NetLog::StringCallback("group_name", &group_name_));
}

ConnectJob::~ConnectJob() {
  net_log().EndEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB);
}

// Releases ownership of the connected (or error-state) socket to the caller.
scoped_ptr<StreamSocket> ConnectJob::PassSocket() {
  return socket_.Pass();
}

// Kicks off the connect. Arms the timeout timer (unless |timeout_duration_|
// is zero), then delegates to the subclass's ConnectInternal(). On synchronous
// completion the delegate is cleared, since OnConnectJobComplete() will never
// be invoked for this job.
int ConnectJob::Connect() {
  if (timeout_duration_ != base::TimeDelta())
    timer_.Start(FROM_HERE, timeout_duration_, this, &ConnectJob::OnTimeout);

  idle_ = false;

  LogConnectStart();

  int rv = ConnectInternal();

  if (rv != ERR_IO_PENDING) {
    LogConnectCompletion(rv);
    delegate_ = NULL;
  }

  return rv;
}

// Takes ownership of |socket|, logging the association when non-NULL.
// Passing a NULL socket clears any previously set socket.
void ConnectJob::SetSocket(scoped_ptr<StreamSocket> socket) {
  if (socket) {
    net_log().AddEvent(NetLog::TYPE_CONNECT_JOB_SET_SOCKET,
                       socket->NetLog().source().ToEventParametersCallback());
  }
  socket_ = socket.Pass();
}

void ConnectJob::NotifyDelegateOfCompletion(int rv) {
  // The delegate will own |this|.
  Delegate* delegate = delegate_;
  delegate_ = NULL;

  LogConnectCompletion(rv);
  delegate->OnConnectJobComplete(rv, this);
}

// Restarts the timeout timer with a (typically shorter) remaining duration.
void ConnectJob::ResetTimer(base::TimeDelta remaining_time) {
  timer_.Stop();
  timer_.Start(FROM_HERE, remaining_time, this, &ConnectJob::OnTimeout);
}

void ConnectJob::LogConnectStart() {
  connect_timing_.connect_start = base::TimeTicks::Now();
  net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT);
}

void ConnectJob::LogConnectCompletion(int net_error) {
  connect_timing_.connect_end = base::TimeTicks::Now();
  net_log().EndEventWithNetErrorCode(
      NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT, net_error);
}

// Timer callback: the connect attempt exceeded |timeout_duration_|.
void ConnectJob::OnTimeout() {
  // Make sure the socket is NULL before calling into |delegate|.
  SetSocket(scoped_ptr<StreamSocket>());

  net_log_.AddEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_TIMED_OUT);

  NotifyDelegateOfCompletion(ERR_TIMED_OUT);
}

namespace internal {

// A pending socket request. Requests that ignore pool limits must carry
// MAXIMUM_PRIORITY (enforced by the DCHECK below).
ClientSocketPoolBaseHelper::Request::Request(
    ClientSocketHandle* handle,
    const CompletionCallback& callback,
    RequestPriority priority,
    bool ignore_limits,
    Flags flags,
    const BoundNetLog& net_log)
    : handle_(handle),
      callback_(callback),
      priority_(priority),
      ignore_limits_(ignore_limits),
      flags_(flags),
      net_log_(net_log) {
  if (ignore_limits_)
    DCHECK_EQ(priority_, MAXIMUM_PRIORITY);
}

ClientSocketPoolBaseHelper::Request::~Request() {}

// |connect_job_factory| is owned by |this| (scoped elsewhere); |pool| is the
// owning pool, used to register |this| with lower-layered pools.
ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper(
    HigherLayeredPool* pool,
    int max_sockets,
    int max_sockets_per_group,
    base::TimeDelta unused_idle_socket_timeout,
    base::TimeDelta used_idle_socket_timeout,
    ConnectJobFactory* connect_job_factory)
    : idle_socket_count_(0),
      connecting_socket_count_(0),
      handed_out_socket_count_(0),
      max_sockets_(max_sockets),
      max_sockets_per_group_(max_sockets_per_group),
      use_cleanup_timer_(g_cleanup_timer_enabled),
      unused_idle_socket_timeout_(unused_idle_socket_timeout),
      used_idle_socket_timeout_(used_idle_socket_timeout),
      connect_job_factory_(connect_job_factory),
      connect_backup_jobs_enabled_(false),
      pool_generation_number_(0),
      pool_(pool),
      weak_factory_(this) {
  DCHECK_LE(0, max_sockets_per_group);
  DCHECK_LE(max_sockets_per_group, max_sockets);

  NetworkChangeNotifier::AddIPAddressObserver(this);
}

ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() {
  // Clean up any idle sockets and pending connect jobs.  Assert that we have no
  // remaining active sockets or pending requests.  They should have all been
  // cleaned up prior to |this| being destroyed.
  FlushWithError(ERR_ABORTED);
  DCHECK(group_map_.empty());
  DCHECK(pending_callback_map_.empty());
  DCHECK_EQ(0, connecting_socket_count_);
  CHECK(higher_pools_.empty());

  NetworkChangeNotifier::RemoveIPAddressObserver(this);

  // Remove from lower layer pools.
  for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin();
       it != lower_pools_.end();
       ++it) {
    (*it)->RemoveHigherLayeredPool(pool_);
  }
}

ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair()
    : result(OK) {
}

ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair(
    const CompletionCallback& callback_in, int result_in)
    : callback(callback_in),
      result(result_in) {
}

ClientSocketPoolBaseHelper::CallbackResultPair::~CallbackResultPair() {}

bool ClientSocketPoolBaseHelper::IsStalled() const {
  // If a lower layer pool is stalled, consider |this| stalled as well.
  for (std::set<LowerLayeredPool*>::const_iterator it = lower_pools_.begin();
       it != lower_pools_.end();
       ++it) {
    if ((*it)->IsStalled())
      return true;
  }

  // If fewer than |max_sockets_| are in use, then clearly |this| is not
  // stalled.
  if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
    return false;
  // So in order to be stalled, |this| must be using at least |max_sockets_| AND
  // |this| must have a request that is actually stalled on the global socket
  // limit.  To find such a request, look for a group that has more requests
  // than jobs AND where the number of sockets is less than
  // |max_sockets_per_group_|.  (If the number of sockets is equal to
  // |max_sockets_per_group_|, then the request is stalled on the group limit,
  // which does not count.)
  for (GroupMap::const_iterator it = group_map_.begin();
       it != group_map_.end(); ++it) {
    if (it->second->IsStalledOnPoolMaxSockets(max_sockets_per_group_))
      return true;
  }
  return false;
}

void ClientSocketPoolBaseHelper::AddLowerLayeredPool(
    LowerLayeredPool* lower_pool) {
  DCHECK(pool_);
  CHECK(!ContainsKey(lower_pools_, lower_pool));
  lower_pools_.insert(lower_pool);
  lower_pool->AddHigherLayeredPool(pool_);
}

void ClientSocketPoolBaseHelper::AddHigherLayeredPool(
    HigherLayeredPool* higher_pool) {
  CHECK(higher_pool);
  CHECK(!ContainsKey(higher_pools_, higher_pool));
  higher_pools_.insert(higher_pool);
}

void ClientSocketPoolBaseHelper::RemoveHigherLayeredPool(
    HigherLayeredPool* higher_pool) {
  CHECK(higher_pool);
  CHECK(ContainsKey(higher_pools_, higher_pool));
  higher_pools_.erase(higher_pool);
}

// Entry point for a caller-initiated socket request. On synchronous
// completion (success or error) the request is destroyed here; on
// ERR_IO_PENDING ownership moves into the group's pending-request queue.
int ClientSocketPoolBaseHelper::RequestSocket(
    const std::string& group_name,
    scoped_ptr<const Request> request) {
  CHECK(!request->callback().is_null());
  CHECK(request->handle());

  // Cleanup any timed-out idle sockets if no timer is used.
  if (!use_cleanup_timer_)
    CleanupIdleSockets(false);

  request->net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL);
  Group* group = GetOrCreateGroup(group_name);

  int rv = RequestSocketInternal(group_name, *request);
  if (rv != ERR_IO_PENDING) {
    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    CHECK(!request->handle()->is_initialized());
    request.reset();
  } else {
    group->InsertPendingRequest(request.Pass());
    // Have to do this asynchronously, as closing sockets in higher level pools
    // call back in to |this|, which will cause all sorts of fun and exciting
    // re-entrancy issues if the socket pool is doing something else at the
    // time.
    if (group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
      base::MessageLoop::current()->PostTask(
          FROM_HERE,
          base::Bind(
              &ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools,
              weak_factory_.GetWeakPtr()));
    }
  }
  return rv;
}

// Preconnect path: tries to bring the group up to |num_sockets| active
// socket slots. |request| must be a handle-less, callback-less template.
void ClientSocketPoolBaseHelper::RequestSockets(
    const std::string& group_name,
    const Request& request,
    int num_sockets) {
  DCHECK(request.callback().is_null());
  DCHECK(!request.handle());

  // Cleanup any timed out idle sockets if no timer is used.
  if (!use_cleanup_timer_)
    CleanupIdleSockets(false);

  // Never preconnect past the per-group limit.
  if (num_sockets > max_sockets_per_group_) {
    num_sockets = max_sockets_per_group_;
  }

  request.net_log().BeginEvent(
      NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS,
      NetLog::IntegerCallback("num_sockets", num_sockets));

  Group* group = GetOrCreateGroup(group_name);

  // RequestSocketsInternal() may delete the group.
  bool deleted_group = false;

  int rv = OK;
  for (int num_iterations_left = num_sockets;
       group->NumActiveSocketSlots() < num_sockets &&
       num_iterations_left > 0 ; num_iterations_left--) {
    rv = RequestSocketInternal(group_name, request);
    if (rv < 0 && rv != ERR_IO_PENDING) {
      // We're encountering a synchronous error.  Give up.
      if (!ContainsKey(group_map_, group_name))
        deleted_group = true;
      break;
    }
    if (!ContainsKey(group_map_, group_name)) {
      // Unexpected.  The group should only be getting deleted on synchronous
      // error.
      NOTREACHED();
      deleted_group = true;
      break;
    }
  }

  if (!deleted_group && group->IsEmpty())
    RemoveGroup(group_name);

  // A pending connect is success from the preconnect caller's perspective.
  if (rv == ERR_IO_PENDING)
    rv = OK;
  request.net_log().EndEventWithNetErrorCode(
      NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS, rv);
}

// Shared implementation for RequestSocket() and RequestSockets(). Returns OK
// when a socket is handed out (or preconnected) synchronously, ERR_IO_PENDING
// when a ConnectJob is in flight or the request is stalled on a limit, or a
// synchronous connect error. May delete the group on error.
int ClientSocketPoolBaseHelper::RequestSocketInternal(
    const std::string& group_name,
    const Request& request) {
  ClientSocketHandle* const handle = request.handle();
  const bool preconnecting = !handle;
  Group* group = GetOrCreateGroup(group_name);

  if (!(request.flags() & NO_IDLE_SOCKETS)) {
    // Try to reuse a socket.
    if (AssignIdleSocketToRequest(request, group))
      return OK;
  }

  // If there are more ConnectJobs than pending requests, don't need to do
  // anything.  Can just wait for the extra job to connect, and then assign it
  // to the request.
  if (!preconnecting && group->TryToUseUnassignedConnectJob())
    return ERR_IO_PENDING;

  // Can we make another active socket now?
  if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
      !request.ignore_limits()) {
    // TODO(willchan): Consider whether or not we need to close a socket in a
    // higher layered group. I don't think this makes sense since we would just
    // reuse that socket then if we needed one and wouldn't make it down to this
    // layer.
    request.net_log().AddEvent(
        NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
    return ERR_IO_PENDING;
  }

  if (ReachedMaxSocketsLimit() && !request.ignore_limits()) {
    // NOTE(mmenke):  Wonder if we really need different code for each case
    // here.  Only reason for them now seems to be preconnects.
    if (idle_socket_count() > 0) {
      // There's an idle socket in this pool.  Either that's because there's
      // still one in this group, but we got here due to preconnecting bypassing
      // idle sockets, or because there's an idle socket in another group.
      bool closed = CloseOneIdleSocketExceptInGroup(group);
      if (preconnecting && !closed)
        return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
    } else {
      // We could check if we really have a stalled group here, but it requires
      // a scan of all groups, so just flip a flag here, and do the check later.
      request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS);
      return ERR_IO_PENDING;
    }
  }

  // We couldn't find a socket to reuse, and there's space to allocate one,
  // so allocate and connect a new one.
  scoped_ptr<ConnectJob> connect_job(
      connect_job_factory_->NewConnectJob(group_name, request, this));

  int rv = connect_job->Connect();
  if (rv == OK) {
    LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
    if (!preconnecting) {
      HandOutSocket(connect_job->PassSocket(), false /* not reused */,
                    connect_job->connect_timing(), handle, base::TimeDelta(),
                    group, request.net_log());
    } else {
      AddIdleSocket(connect_job->PassSocket(), group);
    }
  } else if (rv == ERR_IO_PENDING) {
    // If we don't have any sockets in this group, set a timer for potentially
    // creating a new one.  If the SYN is lost, this backup socket may complete
    // before the slow socket, improving end user latency.
    if (connect_backup_jobs_enabled_ && group->IsEmpty()) {
      group->StartBackupJobTimer(group_name, this);
    }

    connecting_socket_count_++;

    group->AddJob(connect_job.Pass(), preconnecting);
  } else {
    // Synchronous connect failure.
    LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
    scoped_ptr<StreamSocket> error_socket;
    if (!preconnecting) {
      DCHECK(handle);
      connect_job->GetAdditionalErrorState(handle);
      error_socket = connect_job->PassSocket();
    }
    if (error_socket) {
      // Hand out the error-state socket so the caller can retrieve the error
      // information from it.
      HandOutSocket(error_socket.Pass(), false /* not reused */,
                    connect_job->connect_timing(), handle, base::TimeDelta(),
                    group, request.net_log());
    } else if (group->IsEmpty()) {
      RemoveGroup(group_name);
    }
  }

  return rv;
}

// Tries to satisfy |request| with an idle socket from |group|, preferring the
// most recently used previously-used socket, else the oldest unused one.
// Returns true and hands out a socket on success.
bool ClientSocketPoolBaseHelper::AssignIdleSocketToRequest(
    const Request& request, Group* group) {
  std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();
  std::list<IdleSocket>::iterator idle_socket_it = idle_sockets->end();

  // Iterate through the idle sockets forwards (oldest to newest)
  //   * Delete any disconnected ones.
  //   * If we find a used idle socket, assign to |idle_socket|.  At the end,
  //   the |idle_socket_it| will be set to the newest used idle socket.
  for (std::list<IdleSocket>::iterator it = idle_sockets->begin();
       it != idle_sockets->end();) {
    if (!it->socket->IsConnectedAndIdle()) {
      DecrementIdleCount();
      delete it->socket;
      it = idle_sockets->erase(it);
      continue;
    }

    if (it->socket->WasEverUsed()) {
      // We found one we can reuse!
      idle_socket_it = it;
    }

    ++it;
  }

  // If we haven't found an idle socket, that means there are no used idle
  // sockets.  Pick the oldest (first) idle socket (FIFO).

  if (idle_socket_it == idle_sockets->end() && !idle_sockets->empty())
    idle_socket_it = idle_sockets->begin();

  if (idle_socket_it != idle_sockets->end()) {
    DecrementIdleCount();
    base::TimeDelta idle_time =
        base::TimeTicks::Now() - idle_socket_it->start_time;
    IdleSocket idle_socket = *idle_socket_it;
    idle_sockets->erase(idle_socket_it);
    HandOutSocket(
        scoped_ptr<StreamSocket>(idle_socket.socket),
        idle_socket.socket->WasEverUsed(),
        LoadTimingInfo::ConnectTiming(),
        request.handle(),
        idle_time,
        group,
        request.net_log());
    return true;
  }

  return false;
}

// static
void ClientSocketPoolBaseHelper::LogBoundConnectJobToRequest(
    const NetLog::Source& connect_job_source, const Request& request) {
  request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB,
                             connect_job_source.ToEventParametersCallback());
}

// Cancels the request associated with |handle|. If the request already
// completed and has a callback queued, the handed-out socket is reclaimed
// (and disconnected on error) instead.
void ClientSocketPoolBaseHelper::CancelRequest(
    const std::string& group_name, ClientSocketHandle* handle) {
  PendingCallbackMap::iterator callback_it = pending_callback_map_.find(handle);
  if (callback_it != pending_callback_map_.end()) {
    int result = callback_it->second.result;
    pending_callback_map_.erase(callback_it);
    scoped_ptr<StreamSocket> socket = handle->PassSocket();
    if (socket) {
      if (result != OK)
        socket->Disconnect();
      ReleaseSocket(handle->group_name(), socket.Pass(), handle->id());
    }
    return;
  }

  CHECK(ContainsKey(group_map_, group_name));

  Group* group = GetOrCreateGroup(group_name);

  // Search pending_requests for matching handle.
  scoped_ptr<const Request> request =
      group->FindAndRemovePendingRequest(handle);
  if (request) {
    request->net_log().AddEvent(NetLog::TYPE_CANCELLED);
    request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);

    // We let the job run, unless we're at the socket limit and there is
    // not another request waiting on the job.
    if (group->jobs().size() > group->pending_request_count() &&
        ReachedMaxSocketsLimit()) {
      RemoveConnectJob(*group->jobs().begin(), group);
      CheckForStalledSocketGroups();
    }
  }
}

bool ClientSocketPoolBaseHelper::HasGroup(const std::string& group_name) const {
  return ContainsKey(group_map_, group_name);
}

void ClientSocketPoolBaseHelper::CloseIdleSockets() {
  CleanupIdleSockets(true);
  DCHECK_EQ(0, idle_socket_count_);
}

int ClientSocketPoolBaseHelper::IdleSocketCountInGroup(
    const std::string& group_name) const {
  GroupMap::const_iterator i = group_map_.find(group_name);
  CHECK(i != group_map_.end());

  return i->second->idle_sockets().size();
}

LoadState ClientSocketPoolBaseHelper::GetLoadState(
    const std::string& group_name,
    const ClientSocketHandle* handle) const {
  if (ContainsKey(pending_callback_map_, handle))
    return LOAD_STATE_CONNECTING;

  if (!ContainsKey(group_map_, group_name)) {
    NOTREACHED() << "ClientSocketPool does not contain group: " << group_name
                 << " for handle: " << handle;
    return LOAD_STATE_IDLE;
  }

  // Can't use operator[] since it is non-const.
  const Group& group = *group_map_.find(group_name)->second;

  if (group.HasConnectJobForHandle(handle)) {
    // Just return the state of the farthest along ConnectJob for the first
    // group.jobs().size() pending requests.
    LoadState max_state = LOAD_STATE_IDLE;
    for (ConnectJobSet::const_iterator job_it = group.jobs().begin();
         job_it != group.jobs().end(); ++job_it) {
      max_state = std::max(max_state, (*job_it)->GetLoadState());
    }
    return max_state;
  }

  if (group.IsStalledOnPoolMaxSockets(max_sockets_per_group_))
    return LOAD_STATE_WAITING_FOR_STALLED_SOCKET_POOL;
  return LOAD_STATE_WAITING_FOR_AVAILABLE_SOCKET;
}

// Serializes the pool's state (counts, per-group sockets/jobs) for
// about:net-internals. Caller takes ownership of the returned dictionary.
base::DictionaryValue* ClientSocketPoolBaseHelper::GetInfoAsValue(
    const std::string& name, const std::string& type) const {
  base::DictionaryValue* dict = new base::DictionaryValue();
  dict->SetString("name", name);
  dict->SetString("type", type);
  dict->SetInteger("handed_out_socket_count", handed_out_socket_count_);
  dict->SetInteger("connecting_socket_count", connecting_socket_count_);
  dict->SetInteger("idle_socket_count", idle_socket_count_);
  dict->SetInteger("max_socket_count", max_sockets_);
  dict->SetInteger("max_sockets_per_group", max_sockets_per_group_);
  dict->SetInteger("pool_generation_number", pool_generation_number_);

  if (group_map_.empty())
    return dict;

  base::DictionaryValue* all_groups_dict = new base::DictionaryValue();
  for (GroupMap::const_iterator it = group_map_.begin();
       it != group_map_.end(); it++) {
    const Group* group = it->second;
    base::DictionaryValue* group_dict = new base::DictionaryValue();

    group_dict->SetInteger("pending_request_count",
                           group->pending_request_count());
    if (group->has_pending_requests()) {
      group_dict->SetString(
          "top_pending_priority",
          RequestPriorityToString(group->TopPendingPriority()));
    }

    group_dict->SetInteger("active_socket_count", group->active_socket_count());

    base::ListValue* idle_socket_list = new base::ListValue();
    std::list<IdleSocket>::const_iterator idle_socket;
    for (idle_socket = group->idle_sockets().begin();
         idle_socket != group->idle_sockets().end();
         idle_socket++) {
      int source_id = idle_socket->socket->NetLog().source().id;
      idle_socket_list->Append(new base::FundamentalValue(source_id));
    }
    group_dict->Set("idle_sockets", idle_socket_list);

    base::ListValue* connect_jobs_list = new base::ListValue();
    std::set<ConnectJob*>::const_iterator job = group->jobs().begin();
    for (job = group->jobs().begin(); job != group->jobs().end(); job++) {
      int source_id = (*job)->net_log().source().id;
      connect_jobs_list->Append(new base::FundamentalValue(source_id));
    }
    group_dict->Set("connect_jobs", connect_jobs_list);

    group_dict->SetBoolean("is_stalled",
                           group->IsStalledOnPoolMaxSockets(
                               max_sockets_per_group_));
    group_dict->SetBoolean("backup_job_timer_is_running",
                           group->BackupJobTimerIsRunning());

    all_groups_dict->SetWithoutPathExpansion(it->first, group_dict);
  }
  dict->Set("groups", all_groups_dict);
  return dict;
}

// An idle socket should be cleaned up when it times out, or as soon as it is
// disconnected (with a stricter check for sockets that have carried data).
bool ClientSocketPoolBaseHelper::IdleSocket::ShouldCleanup(
    base::TimeTicks now,
    base::TimeDelta timeout) const {
  bool timed_out = (now - start_time) >= timeout;
  if (timed_out)
    return true;
  if (socket->WasEverUsed())
    return !socket->IsConnectedAndIdle();
  return !socket->IsConnected();
}

// Closes timed-out / dead idle sockets in all groups; closes ALL idle sockets
// when |force| is true. Empty groups are removed.
void ClientSocketPoolBaseHelper::CleanupIdleSockets(bool force) {
  if (idle_socket_count_ == 0)
    return;

  // Current time value. Retrieving it once at the function start rather than
  // inside the inner loop, since it shouldn't change by any meaningful amount.
  base::TimeTicks now = base::TimeTicks::Now();

  GroupMap::iterator i = group_map_.begin();
  while (i != group_map_.end()) {
    Group* group = i->second;

    std::list<IdleSocket>::iterator j = group->mutable_idle_sockets()->begin();
    while (j != group->idle_sockets().end()) {
      base::TimeDelta timeout =
          j->socket->WasEverUsed() ?
          used_idle_socket_timeout_ : unused_idle_socket_timeout_;
      if (force || j->ShouldCleanup(now, timeout)) {
        delete j->socket;
        j = group->mutable_idle_sockets()->erase(j);
        DecrementIdleCount();
      } else {
        ++j;
      }
    }

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}

ClientSocketPoolBaseHelper::Group* ClientSocketPoolBaseHelper::GetOrCreateGroup(
    const std::string& group_name) {
  GroupMap::iterator it = group_map_.find(group_name);
  if (it != group_map_.end())
    return it->second;
  Group* group = new Group;
  group_map_[group_name] = group;
  return group;
}

void ClientSocketPoolBaseHelper::RemoveGroup(const std::string& group_name) {
  GroupMap::iterator it = group_map_.find(group_name);
  CHECK(it != group_map_.end());

  RemoveGroup(it);
}

void ClientSocketPoolBaseHelper::RemoveGroup(GroupMap::iterator it) {
  delete it->second;
  group_map_.erase(it);
}

// static
bool ClientSocketPoolBaseHelper::connect_backup_jobs_enabled() {
  return g_connect_backup_jobs_enabled;
}

// static
// Returns the previous value so tests can restore it.
bool ClientSocketPoolBaseHelper::set_connect_backup_jobs_enabled(bool enabled) {
  bool old_value = g_connect_backup_jobs_enabled;
  g_connect_backup_jobs_enabled = enabled;
  return old_value;
}

void ClientSocketPoolBaseHelper::EnableConnectBackupJobs() {
  connect_backup_jobs_enabled_ = g_connect_backup_jobs_enabled;
}

// Starts the periodic cleanup timer when the first idle socket appears.
void ClientSocketPoolBaseHelper::IncrementIdleCount() {
  if (++idle_socket_count_ == 1 && use_cleanup_timer_)
    StartIdleSocketTimer();
}

// Stops the cleanup timer when the last idle socket goes away.
void ClientSocketPoolBaseHelper::DecrementIdleCount() {
  if (--idle_socket_count_ == 0)
    timer_.Stop();
}

// static
bool ClientSocketPoolBaseHelper::cleanup_timer_enabled() {
  return g_cleanup_timer_enabled;
}

// static
// Returns the previous value so tests can restore it.
bool ClientSocketPoolBaseHelper::set_cleanup_timer_enabled(bool enabled) {
  bool old_value = g_cleanup_timer_enabled;
  g_cleanup_timer_enabled = enabled;
  return old_value;
}

void ClientSocketPoolBaseHelper::StartIdleSocketTimer() {
  timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kCleanupInterval), this,
               &ClientSocketPoolBaseHelper::OnCleanupTimerFired);
}

// Returns a previously handed-out socket to the pool. A socket from a stale
// generation (|id| mismatch after a flush) or one that is no longer
// connected-and-idle is destroyed instead of being pooled.
void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name,
                                               scoped_ptr<StreamSocket> socket,
                                               int id) {
  GroupMap::iterator i = group_map_.find(group_name);
  CHECK(i != group_map_.end());

  Group* group = i->second;

  CHECK_GT(handed_out_socket_count_, 0);
  handed_out_socket_count_--;

  CHECK_GT(group->active_socket_count(), 0);
  group->DecrementActiveSocketCount();

  const bool can_reuse = socket->IsConnectedAndIdle() &&
      id == pool_generation_number_;
  if (can_reuse) {
    // Add it to the idle list.
    AddIdleSocket(socket.Pass(), group);
    OnAvailableSocketSlot(group_name, group);
  } else {
    socket.reset();
  }

  CheckForStalledSocketGroups();
}

void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() {
  // If we have idle sockets, see if we can give one to the top-stalled group.
  std::string top_group_name;
  Group* top_group = NULL;
  if (!FindTopStalledGroup(&top_group, &top_group_name)) {
    // There may still be a stalled group in a lower level pool.
    for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin();
         it != lower_pools_.end();
         ++it) {
      if ((*it)->IsStalled()) {
        CloseOneIdleSocket();
        break;
      }
    }
    return;
  }

  if (ReachedMaxSocketsLimit()) {
    if (idle_socket_count() > 0) {
      CloseOneIdleSocket();
    } else {
      // We can't activate more sockets since we're already at our global
      // limit.
      return;
    }
  }

  // Note:  we don't loop on waking stalled groups.  If the stalled group is at
  //        its limit, may be left with other stalled groups that could be
  //        woken.  This isn't optimal, but there is no starvation, so to avoid
  //        the looping we leave it at this.
  OnAvailableSocketSlot(top_group_name, top_group);
}

// Search for the highest priority pending request, amongst the groups that
// are not at the |max_sockets_per_group_| limit. Note: for requests with
// the same priority, the winner is based on group hash ordering (and not
// insertion order).
bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
    Group** group,
    std::string* group_name) const {
  CHECK((group && group_name) || (!group && !group_name));
  Group* top_group = NULL;
  const std::string* top_group_name = NULL;
  bool has_stalled_group = false;
  for (GroupMap::const_iterator i = group_map_.begin();
       i != group_map_.end(); ++i) {
    Group* curr_group = i->second;
    if (!curr_group->has_pending_requests())
      continue;
    if (curr_group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
      if (!group)
        return true;
      has_stalled_group = true;
      bool has_higher_priority = !top_group ||
          curr_group->TopPendingPriority() > top_group->TopPendingPriority();
      if (has_higher_priority) {
        top_group = curr_group;
        top_group_name = &i->first;
      }
    }
  }

  if (top_group) {
    CHECK(group);
    *group = top_group;
    *group_name = *top_group_name;
  } else {
    CHECK(!has_stalled_group);
  }
  return has_stalled_group;
}

// ConnectJob::Delegate implementation: a ConnectJob finished asynchronously.
// Assigns the resulting socket (or error state) to the next pending request,
// or pools it as idle. |job| is destroyed via RemoveConnectJob() on every
// path through this function.
void ClientSocketPoolBaseHelper::OnConnectJobComplete(
    int result, ConnectJob* job) {
  DCHECK_NE(ERR_IO_PENDING, result);
  const std::string group_name = job->group_name();
  GroupMap::iterator group_it = group_map_.find(group_name);
  CHECK(group_it != group_map_.end());
  Group* group = group_it->second;

  scoped_ptr<StreamSocket> socket = job->PassSocket();

  // Copies of these are needed because |job| may be deleted before they are
  // accessed.
  BoundNetLog job_log = job->net_log();
  LoadTimingInfo::ConnectTiming connect_timing = job->connect_timing();

  // RemoveConnectJob(job, _) must be called by all branches below;
  // otherwise, |job| will be leaked.

  if (result == OK) {
    DCHECK(socket.get());
    RemoveConnectJob(job, group);
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    if (request) {
      LogBoundConnectJobToRequest(job_log.source(), *request);
      HandOutSocket(
          socket.Pass(), false /* unused socket */, connect_timing,
          request->handle(), base::TimeDelta(), group, request->net_log());
      request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);
      InvokeUserCallbackLater(request->handle(), request->callback(), result);
    } else {
      AddIdleSocket(socket.Pass(), group);
      OnAvailableSocketSlot(group_name, group);
      CheckForStalledSocketGroups();
    }
  } else {
    // If we got a socket, it must contain error information so pass that
    // up so that the caller can retrieve it.
    bool handed_out_socket = false;
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    if (request) {
      LogBoundConnectJobToRequest(job_log.source(), *request);
      job->GetAdditionalErrorState(request->handle());
      RemoveConnectJob(job, group);
      if (socket.get()) {
        handed_out_socket = true;
        HandOutSocket(socket.Pass(), false /* unused socket */,
                      connect_timing, request->handle(), base::TimeDelta(),
                      group, request->net_log());
      }
      request->net_log().EndEventWithNetErrorCode(
          NetLog::TYPE_SOCKET_POOL, result);
      InvokeUserCallbackLater(request->handle(), request->callback(), result);
    } else {
      RemoveConnectJob(job, group);
    }
    if (!handed_out_socket) {
      OnAvailableSocketSlot(group_name, group);
      CheckForStalledSocketGroups();
    }
  }
}

void ClientSocketPoolBaseHelper::OnIPAddressChanged() {
  FlushWithError(ERR_NETWORK_CHANGED);
}

// Aborts everything in the pool: bumps the generation number (so outstanding
// handed-out sockets can't be returned to the idle list), cancels connect
// jobs, closes idle sockets, and fails pending requests with |error|.
void ClientSocketPoolBaseHelper::FlushWithError(int error) {
  pool_generation_number_++;
  CancelAllConnectJobs();
  CloseIdleSockets();
  CancelAllRequestsWithError(error);
}

// Destroys |job| (owned by |group|) and updates the connecting-socket count.
void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
                                                  Group* group) {
  CHECK_GT(connecting_socket_count_, 0);
  connecting_socket_count_--;

  DCHECK(group);
  group->RemoveJob(job);
}

// Called when a socket slot frees up in |group|: either retires the (now
// empty) group or services its next pending request.
void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
    const std::string& group_name, Group* group) {
  DCHECK(ContainsKey(group_map_, group_name));
  if (group->IsEmpty()) {
    RemoveGroup(group_name);
  } else if (group->has_pending_requests()) {
    ProcessPendingRequest(group_name, group);
  }
}

// Re-runs RequestSocketInternal() for the group's next pending request. On
// synchronous completion the request is popped and its callback is queued.
void ClientSocketPoolBaseHelper::ProcessPendingRequest(
    const std::string& group_name, Group* group) {
  const Request* next_request = group->GetNextPendingRequest();
  DCHECK(next_request);
  int rv = RequestSocketInternal(group_name, *next_request);
  if (rv != ERR_IO_PENDING) {
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    DCHECK(request);
    if (group->IsEmpty())
      RemoveGroup(group_name);

    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    InvokeUserCallbackLater(request->handle(), request->callback(), rv);
  }
}

// Binds |socket| to |handle| and updates pool/group accounting. |idle_time|
// is only meaningful (and only logged) for reused sockets.
void ClientSocketPoolBaseHelper::HandOutSocket(
    scoped_ptr<StreamSocket> socket,
    bool reused,
    const LoadTimingInfo::ConnectTiming& connect_timing,
    ClientSocketHandle* handle,
    base::TimeDelta idle_time,
    Group* group,
    const BoundNetLog& net_log) {
  DCHECK(socket);
  handle->SetSocket(socket.Pass());
  handle->set_is_reused(reused);
  handle->set_idle_time(idle_time);
  handle->set_pool_id(pool_generation_number_);
  handle->set_connect_timing(connect_timing);

  if (reused) {
    net_log.AddEvent(
        NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET,
        NetLog::IntegerCallback(
            "idle_ms", static_cast<int>(idle_time.InMilliseconds())));
  }

  net_log.AddEvent(
      NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
      handle->socket()->NetLog().source().ToEventParametersCallback());

  handed_out_socket_count_++;
  group->IncrementActiveSocketCount();
}

// Moves |socket| (ownership transferred) onto |group|'s idle list,
// timestamped for the idle-timeout checks.
void ClientSocketPoolBaseHelper::AddIdleSocket(
    scoped_ptr<StreamSocket> socket,
    Group* group) {
  DCHECK(socket);
  IdleSocket idle_socket;
  idle_socket.socket = socket.release();
  idle_socket.start_time = base::TimeTicks::Now();

  group->mutable_idle_sockets()->push_back(idle_socket);
  IncrementIdleCount();
}

// Drops every in-flight ConnectJob in every group; removes groups that become
// empty as a result.
void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;
    connecting_socket_count_ -= group->jobs().size();
    group->RemoveAllJobs();

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase() which will invalidate the iterator,
      // but i will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
  DCHECK_EQ(0, connecting_socket_count_);
}

// Fails every pending request in every group with |error| (callbacks are
// queued, not run inline); removes groups that become empty.
void ClientSocketPoolBaseHelper::CancelAllRequestsWithError(int error) {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;

    while (true) {
      scoped_ptr<const Request> request = group->PopNextPendingRequest();
      if (!request)
        break;
      InvokeUserCallbackLater(request->handle(), request->callback(), error);
    }

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase() which will invalidate the iterator,
      // but i will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}

bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
  // Each connecting socket will eventually connect and be handed out.
1050 int total = handed_out_socket_count_ + connecting_socket_count_ + 1051 idle_socket_count(); 1052 // There can be more sockets than the limit since some requests can ignore 1053 // the limit 1054 if (total < max_sockets_) 1055 return false; 1056 return true; 1057 } 1058 1059 bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() { 1060 if (idle_socket_count() == 0) 1061 return false; 1062 return CloseOneIdleSocketExceptInGroup(NULL); 1063 } 1064 1065 bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup( 1066 const Group* exception_group) { 1067 CHECK_GT(idle_socket_count(), 0); 1068 1069 for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) { 1070 Group* group = i->second; 1071 if (exception_group == group) 1072 continue; 1073 std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets(); 1074 1075 if (!idle_sockets->empty()) { 1076 delete idle_sockets->front().socket; 1077 idle_sockets->pop_front(); 1078 DecrementIdleCount(); 1079 if (group->IsEmpty()) 1080 RemoveGroup(i); 1081 1082 return true; 1083 } 1084 } 1085 1086 return false; 1087 } 1088 1089 bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInHigherLayeredPool() { 1090 // This pool doesn't have any idle sockets. It's possible that a pool at a 1091 // higher layer is holding one of this sockets active, but it's actually idle. 1092 // Query the higher layers. 
1093 for (std::set<HigherLayeredPool*>::const_iterator it = higher_pools_.begin(); 1094 it != higher_pools_.end(); ++it) { 1095 if ((*it)->CloseOneIdleConnection()) 1096 return true; 1097 } 1098 return false; 1099 } 1100 1101 void ClientSocketPoolBaseHelper::InvokeUserCallbackLater( 1102 ClientSocketHandle* handle, const CompletionCallback& callback, int rv) { 1103 CHECK(!ContainsKey(pending_callback_map_, handle)); 1104 pending_callback_map_[handle] = CallbackResultPair(callback, rv); 1105 base::MessageLoop::current()->PostTask( 1106 FROM_HERE, 1107 base::Bind(&ClientSocketPoolBaseHelper::InvokeUserCallback, 1108 weak_factory_.GetWeakPtr(), handle)); 1109 } 1110 1111 void ClientSocketPoolBaseHelper::InvokeUserCallback( 1112 ClientSocketHandle* handle) { 1113 PendingCallbackMap::iterator it = pending_callback_map_.find(handle); 1114 1115 // Exit if the request has already been cancelled. 1116 if (it == pending_callback_map_.end()) 1117 return; 1118 1119 CHECK(!handle->is_initialized()); 1120 CompletionCallback callback = it->second.callback; 1121 int result = it->second.result; 1122 pending_callback_map_.erase(it); 1123 callback.Run(result); 1124 } 1125 1126 void ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools() { 1127 while (IsStalled()) { 1128 // Closing a socket will result in calling back into |this| to use the freed 1129 // socket slot, so nothing else is needed. 1130 if (!CloseOneIdleConnectionInHigherLayeredPool()) 1131 return; 1132 } 1133 } 1134 1135 ClientSocketPoolBaseHelper::Group::Group() 1136 : unassigned_job_count_(0), 1137 pending_requests_(NUM_PRIORITIES), 1138 active_socket_count_(0) {} 1139 1140 ClientSocketPoolBaseHelper::Group::~Group() { 1141 DCHECK_EQ(0u, unassigned_job_count_); 1142 } 1143 1144 void ClientSocketPoolBaseHelper::Group::StartBackupJobTimer( 1145 const std::string& group_name, 1146 ClientSocketPoolBaseHelper* pool) { 1147 // Only allow one timer to run at a time. 
1148 if (BackupJobTimerIsRunning()) 1149 return; 1150 1151 // Unretained here is okay because |backup_job_timer_| is 1152 // automatically cancelled when it's destroyed. 1153 backup_job_timer_.Start( 1154 FROM_HERE, pool->ConnectRetryInterval(), 1155 base::Bind(&Group::OnBackupJobTimerFired, base::Unretained(this), 1156 group_name, pool)); 1157 } 1158 1159 bool ClientSocketPoolBaseHelper::Group::BackupJobTimerIsRunning() const { 1160 return backup_job_timer_.IsRunning(); 1161 } 1162 1163 bool ClientSocketPoolBaseHelper::Group::TryToUseUnassignedConnectJob() { 1164 SanityCheck(); 1165 1166 if (unassigned_job_count_ == 0) 1167 return false; 1168 --unassigned_job_count_; 1169 return true; 1170 } 1171 1172 void ClientSocketPoolBaseHelper::Group::AddJob(scoped_ptr<ConnectJob> job, 1173 bool is_preconnect) { 1174 SanityCheck(); 1175 1176 if (is_preconnect) 1177 ++unassigned_job_count_; 1178 jobs_.insert(job.release()); 1179 } 1180 1181 void ClientSocketPoolBaseHelper::Group::RemoveJob(ConnectJob* job) { 1182 scoped_ptr<ConnectJob> owned_job(job); 1183 SanityCheck(); 1184 1185 std::set<ConnectJob*>::iterator it = jobs_.find(job); 1186 if (it != jobs_.end()) { 1187 jobs_.erase(it); 1188 } else { 1189 NOTREACHED(); 1190 } 1191 size_t job_count = jobs_.size(); 1192 if (job_count < unassigned_job_count_) 1193 unassigned_job_count_ = job_count; 1194 1195 // If we've got no more jobs for this group, then we no longer need a 1196 // backup job either. 1197 if (jobs_.empty()) 1198 backup_job_timer_.Stop(); 1199 } 1200 1201 void ClientSocketPoolBaseHelper::Group::OnBackupJobTimerFired( 1202 std::string group_name, 1203 ClientSocketPoolBaseHelper* pool) { 1204 // If there are no more jobs pending, there is no work to do. 1205 // If we've done our cleanups correctly, this should not happen. 
1206 if (jobs_.empty()) { 1207 NOTREACHED(); 1208 return; 1209 } 1210 1211 // If our old job is waiting on DNS, or if we can't create any sockets 1212 // right now due to limits, just reset the timer. 1213 if (pool->ReachedMaxSocketsLimit() || 1214 !HasAvailableSocketSlot(pool->max_sockets_per_group_) || 1215 (*jobs_.begin())->GetLoadState() == LOAD_STATE_RESOLVING_HOST) { 1216 StartBackupJobTimer(group_name, pool); 1217 return; 1218 } 1219 1220 if (pending_requests_.empty()) 1221 return; 1222 1223 scoped_ptr<ConnectJob> backup_job = 1224 pool->connect_job_factory_->NewConnectJob( 1225 group_name, *pending_requests_.FirstMax().value(), pool); 1226 backup_job->net_log().AddEvent(NetLog::TYPE_BACKUP_CONNECT_JOB_CREATED); 1227 SIMPLE_STATS_COUNTER("socket.backup_created"); 1228 int rv = backup_job->Connect(); 1229 pool->connecting_socket_count_++; 1230 ConnectJob* raw_backup_job = backup_job.get(); 1231 AddJob(backup_job.Pass(), false); 1232 if (rv != ERR_IO_PENDING) 1233 pool->OnConnectJobComplete(rv, raw_backup_job); 1234 } 1235 1236 void ClientSocketPoolBaseHelper::Group::SanityCheck() { 1237 DCHECK_LE(unassigned_job_count_, jobs_.size()); 1238 } 1239 1240 void ClientSocketPoolBaseHelper::Group::RemoveAllJobs() { 1241 SanityCheck(); 1242 1243 // Delete active jobs. 1244 STLDeleteElements(&jobs_); 1245 unassigned_job_count_ = 0; 1246 1247 // Stop backup job timer. 1248 backup_job_timer_.Stop(); 1249 } 1250 1251 const ClientSocketPoolBaseHelper::Request* 1252 ClientSocketPoolBaseHelper::Group::GetNextPendingRequest() const { 1253 return 1254 pending_requests_.empty() ? NULL : pending_requests_.FirstMax().value(); 1255 } 1256 1257 bool ClientSocketPoolBaseHelper::Group::HasConnectJobForHandle( 1258 const ClientSocketHandle* handle) const { 1259 // Search the first |jobs_.size()| pending requests for |handle|. 1260 // If it's farther back in the deque than that, it doesn't have a 1261 // corresponding ConnectJob. 
1262 size_t i = 0; 1263 for (RequestQueue::Pointer pointer = pending_requests_.FirstMax(); 1264 !pointer.is_null() && i < jobs_.size(); 1265 pointer = pending_requests_.GetNextTowardsLastMin(pointer), ++i) { 1266 if (pointer.value()->handle() == handle) 1267 return true; 1268 } 1269 return false; 1270 } 1271 1272 void ClientSocketPoolBaseHelper::Group::InsertPendingRequest( 1273 scoped_ptr<const Request> request) { 1274 // This value must be cached before we release |request|. 1275 RequestPriority priority = request->priority(); 1276 if (request->ignore_limits()) { 1277 // Put requests with ignore_limits == true (which should have 1278 // priority == MAXIMUM_PRIORITY) ahead of other requests with 1279 // MAXIMUM_PRIORITY. 1280 DCHECK_EQ(priority, MAXIMUM_PRIORITY); 1281 pending_requests_.InsertAtFront(request.release(), priority); 1282 } else { 1283 pending_requests_.Insert(request.release(), priority); 1284 } 1285 } 1286 1287 scoped_ptr<const ClientSocketPoolBaseHelper::Request> 1288 ClientSocketPoolBaseHelper::Group::PopNextPendingRequest() { 1289 if (pending_requests_.empty()) 1290 return scoped_ptr<const ClientSocketPoolBaseHelper::Request>(); 1291 return RemovePendingRequest(pending_requests_.FirstMax()); 1292 } 1293 1294 scoped_ptr<const ClientSocketPoolBaseHelper::Request> 1295 ClientSocketPoolBaseHelper::Group::FindAndRemovePendingRequest( 1296 ClientSocketHandle* handle) { 1297 for (RequestQueue::Pointer pointer = pending_requests_.FirstMax(); 1298 !pointer.is_null(); 1299 pointer = pending_requests_.GetNextTowardsLastMin(pointer)) { 1300 if (pointer.value()->handle() == handle) { 1301 scoped_ptr<const Request> request = RemovePendingRequest(pointer); 1302 return request.Pass(); 1303 } 1304 } 1305 return scoped_ptr<const ClientSocketPoolBaseHelper::Request>(); 1306 } 1307 1308 scoped_ptr<const ClientSocketPoolBaseHelper::Request> 1309 ClientSocketPoolBaseHelper::Group::RemovePendingRequest( 1310 const RequestQueue::Pointer& pointer) { 1311 scoped_ptr<const 
Request> request(pointer.value()); 1312 pending_requests_.Erase(pointer); 1313 // If there are no more requests, kill the backup timer. 1314 if (pending_requests_.empty()) 1315 backup_job_timer_.Stop(); 1316 return request.Pass(); 1317 } 1318 1319 } // namespace internal 1320 1321 } // namespace net 1322