// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef NET_SPDY_SPDY_SESSION_H_
#define NET_SPDY_SPDY_SESSION_H_
#pragma once

#include <deque>
#include <list>
#include <map>
#include <queue>
#include <string>

#include "base/gtest_prod_util.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/task.h"
#include "net/base/io_buffer.h"
#include "net/base/load_states.h"
#include "net/base/net_errors.h"
#include "net/base/net_log.h"
#include "net/base/request_priority.h"
#include "net/base/ssl_config_service.h"
#include "net/base/upload_data_stream.h"
#include "net/socket/client_socket.h"
#include "net/socket/client_socket_handle.h"
#include "net/spdy/spdy_framer.h"
#include "net/spdy/spdy_io_buffer.h"
#include "net/spdy/spdy_protocol.h"
#include "net/spdy/spdy_session_pool.h"

namespace net {

// This is somewhat arbitrary and not really fixed, but it will always work
// reasonably with ethernet. Chop the world into 2-packet chunks. This is
// somewhat arbitrary, but is reasonably small and ensures that we elicit
// ACKs quickly from TCP (because TCP tries to only ACK every other packet).
const int kMss = 1430;
const int kMaxSpdyFrameChunkSize = (2 * kMss) - spdy::SpdyFrame::size();

class BoundNetLog;
class SpdySettingsStorage;
class SpdyStream;
class SSLInfo;

// A SpdySession owns one transport socket and multiplexes SPDY streams over
// it: it runs the SPDY framer, tracks active and server-pushed streams, and
// queues outgoing frames for the socket.
class SpdySession : public base::RefCounted<SpdySession>,
                    public spdy::SpdyFramerVisitorInterface {
 public:
  // Create a new SpdySession.
  // |host_port_proxy_pair| is the host/port that this session connects to, and
  // the proxy configuration settings that it's using.
  // |spdy_session_pool| is the SpdySessionPool that owns us. Its lifetime must
  // strictly be greater than |this|.
  // |spdy_settings| is the storage used to persist SETTINGS for this session's
  // origin. |net_log| is the NetLog that we log network events to.
  SpdySession(const HostPortProxyPair& host_port_proxy_pair,
              SpdySessionPool* spdy_session_pool,
              SpdySettingsStorage* spdy_settings,
              NetLog* net_log);

  const HostPortPair& host_port_pair() const {
    return host_port_proxy_pair_.first;
  }
  const HostPortProxyPair& host_port_proxy_pair() const {
    return host_port_proxy_pair_;
  }

  // Get a pushed stream for a given |url|.
  // If the server initiates a stream, it might already exist for a given path.
  // The server might also not have initiated the stream yet, but indicated it
  // will via X-Associated-Content. Writes the stream out to |spdy_stream|.
  // Returns a net error code.
  int GetPushStream(
      const GURL& url,
      scoped_refptr<SpdyStream>* spdy_stream,
      const BoundNetLog& stream_net_log);

  // Create a new stream for a given |url|. Writes it out to |spdy_stream|.
  // Returns a net error code, possibly ERR_IO_PENDING.
  int CreateStream(
      const GURL& url,
      RequestPriority priority,
      scoped_refptr<SpdyStream>* spdy_stream,
      const BoundNetLog& stream_net_log,
      CompletionCallback* callback);

  // Remove PendingCreateStream objects on transaction deletion
  void CancelPendingCreateStreams(const scoped_refptr<SpdyStream>* spdy_stream);

  // Used by SpdySessionPool to initialize with a pre-existing SSL socket. For
  // testing, setting is_secure to false allows initialization with a
  // pre-existing TCP socket.
  // Returns OK on success, or an error on failure.
  net::Error InitializeWithSocket(ClientSocketHandle* connection,
                                  bool is_secure,
                                  int certificate_error_code);

  // Check to see if this SPDY session can support an additional domain.
  // If the session is un-authenticated, then this call always returns true.
  // For SSL-based sessions, verifies that the certificate in use by this
  // session provides authentication for the domain.
  // NOTE: This function can have false negatives on some platforms.
  bool VerifyDomainAuthentication(const std::string& domain);

  // Send the SYN frame for |stream_id|. This also sends PING message to check
  // the status of the connection.
  int WriteSynStream(
      spdy::SpdyStreamId stream_id,
      RequestPriority priority,
      spdy::SpdyControlFlags flags,
      const linked_ptr<spdy::SpdyHeaderBlock>& headers);

  // Write a data frame to the stream.
  // Used to create and queue a data frame for the given stream.
  int WriteStreamData(spdy::SpdyStreamId stream_id, net::IOBuffer* data,
                      int len,
                      spdy::SpdyDataFlags flags);

  // Close a stream.
  void CloseStream(spdy::SpdyStreamId stream_id, int status);

  // Reset a stream by sending a RST_STREAM frame with given status code.
  // Also closes the stream. Was not piggybacked to CloseStream since not
  // all of the calls to CloseStream necessitate sending a RST_STREAM.
  void ResetStream(spdy::SpdyStreamId stream_id, spdy::SpdyStatusCodes status);

  // Check if a stream is active.
  bool IsStreamActive(spdy::SpdyStreamId stream_id) const;

  // The LoadState is used for informing the user of the current network
  // status, such as "resolving host", "connecting", etc.
  LoadState GetLoadState() const;

  // Fills SSL info in |ssl_info| and returns true when SSL is in use.
  bool GetSSLInfo(SSLInfo* ssl_info, bool* was_npn_negotiated);

  // Fills SSL Certificate Request info |cert_request_info| and returns
  // true when SSL is in use.
  bool GetSSLCertRequestInfo(SSLCertRequestInfo* cert_request_info);

  // Enable or disable SSL.
  static void SetSSLMode(bool enable) { use_ssl_ = enable; }
  static bool SSLMode() { return use_ssl_; }

  // Enable or disable flow control.
  static void set_flow_control(bool enable) { use_flow_control_ = enable; }
  static bool flow_control() { return use_flow_control_; }

  // Sets the max concurrent streams per session.
  static void set_max_concurrent_streams(size_t value) {
    max_concurrent_stream_limit_ = value;
  }
  static size_t max_concurrent_streams() {
    return max_concurrent_stream_limit_;
  }

  // Enable sending of PING frame with each request.
  static void set_enable_ping_based_connection_checking(bool enable) {
    enable_ping_based_connection_checking_ = enable;
  }
  static bool enable_ping_based_connection_checking() {
    return enable_ping_based_connection_checking_;
  }

  // Send WINDOW_UPDATE frame, called by a stream whenever receive window
  // size is increased.
  void SendWindowUpdate(spdy::SpdyStreamId stream_id, int delta_window_size);

  // If session is closed, no new streams/transactions should be created.
  bool IsClosed() const { return state_ == CLOSED; }

  // Closes this session. This will close all active streams and mark
  // the session as permanently closed.
  // |err| should not be OK; this function is intended to be called on
  // error.
  // |remove_from_pool| indicates whether to also remove the session from the
  // session pool.
  void CloseSessionOnError(net::Error err, bool remove_from_pool);

  // Retrieves information on the current state of the SPDY session as a
  // Value. Caller takes possession of the returned value.
  Value* GetInfoAsValue() const;

  // Indicates whether the session is being reused after having successfully
  // used to send/receive data in the past.
  bool IsReused() const {
    return frames_received_ > 0;
  }

  // Returns true if the underlying transport socket ever had any reads or
  // writes.
  bool WasEverUsed() const {
    return connection_->socket()->WasEverUsed();
  }

  // NOTE(review): |pool| is ignored here; the member is unconditionally
  // cleared to NULL, so this acts as a "detach from pool" call. Confirm this
  // is intentional and consider renaming (e.g. clear_spdy_session_pool()).
  void set_spdy_session_pool(SpdySessionPool* pool) {
    spdy_session_pool_ = NULL;
  }

  // Returns true if this session currently has any active streams.
  bool is_active() const {
    return !active_streams_.empty();
  }

  // Access to the number of active and pending streams. These are primarily
  // available for testing and diagnostics.
  size_t num_active_streams() const { return active_streams_.size(); }
  size_t num_unclaimed_pushed_streams() const {
    return unclaimed_pushed_streams_.size();
  }

  const BoundNetLog& net_log() const { return net_log_; }

  int GetPeerAddress(AddressList* address) const;
  int GetLocalAddress(IPEndPoint* address) const;

 private:
  friend class base::RefCounted<SpdySession>;
  // Allow tests to access our innards for testing purposes.
  FRIEND_TEST_ALL_PREFIXES(SpdySessionTest, Ping);
  FRIEND_TEST_ALL_PREFIXES(SpdySessionTest, GetActivePushStream);

  // A queued request to create a stream once the session is able to.
  // Note: stores pointers to the caller's |url| and |stream_net_log|; those
  // objects must outlive this entry.
  struct PendingCreateStream {
    PendingCreateStream(const GURL& url, RequestPriority priority,
                        scoped_refptr<SpdyStream>* spdy_stream,
                        const BoundNetLog& stream_net_log,
                        CompletionCallback* callback)
        : url(&url), priority(priority), spdy_stream(spdy_stream),
          stream_net_log(&stream_net_log), callback(callback) { }

    const GURL* url;
    RequestPriority priority;
    scoped_refptr<SpdyStream>* spdy_stream;
    const BoundNetLog* stream_net_log;
    CompletionCallback* callback;
  };
  typedef std::queue<PendingCreateStream, std::list<PendingCreateStream> >
      PendingCreateStreamQueue;
  typedef std::map<int, scoped_refptr<SpdyStream> > ActiveStreamMap;
  // Only HTTP push a stream.
  typedef std::map<std::string, scoped_refptr<SpdyStream> > PushedStreamMap;
  typedef std::priority_queue<SpdyIOBuffer> OutputQueue;

  // A deferred (callback, result) pair awaiting delivery to the caller.
  struct CallbackResultPair {
    CallbackResultPair() : callback(NULL), result(OK) {}
    CallbackResultPair(CompletionCallback* callback_in, int result_in)
        : callback(callback_in), result(result_in) {}

    CompletionCallback* callback;
    int result;
  };

  typedef std::map<const scoped_refptr<SpdyStream>*, CallbackResultPair>
      PendingCallbackMap;

  // Connection lifecycle states for |state_|.
  enum State {
    IDLE,
    CONNECTING,
    CONNECTED,
    CLOSED
  };

  enum { kDefaultMaxConcurrentStreams = 10 };

  virtual ~SpdySession();

  void ProcessPendingCreateStreams();
  int CreateStreamImpl(
      const GURL& url,
      RequestPriority priority,
      scoped_refptr<SpdyStream>* spdy_stream,
      const BoundNetLog& stream_net_log);

  // Control frame handlers.
  void OnSyn(const spdy::SpdySynStreamControlFrame& frame,
             const linked_ptr<spdy::SpdyHeaderBlock>& headers);
  void OnSynReply(const spdy::SpdySynReplyControlFrame& frame,
                  const linked_ptr<spdy::SpdyHeaderBlock>& headers);
  void OnHeaders(const spdy::SpdyHeadersControlFrame& frame,
                 const linked_ptr<spdy::SpdyHeaderBlock>& headers);
  void OnRst(const spdy::SpdyRstStreamControlFrame& frame);
  void OnGoAway(const spdy::SpdyGoAwayControlFrame& frame);
  void OnPing(const spdy::SpdyPingControlFrame& frame);
  void OnSettings(const spdy::SpdySettingsControlFrame& frame);
  void OnWindowUpdate(const spdy::SpdyWindowUpdateControlFrame& frame);

  // IO Callbacks
  void OnReadComplete(int result);
  void OnWriteComplete(int result);

  // Send relevant SETTINGS. This is generally called on connection setup.
  void SendSettings();

  // Handle SETTINGS. Either when we send settings, or when we receive a
  // SETTINGS control frame, update our SpdySession accordingly.
  void HandleSettings(const spdy::SpdySettings& settings);

  // Send the PING (preface-PING and trailing-PING) frames.
  void SendPrefacePingIfNoneInFlight();

  // Send PING if there are no PINGs in flight and we haven't heard from server.
  void SendPrefacePing();

  // Send a PING after delay. Don't post a PING if there is already
  // a trailing PING pending.
  void PlanToSendTrailingPing();

  // Send a PING if there is no |trailing_ping_pending_|. This PING verifies
  // that the requests are being received by the server.
  void SendTrailingPing();

  // Send the PING frame.
  void WritePingFrame(uint32 unique_id);

  // Post a CheckPingStatus call after delay. Don't post if there is already
  // CheckPingStatus running.
  void PlanToCheckPingStatus();

  // Check the status of the connection. It calls |CloseSessionOnError| if we
  // haven't received any data in |kHungInterval| time period.
  void CheckPingStatus(base::TimeTicks last_check_time);

  // Start reading from the socket.
  // Returns OK on success, or an error on failure.
  net::Error ReadSocket();

  // Write current data to the socket.
  void WriteSocketLater();
  void WriteSocket();

  // Get a new stream id.
  int GetNewStreamId();

  // Queue a frame for sending.
  // |frame| is the frame to send.
  // |priority| is the priority for insertion into the queue.
  // |stream| is the stream which this IO is associated with (or NULL).
  void QueueFrame(spdy::SpdyFrame* frame, spdy::SpdyPriority priority,
                  SpdyStream* stream);

  // Track active streams in the active stream list.
  void ActivateStream(SpdyStream* stream);
  void DeleteStream(spdy::SpdyStreamId id, int status);

  // Removes this session from the session pool.
  void RemoveFromPool();

  // Check if we have a pending pushed-stream for this url
  // Returns the stream if found (and returns it from the pending
  // list), returns NULL otherwise.
  scoped_refptr<SpdyStream> GetActivePushStream(const std::string& url);

  // Calls OnResponseReceived().
  // Returns true if successful.
  bool Respond(const spdy::SpdyHeaderBlock& headers,
               const scoped_refptr<SpdyStream> stream);

  void RecordHistograms();

  // Closes all streams. Used as part of shutdown.
  void CloseAllStreams(net::Error status);

  // Invokes a user callback for stream creation. We provide this method so it
  // can be deferred to the MessageLoop, so we avoid re-entrancy problems.
  void InvokeUserStreamCreationCallback(scoped_refptr<SpdyStream>* stream);

  // SpdyFramerVisitorInterface:
  virtual void OnError(spdy::SpdyFramer*);
  virtual void OnStreamFrameData(spdy::SpdyStreamId stream_id,
                                 const char* data,
                                 size_t len);
  virtual void OnControl(const spdy::SpdyControlFrame* frame);

  virtual bool OnControlFrameHeaderData(spdy::SpdyStreamId stream_id,
                                        const char* header_data,
                                        size_t len);

  virtual void OnDataFrameHeader(const spdy::SpdyDataFrame* frame);

  // --------------------------
  // Helper methods for testing
  // --------------------------
  static void set_connection_at_risk_of_loss_ms(int duration) {
    connection_at_risk_of_loss_ms_ = duration;
  }
  static int connection_at_risk_of_loss_ms() {
    return connection_at_risk_of_loss_ms_;
  }

  static void set_trailing_ping_delay_time_ms(int duration) {
    trailing_ping_delay_time_ms_ = duration;
  }
  static int trailing_ping_delay_time_ms() {
    return trailing_ping_delay_time_ms_;
  }

  static void set_hung_interval_ms(int duration) {
    hung_interval_ms_ = duration;
  }
  static int hung_interval_ms() {
    return hung_interval_ms_;
  }

  int64 pings_in_flight() const { return pings_in_flight_; }

  uint32 next_ping_id() const { return next_ping_id_; }

  base::TimeTicks received_data_time() const { return received_data_time_; }

  bool trailing_ping_pending() const { return trailing_ping_pending_; }

  bool check_ping_status_pending() const { return check_ping_status_pending_; }

  // Callbacks for the Spdy session.
  CompletionCallbackImpl<SpdySession> read_callback_;
  CompletionCallbackImpl<SpdySession> write_callback_;

  // Used for posting asynchronous IO tasks. We use this even though
  // SpdySession is refcounted because we don't need to keep the SpdySession
  // alive if the last reference is within a RunnableMethod. Just revoke the
  // method.
  ScopedRunnableMethodFactory<SpdySession> method_factory_;

  // Map of the SpdyStreams for which we have a pending Task to invoke a
  // callback. This is necessary since, before we invoke said callback, it's
  // possible that the request is cancelled.
  PendingCallbackMap pending_callback_map_;

  // The domain this session is connected to.
  const HostPortProxyPair host_port_proxy_pair_;

  // |spdy_session_pool_| owns us, therefore its lifetime must exceed ours. We
  // set this to NULL after we are removed from the pool.
  SpdySessionPool* spdy_session_pool_;
  SpdySettingsStorage* const spdy_settings_;

  // The socket handle for this session.
  scoped_ptr<ClientSocketHandle> connection_;

  // The read buffer used to read data from the socket.
  scoped_refptr<IOBuffer> read_buffer_;
  bool read_pending_;

  int stream_hi_water_mark_;  // The next stream id to use.

  // Queue, for each priority, of pending Create Streams that have not
  // yet been satisfied
  PendingCreateStreamQueue create_stream_queues_[NUM_PRIORITIES];

  // Map from stream id to all active streams. Streams are active in the sense
  // that they have a consumer (typically SpdyNetworkTransaction and regardless
  // of whether or not there is currently any ongoing IO [might be waiting for
  // the server to start pushing the stream]) or there are still network events
  // incoming even though the consumer has already gone away (cancellation).
  // TODO(willchan): Perhaps we should separate out cancelled streams and move
  // them into a separate ActiveStreamMap, and not deliver network events to
  // them?
  ActiveStreamMap active_streams_;
  // Map of all the streams that have already started to be pushed by the
  // server, but do not have consumers yet.
  PushedStreamMap unclaimed_pushed_streams_;

  // As we gather data to be sent, we put it into the output queue.
  OutputQueue queue_;

  // The packet we are currently sending.
  bool write_pending_;            // Will be true when a write is in progress.
  SpdyIOBuffer in_flight_write_;  // This is the write buffer in progress.

  // Flag if we have a pending message scheduled for WriteSocket.
  bool delayed_write_pending_;

  // Flag if we're using an SSL connection for this SpdySession.
  bool is_secure_;

  // Certificate error code when using a secure connection.
  int certificate_error_code_;

  // Spdy Frame state.
  spdy::SpdyFramer spdy_framer_;

  // If an error has occurred on the session, the session is effectively
  // dead. Record this error here. When no error has occurred, |error_| will
  // be OK.
  net::Error error_;
  State state_;

  // Limits
  size_t max_concurrent_streams_;  // 0 if no limit

  // Some statistics counters for the session.
  int streams_initiated_count_;
  int streams_pushed_count_;
  int streams_pushed_and_claimed_count_;
  int streams_abandoned_count_;
  int frames_received_;
  int bytes_received_;
  bool sent_settings_;      // Did this session send settings when it started.
  bool received_settings_;  // Did this session receive at least one settings
                            // frame.
  int stalled_streams_;     // Count of streams that were ever stalled.

  // Count of all pings on the wire, for which we have not gotten a response.
  int64 pings_in_flight_;

  // This is the next ping_id (unique_id) to be sent in PING frame.
  uint32 next_ping_id_;

  // This is the last time we have received data.
  base::TimeTicks received_data_time_;

  // Indicate if we have already scheduled a delayed task to send a trailing
  // ping (and we never have more than one scheduled at a time).
  bool trailing_ping_pending_;

  // Indicate if we have already scheduled a delayed task to check the ping
  // status.
  bool check_ping_status_pending_;

  // Indicate if we need to send a ping (generally, a trailing ping). This helps
  // us to decide if we need yet another trailing ping, or if it would be a
  // waste of effort (and MUST not be done).
  bool need_to_send_ping_;

  // Initial send window size for the session; can be changed by an
  // arriving SETTINGS frame; newly created streams use this value for the
  // initial send window size.
  int initial_send_window_size_;

  // Initial receive window size for the session; there are plans to add a
  // command line switch that would cause a SETTINGS frame with window size
  // announcement to be sent on startup; newly created streams will use
  // this value for the initial receive window size.
  int initial_recv_window_size_;

  BoundNetLog net_log_;

  static bool use_ssl_;
  static bool use_flow_control_;
  static size_t max_concurrent_stream_limit_;

  // This enables or disables connection health checking system.
  static bool enable_ping_based_connection_checking_;

  // |connection_at_risk_of_loss_ms_| is an optimization to avoid sending
  // wasteful preface pings (when we just got some data).
  //
  // If it is zero (the most conservative figure), then we always send the
  // preface ping (when none are in flight).
  //
  // It is common for TCP/IP sessions to time out in about 3-5 minutes.
  // Certainly if it has been more than 3 minutes, we do want to send a preface
  // ping.
  //
  // We don't think any connection will time out in under about 10 seconds. So
  // this might as well be set to something conservative like 10 seconds. Later,
  // we could adjust it to send fewer pings perhaps.
  static int connection_at_risk_of_loss_ms_;

  // This is the amount of time (in milliseconds) we wait before sending a
  // trailing ping. We use a trailing ping (sent after all data) to get an
  // effective acknowledgement from the server that it has indeed received all
  // (prior) data frames. With that assurance, we are willing to enter into a
  // wait state for responses to our last data frame(s) without further pings.
  static int trailing_ping_delay_time_ms_;

  // The amount of time (in milliseconds) that we are willing to tolerate with
  // no data received (of any form), while there is a ping in flight, before we
  // declare the connection to be hung.
  static int hung_interval_ms_;
};

// NetLog parameter bundle for SYN_STREAM/SYN_REPLY events: captures the
// header block, control flags, and the (associated) stream ids for logging.
class NetLogSpdySynParameter : public NetLog::EventParameters {
 public:
  NetLogSpdySynParameter(const linked_ptr<spdy::SpdyHeaderBlock>& headers,
                         spdy::SpdyControlFlags flags,
                         spdy::SpdyStreamId id,
                         spdy::SpdyStreamId associated_stream);

  const linked_ptr<spdy::SpdyHeaderBlock>& GetHeaders() const {
    return headers_;
  }

  virtual Value* ToValue() const;

 private:
  virtual ~NetLogSpdySynParameter();

  const linked_ptr<spdy::SpdyHeaderBlock> headers_;
  const spdy::SpdyControlFlags flags_;
  const spdy::SpdyStreamId id_;
  const spdy::SpdyStreamId associated_stream_;

  DISALLOW_COPY_AND_ASSIGN(NetLogSpdySynParameter);
};

}  // namespace net

#endif  // NET_SPDY_SPDY_SESSION_H_