Home | History | Annotate | Download | only in congestion_control
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "net/quic/congestion_control/cubic.h"
      6 
      7 #include <algorithm>
      8 
      9 #include "base/basictypes.h"
     10 #include "base/logging.h"
     11 #include "base/time/time.h"
     12 #include "net/quic/congestion_control/cube_root.h"
     13 #include "net/quic/quic_protocol.h"
     14 
     15 using std::max;
     16 
     17 namespace net {
     18 
namespace {

// Constants based on TCP defaults.
// The following constants are in 2^10 fractions of a second instead of ms to
// allow a 10 shift right to divide.
const int kCubeScale = 40;  // 1024*1024^3 (first 1024 is from 0.100^3)
                            // where 0.100 is 100 ms which is the scaling
                            // round trip time.
// CUBIC multiplicative constant C, expressed in the fixed-point units above
// (0.4 * 1024 = ~410).
const int kCubeCongestionWindowScale = 410;
// Precomputed 2^kCubeScale / C, used when solving the cubic for the time to
// reach the origin point (see CongestionWindowAfterAck).
const uint64 kCubeFactor = (GG_UINT64_C(1) << kCubeScale) /
    kCubeCongestionWindowScale;

// Number of TCP-Reno connections this single CUBIC flow emulates.
const uint32 kNumConnections = 2;
const float kBeta = 0.7f;  // Default Cubic backoff factor.
// Additional backoff factor when loss occurs in the concave part of the Cubic
// curve. This additional backoff factor is expected to give up bandwidth to
// new concurrent flows and speed up convergence.
const float kBetaLastMax = 0.85f;

// kNConnectionBeta is the backoff factor after loss for our N-connection
// emulation, which emulates the effective backoff of an ensemble of N TCP-Reno
// connections on a single loss event. The effective multiplier is computed as:
const float kNConnectionBeta = (kNumConnections - 1 + kBeta) / kNumConnections;

// TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that
// kBeta here is a cwnd multiplier, and is equal to 1-beta from the CUBIC paper.
// We derive the equivalent kNConnectionAlpha for an N-connection emulation as:
const float kNConnectionAlpha = 3 * kNumConnections * kNumConnections *
      (1 - kNConnectionBeta) / (1 + kNConnectionBeta);
// TODO(jri): Compute kNConnectionBeta and kNConnectionAlpha from
// number of active streams.

}  // namespace
     52 
// Constructs a CUBIC helper. |clock| and |stats| are borrowed raw pointers —
// presumably they must outlive this object (confirm against the owning
// sender). All congestion state is initialized by delegating to Reset().
Cubic::Cubic(const QuicClock* clock, QuicConnectionStats* stats)
    : clock_(clock),
      epoch_(QuicTime::Zero()),
      last_update_time_(QuicTime::Zero()),
      stats_(stats) {
  Reset();
}
     60 
     61 void Cubic::Reset() {
     62   epoch_ = QuicTime::Zero();  // Reset time.
     63   last_update_time_ = QuicTime::Zero();  // Reset time.
     64   last_congestion_window_ = 0;
     65   last_max_congestion_window_ = 0;
     66   acked_packets_count_ = 0;
     67   estimated_tcp_congestion_window_ = 0;
     68   origin_point_congestion_window_ = 0;
     69   time_to_origin_point_ = 0;
     70   last_target_congestion_window_ = 0;
     71 }
     72 
     73 void Cubic::UpdateCongestionControlStats(
     74     QuicTcpCongestionWindow new_cubic_mode_cwnd,
     75     QuicTcpCongestionWindow new_reno_mode_cwnd) {
     76 
     77   QuicTcpCongestionWindow highest_new_cwnd = std::max(new_cubic_mode_cwnd,
     78                                                       new_reno_mode_cwnd);
     79   if (last_congestion_window_ < highest_new_cwnd) {
     80     // cwnd will increase to highest_new_cwnd.
     81     stats_->cwnd_increase_congestion_avoidance +=
     82         highest_new_cwnd - last_congestion_window_;
     83     if (new_cubic_mode_cwnd > new_reno_mode_cwnd) {
     84       // This cwnd increase is due to cubic mode.
     85       stats_->cwnd_increase_cubic_mode +=
     86           new_cubic_mode_cwnd - last_congestion_window_;
     87     }
     88   }
     89 }
     90 
     91 QuicTcpCongestionWindow Cubic::CongestionWindowAfterPacketLoss(
     92     QuicTcpCongestionWindow current_congestion_window) {
     93   if (current_congestion_window < last_max_congestion_window_) {
     94     // We never reached the old max, so assume we are competing with another
     95     // flow. Use our extra back off factor to allow the other flow to go up.
     96     last_max_congestion_window_ =
     97         static_cast<int>(kBetaLastMax * current_congestion_window);
     98   } else {
     99     last_max_congestion_window_ = current_congestion_window;
    100   }
    101   epoch_ = QuicTime::Zero();  // Reset time.
    102   return static_cast<int>(current_congestion_window * kNConnectionBeta);
    103 }
    104 
// Computes the next congestion window after an ack, as the max of the cubic
// curve target and the emulated N-connection TCP-Reno window. |delay_min| is
// added to the elapsed time so the window targets where the flow will be one
// min-RTT from now.
QuicTcpCongestionWindow Cubic::CongestionWindowAfterAck(
    QuicTcpCongestionWindow current_congestion_window,
    QuicTime::Delta delay_min) {
  acked_packets_count_ += 1;  // Packets acked.
  QuicTime current_time = clock_->ApproximateNow();

  // Cubic is "independent" of RTT, the update is limited by the time elapsed.
  // If neither the window nor enough time has changed, return the cached
  // result instead of recomputing.
  if (last_congestion_window_ == current_congestion_window &&
      (current_time.Subtract(last_update_time_) <= MaxCubicTimeInterval())) {
    return max(last_target_congestion_window_,
               estimated_tcp_congestion_window_);
  }
  last_congestion_window_ = current_congestion_window;
  last_update_time_ = current_time;

  if (!epoch_.IsInitialized()) {
    // First ACK after a loss event.
    DVLOG(1) << "Start of epoch";
    epoch_ = current_time;  // Start of epoch.
    acked_packets_count_ = 1;  // Reset count.
    // Reset estimated_tcp_congestion_window_ to be in sync with cubic.
    estimated_tcp_congestion_window_ = current_congestion_window;
    if (last_max_congestion_window_ <= current_congestion_window) {
      // Already at or above the pre-loss max: the curve's origin is here and
      // now (plateau starts immediately).
      time_to_origin_point_ = 0;
      origin_point_congestion_window_ = current_congestion_window;
    } else {
      // Solve K = cbrt(kCubeFactor * (W_max - cwnd)) — the time (in the
      // shifted 2^10/sec units) for the cubic curve to climb back to W_max.
      time_to_origin_point_ = CubeRoot::Root(kCubeFactor *
          (last_max_congestion_window_ - current_congestion_window));
      origin_point_congestion_window_ =
          last_max_congestion_window_;
    }
  }
  // Change the time unit from microseconds to 2^10 fractions per second. Take
  // the round trip time in account. This is done to allow us to use shift as a
  // divide operator.
  int64 elapsed_time =
      (current_time.Add(delay_min).Subtract(epoch_).ToMicroseconds() << 10) /
      base::Time::kMicrosecondsPerSecond;

  // offset is positive before the origin point (concave region) and negative
  // after it (convex region), so delta below changes sign accordingly.
  int64 offset = time_to_origin_point_ - elapsed_time;
  QuicTcpCongestionWindow delta_congestion_window = (kCubeCongestionWindowScale
      * offset * offset * offset) >> kCubeScale;

  // W_cubic(t) = W_origin - C*(K - t)^3 in the fixed-point units above.
  QuicTcpCongestionWindow target_congestion_window =
      origin_point_congestion_window_ - delta_congestion_window;

  DCHECK_LT(0u, estimated_tcp_congestion_window_);
  // With dynamic beta/alpha based on number of active streams, it is possible
  // for the required_ack_count to become much lower than acked_packets_count_
  // suddenly, leading to more than one iteration through the following loop.
  while (true) {
    // Update estimated TCP congestion_window.
    // Reno emulation: grow the window by one packet for every
    // window/alpha acked packets.
    uint32 required_ack_count =
        estimated_tcp_congestion_window_ / kNConnectionAlpha;
    if (acked_packets_count_ < required_ack_count) {
      break;
    }
    acked_packets_count_ -= required_ack_count;
    estimated_tcp_congestion_window_++;
  }

  // Update cubic mode and reno mode stats in QuicConnectionStats.
  UpdateCongestionControlStats(target_congestion_window,
                               estimated_tcp_congestion_window_);

  // We have a new cubic congestion window.
  last_target_congestion_window_ = target_congestion_window;

  // Compute target congestion_window based on cubic target and estimated TCP
  // congestion_window, use highest (fastest).
  if (target_congestion_window < estimated_tcp_congestion_window_) {
    target_congestion_window = estimated_tcp_congestion_window_;
  }

  DVLOG(1) << "Target congestion_window: " << target_congestion_window;
  return target_congestion_window;
}
    182 
    183 }  // namespace net
    184