// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This class is a helper for the inter arrival congestion control. It
// provides the inter arrival congestion control with a signal about the
// estimated state of our transport channel. The estimate is based on the
// inter arrival time of the received packets relative to the time those
// packets were sent; from this we can estimate the build-up of buffers on
// the network before packets are lost.
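//
// As a rough illustration (hypothetical numbers, not taken from this code):
// if two packets were sent 10 ms apart but are received 15 ms apart, roughly
// 5 ms of additional queuing delay has built up on the path between those
// two observations.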
//
// Note: this class is not thread-safe.

#ifndef NET_QUIC_CONGESTION_CONTROL_INTER_ARRIVAL_OVERUSE_DETECTOR_H_
#define NET_QUIC_CONGESTION_CONTROL_INTER_ARRIVAL_OVERUSE_DETECTOR_H_

#include "base/basictypes.h"
#include "net/base/net_export.h"
#include "net/quic/quic_protocol.h"
#include "net/quic/quic_time.h"

namespace net {

enum NET_EXPORT_PRIVATE RateControlRegion {
  kRateControlRegionUnknown = 0,
  kRateControlRegionUnderMax = 1,
  kRateControlRegionNearMax = 2
};

// Note: Order is important.
enum NET_EXPORT_PRIVATE BandwidthUsage {
  kBandwidthSteady = 0,
  kBandwidthUnderUsing = 1,
  kBandwidthDraining = 2,
  kBandwidthOverUsing = 3,
};

//  Normal state transition diagram
//
//         kBandwidthUnderUsing
//                  |
//                  |
//           kBandwidthSteady
//             |          ^
//             |          |
//   kBandwidthOverUsing  |
//             |          |
//             |          |
//          kBandwidthDraining
//
//  The above transitions apply in normal operation; with extreme values we
//  don't enforce the state transitions, so in extreme scenarios you could
//  move between any of the states.
//
//  kBandwidthSteady       When the packets arrive at the same pace as we sent
//                         them. In this state we can increase our send pace.
//
//  kBandwidthOverUsing    When the packets arrive slower than the pace we sent
//                         them at. In this state we should decrease our send
//                         pace. When we enter this state we also get an
//                         estimate of how much delay we have built up. The
//                         reduction in send pace should be chosen to drain the
//                         built-up delay within a reasonable time.
//
//  kBandwidthUnderUsing   When the packets arrive faster than the pace we sent
//                         them at. This typically happens when another stream
//                         has disappeared from a shared link, leaving us more
//                         available bandwidth. In this state we should hold
//                         our pace to make sure we fully drain the buffers
//                         before we start increasing our send rate. We do this
//                         to avoid operating with semi-full buffers.
//
//  kBandwidthDraining     We can only be in this state after we have been in
//                         an overuse state. In this state we should hold our
//                         pace to make sure we fully drain the buffers before
//                         we start increasing our send rate. We do this to
//                         avoid operating with semi-full buffers.

class NET_EXPORT_PRIVATE InterArrivalOveruseDetector {
 public:
  InterArrivalOveruseDetector();

  // Update the statistics with the received delta times; call this for every
  // received delta time. This function assumes that there are no
  // re-orderings. If multiple packets are sent at the same time (identical
  // send_time), last_of_send_time should be set to false for all but the last
  // call to this function. If there is only one packet sent at a given time,
  // last_of_send_time must be true.
  // The received delta is the time difference between receiving this packet
  // and the previously received packet.
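  //
  // Illustrative sketch of the expected call pattern (the variable names and
  // timings below are hypothetical, not part of this header):
  //
  //   InterArrivalOveruseDetector detector;
  //   // Two packets sharing a send time: only the final call for that send
  //   // time sets last_of_send_time to true.
  //   detector.OnAcknowledgedPacket(1, send_time, false, receive_time_1);
  //   detector.OnAcknowledgedPacket(2, send_time, true, receive_time_2);
  //   // A packet that is alone at its send time always passes true.
  //   detector.OnAcknowledgedPacket(3, later_send_time, true, receive_time_3);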
  void OnAcknowledgedPacket(QuicPacketSequenceNumber sequence_number,
                            QuicTime send_time,
                            bool last_of_send_time,
                            QuicTime receive_time);

  // Get the current estimated state and update the estimated congestion delay.
  // |estimated_congestion_delay| must not be NULL; it will be updated with the
  // estimated built-up buffer delay.
  // Note 1: estimated_congestion_delay is only valid when kBandwidthOverUsing
  //         is returned.
  // Note 2: it's assumed that the pacer lowers its send pace to drain the
  //         built-up buffer within a reasonable time. The pacer should use the
  //         estimated_congestion_delay as guidance on how much to back off.
  // Note 3: The absolute value of estimated_congestion_delay is less reliable
  //         than the state itself. It is also biased low since we can't know
  //         how full the buffers are when the flow starts.
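  //
  // Illustrative sketch of how a caller might react to the state (the pacer
  // interface below is hypothetical, not defined in this header):
  //
  //   QuicTime::Delta congestion_delay = QuicTime::Delta::Zero();
  //   BandwidthUsage state = detector.GetState(&congestion_delay);
  //   if (state == kBandwidthOverUsing) {
  //     // Back off enough to drain roughly |congestion_delay| of buffering.
  //     pacer.DecreasePace(congestion_delay);
  //   } else if (state == kBandwidthSteady) {
  //     pacer.IncreasePace();
  //   }  // Otherwise hold the current pace while draining or under-using.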
  BandwidthUsage GetState(QuicTime::Delta* estimated_congestion_delay);

 private:
  struct PacketGroup {
    PacketGroup()
        : send_time(QuicTime::Zero()),
          last_receive_time(QuicTime::Zero()) {
    }
    QuicTime send_time;
    QuicTime last_receive_time;
  };

  // Update the statistics with the absolute receive time relative to the
  // absolute send time.
  void UpdateSendReceiveTimeOffset(QuicTime::Delta offset);

  // Update the filter with this new data point.
  void UpdateFilter(QuicTime::Delta received_delta,
                    QuicTime::Delta sent_delta);

  // Update the estimate with this residual.
  void UpdateDeltaEstimate(QuicTime::Delta residual);

  // Estimate the state based on the slope of the changes.
  void DetectSlope(int64 sigma_delta);

  // Estimate the state based on the accumulated drift of the changes.
  void DetectDrift(int64 sigma_delta);

  // Current grouping of packets that were sent at the same time.
  PacketGroup current_packet_group_;
  // Grouping of packets that were sent at the same time, just before the
  // current_packet_group_ above.
  PacketGroup previous_packet_group_;
  // Sequence number of the last acknowledged packet.
  QuicPacketSequenceNumber last_sequence_number_;
  // Number of received delta times with a unique send time.
  int num_of_deltas_;
  // Estimated accumulation of received delta times.
  // Note: Can be negative and can drift over time, which is why we bias it
  // towards 0 and reset it given some triggers.
  QuicTime::Delta accumulated_deltas_;
  // Current running mean of our received delta times.
  int delta_mean_;
  // Current running variance of our received delta times.
  int64 delta_variance_;
  // Number of overuse signals currently triggered in this state.
  // Note: a negative value represents underuse.
  int delta_overuse_counter_;
  // State estimated by the delta times.
  BandwidthUsage delta_estimate_;
  // Number of overuse signals currently triggered in this state.
  // Note: a negative value represents underuse.
  int slope_overuse_counter_;
  // State estimated by the slope of the delta times.
  BandwidthUsage slope_estimate_;
  // Lowest offset between send and receive time ever received in this session.
  QuicTime::Delta send_receive_offset_;
  // Last received time difference between our normalized send and receive
  // times.
  QuicTime::Delta estimated_congestion_delay_;

  DISALLOW_COPY_AND_ASSIGN(InterArrivalOveruseDetector);
};

}  // namespace net

#endif  // NET_QUIC_CONGESTION_CONTROL_INTER_ARRIVAL_OVERUSE_DETECTOR_H_
