1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "quiche/quic/core/congestion_control/cubic_bytes.h"
6
7 #include <algorithm>
8 #include <cmath>
9 #include <cstdint>
10
11 #include "quiche/quic/core/quic_constants.h"
12 #include "quiche/quic/core/quic_packets.h"
13 #include "quiche/quic/platform/api/quic_flag_utils.h"
14 #include "quiche/quic/platform/api/quic_flags.h"
15 #include "quiche/quic/platform/api/quic_logging.h"
16
17 namespace quic {
18
19 namespace {
20
21 // Constants based on TCP defaults.
22 // The following constants are in 2^10 fractions of a second instead of ms to
23 // allow a 10 shift right to divide.
24 const int kCubeScale = 40; // 1024*1024^3 (first 1024 is from 0.100^3)
25 // where 0.100 is 100 ms which is the scaling
26 // round trip time.
27 const int kCubeCongestionWindowScale = 410;
28 // The cube factor for packets in bytes.
29 const uint64_t kCubeFactor =
30 (UINT64_C(1) << kCubeScale) / kCubeCongestionWindowScale / kDefaultTCPMSS;
31
32 const float kDefaultCubicBackoffFactor = 0.7f; // Default Cubic backoff factor.
33 // Additional backoff factor when loss occurs in the concave part of the Cubic
34 // curve. This additional backoff factor is expected to give up bandwidth to
35 // new concurrent flows and speed up convergence.
36 const float kBetaLastMax = 0.85f;
37
38 } // namespace
39
// Constructs a CUBIC state machine. |clock| is borrowed, not owned, and must
// outlive this object. Starts with the default single-connection emulation
// and all CUBIC state reset (no epoch in progress).
CubicBytes::CubicBytes(const QuicClock* clock)
    : clock_(clock),
      num_connections_(kDefaultNumConnections),
      epoch_(QuicTime::Zero()) {
  ResetCubicState();
}
46
// Sets the number of TCP-Reno connections this CUBIC sender emulates; the
// value feeds into Alpha(), Beta() and BetaLastMax() below.
void CubicBytes::SetNumConnections(int num_connections) {
  num_connections_ = num_connections;
}
50
Alpha() const51 float CubicBytes::Alpha() const {
52 // TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that
53 // beta here is a cwnd multiplier, and is equal to 1-beta from the paper.
54 // We derive the equivalent alpha for an N-connection emulation as:
55 const float beta = Beta();
56 return 3 * num_connections_ * num_connections_ * (1 - beta) / (1 + beta);
57 }
58
Beta() const59 float CubicBytes::Beta() const {
60 // kNConnectionBeta is the backoff factor after loss for our N-connection
61 // emulation, which emulates the effective backoff of an ensemble of N
62 // TCP-Reno connections on a single loss event. The effective multiplier is
63 // computed as:
64 return (num_connections_ - 1 + kDefaultCubicBackoffFactor) / num_connections_;
65 }
66
BetaLastMax() const67 float CubicBytes::BetaLastMax() const {
68 // BetaLastMax is the additional backoff factor after loss for our
69 // N-connection emulation, which emulates the additional backoff of
70 // an ensemble of N TCP-Reno connections on a single loss event. The
71 // effective multiplier is computed as:
72 return (num_connections_ - 1 + kBetaLastMax) / num_connections_;
73 }
74
ResetCubicState()75 void CubicBytes::ResetCubicState() {
76 epoch_ = QuicTime::Zero(); // Reset time.
77 last_max_congestion_window_ = 0;
78 acked_bytes_count_ = 0;
79 estimated_tcp_congestion_window_ = 0;
80 origin_point_congestion_window_ = 0;
81 time_to_origin_point_ = 0;
82 last_target_congestion_window_ = 0;
83 }
84
// Notifies CUBIC that the sender is application-limited.
// When sender is not using the available congestion window, the window does
// not grow. But to be RTT-independent, Cubic assumes that the sender has been
// using the entire window during the time since the beginning of the current
// "epoch" (the end of the last loss recovery period). Since
// application-limited periods break this assumption, we reset the epoch when
// in such a period. This reset effectively freezes congestion window growth
// through application-limited periods and allows Cubic growth to continue
// when the entire window is being used.
void CubicBytes::OnApplicationLimited() {
  // Zero() marks the epoch uninitialized; CongestionWindowAfterAck will
  // start a new epoch on the next ack.
  epoch_ = QuicTime::Zero();
}
96
CongestionWindowAfterPacketLoss(QuicByteCount current_congestion_window)97 QuicByteCount CubicBytes::CongestionWindowAfterPacketLoss(
98 QuicByteCount current_congestion_window) {
99 // Since bytes-mode Reno mode slightly under-estimates the cwnd, we
100 // may never reach precisely the last cwnd over the course of an
101 // RTT. Do not interpret a slight under-estimation as competing traffic.
102 if (current_congestion_window + kDefaultTCPMSS <
103 last_max_congestion_window_) {
104 // We never reached the old max, so assume we are competing with
105 // another flow. Use our extra back off factor to allow the other
106 // flow to go up.
107 last_max_congestion_window_ =
108 static_cast<int>(BetaLastMax() * current_congestion_window);
109 } else {
110 last_max_congestion_window_ = current_congestion_window;
111 }
112 epoch_ = QuicTime::Zero(); // Reset time.
113 return static_cast<int>(current_congestion_window * Beta());
114 }
115
// Returns the congestion window (in bytes) to use after the given ack.
// Evaluates the CUBIC growth curve W(t) = C*(t-K)^3 + W_max alongside an
// emulated Reno ("TCP-friendly") window, and returns the larger of the two
// so CUBIC never grows slower than Reno would.
//
// |acked_bytes|: bytes newly acknowledged by this event.
// |current_congestion_window|: the sender's current cwnd in bytes.
// |delay_min|: minimum observed RTT; added to elapsed time so growth is
//     evaluated one RTT ahead, making the curve RTT-independent.
// |event_time|: time the ack was received.
QuicByteCount CubicBytes::CongestionWindowAfterAck(
    QuicByteCount acked_bytes, QuicByteCount current_congestion_window,
    QuicTime::Delta delay_min, QuicTime event_time) {
  acked_bytes_count_ += acked_bytes;

  if (!epoch_.IsInitialized()) {
    // First ACK after a loss event.
    QUIC_DVLOG(1) << "Start of epoch";
    epoch_ = event_time;               // Start of epoch.
    acked_bytes_count_ = acked_bytes;  // Reset count.
    // Reset estimated_tcp_congestion_window_ to be in sync with cubic.
    estimated_tcp_congestion_window_ = current_congestion_window;
    if (last_max_congestion_window_ <= current_congestion_window) {
      // Already at or above the pre-loss max: the cubic's origin is the
      // current window and there is no concave (recovery) phase.
      time_to_origin_point_ = 0;
      origin_point_congestion_window_ = current_congestion_window;
    } else {
      // Time until the curve reaches the old max: K = cbrt((W_max - cwnd)/C),
      // with the 1/C term and the 2^10-per-second time units folded into
      // kCubeFactor.
      time_to_origin_point_ = static_cast<uint32_t>(
          cbrt(kCubeFactor *
               (last_max_congestion_window_ - current_congestion_window)));
      origin_point_congestion_window_ = last_max_congestion_window_;
    }
  }
  // Change the time unit from microseconds to 2^10 fractions per second. Take
  // the round trip time in account. This is done to allow us to use shift as a
  // divide operator.
  int64_t elapsed_time =
      ((event_time + delay_min - epoch_).ToMicroseconds() << 10) /
      kNumMicrosPerSecond;

  // Right-shifts of negative, signed numbers have implementation-dependent
  // behavior, so force the offset to be positive, as is done in the kernel.
  uint64_t offset = std::abs(time_to_origin_point_ - elapsed_time);

  // C * |t - K|^3 in bytes; the >> kCubeScale undoes the fixed-point scaling
  // carried by offset^3 and kCubeCongestionWindowScale.
  QuicByteCount delta_congestion_window = (kCubeCongestionWindowScale * offset *
                                           offset * offset * kDefaultTCPMSS) >>
                                          kCubeScale;

  // Before the origin point (concave region) the delta is subtracted from the
  // origin window; past it (convex region) the delta is added.
  const bool add_delta = elapsed_time > time_to_origin_point_;
  QUICHE_DCHECK(add_delta ||
                (origin_point_congestion_window_ > delta_congestion_window));
  QuicByteCount target_congestion_window =
      add_delta ? origin_point_congestion_window_ + delta_congestion_window
                : origin_point_congestion_window_ - delta_congestion_window;
  // Limit the CWND increase to half the acked bytes.
  target_congestion_window =
      std::min(target_congestion_window,
               current_congestion_window + acked_bytes_count_ / 2);

  QUICHE_DCHECK_LT(0u, estimated_tcp_congestion_window_);
  // Increase the window by approximately Alpha * 1 MSS of bytes every
  // time we ack an estimated tcp window of bytes. For small
  // congestion windows (less than 25), the formula below will
  // increase slightly slower than linearly per estimated tcp window
  // of bytes.
  estimated_tcp_congestion_window_ += acked_bytes_count_ *
                                      (Alpha() * kDefaultTCPMSS) /
                                      estimated_tcp_congestion_window_;
  // The accumulated acked bytes have been folded into the Reno estimate.
  acked_bytes_count_ = 0;

  // We have a new cubic congestion window.
  last_target_congestion_window_ = target_congestion_window;

  // Compute target congestion_window based on cubic target and estimated TCP
  // congestion_window, use highest (fastest).
  if (target_congestion_window < estimated_tcp_congestion_window_) {
    target_congestion_window = estimated_tcp_congestion_window_;
  }

  QUIC_DVLOG(1) << "Final target congestion_window: "
                << target_congestion_window;
  return target_congestion_window;
}
188
189 } // namespace quic
190