// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "quiche/quic/core/congestion_control/tcp_cubic_sender_bytes.h"

#include <algorithm>
#include <cstdint>
#include <string>

#include "quiche/quic/core/congestion_control/prr_sender.h"
#include "quiche/quic/core/congestion_control/rtt_stats.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"

namespace quic {

namespace {
// Constants based on TCP defaults.
const QuicByteCount kMaxBurstBytes = 3 * kDefaultTCPMSS;
const float kRenoBeta = 0.7f;  // Reno backoff factor.
// The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a
// fast retransmission.
const QuicByteCount kDefaultMinimumCongestionWindow = 2 * kDefaultTCPMSS;
}  // namespace

TcpCubicSenderBytes::TcpCubicSenderBytes(
    const QuicClock* clock, const RttStats* rtt_stats, bool reno,
    QuicPacketCount initial_tcp_congestion_window,
    QuicPacketCount max_congestion_window, QuicConnectionStats* stats)
    : rtt_stats_(rtt_stats),
      stats_(stats),
      reno_(reno),
      num_connections_(kDefaultNumConnections),
      min4_mode_(false),
      last_cutback_exited_slowstart_(false),
      slow_start_large_reduction_(false),
      no_prr_(false),
      cubic_(clock),
      num_acked_packets_(0),
      congestion_window_(initial_tcp_congestion_window * kDefaultTCPMSS),
      min_congestion_window_(kDefaultMinimumCongestionWindow),
      max_congestion_window_(max_congestion_window * kDefaultTCPMSS),
      slowstart_threshold_(max_congestion_window * kDefaultTCPMSS),
      initial_tcp_congestion_window_(initial_tcp_congestion_window *
                                     kDefaultTCPMSS),
      initial_max_tcp_congestion_window_(max_congestion_window *
                                         kDefaultTCPMSS),
      min_slow_start_exit_window_(min_congestion_window_) {}

TcpCubicSenderBytes::~TcpCubicSenderBytes() {}

void TcpCubicSenderBytes::SetFromConfig(const QuicConfig& config,
                                        Perspective perspective) {
  if (perspective == Perspective::IS_SERVER &&
      config.HasReceivedConnectionOptions()) {
    if (ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN4)) {
      // Min CWND of 4 experiment.
      min4_mode_ = true;
      SetMinCongestionWindowInPackets(1);
    }
    if (ContainsQuicTag(config.ReceivedConnectionOptions(), kSSLR)) {
      // Slow Start Fast Exit experiment.
      slow_start_large_reduction_ = true;
    }
    if (ContainsQuicTag(config.ReceivedConnectionOptions(), kNPRR)) {
      // Use unity pacing instead of PRR.
      no_prr_ = true;
    }
  }
}

void TcpCubicSenderBytes::AdjustNetworkParameters(const NetworkParams& params) {
  if (params.bandwidth.IsZero() || params.rtt.IsZero()) {
    return;
  }
  SetCongestionWindowFromBandwidthAndRtt(params.bandwidth, params.rtt);
}
float TcpCubicSenderBytes::RenoBeta() const {
  // The backoff factor after loss for our N-connection emulation, which
  // emulates the effective backoff of an ensemble of N TCP-Reno connections
  // on a single loss event. The effective multiplier is computed as:
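  //   beta_N = (N - 1 + kRenoBeta) / N
  // For example, N = 2 emulated connections gives (2 - 1 + 0.7) / 2 = 0.85.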
  return (num_connections_ - 1 + kRenoBeta) / num_connections_;
}

void TcpCubicSenderBytes::OnCongestionEvent(
    bool rtt_updated, QuicByteCount prior_in_flight, QuicTime event_time,
    const AckedPacketVector& acked_packets,
    const LostPacketVector& lost_packets, QuicPacketCount /*num_ect*/,
    QuicPacketCount /*num_ce*/) {
  if (rtt_updated && InSlowStart() &&
      hybrid_slow_start_.ShouldExitSlowStart(
          rtt_stats_->latest_rtt(), rtt_stats_->min_rtt(),
          GetCongestionWindow() / kDefaultTCPMSS)) {
    ExitSlowstart();
  }
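  // Losses are handled before acks: a loss below may move the connection into
  // recovery, which changes how the acks in this event are treated.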
  for (const LostPacket& lost_packet : lost_packets) {
    OnPacketLost(lost_packet.packet_number, lost_packet.bytes_lost,
                 prior_in_flight);
  }
  for (const AckedPacket& acked_packet : acked_packets) {
    OnPacketAcked(acked_packet.packet_number, acked_packet.bytes_acked,
                  prior_in_flight, event_time);
  }
}

void TcpCubicSenderBytes::OnPacketAcked(QuicPacketNumber acked_packet_number,
                                        QuicByteCount acked_bytes,
                                        QuicByteCount prior_in_flight,
                                        QuicTime event_time) {
  largest_acked_packet_number_.UpdateMax(acked_packet_number);
  if (InRecovery()) {
    if (!no_prr_) {
      // PRR is used when in recovery.
      prr_.OnPacketAcked(acked_bytes);
    }
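    // The congestion window is never increased while in recovery; see the
    // QUIC_BUG check in MaybeIncreaseCwnd().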
    return;
  }
  MaybeIncreaseCwnd(acked_packet_number, acked_bytes, prior_in_flight,
                    event_time);
  if (InSlowStart()) {
    hybrid_slow_start_.OnPacketAcked(acked_packet_number);
  }
}

void TcpCubicSenderBytes::OnPacketSent(
    QuicTime /*sent_time*/, QuicByteCount /*bytes_in_flight*/,
    QuicPacketNumber packet_number, QuicByteCount bytes,
    HasRetransmittableData is_retransmittable) {
  if (InSlowStart()) {
    ++(stats_->slowstart_packets_sent);
  }

  if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) {
    return;
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketSent(bytes);
  }
  QUICHE_DCHECK(!largest_sent_packet_number_.IsInitialized() ||
                largest_sent_packet_number_ < packet_number);
  largest_sent_packet_number_ = packet_number;
  hybrid_slow_start_.OnPacketSent(packet_number);
}

bool TcpCubicSenderBytes::CanSend(QuicByteCount bytes_in_flight) {
  if (!no_prr_ && InRecovery()) {
    // PRR is used when in recovery.
    return prr_.CanSend(GetCongestionWindow(), bytes_in_flight,
                        GetSlowStartThreshold());
  }
  if (GetCongestionWindow() > bytes_in_flight) {
    return true;
  }
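  // In min-CWND-of-4 mode (kMIN4), allow sending as long as fewer than four
  // MSS are in flight, even when the congestion window is already full.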
  if (min4_mode_ && bytes_in_flight < 4 * kDefaultTCPMSS) {
    return true;
  }
  return false;
}

QuicBandwidth TcpCubicSenderBytes::PacingRate(
    QuicByteCount /* bytes_in_flight */) const {
  // We pace at twice the rate of the underlying sender's bandwidth estimate
  // during slow start and 1.25x during congestion avoidance to ensure pacing
  // doesn't prevent us from filling the window.
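  // When PRR is disabled and the sender is in recovery, pace at exactly the
  // bandwidth estimate (1x).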
  QuicTime::Delta srtt = rtt_stats_->SmoothedOrInitialRtt();
  const QuicBandwidth bandwidth =
      QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
  return bandwidth * (InSlowStart() ? 2 : (no_prr_ && InRecovery() ? 1 : 1.25));
}

QuicBandwidth TcpCubicSenderBytes::BandwidthEstimate() const {
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    // If we haven't measured an rtt, the bandwidth estimate is unknown.
    return QuicBandwidth::Zero();
  }
  return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
}

bool TcpCubicSenderBytes::InSlowStart() const {
  return GetCongestionWindow() < GetSlowStartThreshold();
}

bool TcpCubicSenderBytes::IsCwndLimited(QuicByteCount bytes_in_flight) const {
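  // The sender is cwnd-limited if the window is already full, if less than
  // kMaxBurstBytes of the window remains unused, or if it is in slow start
  // with more than half of the window in flight.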
  const QuicByteCount congestion_window = GetCongestionWindow();
  if (bytes_in_flight >= congestion_window) {
    return true;
  }
  const QuicByteCount available_bytes = congestion_window - bytes_in_flight;
  const bool slow_start_limited =
      InSlowStart() && bytes_in_flight > congestion_window / 2;
  return slow_start_limited || available_bytes <= kMaxBurstBytes;
}

bool TcpCubicSenderBytes::InRecovery() const {
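  // Recovery lasts until a packet sent after the most recent window cutback
  // has been acked.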
  return largest_acked_packet_number_.IsInitialized() &&
         largest_sent_at_last_cutback_.IsInitialized() &&
         largest_acked_packet_number_ <= largest_sent_at_last_cutback_;
}

void TcpCubicSenderBytes::OnRetransmissionTimeout(bool packets_retransmitted) {
  largest_sent_at_last_cutback_.Clear();
  if (!packets_retransmitted) {
    return;
  }
  hybrid_slow_start_.Restart();
  HandleRetransmissionTimeout();
}

std::string TcpCubicSenderBytes::GetDebugState() const { return ""; }

void TcpCubicSenderBytes::OnApplicationLimited(
    QuicByteCount /*bytes_in_flight*/) {}

void TcpCubicSenderBytes::SetCongestionWindowFromBandwidthAndRtt(
    QuicBandwidth bandwidth, QuicTime::Delta rtt) {
  QuicByteCount new_congestion_window = bandwidth.ToBytesPerPeriod(rtt);
  // Limit new CWND if needed.
  congestion_window_ =
      std::max(min_congestion_window_,
               std::min(new_congestion_window,
                        kMaxResumptionCongestionWindow * kDefaultTCPMSS));
}

void TcpCubicSenderBytes::SetInitialCongestionWindowInPackets(
    QuicPacketCount congestion_window) {
  congestion_window_ = congestion_window * kDefaultTCPMSS;
}

void TcpCubicSenderBytes::SetMinCongestionWindowInPackets(
    QuicPacketCount congestion_window) {
  min_congestion_window_ = congestion_window * kDefaultTCPMSS;
}

void TcpCubicSenderBytes::SetNumEmulatedConnections(int num_connections) {
  num_connections_ = std::max(1, num_connections);
  cubic_.SetNumConnections(num_connections_);
}

void TcpCubicSenderBytes::ExitSlowstart() {
  slowstart_threshold_ = congestion_window_;
}

void TcpCubicSenderBytes::OnPacketLost(QuicPacketNumber packet_number,
                                       QuicByteCount lost_bytes,
                                       QuicByteCount prior_in_flight) {
  // TCP NewReno (RFC6582) says that once a loss occurs, any further losses in
  // packets already sent should be treated as part of the same loss event,
  // since they are expected.
  if (largest_sent_at_last_cutback_.IsInitialized() &&
      packet_number <= largest_sent_at_last_cutback_) {
    if (last_cutback_exited_slowstart_) {
      ++stats_->slowstart_packets_lost;
      stats_->slowstart_bytes_lost += lost_bytes;
      if (slow_start_large_reduction_) {
        // Reduce congestion window by lost_bytes for every loss.
        congestion_window_ = std::max(congestion_window_ - lost_bytes,
                                      min_slow_start_exit_window_);
        slowstart_threshold_ = congestion_window_;
      }
    }
    QUIC_DVLOG(1) << "Ignoring loss for largest_missing:" << packet_number
                  << " because it was sent prior to the last CWND cutback.";
    return;
  }
  ++stats_->tcp_loss_events;
  last_cutback_exited_slowstart_ = InSlowStart();
  if (InSlowStart()) {
    ++stats_->slowstart_packets_lost;
  }

  if (!no_prr_) {
    prr_.OnPacketLost(prior_in_flight);
  }

  // TODO(b/77268641): Separate out all of slow start into a separate class.
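  // With slow start large reduction (kSSLR), a loss during slow start shrinks
  // the window by one MSS per loss instead of applying the multiplicative
  // Reno/Cubic backoff below.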
  if (slow_start_large_reduction_ && InSlowStart()) {
    QUICHE_DCHECK_LT(kDefaultTCPMSS, congestion_window_);
    if (congestion_window_ >= 2 * initial_tcp_congestion_window_) {
      min_slow_start_exit_window_ = congestion_window_ / 2;
    }
    congestion_window_ = congestion_window_ - kDefaultTCPMSS;
  } else if (reno_) {
    congestion_window_ = congestion_window_ * RenoBeta();
  } else {
    congestion_window_ =
        cubic_.CongestionWindowAfterPacketLoss(congestion_window_);
  }
  if (congestion_window_ < min_congestion_window_) {
    congestion_window_ = min_congestion_window_;
  }
  slowstart_threshold_ = congestion_window_;
  largest_sent_at_last_cutback_ = largest_sent_packet_number_;
  // Reset packet count from congestion avoidance mode. We start counting again
  // when we're out of recovery.
  num_acked_packets_ = 0;
  QUIC_DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_
                << " slowstart threshold: " << slowstart_threshold_;
}

QuicByteCount TcpCubicSenderBytes::GetCongestionWindow() const {
  return congestion_window_;
}

QuicByteCount TcpCubicSenderBytes::GetSlowStartThreshold() const {
  return slowstart_threshold_;
}

// Called when we receive an ack. Normal TCP tracks how many packets one ack
// represents, but QUIC has a separate ack for each packet.
void TcpCubicSenderBytes::MaybeIncreaseCwnd(
    QuicPacketNumber /*acked_packet_number*/, QuicByteCount acked_bytes,
    QuicByteCount prior_in_flight, QuicTime event_time) {
  QUIC_BUG_IF(quic_bug_10439_1, InRecovery())
      << "Never increase the CWND during recovery.";
  // Do not increase the congestion window unless the sender is close to using
  // the current window.
  if (!IsCwndLimited(prior_in_flight)) {
    cubic_.OnApplicationLimited();
    return;
  }
  if (congestion_window_ >= max_congestion_window_) {
    return;
  }
  if (InSlowStart()) {
    // TCP slow start, exponential growth, increase by one for each ACK.
    congestion_window_ += kDefaultTCPMSS;
    QUIC_DVLOG(1) << "Slow start; congestion window: " << congestion_window_
                  << " slowstart threshold: " << slowstart_threshold_;
    return;
  }
  // Congestion avoidance.
  if (reno_) {
    // Classic Reno congestion avoidance.
    ++num_acked_packets_;
    // Divide by num_connections to smoothly increase the CWND at a faster rate
    // than conventional Reno.
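    // For example, with a 10-MSS window and two emulated connections, the
    // window grows by one MSS after every 5 acks rather than every 10.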
    if (num_acked_packets_ * num_connections_ >=
        congestion_window_ / kDefaultTCPMSS) {
      congestion_window_ += kDefaultTCPMSS;
      num_acked_packets_ = 0;
    }

    QUIC_DVLOG(1) << "Reno; congestion window: " << congestion_window_
                  << " slowstart threshold: " << slowstart_threshold_
                  << " congestion window count: " << num_acked_packets_;
  } else {
    congestion_window_ = std::min(
        max_congestion_window_,
        cubic_.CongestionWindowAfterAck(acked_bytes, congestion_window_,
                                        rtt_stats_->min_rtt(), event_time));
    QUIC_DVLOG(1) << "Cubic; congestion window: " << congestion_window_
                  << " slowstart threshold: " << slowstart_threshold_;
  }
}

void TcpCubicSenderBytes::HandleRetransmissionTimeout() {
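  // On a retransmission timeout, halve the slow start threshold and collapse
  // the congestion window to the minimum.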
  cubic_.ResetCubicState();
  slowstart_threshold_ = congestion_window_ / 2;
  congestion_window_ = min_congestion_window_;
}

void TcpCubicSenderBytes::OnConnectionMigration() {
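  // Discard all transient congestion state and restore the windows and the
  // slow start threshold to their initial values.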
  hybrid_slow_start_.Restart();
  prr_ = PrrSender();
  largest_sent_packet_number_.Clear();
  largest_acked_packet_number_.Clear();
  largest_sent_at_last_cutback_.Clear();
  last_cutback_exited_slowstart_ = false;
  cubic_.ResetCubicState();
  num_acked_packets_ = 0;
  congestion_window_ = initial_tcp_congestion_window_;
  max_congestion_window_ = initial_max_tcp_congestion_window_;
  slowstart_threshold_ = initial_max_tcp_congestion_window_;
}

CongestionControlType TcpCubicSenderBytes::GetCongestionControlType() const {
  return reno_ ? kRenoBytes : kCubicBytes;
}

}  // namespace quic