// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright notice,
//       this list of conditions and the following disclaimer.
//
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

//! CUBIC Congestion Control
//!
//! This implementation is based on the following draft:
//! <https://tools.ietf.org/html/draft-ietf-tcpm-rfc8312bis-02>
//!
//! Note that Slow Start can use HyStart++ when enabled.
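//!
//! The window growth function used during congestion avoidance (Eq. 1 and
//! Eq. 2 of the draft) is `W_cubic(t) = C * (t - K)^3 + w_max`, with
//! `K = cubic_root((w_max - cwnd) / C)`.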

use std::cmp;

use std::time::Duration;
use std::time::Instant;

use crate::packet;
use crate::recovery;
use crate::recovery::reno;

use crate::recovery::Acked;
use crate::recovery::CongestionControlOps;
use crate::recovery::Recovery;

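/// Congestion control operations table for CUBIC, selected via
/// `Config::set_cc_algorithm(CongestionControlAlgorithm::CUBIC)`.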
pub static CUBIC: CongestionControlOps = CongestionControlOps {
    on_init,
    reset,
    on_packet_sent,
    on_packets_acked,
    congestion_event,
    collapse_cwnd,
    checkpoint,
    rollback,
    has_custom_pacing,
    debug_fmt,
};

/// CUBIC Constants.
///
/// These are the values recommended in RFC 8312.
const BETA_CUBIC: f64 = 0.7;

const C: f64 = 0.4;

/// Threshold for rolling back state, as a percentage of lost packets relative
/// to cwnd.
const ROLLBACK_THRESHOLD_PERCENT: usize = 20;

/// Minimum threshold for rolling back state, as a number of packets.
const MIN_ROLLBACK_THRESHOLD: usize = 2;

/// Default value of alpha_aimd in the beginning of congestion avoidance.
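/// With BETA_CUBIC = 0.7 this evaluates to 3.0 * 0.3 / 1.7, roughly 0.53.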
const ALPHA_AIMD: f64 = 3.0 * (1.0 - BETA_CUBIC) / (1.0 + BETA_CUBIC);

/// CUBIC State Variables.
///
/// We need to keep these variables across the connection.
/// k, w_max and w_est are described in the RFC.
#[derive(Debug, Default)]
pub struct State {
    k: f64,

    w_max: f64,

    w_est: f64,

    alpha_aimd: f64,

    // Used in the CUBIC fix (see on_packet_sent()).
    last_sent_time: Option<Instant>,

    // Stores the cwnd increment during congestion avoidance.
    cwnd_inc: usize,

    // CUBIC state checkpoint preceding the last congestion event.
    prior: PriorState,
}

/// Stores the CUBIC state from before the last congestion event.
///
/// <https://tools.ietf.org/id/draft-ietf-tcpm-rfc8312bis-00.html#section-4.9>
#[derive(Debug, Default)]
struct PriorState {
    congestion_window: usize,

    ssthresh: usize,

    w_max: f64,

    k: f64,

    epoch_start: Option<Instant>,

    lost_count: usize,
}

/// CUBIC Functions.
///
/// Note that these calculations are based on a count of cwnd as bytes,
/// not packets.
/// The unit of t (duration) and RTT is seconds (f64).
impl State {
    // K = cubic_root ((w_max - cwnd) / C) (Eq. 2)
    fn cubic_k(&self, cwnd: usize, max_datagram_size: usize) -> f64 {
        let w_max = self.w_max / max_datagram_size as f64;
        let cwnd = cwnd as f64 / max_datagram_size as f64;

        libm::cbrt((w_max - cwnd) / C)
    }

    // W_cubic(t) = C * (t - K)^3 + w_max (Eq. 1)
    fn w_cubic(&self, t: Duration, max_datagram_size: usize) -> f64 {
        let w_max = self.w_max / max_datagram_size as f64;

        (C * (t.as_secs_f64() - self.k).powi(3) + w_max) *
            max_datagram_size as f64
    }

    // W_est = W_est + alpha_aimd * (segments_acked / cwnd)  (Eq. 4)
    fn w_est_inc(
        &self, acked: usize, cwnd: usize, max_datagram_size: usize,
    ) -> f64 {
        self.alpha_aimd * (acked as f64 / cwnd as f64) * max_datagram_size as f64
    }
}

fn on_init(_r: &mut Recovery) {}

fn reset(r: &mut Recovery) {
    r.cubic_state = State::default();
}

fn collapse_cwnd(r: &mut Recovery) {
    let cubic = &mut r.cubic_state;

    r.congestion_recovery_start_time = None;

    cubic.w_max = r.congestion_window as f64;

    // 4.7 Timeout - reduce ssthresh based on BETA_CUBIC
    r.ssthresh = (r.congestion_window as f64 * BETA_CUBIC) as usize;
    r.ssthresh = cmp::max(
        r.ssthresh,
        r.max_datagram_size * recovery::MINIMUM_WINDOW_PACKETS,
    );

    cubic.cwnd_inc = 0;

    reno::collapse_cwnd(r);
}

fn on_packet_sent(r: &mut Recovery, sent_bytes: usize, now: Instant) {
    // See https://github.com/torvalds/linux/commit/30927520dbae297182990bb21d08762bcc35ce1d
    // First transmit when no packets in flight
    let cubic = &mut r.cubic_state;

    if let Some(last_sent_time) = cubic.last_sent_time {
        if r.bytes_in_flight == 0 {
            let delta = now - last_sent_time;

            // We were application limited (idle) for a while.
            // Shift the epoch start to keep cwnd growth on the cubic curve.
            if let Some(recovery_start_time) = r.congestion_recovery_start_time {
                if delta.as_nanos() > 0 {
                    r.congestion_recovery_start_time =
                        Some(recovery_start_time + delta);
                }
            }
        }
    }

    cubic.last_sent_time = Some(now);

    reno::on_packet_sent(r, sent_bytes, now);
}

fn on_packets_acked(
    r: &mut Recovery, packets: &[Acked], epoch: packet::Epoch, now: Instant,
) {
    for pkt in packets {
        on_packet_acked(r, pkt, epoch, now);
    }
}

fn on_packet_acked(
    r: &mut Recovery, packet: &Acked, epoch: packet::Epoch, now: Instant,
) {
    let in_congestion_recovery = r.in_congestion_recovery(packet.time_sent);

    r.bytes_in_flight = r.bytes_in_flight.saturating_sub(packet.size);

    if in_congestion_recovery {
        r.prr.on_packet_acked(
            packet.size,
            r.bytes_in_flight,
            r.ssthresh,
            r.max_datagram_size,
        );

        return;
    }

    if r.app_limited {
        return;
    }

    // Detecting spurious congestion events.
    // <https://tools.ietf.org/id/draft-ietf-tcpm-rfc8312bis-00.html#section-4.9>
    //
    // When the recovery episode ends having recovered only a few packets
    // (fewer than cwnd / mss * ROLLBACK_THRESHOLD_PERCENT (%)), the congestion
    // event is considered spurious and the previous state is restored.
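    //
    // For example, with a cwnd of 50 packets the threshold is
    // max(50 * 20 / 100, MIN_ROLLBACK_THRESHOLD) = 10 packets.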
    if r.congestion_recovery_start_time.is_some() {
        let new_lost = r.lost_count - r.cubic_state.prior.lost_count;
        let rollback_threshold = (r.congestion_window / r.max_datagram_size) *
            ROLLBACK_THRESHOLD_PERCENT /
            100;
        let rollback_threshold = rollback_threshold.max(MIN_ROLLBACK_THRESHOLD);

        if new_lost < rollback_threshold {
            let did_rollback = rollback(r);

            if did_rollback {
                return;
            }
        }
    }

    if r.congestion_window < r.ssthresh {
        // In Slow Start, bytes_acked_sl is used for counting
        // acknowledged bytes.
        r.bytes_acked_sl += packet.size;

        if r.bytes_acked_sl >= r.max_datagram_size {
            if r.hystart.in_css(epoch) {
                r.congestion_window +=
                    r.hystart.css_cwnd_inc(r.max_datagram_size);
            } else {
                r.congestion_window += r.max_datagram_size;
            }

            r.bytes_acked_sl -= r.max_datagram_size;
        }

        if r.hystart.on_packet_acked(epoch, packet, r.latest_rtt, now) {
            // Exit to congestion avoidance if CSS ends.
            r.ssthresh = r.congestion_window;
        }
    } else {
        // Congestion avoidance.
        let ca_start_time;

        // In CSS, use css_start_time instead of congestion_recovery_start_time.
        if r.hystart.in_css(epoch) {
            ca_start_time = r.hystart.css_start_time().unwrap();

            // Reset w_max and k when CSS started.
            if r.cubic_state.w_max == 0.0 {
                r.cubic_state.w_max = r.congestion_window as f64;
                r.cubic_state.k = 0.0;

                r.cubic_state.w_est = r.congestion_window as f64;
                r.cubic_state.alpha_aimd = ALPHA_AIMD;
            }
        } else {
            match r.congestion_recovery_start_time {
                Some(t) => ca_start_time = t,
                None => {
                    // When we get here without congestion_event() having been
                    // triggered, initialize congestion_recovery_start_time,
                    // w_max and k.
                    ca_start_time = now;
                    r.congestion_recovery_start_time = Some(now);

                    r.cubic_state.w_max = r.congestion_window as f64;
                    r.cubic_state.k = 0.0;

                    r.cubic_state.w_est = r.congestion_window as f64;
                    r.cubic_state.alpha_aimd = ALPHA_AIMD;
                },
            }
        }

        let t = now.saturating_duration_since(ca_start_time);

        // target = w_cubic(t + rtt)
        let target = r.cubic_state.w_cubic(t + r.min_rtt, r.max_datagram_size);

        // Clipping target to [cwnd, 1.5 x cwnd]
        let target = f64::max(target, r.congestion_window as f64);
        let target = f64::min(target, r.congestion_window as f64 * 1.5);

        // Update w_est.
        let w_est_inc = r.cubic_state.w_est_inc(
            packet.size,
            r.congestion_window,
            r.max_datagram_size,
        );
        r.cubic_state.w_est += w_est_inc;

        if r.cubic_state.w_est >= r.cubic_state.w_max {
            r.cubic_state.alpha_aimd = 1.0;
        }

        let mut cubic_cwnd = r.congestion_window;

        if r.cubic_state.w_cubic(t, r.max_datagram_size) < r.cubic_state.w_est {
            // AIMD friendly region (W_cubic(t) < W_est)
            cubic_cwnd = cmp::max(cubic_cwnd, r.cubic_state.w_est as usize);
        } else {
            // The concave and convex regions use the same increment.
            let cubic_inc =
                r.max_datagram_size * (target as usize - cubic_cwnd) / cubic_cwnd;

            cubic_cwnd += cubic_inc;
        }

        // Update the increment and increase cwnd by MSS.
        r.cubic_state.cwnd_inc += cubic_cwnd - r.congestion_window;

        if r.cubic_state.cwnd_inc >= r.max_datagram_size {
            r.congestion_window += r.max_datagram_size;
            r.cubic_state.cwnd_inc -= r.max_datagram_size;
        }
    }
}

fn congestion_event(
    r: &mut Recovery, _lost_bytes: usize, time_sent: Instant,
    epoch: packet::Epoch, now: Instant,
) {
    let in_congestion_recovery = r.in_congestion_recovery(time_sent);

    // Start a new congestion event if packet was sent after the
    // start of the previous congestion recovery period.
    if !in_congestion_recovery {
        r.congestion_recovery_start_time = Some(now);

        // Fast convergence
        if (r.congestion_window as f64) < r.cubic_state.w_max {
            r.cubic_state.w_max =
                r.congestion_window as f64 * (1.0 + BETA_CUBIC) / 2.0;
        } else {
            r.cubic_state.w_max = r.congestion_window as f64;
        }

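        // Multiplicative decrease: ssthresh and cwnd both drop to BETA_CUBIC
        // (0.7) of the current cwnd, bounded below by the minimum window.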
        r.ssthresh = (r.congestion_window as f64 * BETA_CUBIC) as usize;
        r.ssthresh = cmp::max(
            r.ssthresh,
            r.max_datagram_size * recovery::MINIMUM_WINDOW_PACKETS,
        );
        r.congestion_window = r.ssthresh;

        r.cubic_state.k = if r.cubic_state.w_max < r.congestion_window as f64 {
            0.0
        } else {
            r.cubic_state
                .cubic_k(r.congestion_window, r.max_datagram_size)
        };

        r.cubic_state.cwnd_inc =
            (r.cubic_state.cwnd_inc as f64 * BETA_CUBIC) as usize;

        r.cubic_state.w_est = r.congestion_window as f64;
        r.cubic_state.alpha_aimd = ALPHA_AIMD;

        if r.hystart.in_css(epoch) {
            r.hystart.congestion_event();
        }

        r.prr.congestion_event(r.bytes_in_flight);
    }
}

fn checkpoint(r: &mut Recovery) {
    r.cubic_state.prior.congestion_window = r.congestion_window;
    r.cubic_state.prior.ssthresh = r.ssthresh;
    r.cubic_state.prior.w_max = r.cubic_state.w_max;
    r.cubic_state.prior.k = r.cubic_state.k;
    r.cubic_state.prior.epoch_start = r.congestion_recovery_start_time;
    r.cubic_state.prior.lost_count = r.lost_count;
}

fn rollback(r: &mut Recovery) -> bool {
    // Don't go back to slow start.
    if r.cubic_state.prior.congestion_window < r.cubic_state.prior.ssthresh {
        return false;
    }

    if r.congestion_window >= r.cubic_state.prior.congestion_window {
        return false;
    }

    r.congestion_window = r.cubic_state.prior.congestion_window;
    r.ssthresh = r.cubic_state.prior.ssthresh;
    r.cubic_state.w_max = r.cubic_state.prior.w_max;
    r.cubic_state.k = r.cubic_state.prior.k;
    r.congestion_recovery_start_time = r.cubic_state.prior.epoch_start;

    true
}

fn has_custom_pacing() -> bool {
    false
}

fn debug_fmt(r: &Recovery, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    write!(
        f,
        "cubic={{ k={} w_max={} }} ",
        r.cubic_state.k, r.cubic_state.w_max
    )
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::recovery::hystart;

    use smallvec::smallvec;

    #[test]
    fn cubic_init() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);

        let r = Recovery::new(&cfg);

        assert!(r.cwnd() > 0);
        assert_eq!(r.bytes_in_flight, 0);
    }

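    #[test]
    fn cubic_k_w_cubic() {
        // Illustrative sanity check of Eq. 1 and Eq. 2 above (a minimal
        // sketch exercising State directly rather than Recovery): with w_max
        // at 100 packets and cwnd reduced to 70 packets,
        // K = cbrt((100 - 70) / C), and the cubic curve returns to w_max at
        // t = K.
        let max_datagram_size = 1200;

        let mut s = State {
            w_max: 100.0 * max_datagram_size as f64,
            ..Default::default()
        };

        s.k = s.cubic_k(70 * max_datagram_size, max_datagram_size);
        assert!((s.k - libm::cbrt(30.0 / C)).abs() < 1e-9);

        let w = s.w_cubic(Duration::from_secs_f64(s.k), max_datagram_size);
        assert!((w - s.w_max).abs() < 1e-3);
    }
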
    #[test]
    fn cubic_send() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);

        let mut r = Recovery::new(&cfg);

        r.on_packet_sent_cc(1000, Instant::now());

        assert_eq!(r.bytes_in_flight, 1000);
    }

    #[test]
    fn cubic_slow_start() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);

        let mut r = Recovery::new(&cfg);
        let now = Instant::now();

        let p = recovery::Sent {
            pkt_num: 0,
            frames: smallvec![],
            time_sent: now,
            time_acked: None,
            time_lost: None,
            size: r.max_datagram_size,
            ack_eliciting: true,
            in_flight: true,
            delivered: 0,
            delivered_time: now,
            first_sent_time: now,
            is_app_limited: false,
            has_data: false,
        };

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..recovery::INITIAL_WINDOW_PACKETS {
            r.on_packet_sent_cc(p.size, now);
        }

        let cwnd_prev = r.cwnd();

        let acked = vec![Acked {
            pkt_num: p.pkt_num,
            time_sent: p.time_sent,
            size: p.size,
            delivered: 0,
            delivered_time: now,
            first_sent_time: now,
            is_app_limited: false,
            rtt: Duration::ZERO,
        }];

        r.on_packets_acked(acked, packet::Epoch::Application, now);

        // Check if cwnd increased by packet size (slow start)
        assert_eq!(r.cwnd(), cwnd_prev + p.size);
    }

    #[test]
    fn cubic_slow_start_multi_acks() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);

        let mut r = Recovery::new(&cfg);
        let now = Instant::now();

        let p = recovery::Sent {
            pkt_num: 0,
            frames: smallvec![],
            time_sent: now,
            time_acked: None,
            time_lost: None,
            size: r.max_datagram_size,
            ack_eliciting: true,
            in_flight: true,
            delivered: 0,
            delivered_time: now,
            first_sent_time: now,
            is_app_limited: false,
            has_data: false,
        };

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..recovery::INITIAL_WINDOW_PACKETS {
            r.on_packet_sent_cc(p.size, now);
        }

        let cwnd_prev = r.cwnd();

        let acked = vec![
            Acked {
                pkt_num: p.pkt_num,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            },
            Acked {
                pkt_num: p.pkt_num,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            },
            Acked {
                pkt_num: p.pkt_num,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            },
        ];

        r.on_packets_acked(acked, packet::Epoch::Application, now);

        // Acked 3 packets.
        assert_eq!(r.cwnd(), cwnd_prev + p.size * 3);
    }

    #[test]
    fn cubic_congestion_event() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);

        let mut r = Recovery::new(&cfg);
        let now = Instant::now();
        let prev_cwnd = r.cwnd();

        r.congestion_event(
            r.max_datagram_size,
            now,
            packet::Epoch::Application,
            now,
        );

        // In CUBIC, after a congestion event, cwnd is reduced by (1 -
        // BETA_CUBIC).
        assert_eq!(prev_cwnd as f64 * BETA_CUBIC, r.cwnd() as f64);
    }

    #[test]
    fn cubic_congestion_avoidance() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);

        let mut r = Recovery::new(&cfg);
        let mut now = Instant::now();
        let prev_cwnd = r.cwnd();

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..recovery::INITIAL_WINDOW_PACKETS {
            r.on_packet_sent_cc(r.max_datagram_size, now);
        }

        // Trigger congestion event to update ssthresh
        r.congestion_event(
            r.max_datagram_size,
            now,
            packet::Epoch::Application,
            now,
        );

        // After congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(r.cwnd(), cur_cwnd);

        // Shift current time by 1 RTT.
        let rtt = Duration::from_millis(100);

        r.update_rtt(rtt, Duration::from_millis(0), now);

        // Exit from the recovery.
        now += rtt;

        // To avoid rollback
        r.lost_count += MIN_ROLLBACK_THRESHOLD;

        // During Congestion Avoidance, it will take
        // 5 ACKs to increase cwnd by 1 MSS.
        for _ in 0..5 {
            let acked = vec![Acked {
                pkt_num: 0,
                time_sent: now,
                size: r.max_datagram_size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            }];

            r.on_packets_acked(acked, packet::Epoch::Application, now);
            now += rtt;
        }

        assert_eq!(r.cwnd(), cur_cwnd + r.max_datagram_size);
    }

    #[test]
    fn cubic_collapse_cwnd_and_restart() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);

        let mut r = Recovery::new(&cfg);
        let now = Instant::now();

        // Fill up bytes_in_flight to avoid app_limited=true
        r.on_packet_sent_cc(30000, now);

        // Trigger congestion event to update ssthresh
        r.congestion_event(
            r.max_datagram_size,
            now,
            packet::Epoch::Application,
            now,
        );

        // After persistent congestion, cwnd should be the minimum window
        r.collapse_cwnd();
        assert_eq!(
            r.cwnd(),
            r.max_datagram_size * recovery::MINIMUM_WINDOW_PACKETS
        );

        let acked = vec![Acked {
            pkt_num: 0,
            // To exit from recovery
            time_sent: now + Duration::from_millis(1),
            size: r.max_datagram_size,
            delivered: 0,
            delivered_time: now,
            first_sent_time: now,
            is_app_limited: false,
            rtt: Duration::ZERO,
        }];

        r.on_packets_acked(acked, packet::Epoch::Application, now);

        // Slow start again - cwnd will be increased by 1 MSS
        assert_eq!(
            r.cwnd(),
            r.max_datagram_size * (recovery::MINIMUM_WINDOW_PACKETS + 1)
        );
    }

    #[test]
    fn cubic_hystart_css_to_ss() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);
        cfg.enable_hystart(true);

        let mut r = Recovery::new(&cfg);
        let now = Instant::now();
        let epoch = packet::Epoch::Application;

        let p = recovery::Sent {
            pkt_num: 0,
            frames: smallvec![],
            time_sent: now,
            time_acked: None,
            time_lost: None,
            size: r.max_datagram_size,
            ack_eliciting: true,
            in_flight: true,
            delivered: 0,
            delivered_time: now,
            first_sent_time: now,
            is_app_limited: false,
            has_data: false,
        };

        // 1st round.
        let n_rtt_sample = hystart::N_RTT_SAMPLE;
        let mut send_pn = 0;
        let mut ack_pn = 0;

        let rtt_1st = Duration::from_millis(50);

        // Send 1st round packets.
        for _ in 0..n_rtt_sample {
            r.on_packet_sent_cc(p.size, now);
            send_pn += 1;
        }

        r.hystart.start_round(send_pn - 1);

        // Receiving Acks.
        let now = now + rtt_1st;
        for _ in 0..n_rtt_sample {
            r.update_rtt(rtt_1st, Duration::from_millis(0), now);

            let acked = vec![Acked {
                pkt_num: ack_pn,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            }];

            r.on_packets_acked(acked, epoch, now);
            ack_pn += 1;
        }

        // Not in CSS yet.
        assert_eq!(r.hystart.css_start_time().is_some(), false);

        // 2nd round.
        let mut rtt_2nd = Duration::from_millis(100);
        let now = now + rtt_2nd;

        // Send 2nd round packets.
        for _ in 0..n_rtt_sample {
            r.on_packet_sent_cc(p.size, now);
            send_pn += 1;
        }
        r.hystart.start_round(send_pn - 1);

        // Receiving Acks.
        // The last ack will cause hystart to exit to CSS.
        let mut cwnd_prev = r.cwnd();

        for _ in 0..n_rtt_sample {
            cwnd_prev = r.cwnd();
            r.update_rtt(rtt_2nd, Duration::from_millis(0), now);

            let acked = vec![Acked {
                pkt_num: ack_pn,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            }];

            r.on_packets_acked(acked, epoch, now);
            ack_pn += 1;

            // Keep increasing RTT so that hystart exits to CSS.
            rtt_2nd += rtt_2nd.saturating_add(Duration::from_millis(4));
        }

        // Now we are in CSS.
        assert_eq!(r.hystart.css_start_time().is_some(), true);
        assert_eq!(r.cwnd(), cwnd_prev + r.max_datagram_size);

        // 3rd round, whose RTT is lower than the previous round's, to
        // trigger a return to Slow Start.
        let rtt_3rd = Duration::from_millis(80);
        let now = now + rtt_3rd;
        cwnd_prev = r.cwnd();

        // Send 3rd round packets.
        for _ in 0..n_rtt_sample {
            r.on_packet_sent_cc(p.size, now);
            send_pn += 1;
        }
        r.hystart.start_round(send_pn - 1);

        // Receiving Acks.
        // The last ack will cause hystart to exit back to Slow Start.
        for _ in 0..n_rtt_sample {
            r.update_rtt(rtt_3rd, Duration::from_millis(0), now);

            let acked = vec![Acked {
                pkt_num: ack_pn,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            }];

            r.on_packets_acked(acked, epoch, now);
            ack_pn += 1;
        }

        // Now we are back in Slow Start.
        assert_eq!(r.hystart.css_start_time().is_some(), false);
        assert_eq!(
            r.cwnd(),
            cwnd_prev +
                r.max_datagram_size / hystart::CSS_GROWTH_DIVISOR *
                    hystart::N_RTT_SAMPLE
        );
    }

    #[test]
    fn cubic_hystart_css_to_ca() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);
        cfg.enable_hystart(true);

        let mut r = Recovery::new(&cfg);
        let now = Instant::now();
        let epoch = packet::Epoch::Application;

        let p = recovery::Sent {
            pkt_num: 0,
            frames: smallvec![],
            time_sent: now,
            time_acked: None,
            time_lost: None,
            size: r.max_datagram_size,
            ack_eliciting: true,
            in_flight: true,
            delivered: 0,
            delivered_time: now,
            first_sent_time: now,
            is_app_limited: false,
            has_data: false,
        };

        // 1st round.
        let n_rtt_sample = hystart::N_RTT_SAMPLE;
        let mut send_pn = 0;
        let mut ack_pn = 0;

        let rtt_1st = Duration::from_millis(50);

        // Send 1st round packets.
        for _ in 0..n_rtt_sample {
            r.on_packet_sent_cc(p.size, now);
            send_pn += 1;
        }

        r.hystart.start_round(send_pn - 1);

        // Receiving Acks.
        let now = now + rtt_1st;
        for _ in 0..n_rtt_sample {
            r.update_rtt(rtt_1st, Duration::from_millis(0), now);

            let acked = vec![Acked {
                pkt_num: ack_pn,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            }];

            r.on_packets_acked(acked, epoch, now);
            ack_pn += 1;
        }

        // Not in CSS yet.
        assert_eq!(r.hystart.css_start_time().is_some(), false);

        // 2nd round.
        let mut rtt_2nd = Duration::from_millis(100);
        let now = now + rtt_2nd;

        // Send 2nd round packets.
        for _ in 0..n_rtt_sample {
            r.on_packet_sent_cc(p.size, now);
            send_pn += 1;
        }
        r.hystart.start_round(send_pn - 1);

        // Receiving Acks.
        // The last ack will cause hystart to exit to CSS.
        let mut cwnd_prev = r.cwnd();

        for _ in 0..n_rtt_sample {
            cwnd_prev = r.cwnd();
            r.update_rtt(rtt_2nd, Duration::from_millis(0), now);

            let acked = vec![Acked {
                pkt_num: ack_pn,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            }];

            r.on_packets_acked(acked, epoch, now);
            ack_pn += 1;

            // Keep increasing RTT so that hystart exits to CSS.
            rtt_2nd += rtt_2nd.saturating_add(Duration::from_millis(4));
        }

        // Now we are in CSS.
        assert_eq!(r.hystart.css_start_time().is_some(), true);
        assert_eq!(r.cwnd(), cwnd_prev + r.max_datagram_size);

        // Run CSS_ROUNDS (5) rounds in CSS, to exit to congestion avoidance.
        let rtt_css = Duration::from_millis(100);
        let now = now + rtt_css;

        for _ in 0..hystart::CSS_ROUNDS {
            // Send a round of packets.
            for _ in 0..n_rtt_sample {
                r.on_packet_sent_cc(p.size, now);
                send_pn += 1;
            }
            r.hystart.start_round(send_pn - 1);

            // Receiving Acks.
            for _ in 0..n_rtt_sample {
                r.update_rtt(rtt_css, Duration::from_millis(0), now);

                let acked = vec![Acked {
                    pkt_num: ack_pn,
                    time_sent: p.time_sent,
                    size: p.size,
                    delivered: 0,
                    delivered_time: now,
                    first_sent_time: now,
                    is_app_limited: false,
                    rtt: Duration::ZERO,
                }];

                r.on_packets_acked(acked, epoch, now);
                ack_pn += 1;
            }
        }

        // Now we are in congestion avoidance.
        assert_eq!(r.cwnd(), r.ssthresh);
    }

    #[test]
    fn cubic_spurious_congestion_event() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);

        let mut r = Recovery::new(&cfg);
        let now = Instant::now();
        let prev_cwnd = r.cwnd();

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..recovery::INITIAL_WINDOW_PACKETS {
            r.on_packet_sent_cc(r.max_datagram_size, now);
        }

        // Trigger congestion event to update ssthresh
        r.congestion_event(
            r.max_datagram_size,
            now,
            packet::Epoch::Application,
            now,
        );

        // After congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(r.cwnd(), cur_cwnd);

        let rtt = Duration::from_millis(100);

        let acked = vec![Acked {
            pkt_num: 0,
            // To exit from recovery
            time_sent: now + rtt,
            size: r.max_datagram_size,
            delivered: 0,
            delivered_time: now,
            first_sent_time: now,
            is_app_limited: false,
            rtt: Duration::ZERO,
        }];

        // Ack more than cwnd bytes with rtt=100ms
        r.update_rtt(rtt, Duration::from_millis(0), now);

        // Trigger detecting spurious congestion event
        r.on_packets_acked(
            acked,
            packet::Epoch::Application,
            now + rtt + Duration::from_millis(5),
        );

        // This is from slow start, no rollback.
        assert_eq!(r.cwnd(), cur_cwnd);

        let now = now + rtt;

        // Trigger another congestion event.
        let prev_cwnd = r.cwnd();
        r.congestion_event(
            r.max_datagram_size,
            now,
            packet::Epoch::Application,
            now,
        );

        // After congestion event, cwnd will be reduced.
        let cur_cwnd = (cur_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(r.cwnd(), cur_cwnd);

        let rtt = Duration::from_millis(100);

        let acked = vec![Acked {
            pkt_num: 0,
            // To exit from recovery
            time_sent: now + rtt,
            size: r.max_datagram_size,
            delivered: 0,
            delivered_time: now,
            first_sent_time: now,
            is_app_limited: false,
            rtt: Duration::ZERO,
        }];

        // Ack more than cwnd bytes with rtt=100ms.
        r.update_rtt(rtt, Duration::from_millis(0), now);

        // Trigger detecting spurious congestion event.
        r.on_packets_acked(
            acked,
            packet::Epoch::Application,
            now + rtt + Duration::from_millis(5),
        );

        // cwnd is rolled back to the previous one.
        assert_eq!(r.cwnd(), prev_cwnd);
    }

    #[test]
    fn cubic_fast_convergence() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::CUBIC);

        let mut r = Recovery::new(&cfg);
        let mut now = Instant::now();
        let prev_cwnd = r.cwnd();

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..recovery::INITIAL_WINDOW_PACKETS {
            r.on_packet_sent_cc(r.max_datagram_size, now);
        }

        // Trigger congestion event to update ssthresh
        r.congestion_event(
            r.max_datagram_size,
            now,
            packet::Epoch::Application,
            now,
        );

        // After 1st congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(r.cwnd(), cur_cwnd);

        // Shift current time by 1 RTT.
        let rtt = Duration::from_millis(100);
        r.update_rtt(rtt, Duration::from_millis(0), now);

        // Exit from the recovery.
        now += rtt;

        // To avoid rollback
        r.lost_count += MIN_ROLLBACK_THRESHOLD;

        // During Congestion Avoidance, it will take
        // 5 ACKs to increase cwnd by 1 MSS.
        for _ in 0..5 {
            let acked = vec![Acked {
                pkt_num: 0,
                time_sent: now,
                size: r.max_datagram_size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            }];

            r.on_packets_acked(acked, packet::Epoch::Application, now);
            now += rtt;
        }

        assert_eq!(r.cwnd(), cur_cwnd + r.max_datagram_size);

        let prev_cwnd = r.cwnd();

        // Fast convergence: there is now a 2nd congestion event and cwnd has
        // not fully recovered to w_max, so w_max will be further reduced.
        r.congestion_event(
            r.max_datagram_size,
            now,
            packet::Epoch::Application,
            now,
        );

        // After 2nd congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(r.cwnd(), cur_cwnd);

        // w_max is reduced below prev_cwnd, rather than being set to
        // prev_cwnd itself.
        assert_eq!(
            r.cubic_state.w_max,
            prev_cwnd as f64 * (1.0 + BETA_CUBIC) / 2.0
        );
    }
}