// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright notice,
//       this list of conditions and the following disclaimer.
//
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

//! Reno Congestion Control
//!
//! Note that Slow Start can use HyStart++ when enabled.
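//!
//! A rough sketch of the window updates implemented below (the constants
//! referenced here are defined in the `recovery` module):
//!
//! ```text
//! on ACK, cwnd < ssthresh (slow start):
//!     cwnd += max_datagram_size                 // per acked packet
//! on ACK, cwnd >= ssthresh (congestion avoidance):
//!     cwnd += max_datagram_size                 // per cwnd bytes acked
//! on congestion event:
//!     cwnd = max(cwnd * LOSS_REDUCTION_FACTOR,
//!                max_datagram_size * MINIMUM_WINDOW_PACKETS)
//!     ssthresh = cwnd
//! ```
//!
//! Increases are skipped while the connection is app limited or while the
//! acked packet was sent during the current congestion recovery period.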

use std::cmp;
use std::time::Instant;

use crate::packet;
use crate::recovery;

use crate::recovery::Acked;
use crate::recovery::CongestionControlOps;
use crate::recovery::Recovery;

pub static RENO: CongestionControlOps = CongestionControlOps {
    on_init,
    reset,
    on_packet_sent,
    on_packets_acked,
    congestion_event,
    collapse_cwnd,
    checkpoint,
    rollback,
    has_custom_pacing,
    debug_fmt,
};

pub fn on_init(_r: &mut Recovery) {}

pub fn reset(_r: &mut Recovery) {}

pub fn on_packet_sent(r: &mut Recovery, sent_bytes: usize, _now: Instant) {
    r.bytes_in_flight += sent_bytes;
}

fn on_packets_acked(
    r: &mut Recovery, packets: &[Acked], epoch: packet::Epoch, now: Instant,
) {
    for pkt in packets {
        on_packet_acked(r, pkt, epoch, now);
    }
}

fn on_packet_acked(
    r: &mut Recovery, packet: &Acked, epoch: packet::Epoch, now: Instant,
) {
    r.bytes_in_flight = r.bytes_in_flight.saturating_sub(packet.size);

    if r.in_congestion_recovery(packet.time_sent) {
        return;
    }

    if r.app_limited {
        return;
    }

    if r.congestion_window < r.ssthresh {
        // In Slow Start, bytes_acked_sl is used for counting
        // acknowledged bytes.
        r.bytes_acked_sl += packet.size;

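        // Grow cwnd by one datagram per acked packet (roughly doubling it
        // every RTT), or by the HyStart++ CSS increment while in
        // Conservative Slow Start.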
        if r.hystart.in_css(epoch) {
            r.congestion_window += r.hystart.css_cwnd_inc(r.max_datagram_size);
        } else {
            r.congestion_window += r.max_datagram_size;
        }

        if r.hystart.on_packet_acked(epoch, packet, r.latest_rtt, now) {
            // Exit to congestion avoidance if CSS ends.
            r.ssthresh = r.congestion_window;
        }
    } else {
        // Congestion avoidance.
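        // bytes_acked_ca accumulates acknowledged bytes; once a full cwnd
        // worth of bytes has been acked, grow cwnd by one datagram
        // (additive increase of roughly one datagram per RTT).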
        r.bytes_acked_ca += packet.size;

        if r.bytes_acked_ca >= r.congestion_window {
            r.bytes_acked_ca -= r.congestion_window;
            r.congestion_window += r.max_datagram_size;
        }
    }
}

fn congestion_event(
    r: &mut Recovery, _lost_bytes: usize, time_sent: Instant,
    epoch: packet::Epoch, now: Instant,
) {
    // Start a new congestion event only if the packet was sent after the
    // start of the previous congestion recovery period.
    if !r.in_congestion_recovery(time_sent) {
        r.congestion_recovery_start_time = Some(now);

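        // Multiplicative decrease: reduce cwnd by LOSS_REDUCTION_FACTOR,
        // but never below the minimum window of MINIMUM_WINDOW_PACKETS
        // datagrams.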
        r.congestion_window = (r.congestion_window as f64 *
            recovery::LOSS_REDUCTION_FACTOR)
            as usize;

        r.congestion_window = cmp::max(
            r.congestion_window,
            r.max_datagram_size * recovery::MINIMUM_WINDOW_PACKETS,
        );

        r.bytes_acked_ca = (r.congestion_window as f64 *
            recovery::LOSS_REDUCTION_FACTOR) as usize;

        r.ssthresh = r.congestion_window;

        if r.hystart.in_css(epoch) {
            r.hystart.congestion_event();
        }
    }
}

pub fn collapse_cwnd(r: &mut Recovery) {
    r.congestion_window = r.max_datagram_size * recovery::MINIMUM_WINDOW_PACKETS;
    r.bytes_acked_sl = 0;
    r.bytes_acked_ca = 0;

    if r.hystart.enabled() {
        r.hystart.reset();
    }
}

fn checkpoint(_r: &mut Recovery) {}

fn rollback(_r: &mut Recovery) -> bool {
    true
}

fn has_custom_pacing() -> bool {
    false
}

fn debug_fmt(_r: &Recovery, _f: &mut std::fmt::Formatter) -> std::fmt::Result {
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    use smallvec::smallvec;
    use std::time::Duration;

    #[test]
    fn reno_init() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let r = Recovery::new(&cfg);

        assert!(r.cwnd() > 0);
        assert_eq!(r.bytes_in_flight, 0);
    }

    #[test]
    fn reno_send() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let now = Instant::now();

        r.on_packet_sent_cc(1000, now);

        assert_eq!(r.bytes_in_flight, 1000);
    }

    #[test]
    fn reno_slow_start() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let now = Instant::now();

        let p = recovery::Sent {
            pkt_num: 0,
            frames: smallvec![],
            time_sent: now,
            time_acked: None,
            time_lost: None,
            size: r.max_datagram_size,
            ack_eliciting: true,
            in_flight: true,
            delivered: 0,
            delivered_time: std::time::Instant::now(),
            first_sent_time: std::time::Instant::now(),
            is_app_limited: false,
            has_data: false,
        };

        // Send initcwnd worth of full-sized packets so the connection is no
        // longer app limited.
        for _ in 0..recovery::INITIAL_WINDOW_PACKETS {
            r.on_packet_sent_cc(p.size, now);
        }

        let cwnd_prev = r.cwnd();

        let acked = vec![Acked {
            pkt_num: p.pkt_num,
            time_sent: p.time_sent,
            size: p.size,
            delivered: 0,
            delivered_time: now,
            first_sent_time: now,
            is_app_limited: false,
            rtt: Duration::ZERO,
        }];

        r.on_packets_acked(acked, packet::Epoch::Application, now);

        // Check if cwnd increased by packet size (slow start).
        assert_eq!(r.cwnd(), cwnd_prev + p.size);
    }

    #[test]
    fn reno_slow_start_multi_acks() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let now = Instant::now();

        let p = recovery::Sent {
            pkt_num: 0,
            frames: smallvec![],
            time_sent: now,
            time_acked: None,
            time_lost: None,
            size: r.max_datagram_size,
            ack_eliciting: true,
            in_flight: true,
            delivered: 0,
            delivered_time: std::time::Instant::now(),
            first_sent_time: std::time::Instant::now(),
            is_app_limited: false,
            has_data: false,
        };

        // Send initcwnd worth of full-sized packets so the connection is no
        // longer app limited.
        for _ in 0..recovery::INITIAL_WINDOW_PACKETS {
            r.on_packet_sent_cc(p.size, now);
        }

        let cwnd_prev = r.cwnd();

        let acked = vec![
            Acked {
                pkt_num: p.pkt_num,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            },
            Acked {
                pkt_num: p.pkt_num,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            },
            Acked {
                pkt_num: p.pkt_num,
                time_sent: p.time_sent,
                size: p.size,
                delivered: 0,
                delivered_time: now,
                first_sent_time: now,
                is_app_limited: false,
                rtt: Duration::ZERO,
            },
        ];

        r.on_packets_acked(acked, packet::Epoch::Application, now);

        // Acked 3 packets.
        assert_eq!(r.cwnd(), cwnd_prev + p.size * 3);
    }

    #[test]
    fn reno_congestion_event() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let prev_cwnd = r.cwnd();

        let now = Instant::now();

        r.congestion_event(
            r.max_datagram_size,
            now,
            packet::Epoch::Application,
            now,
        );

        // In Reno, cwnd is cut in half after a congestion event.
        assert_eq!(prev_cwnd / 2, r.cwnd());
    }

    #[test]
    fn reno_congestion_avoidance() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);
        let now = Instant::now();
        let prev_cwnd = r.cwnd();

        // Fill up bytes_in_flight to avoid app_limited=true
        r.on_packet_sent_cc(20000, now);

        // Trigger congestion event to update ssthresh
        r.congestion_event(
            r.max_datagram_size,
            now,
            packet::Epoch::Application,
            now,
        );

        // After congestion event, cwnd will be reduced.
        let cur_cwnd =
            (prev_cwnd as f64 * recovery::LOSS_REDUCTION_FACTOR) as usize;
        assert_eq!(r.cwnd(), cur_cwnd);

        let rtt = Duration::from_millis(100);

        let acked = vec![Acked {
            pkt_num: 0,
            // To exit from recovery
            time_sent: now + rtt,
            // More than cur_cwnd to increase cwnd
            size: 8000,
            delivered: 0,
            delivered_time: now,
            first_sent_time: now,
            is_app_limited: false,
            rtt: Duration::ZERO,
        }];

        // Ack more than cwnd bytes with rtt=100ms
        r.update_rtt(rtt, Duration::from_millis(0), now);
        r.on_packets_acked(acked, packet::Epoch::Application, now + rtt * 2);

        // After acking more than cwnd, expect cwnd increased by MSS
        assert_eq!(r.cwnd(), cur_cwnd + r.max_datagram_size);
    }
}