use std::any::Any;
use std::fmt::Debug;
use std::sync::Arc;

use rand::{Rng, SeedableRng};

use crate::congestion::ControllerMetrics;
use crate::congestion::bbr::bw_estimation::BandwidthEstimation;
use crate::congestion::bbr::min_max::MinMax;
use crate::connection::RttEstimator;
use crate::{Duration, Instant};

use super::{BASE_DATAGRAM_SIZE, Controller, ControllerFactory};

mod bw_estimation;
mod min_max;

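/// A BBR congestion controller.
///
/// Rather than reacting to loss alone, BBR maintains a model of the path from two
/// ongoing measurements, the maximum delivery rate (bottleneck bandwidth) and the
/// minimum round-trip time, and derives the pacing rate and congestion window from
/// that model (see the IETF draft `draft-cardwell-iccrg-bbr-congestion-control`).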
#[derive(Debug, Clone)]
pub struct Bbr {
    config: Arc<BbrConfig>,
    current_mtu: u64,
    max_bandwidth: BandwidthEstimation,
    acked_bytes: u64,
    mode: Mode,
    loss_state: LossState,
    recovery_state: RecoveryState,
    recovery_window: u64,
    is_at_full_bandwidth: bool,
    pacing_gain: f32,
    high_gain: f32,
    drain_gain: f32,
    cwnd_gain: f32,
    high_cwnd_gain: f32,
    last_cycle_start: Option<Instant>,
    current_cycle_offset: u8,
    init_cwnd: u64,
    min_cwnd: u64,
    prev_in_flight_count: u64,
    exit_probe_rtt_at: Option<Instant>,
    probe_rtt_last_started_at: Option<Instant>,
    min_rtt: Duration,
    exiting_quiescence: bool,
    pacing_rate: u64,
    max_acked_packet_number: u64,
    max_sent_packet_number: u64,
    end_recovery_at_packet_number: u64,
    cwnd: u64,
    current_round_trip_end_packet_number: u64,
    round_count: u64,
    bw_at_last_round: u64,
    round_wo_bw_gain: u64,
    ack_aggregation: AckAggregationState,
    random_number_generator: rand::rngs::StdRng,
}

impl Bbr {
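    /// Construct a new BBR state from `config`, deriving the minimum congestion
    /// window from `current_mtu`.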
    pub fn new(config: Arc<BbrConfig>, current_mtu: u16) -> Self {
        let initial_window = config.initial_window;
        Self {
            config,
            current_mtu: current_mtu as u64,
            max_bandwidth: BandwidthEstimation::default(),
            acked_bytes: 0,
            mode: Mode::Startup,
            loss_state: Default::default(),
            recovery_state: RecoveryState::NotInRecovery,
            recovery_window: 0,
            is_at_full_bandwidth: false,
            pacing_gain: K_DEFAULT_HIGH_GAIN,
            high_gain: K_DEFAULT_HIGH_GAIN,
            drain_gain: 1.0 / K_DEFAULT_HIGH_GAIN,
            cwnd_gain: K_DEFAULT_HIGH_GAIN,
            high_cwnd_gain: K_DEFAULT_HIGH_GAIN,
            last_cycle_start: None,
            current_cycle_offset: 0,
            init_cwnd: initial_window,
            min_cwnd: calculate_min_window(current_mtu as u64),
            prev_in_flight_count: 0,
            exit_probe_rtt_at: None,
            probe_rtt_last_started_at: None,
            min_rtt: Default::default(),
            exiting_quiescence: false,
            pacing_rate: 0,
            max_acked_packet_number: 0,
            max_sent_packet_number: 0,
            end_recovery_at_packet_number: 0,
            cwnd: initial_window,
            current_round_trip_end_packet_number: 0,
            round_count: 0,
            bw_at_last_round: 0,
            round_wo_bw_gain: 0,
            ack_aggregation: AckAggregationState::default(),
            random_number_generator: rand::rngs::StdRng::from_os_rng(),
        }
    }

    fn enter_startup_mode(&mut self) {
        self.mode = Mode::Startup;
        self.pacing_gain = self.high_gain;
        self.cwnd_gain = self.high_cwnd_gain;
    }

    fn enter_probe_bandwidth_mode(&mut self, now: Instant) {
        self.mode = Mode::ProbeBw;
        self.cwnd_gain = K_DERIVED_HIGH_CWNDGAIN;
        self.last_cycle_start = Some(now);
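        // Start the gain cycle at a random offset in {0, 2..=7}; offset 1 is skipped
        // so that the draining gain (0.75) is only ever used immediately after the
        // probing gain (1.25).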
        let mut rand_index = self
            .random_number_generator
            .random_range(0..K_PACING_GAIN.len() as u8 - 1);
        if rand_index >= 1 {
            rand_index += 1;
        }
        self.current_cycle_offset = rand_index;
        self.pacing_gain = K_PACING_GAIN[rand_index as usize];
    }

    fn update_recovery_state(&mut self, is_round_start: bool) {
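        // Each new loss pushes the end of recovery out to the highest packet number
        // sent so far; recovery ends once a packet sent after the last loss is acked.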
        if self.loss_state.has_losses() {
            self.end_recovery_at_packet_number = self.max_sent_packet_number;
        }
        match self.recovery_state {
            RecoveryState::NotInRecovery if self.loss_state.has_losses() => {
                self.recovery_state = RecoveryState::Conservation;
                self.recovery_window = 0;
                // Conservation is meant to last a whole round trip, so restart the
                // current round as of the most recent packet sent.
                self.current_round_trip_end_packet_number = self.max_sent_packet_number;
            }
            RecoveryState::Growth | RecoveryState::Conservation => {
                if self.recovery_state == RecoveryState::Conservation && is_round_start {
                    self.recovery_state = RecoveryState::Growth;
                }
                if !self.loss_state.has_losses()
                    && self.max_acked_packet_number > self.end_recovery_at_packet_number
                {
                    self.recovery_state = RecoveryState::NotInRecovery;
                }
            }
            _ => {}
        }
    }

    fn update_gain_cycle_phase(&mut self, now: Instant, in_flight: u64) {
        // In the common case, move to the next pacing-gain phase once the current one
        // has lasted for roughly one min_rtt.
        let mut should_advance_gain_cycling = self
            .last_cycle_start
            .map(|last_cycle_start| now.duration_since(last_cycle_start) > self.min_rtt)
            .unwrap_or(false);
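        // While probing (gain > 1.0), do not advance until in-flight has actually
        // reached the probing target, unless losses suggest the path cannot absorb
        // that much data.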
        if self.pacing_gain > 1.0
            && !self.loss_state.has_losses()
            && self.prev_in_flight_count < self.get_target_cwnd(self.pacing_gain)
        {
            should_advance_gain_cycling = false;
        }

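        // While draining (gain < 1.0), the phase may end early as soon as in-flight
        // has fallen back to the estimated BDP.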
        if self.pacing_gain < 1.0 && in_flight <= self.get_target_cwnd(1.0) {
            should_advance_gain_cycling = true;
        }

        if should_advance_gain_cycling {
            self.current_cycle_offset = (self.current_cycle_offset + 1) % K_PACING_GAIN.len() as u8;
            self.last_cycle_start = Some(now);
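            // With DRAIN_TO_TARGET enabled, stay on the draining gain until in-flight
            // has actually reached the target rather than switching to 1.0 right away.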
            if DRAIN_TO_TARGET
                && self.pacing_gain < 1.0
                && (K_PACING_GAIN[self.current_cycle_offset as usize] - 1.0).abs() < f32::EPSILON
                && in_flight > self.get_target_cwnd(1.0)
            {
                return;
            }
            self.pacing_gain = K_PACING_GAIN[self.current_cycle_offset as usize];
        }
    }

    fn maybe_exit_startup_or_drain(&mut self, now: Instant, in_flight: u64) {
        if self.mode == Mode::Startup && self.is_at_full_bandwidth {
            self.mode = Mode::Drain;
            self.pacing_gain = self.drain_gain;
            self.cwnd_gain = self.high_cwnd_gain;
        }
        if self.mode == Mode::Drain && in_flight <= self.get_target_cwnd(1.0) {
            self.enter_probe_bandwidth_mode(now);
        }
    }

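    /// The current min_rtt sample is considered stale once more than 10 seconds have
    /// passed since PROBE_RTT last started, unless the sender is currently
    /// application-limited.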
    fn is_min_rtt_expired(&self, now: Instant, app_limited: bool) -> bool {
        !app_limited
            && self
                .probe_rtt_last_started_at
                .map(|last| now.saturating_duration_since(last) > Duration::from_secs(10))
                .unwrap_or(true)
    }

    fn maybe_enter_or_exit_probe_rtt(
        &mut self,
        now: Instant,
        is_round_start: bool,
        bytes_in_flight: u64,
        app_limited: bool,
    ) {
        let min_rtt_expired = self.is_min_rtt_expired(now, app_limited);
        if min_rtt_expired && !self.exiting_quiescence && self.mode != Mode::ProbeRtt {
            self.mode = Mode::ProbeRtt;
            self.pacing_gain = 1.0;
            self.exit_probe_rtt_at = None;
            self.probe_rtt_last_started_at = Some(now);
        }

        if self.mode == Mode::ProbeRtt {
            if self.exit_probe_rtt_at.is_none() {
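                // Schedule the exit from PROBE_RTT only once bytes in flight have
                // drained to the probe window plus one datagram of slack; the 200 ms
                // dwell time starts from that point.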
                if bytes_in_flight < self.get_probe_rtt_cwnd() + self.current_mtu {
                    const K_PROBE_RTT_TIME: Duration = Duration::from_millis(200);
                    self.exit_probe_rtt_at = Some(now + K_PROBE_RTT_TIME);
                }
            } else if is_round_start && now >= self.exit_probe_rtt_at.unwrap() {
                if !self.is_at_full_bandwidth {
                    self.enter_startup_mode();
                } else {
                    self.enter_probe_bandwidth_mode(now);
                }
            }
        }

        self.exiting_quiescence = false;
    }

    fn get_target_cwnd(&self, gain: f32) -> u64 {
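        // The target window is gain × BDP, where BDP = estimated bandwidth (bytes/s)
        // × min_rtt; min_rtt is taken in microseconds here, hence the 1e6 divisor.
        // Until bandwidth samples exist the BDP is zero, so fall back to the
        // initial window.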
        let bw = self.max_bandwidth.get_estimate();
        let bdp = self.min_rtt.as_micros() as u64 * bw;
        let bdpf = bdp as f64;
        let cwnd = ((gain as f64 * bdpf) / 1_000_000f64) as u64;
        if cwnd == 0 {
            return self.init_cwnd;
        }
        cwnd.max(self.min_cwnd)
    }

    fn get_probe_rtt_cwnd(&self) -> u64 {
        const K_MODERATE_PROBE_RTT_MULTIPLIER: f32 = 0.75;
        if PROBE_RTT_BASED_ON_BDP {
            return self.get_target_cwnd(K_MODERATE_PROBE_RTT_MULTIPLIER);
        }
        self.min_cwnd
    }

    fn calculate_pacing_rate(&mut self) {
        let bw = self.max_bandwidth.get_estimate();
        if bw == 0 {
            return;
        }
        let target_rate = (bw as f64 * self.pacing_gain as f64) as u64;
        if self.is_at_full_bandwidth {
            self.pacing_rate = target_rate;
            return;
        }

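        // Until the first bandwidth estimate matures, pace at initial_window / min_rtt
        // as soon as an RTT sample is available.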
        if self.pacing_rate == 0 && self.min_rtt.as_nanos() != 0 {
            self.pacing_rate =
                BandwidthEstimation::bw_from_delta(self.init_cwnd, self.min_rtt).unwrap();
            return;
        }

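        // While still in startup, only ever raise the pacing rate; never slow down.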
        if self.pacing_rate < target_rate {
            self.pacing_rate = target_rate;
        }
    }

    fn calculate_cwnd(&mut self, bytes_acked: u64, excess_acked: u64) {
        if self.mode == Mode::ProbeRtt {
            return;
        }
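        // The target is gain × BDP plus an allowance for ack aggregation (after
        // startup) or for the most recent excess acked bytes (during startup).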
        let mut target_window = self.get_target_cwnd(self.cwnd_gain);
        if self.is_at_full_bandwidth {
            target_window += self.ack_aggregation.max_ack_height.get();
        } else {
            target_window += excess_acked;
        }
        // Grow toward the target by at most `bytes_acked` per call; while still in
        // startup, the window is never decreased.
        if self.is_at_full_bandwidth {
            self.cwnd = target_window.min(self.cwnd + bytes_acked);
        } else if self.cwnd < target_window || self.acked_bytes < self.init_cwnd {
            self.cwnd += bytes_acked;
        }

        if self.cwnd < self.min_cwnd {
            self.cwnd = self.min_cwnd;
        }
    }

    fn calculate_recovery_window(&mut self, bytes_acked: u64, bytes_lost: u64, in_flight: u64) {
        if !self.recovery_state.in_recovery() {
            return;
        }
        if self.recovery_window == 0 {
            self.recovery_window = self.min_cwnd.max(in_flight + bytes_acked);
            return;
        }

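        // Remove what was just lost (never dropping below one datagram), add back what
        // was acked while in the growth phase, and never let the window fall below
        // what is currently in flight plus the new ack, or below the minimum window.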
        if self.recovery_window >= bytes_lost {
            self.recovery_window -= bytes_lost;
        } else {
            self.recovery_window = self.current_mtu;
        }
        if self.recovery_state == RecoveryState::Growth {
            self.recovery_window += bytes_acked;
        }

        self.recovery_window = self
            .recovery_window
            .max(in_flight + bytes_acked)
            .max(self.min_cwnd);
    }

    fn check_if_full_bw_reached(&mut self, app_limited: bool) {
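        // Startup is considered finished once the measured bandwidth has failed to
        // grow by at least 25% for three consecutive rounds, or once recovery begins.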
        if app_limited {
            return;
        }
        let target = (self.bw_at_last_round as f64 * K_STARTUP_GROWTH_TARGET as f64) as u64;
        let bw = self.max_bandwidth.get_estimate();
        if bw >= target {
            self.bw_at_last_round = bw;
            self.round_wo_bw_gain = 0;
            self.ack_aggregation.max_ack_height.reset();
            return;
        }

        self.round_wo_bw_gain += 1;
        if self.round_wo_bw_gain >= K_ROUND_TRIPS_WITHOUT_GROWTH_BEFORE_EXITING_STARTUP as u64
            || self.recovery_state.in_recovery()
        {
            self.is_at_full_bandwidth = true;
        }
    }
}

impl Controller for Bbr {
    fn on_sent(&mut self, now: Instant, bytes: u64, last_packet_number: u64) {
        self.max_sent_packet_number = last_packet_number;
        self.max_bandwidth.on_sent(now, bytes);
    }

    fn on_ack(
        &mut self,
        now: Instant,
        sent: Instant,
        bytes: u64,
        app_limited: bool,
        rtt: &RttEstimator,
    ) {
        self.max_bandwidth
            .on_ack(now, sent, bytes, self.round_count, app_limited);
        self.acked_bytes += bytes;
        if self.is_min_rtt_expired(now, app_limited) || self.min_rtt > rtt.min() {
            self.min_rtt = rtt.min();
        }
    }

    fn on_end_acks(
        &mut self,
        now: Instant,
        in_flight: u64,
        app_limited: bool,
        largest_packet_num_acked: Option<u64>,
    ) {
        let bytes_acked = self.max_bandwidth.bytes_acked_this_window();
        let excess_acked = self.ack_aggregation.update_ack_aggregation_bytes(
            bytes_acked,
            now,
            self.round_count,
            self.max_bandwidth.get_estimate(),
        );
        self.max_bandwidth.end_acks(self.round_count, app_limited);
        if let Some(largest_acked_packet) = largest_packet_num_acked {
            self.max_acked_packet_number = largest_acked_packet;
        }

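        // A new round trip starts once this ack covers a packet sent after the end of
        // the previous round.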
        let mut is_round_start = false;
        if bytes_acked > 0 {
            is_round_start =
                self.max_acked_packet_number > self.current_round_trip_end_packet_number;
            if is_round_start {
                self.current_round_trip_end_packet_number = self.max_sent_packet_number;
                self.round_count += 1;
            }
        }

        self.update_recovery_state(is_round_start);

        if self.mode == Mode::ProbeBw {
            self.update_gain_cycle_phase(now, in_flight);
        }

        if is_round_start && !self.is_at_full_bandwidth {
            self.check_if_full_bw_reached(app_limited);
        }

        self.maybe_exit_startup_or_drain(now, in_flight);

        self.maybe_enter_or_exit_probe_rtt(now, is_round_start, in_flight, app_limited);

        self.calculate_pacing_rate();
        self.calculate_cwnd(bytes_acked, excess_acked);
        self.calculate_recovery_window(bytes_acked, self.loss_state.lost_bytes, in_flight);

        self.prev_in_flight_count = in_flight;
        self.loss_state.reset();
    }

    fn on_congestion_event(
        &mut self,
        _now: Instant,
        _sent: Instant,
        _is_persistent_congestion: bool,
        _is_ecn: bool,
        lost_bytes: u64,
    ) {
        self.loss_state.lost_bytes += lost_bytes;
    }

    fn on_mtu_update(&mut self, new_mtu: u16) {
        self.current_mtu = new_mtu as u64;
        self.min_cwnd = calculate_min_window(self.current_mtu);
        self.init_cwnd = self.config.initial_window.max(self.min_cwnd);
        self.cwnd = self.cwnd.max(self.min_cwnd);
    }

    fn window(&self) -> u64 {
        if self.mode == Mode::ProbeRtt {
            return self.get_probe_rtt_cwnd();
        } else if self.recovery_state.in_recovery() && self.mode != Mode::Startup {
            return self.cwnd.min(self.recovery_window);
        }
        self.cwnd
    }

    fn metrics(&self) -> ControllerMetrics {
        ControllerMetrics {
            congestion_window: self.window(),
            ssthresh: None,
            pacing_rate: Some(self.pacing_rate * 8),
        }
    }

    fn clone_box(&self) -> Box<dyn Controller> {
        Box::new(self.clone())
    }

    fn initial_window(&self) -> u64 {
        self.config.initial_window
    }

    fn into_any(self: Box<Self>) -> Box<dyn Any> {
        self
    }
}

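/// Configuration for the [`Bbr`] congestion controller.
///
/// A minimal sketch of intended use, assuming the surrounding crate's transport
/// configuration accepts an `Arc` of a [`ControllerFactory`] (the exact wiring is
/// not shown here):
///
/// ```ignore
/// use std::sync::Arc;
///
/// let mut bbr = BbrConfig::default();
/// // Hypothetical tuning value: roughly 30 datagrams of 1200 bytes each.
/// bbr.initial_window(30 * 1200);
/// let factory = Arc::new(bbr); // implements `ControllerFactory`
/// ```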
#[derive(Debug, Clone)]
pub struct BbrConfig {
    initial_window: u64,
}

impl BbrConfig {
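    /// Set the initial congestion window, in bytes.
    ///
    /// Defaults to 200 × `BASE_DATAGRAM_SIZE`.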
    pub fn initial_window(&mut self, value: u64) -> &mut Self {
        self.initial_window = value;
        self
    }
}

impl Default for BbrConfig {
    fn default() -> Self {
        Self {
            initial_window: K_MAX_INITIAL_CONGESTION_WINDOW * BASE_DATAGRAM_SIZE,
        }
    }
}

impl ControllerFactory for BbrConfig {
    fn build(self: Arc<Self>, _now: Instant, current_mtu: u16) -> Box<dyn Controller> {
        Box::new(Bbr::new(self, current_mtu))
    }
}

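/// Tracks by how much acked bytes have run ahead of what the current bandwidth
/// estimate predicts, so that the congestion window can absorb ack aggregation.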
#[derive(Debug, Default, Copy, Clone)]
struct AckAggregationState {
    max_ack_height: MinMax,
    aggregation_epoch_start_time: Option<Instant>,
    aggregation_epoch_bytes: u64,
}

impl AckAggregationState {
    fn update_ack_aggregation_bytes(
        &mut self,
        newly_acked_bytes: u64,
        now: Instant,
        round: u64,
        max_bandwidth: u64,
    ) -> u64 {
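        // How many bytes the current bandwidth estimate says should have been acked
        // since this aggregation epoch started.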
        let expected_bytes_acked = max_bandwidth
            * now
                .saturating_duration_since(self.aggregation_epoch_start_time.unwrap_or(now))
                .as_micros() as u64
            / 1_000_000;

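        // If acks are keeping pace with (or lagging) the estimate, start a fresh
        // aggregation epoch and report no excess.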
        if self.aggregation_epoch_bytes <= expected_bytes_acked {
            self.aggregation_epoch_bytes = newly_acked_bytes;
            self.aggregation_epoch_start_time = Some(now);
            return 0;
        }

        // Otherwise report how far acks have run ahead of the estimate, and track the
        // maximum per round.
        self.aggregation_epoch_bytes += newly_acked_bytes;
        let diff = self.aggregation_epoch_bytes - expected_bytes_acked;
        self.max_ack_height.update_max(round, diff);
        diff
    }
}

/// The operating phase of the BBR state machine.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum Mode {
    /// Grow quickly until the measured bottleneck bandwidth stops increasing.
    Startup,
    /// After startup, drain the queue that was built up while probing.
    Drain,
    /// Steady state: cycle the pacing gain to periodically probe for extra bandwidth.
    ProbeBw,
    /// Briefly reduce the data in flight to refresh the min RTT estimate.
    ProbeRtt,
}

/// Loss-recovery state tracked alongside the BBR model.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum RecoveryState {
    /// No recent losses; the congestion window is not constrained by recovery.
    NotInRecovery,
    /// After a loss, hold the amount of data in flight roughly constant for one round.
    Conservation,
    /// Let the recovery window grow by newly acked bytes until recovery ends.
    Growth,
}

impl RecoveryState {
    pub(super) fn in_recovery(&self) -> bool {
        !matches!(self, Self::NotInRecovery)
    }
}

#[derive(Debug, Clone, Default)]
struct LossState {
    lost_bytes: u64,
}

impl LossState {
    pub(super) fn reset(&mut self) {
        self.lost_bytes = 0;
    }

    pub(super) fn has_losses(&self) -> bool {
        self.lost_bytes != 0
    }
}

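/// The minimum congestion window: four datagrams at the current MTU.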
fn calculate_min_window(current_mtu: u64) -> u64 {
    4 * current_mtu
}

/// The gain used for the pacing rate and congestion window during STARTUP
/// (2 / ln(2), which roughly doubles the sending rate each round trip).
const K_DEFAULT_HIGH_GAIN: f32 = 2.885;
/// The congestion-window gain used in PROBE_BW.
const K_DERIVED_HIGH_CWNDGAIN: f32 = 2.0;
/// The PROBE_BW pacing-gain cycle: one probing phase, one draining phase, then six
/// cruising phases at the estimated bandwidth.
const K_PACING_GAIN: [f32; 8] = [1.25, 0.75, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0];

/// Bandwidth growth below this factor counts as a round without gain during STARTUP.
const K_STARTUP_GROWTH_TARGET: f32 = 1.25;
/// Number of consecutive rounds without bandwidth gain after which STARTUP ends.
const K_ROUND_TRIPS_WITHOUT_GROWTH_BEFORE_EXITING_STARTUP: u8 = 3;

/// Number of datagrams used for the default initial congestion window.
const K_MAX_INITIAL_CONGESTION_WINDOW: u64 = 200;

/// Whether the PROBE_RTT window is derived from the BDP rather than the minimum window.
const PROBE_RTT_BASED_ON_BDP: bool = true;
/// Whether the draining gain phase of PROBE_BW is held until in-flight reaches the target.
const DRAIN_TO_TARGET: bool = true;