noq_proto/config/
transport.rs

1#[cfg(feature = "qlog")]
2use std::path::Path;
3use std::{
4    fmt,
5    net::SocketAddr,
6    num::{NonZeroU8, NonZeroU32},
7    sync::Arc,
8};
9
10use crate::{
11    ConnectionId, Duration, INITIAL_MTU, Instant, MAX_UDP_PAYLOAD, Side, VarInt,
12    VarIntBoundsExceeded, address_discovery, congestion, connection::qlog::QlogSink,
13};
14#[cfg(feature = "qlog")]
15use crate::{QlogFactory, QlogFileFactory};
16
/// When multipath is required and has not been explicitly enabled, this value will be used for
/// [`TransportConfig::max_concurrent_multipath_paths`].
const DEFAULT_CONCURRENT_MULTIPATH_PATHS_WHEN_ENABLED_: NonZeroU32 = {
    // Unwrap by hand so this stays valid in a const context on older toolchains;
    // the `else` arm can never be taken and would be rejected at compile time if it were.
    if let Some(v) = NonZeroU32::new(12) {
        v
    } else {
        panic!("to enable multipath this must be positive, which clearly it is")
    }
};
25
/// When multipath is required and has not been explicitly enabled, this value will be used for
/// [`TransportConfig::max_concurrent_multipath_paths`].
// Compiled only for rustdoc: publicly re-exports the private constant above so that other
// doc comments can link to it by name.
#[cfg(doc)]
pub const DEFAULT_CONCURRENT_MULTIPATH_PATHS_WHEN_ENABLED: NonZeroU32 =
    DEFAULT_CONCURRENT_MULTIPATH_PATHS_WHEN_ENABLED_;
32
/// Parameters governing the core QUIC state machine
///
/// Default values should be suitable for most internet applications. Application protocols which
/// forbid remotely-initiated streams should set `max_concurrent_bidi_streams` and
/// `max_concurrent_uni_streams` to zero.
///
/// In some cases, performance or resource requirements can be improved by tuning these values to
/// suit a particular application and/or network connection. In particular, data window sizes can be
/// tuned for a particular expected round trip time, link capacity, and memory availability. Tuning
/// for higher bandwidths and latencies increases worst-case memory consumption, but does not impair
/// performance at lower bandwidths and latencies. The default configuration is tuned for a 100Mbps
/// link with a 100ms round trip time.
// Each field is documented on its same-named setter in the `impl` block below.
#[derive(Clone)]
pub struct TransportConfig {
    // Stream concurrency and flow-control windows
    pub(crate) max_concurrent_bidi_streams: VarInt,
    pub(crate) max_concurrent_uni_streams: VarInt,
    pub(crate) max_idle_timeout: Option<VarInt>,
    pub(crate) stream_receive_window: VarInt,
    pub(crate) receive_window: VarInt,
    pub(crate) send_window: u64,
    pub(crate) send_fairness: bool,

    // Loss detection, RTT, and path MTU handling
    pub(crate) packet_threshold: u32,
    pub(crate) time_threshold: f32,
    pub(crate) initial_rtt: Duration,
    pub(crate) initial_mtu: u16,
    pub(crate) min_mtu: u16,
    pub(crate) mtu_discovery_config: Option<MtuDiscoveryConfig>,
    pub(crate) pad_to_mtu: bool,
    pub(crate) ack_frequency_config: Option<AckFrequencyConfig>,
    pub(crate) max_outgoing_bytes_per_second: Option<u64>,

    // Connection maintenance and buffering
    pub(crate) persistent_congestion_threshold: u32,
    pub(crate) keep_alive_interval: Option<Duration>,
    pub(crate) crypto_buffer_size: usize,
    pub(crate) allow_spin: bool,
    pub(crate) datagram_receive_buffer_size: Option<usize>,
    pub(crate) datagram_send_buffer_size: usize,
    // Test-only knob: disables the deliberate skipping of packet numbers
    #[cfg(test)]
    pub(crate) deterministic_packet_numbers: bool,

    pub(crate) congestion_controller_factory: Arc<dyn congestion::ControllerFactory + Send + Sync>,

    pub(crate) enable_segmentation_offload: bool,

    pub(crate) address_discovery_role: address_discovery::Role,

    // `None` means the multipath extension is disabled
    pub(crate) max_concurrent_multipath_paths: Option<NonZeroU32>,

    pub(crate) default_path_max_idle_timeout: Option<Duration>,
    pub(crate) default_path_keep_alive_interval: Option<Duration>,

    // `None` means NAT traversal is disabled
    pub(crate) max_remote_nat_traversal_addresses: Option<NonZeroU8>,

    #[cfg(feature = "qlog")]
    pub(crate) qlog_factory: Option<Arc<dyn QlogFactory>>,
}
90
impl TransportConfig {
    /// Maximum number of incoming bidirectional streams that may be open concurrently
    ///
    /// Must be nonzero for the peer to open any bidirectional streams.
    ///
    /// Worst-case memory use is directly proportional to `max_concurrent_bidi_streams *
    /// stream_receive_window`, with an upper bound proportional to `receive_window`.
    pub fn max_concurrent_bidi_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_bidi_streams = value;
        self
    }

    /// Variant of `max_concurrent_bidi_streams` affecting unidirectional streams
    pub fn max_concurrent_uni_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_uni_streams = value;
        self
    }

    /// Maximum duration of inactivity to accept before timing out the connection.
    ///
    /// The true idle timeout is the minimum of this and the peer's own max idle timeout. `None`
    /// represents an infinite timeout. Defaults to 30 seconds.
    ///
    /// **WARNING**: If a peer or its network path malfunctions or acts maliciously, an infinite
    /// idle timeout can result in permanently hung futures!
    ///
    /// ```
    /// # use std::{convert::TryInto, time::Duration};
    /// # use noq_proto::{TransportConfig, VarInt, VarIntBoundsExceeded};
    /// # fn main() -> Result<(), VarIntBoundsExceeded> {
    /// let mut config = TransportConfig::default();
    ///
    /// // Set the idle timeout as `VarInt`-encoded milliseconds
    /// config.max_idle_timeout(Some(VarInt::from_u32(10_000).into()));
    ///
    /// // Set the idle timeout as a `Duration`
    /// config.max_idle_timeout(Some(Duration::from_secs(10).try_into()?));
    /// # Ok(())
    /// # }
    /// ```
    pub fn max_idle_timeout(&mut self, value: Option<IdleTimeout>) -> &mut Self {
        // Store the raw inner `VarInt` (`.0`) rather than the `IdleTimeout` wrapper.
        self.max_idle_timeout = value.map(|t| t.0);
        self
    }

    /// Maximum number of bytes the peer may transmit without acknowledgement on any one stream
    /// before becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Setting this smaller than `receive_window` helps ensure that a single
    /// stream doesn't monopolize receive buffers, which may otherwise occur if the application
    /// chooses not to read from a large stream for a time while still requiring data on other
    /// streams.
    pub fn stream_receive_window(&mut self, value: VarInt) -> &mut Self {
        self.stream_receive_window = value;
        self
    }

    /// Maximum number of bytes the peer may transmit across all streams of a connection before
    /// becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Larger values can be useful to allow maximum throughput within a
    /// stream while another is blocked.
    pub fn receive_window(&mut self, value: VarInt) -> &mut Self {
        self.receive_window = value;
        self
    }

    /// Maximum number of bytes to transmit to a peer without acknowledgment
    ///
    /// Provides an upper bound on memory when communicating with peers that issue large amounts of
    /// flow control credit. Endpoints that wish to handle large numbers of connections robustly
    /// should take care to set this low enough to guarantee memory exhaustion does not occur if
    /// every connection uses the entire window.
    pub fn send_window(&mut self, value: u64) -> &mut Self {
        self.send_window = value;
        self
    }

    /// Whether to implement fair queuing for send streams having the same priority.
    ///
    /// When enabled, connections schedule data from outgoing streams having the same priority in a
    /// round-robin fashion. When disabled, streams are scheduled in the order they are written to.
    ///
    /// Note that this only affects streams with the same priority. Higher priority streams always
    /// take precedence over lower priority streams.
    ///
    /// Disabling fairness can reduce fragmentation and protocol overhead for workloads that use
    /// many small streams.
    pub fn send_fairness(&mut self, value: bool) -> &mut Self {
        self.send_fairness = value;
        self
    }

    /// Maximum reordering in packet number space before FACK style loss detection considers a
    /// packet lost. Should not be less than 3, per RFC5681.
    pub fn packet_threshold(&mut self, value: u32) -> &mut Self {
        self.packet_threshold = value;
        self
    }

    /// Maximum reordering in time space before time based loss detection considers a packet lost,
    /// as a factor of RTT
    pub fn time_threshold(&mut self, value: f32) -> &mut Self {
        self.time_threshold = value;
        self
    }

    /// The RTT used before an RTT sample is taken
    pub fn initial_rtt(&mut self, value: Duration) -> &mut Self {
        self.initial_rtt = value;
        self
    }

    /// The initial value to be used as the maximum UDP payload size before running MTU discovery
    /// (see [`TransportConfig::mtu_discovery_config`]).
    ///
    /// Must be at least 1200, which is the default, and known to be safe for typical internet
    /// applications. Larger values are more efficient, but increase the risk of packet loss due to
    /// exceeding the network path's IP MTU. If the provided value is higher than what the network
    /// path actually supports, packet loss will eventually trigger black hole detection and bring
    /// it down to [`TransportConfig::min_mtu`].
    pub fn initial_mtu(&mut self, value: u16) -> &mut Self {
        // Silently clamp up to the protocol minimum documented above instead of erroring.
        self.initial_mtu = value.max(INITIAL_MTU);
        self
    }

    /// The effective initial MTU for a connection.
    ///
    /// Returns `initial_mtu` raised to at least `min_mtu`: the setters do not enforce the
    /// documented `min_mtu <= initial_mtu` relationship, so it is restored here.
    pub(crate) fn get_initial_mtu(&self) -> u16 {
        self.initial_mtu.max(self.min_mtu)
    }

    /// The maximum UDP payload size guaranteed to be supported by the network.
    ///
    /// Must be at least 1200, which is the default, and lower than or equal to
    /// [`TransportConfig::initial_mtu`].
    ///
    /// Real-world MTUs can vary according to ISP, VPN, and properties of intermediate network links
    /// outside of either endpoint's control. Extreme care should be used when raising this value
    /// outside of private networks where these factors are fully controlled. If the provided value
    /// is higher than what the network path actually supports, the result will be unpredictable and
    /// catastrophic packet loss, without a possibility of repair. Prefer
    /// [`TransportConfig::initial_mtu`] together with
    /// [`TransportConfig::mtu_discovery_config`] to set a maximum UDP payload size that robustly
    /// adapts to the network.
    pub fn min_mtu(&mut self, value: u16) -> &mut Self {
        // Silently clamp up to the protocol minimum documented above instead of erroring.
        self.min_mtu = value.max(INITIAL_MTU);
        self
    }

    /// Specifies the MTU discovery config (see [`MtuDiscoveryConfig`] for details).
    ///
    /// Enabled by default.
    pub fn mtu_discovery_config(&mut self, value: Option<MtuDiscoveryConfig>) -> &mut Self {
        self.mtu_discovery_config = value;
        self
    }

    /// Pad UDP datagrams carrying application data to current maximum UDP payload size
    ///
    /// Disabled by default. UDP datagrams containing loss probes are exempt from padding.
    ///
    /// Enabling this helps mitigate traffic analysis by network observers, but it increases
    /// bandwidth usage. Without this mitigation precise plain text size of application datagrams as
    /// well as the total size of stream write bursts can be inferred by observers under certain
    /// conditions. This analysis requires either an uncongested connection or application datagrams
    /// too large to be coalesced.
    pub fn pad_to_mtu(&mut self, value: bool) -> &mut Self {
        self.pad_to_mtu = value;
        self
    }

    /// Specifies the ACK frequency config (see [`AckFrequencyConfig`] for details)
    ///
    /// The provided configuration will be ignored if the peer does not support the acknowledgement
    /// frequency QUIC extension.
    ///
    /// Defaults to `None`, which disables controlling the peer's acknowledgement frequency. Even
    /// if set to `None`, the local side still supports the acknowledgement frequency QUIC
    /// extension and may use it in other ways.
    pub fn ack_frequency_config(&mut self, value: Option<AckFrequencyConfig>) -> &mut Self {
        self.ack_frequency_config = value;
        self
    }

    /// Configures an outbound rate limit (in bytes per second) for each connection.
    ///
    /// Defaults to `None`, which disables rate limiting.
    pub fn max_outgoing_bytes_per_second(&mut self, value: Option<u64>) -> &mut Self {
        self.max_outgoing_bytes_per_second = value;
        self
    }

    /// Number of consecutive PTOs after which network is considered to be experiencing persistent congestion.
    pub fn persistent_congestion_threshold(&mut self, value: u32) -> &mut Self {
        self.persistent_congestion_threshold = value;
        self
    }

    /// Period of inactivity before sending a keep-alive packet
    ///
    /// Keep-alive packets prevent an inactive but otherwise healthy connection from timing out.
    ///
    /// `None` to disable, which is the default. Only one side of any given connection needs keep-alive
    /// enabled for the connection to be preserved. Must be set lower than the idle_timeout of both
    /// peers to be effective.
    pub fn keep_alive_interval(&mut self, value: Option<Duration>) -> &mut Self {
        self.keep_alive_interval = value;
        self
    }

    /// Maximum quantity of out-of-order crypto layer data to buffer
    pub fn crypto_buffer_size(&mut self, value: usize) -> &mut Self {
        self.crypto_buffer_size = value;
        self
    }

    /// Whether the implementation is permitted to set the spin bit on this connection
    ///
    /// This allows passive observers to easily judge the round trip time of a connection, which can
    /// be useful for network administration but sacrifices a small amount of privacy.
    pub fn allow_spin(&mut self, value: bool) -> &mut Self {
        self.allow_spin = value;
        self
    }

    /// Maximum number of incoming application datagram bytes to buffer, or None to disable
    /// incoming datagrams
    ///
    /// The peer is forbidden to send single datagrams larger than this size. If the aggregate size
    /// of all datagrams that have been received from the peer but not consumed by the application
    /// exceeds this value, old datagrams are dropped until it is no longer exceeded.
    pub fn datagram_receive_buffer_size(&mut self, value: Option<usize>) -> &mut Self {
        self.datagram_receive_buffer_size = value;
        self
    }

    /// Maximum number of outgoing application datagram bytes to buffer
    ///
    /// While datagrams are sent ASAP, it is possible for an application to generate data faster
    /// than the link, or even the underlying hardware, can transmit them. This limits the amount of
    /// memory that may be consumed in that case. When the send buffer is full and a new datagram is
    /// sent, older datagrams are dropped until sufficient space is available.
    pub fn datagram_send_buffer_size(&mut self, value: usize) -> &mut Self {
        self.datagram_send_buffer_size = value;
        self
    }

    /// Whether to force every packet number to be used
    ///
    /// By default, packet numbers are occasionally skipped to ensure peers aren't ACKing packets
    /// before they see them.
    #[cfg(test)]
    pub(crate) fn deterministic_packet_numbers(&mut self, enabled: bool) -> &mut Self {
        self.deterministic_packet_numbers = enabled;
        self
    }

    /// How to construct new `congestion::Controller`s
    ///
    /// Typically the refcounted configuration of a `congestion::Controller`,
    /// e.g. a `congestion::NewRenoConfig`.
    ///
    /// # Example
    /// ```
    /// # use noq_proto::*; use std::sync::Arc;
    /// let mut config = TransportConfig::default();
    /// config.congestion_controller_factory(Arc::new(congestion::NewRenoConfig::default()));
    /// ```
    pub fn congestion_controller_factory(
        &mut self,
        factory: Arc<dyn congestion::ControllerFactory + Send + Sync + 'static>,
    ) -> &mut Self {
        self.congestion_controller_factory = factory;
        self
    }

    /// Whether to use "Generic Segmentation Offload" to accelerate transmits, when supported by the
    /// environment
    ///
    /// Defaults to `true`.
    ///
    /// GSO dramatically reduces CPU consumption when sending large numbers of packets with the same
    /// headers, such as when transmitting bulk data on a connection. However, it is not supported
    /// by all network interface drivers or packet inspection tools. `noq-udp` will attempt to
    /// disable GSO automatically when unavailable, but this can lead to spurious packet loss at
    /// startup, temporarily degrading performance.
    pub fn enable_segmentation_offload(&mut self, enabled: bool) -> &mut Self {
        self.enable_segmentation_offload = enabled;
        self
    }

    /// Whether to send observed address reports to peers.
    ///
    /// This will aid peers in inferring their reachable address, which in most NATd networks
    /// will not be easily available to them.
    pub fn send_observed_address_reports(&mut self, enabled: bool) -> &mut Self {
        self.address_discovery_role.send_reports_to_peers(enabled);
        self
    }

    /// Whether to receive observed address reports from other peers.
    ///
    /// Peers with the address discovery extension enabled that are willing to provide observed
    /// address reports will do so if this transport parameter is set. In general, observed address
    /// reports cannot be trusted. This, however, can aid the current endpoint in inferring its
    /// reachable address, which in most NATd networks will not be easily available.
    pub fn receive_observed_address_reports(&mut self, enabled: bool) -> &mut Self {
        self.address_discovery_role
            .receive_reports_from_peers(enabled);
        self
    }

    /// Enables the Multipath Extension for QUIC.
    ///
    /// Setting this to any nonzero value will enable the Multipath Extension for QUIC,
    /// <https://datatracker.ietf.org/doc/draft-ietf-quic-multipath/>.
    ///
    /// The value provided specifies the maximum number of paths this endpoint may open
    /// concurrently when multipath is negotiated. For any path to be opened, the remote must
    /// enable multipath as well.
    pub fn max_concurrent_multipath_paths(&mut self, max_concurrent: u32) -> &mut Self {
        // `NonZeroU32::new` maps 0 to `None`, so passing 0 disables multipath.
        self.max_concurrent_multipath_paths = NonZeroU32::new(max_concurrent);
        self
    }

    /// Sets a default per-path maximum idle timeout
    ///
    /// If the path is idle for this long the path will be abandoned. Bear in mind this will
    /// interact with the [`TransportConfig::max_idle_timeout`], if the last path is
    /// abandoned the entire connection will be closed.
    ///
    /// You can also change this using [`Connection::set_path_max_idle_timeout`] for
    /// existing paths.
    ///
    /// [`Connection::set_path_max_idle_timeout`]: crate::Connection::set_path_max_idle_timeout
    pub fn default_path_max_idle_timeout(&mut self, timeout: Option<Duration>) -> &mut Self {
        self.default_path_max_idle_timeout = timeout;
        self
    }

    /// Sets a default per-path keep alive interval
    ///
    /// Note that this does not interact with the connection-wide
    /// [`TransportConfig::keep_alive_interval`].  This setting will keep this path active,
    /// [`TransportConfig::keep_alive_interval`] will keep the connection active, with no
    /// control over which path is used for this.
    ///
    /// You can also change this using [`Connection::set_path_keep_alive_interval`] for
    /// existing paths.
    ///
    /// [`Connection::set_path_keep_alive_interval`]: crate::Connection::set_path_keep_alive_interval
    pub fn default_path_keep_alive_interval(&mut self, interval: Option<Duration>) -> &mut Self {
        self.default_path_keep_alive_interval = interval;
        self
    }

    /// Get the initial max [`crate::PathId`] this endpoint allows.
    ///
    /// Returns `None` if multipath is disabled.
    pub(crate) fn get_initial_max_path_id(&self) -> Option<crate::PathId> {
        self.max_concurrent_multipath_paths
            // a max_concurrent_multipath_paths value of 1 only allows the first path, which
            // has id 0
            .map(|nonzero_concurrent| nonzero_concurrent.get() - 1)
            .map(Into::into)
    }

    /// Sets the maximum number of nat traversal addresses this endpoint allows the remote to
    /// advertise
    ///
    /// Setting this to any nonzero value will enable n0's nat traversal protocol, loosely based on
    /// the Nat Traversal Extension for QUIC, see
    /// <https://www.ietf.org/archive/id/draft-seemann-quic-nat-traversal-02.html>
    ///
    /// This implementation expects the multipath extension to be enabled as well. If not yet
    /// enabled via [`Self::max_concurrent_multipath_paths`], a default value of
    /// [`DEFAULT_CONCURRENT_MULTIPATH_PATHS_WHEN_ENABLED`] will be used.
    pub fn set_max_remote_nat_traversal_addresses(&mut self, max_addresses: u8) -> &mut Self {
        // `NonZeroU8::new` maps 0 to `None`, so passing 0 disables NAT traversal.
        self.max_remote_nat_traversal_addresses = NonZeroU8::new(max_addresses);
        // NAT traversal requires multipath; enable it with the documented default when the
        // user has not configured it explicitly.
        if max_addresses != 0 && self.max_concurrent_multipath_paths.is_none() {
            self.max_concurrent_multipath_paths(
                DEFAULT_CONCURRENT_MULTIPATH_PATHS_WHEN_ENABLED_.get(),
            );
        }
        self
    }

    /// Configures qlog capturing by setting a [`QlogFactory`].
    ///
    /// This assigns a [`QlogFactory`] that produces qlog capture configurations for
    /// individual connections.
    #[cfg(feature = "qlog")]
    pub fn qlog_factory(&mut self, factory: Arc<dyn QlogFactory>) -> &mut Self {
        self.qlog_factory = Some(factory);
        self
    }

    /// Configures qlog capturing through the `QLOGDIR` environment variable.
    ///
    /// This uses [`QlogFileFactory::from_env`] to create a factory to write qlog traces
    /// into the directory set through the `QLOGDIR` environment variable.
    ///
    /// If `QLOGDIR` is not set, no traces will be written. If `QLOGDIR` is set to a path
    /// that does not exist, it will be created.
    ///
    /// The files will be prefixed with `prefix`.
    #[cfg(feature = "qlog")]
    pub fn qlog_from_env(&mut self, prefix: &str) -> &mut Self {
        self.qlog_factory(Arc::new(QlogFileFactory::from_env().with_prefix(prefix)))
    }

    /// Configures qlog capturing into a directory.
    ///
    /// This uses [`QlogFileFactory`] to create a factory to write qlog traces into
    /// the specified directory.  The files will be prefixed with `prefix`.
    #[cfg(feature = "qlog")]
    pub fn qlog_from_path(&mut self, path: impl AsRef<Path>, prefix: &str) -> &mut Self {
        self.qlog_factory(Arc::new(
            QlogFileFactory::new(path.as_ref().to_owned()).with_prefix(prefix),
        ))
    }

    /// Builds the qlog sink used by a new connection.
    ///
    /// Returns an inert default sink when the `qlog` feature is disabled, when no factory
    /// is configured, or when the factory declines to trace this connection.
    pub(crate) fn create_qlog_sink(
        &self,
        side: Side,
        remote: SocketAddr,
        initial_dst_cid: ConnectionId,
        now: Instant,
    ) -> QlogSink {
        #[cfg(not(feature = "qlog"))]
        let sink = {
            // Arguments are only consumed by the qlog feature; suppress unused warnings.
            let _ = (side, remote, initial_dst_cid, now);
            QlogSink::default()
        };

        #[cfg(feature = "qlog")]
        let sink = {
            if let Some(config) = self
                .qlog_factory
                .as_ref()
                .and_then(|factory| factory.for_connection(side, remote, initial_dst_cid, now))
            {
                QlogSink::new(config, initial_dst_cid, side, now)
            } else {
                QlogSink::default()
            }
        };

        sink
    }
}
543
impl Default for TransportConfig {
    fn default() -> Self {
        // Defaults are tuned for a 100 Mbps link with a 100 ms RTT (see the struct docs).
        const EXPECTED_RTT: u32 = 100; // ms
        const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s
        // Window size needed to avoid pipeline stalls: the bandwidth-delay product
        // (bytes per ms times expected RTT in ms).
        const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT;

        Self {
            max_concurrent_bidi_streams: 100u32.into(),
            max_concurrent_uni_streams: 100u32.into(),
            // 30 second default recommended by RFC 9308 § 3.2, stored as VarInt milliseconds
            max_idle_timeout: Some(VarInt(30_000)),
            stream_receive_window: STREAM_RWND.into(),
            receive_window: VarInt::MAX,
            // 8x the per-stream window, widened to u64
            send_window: (8 * STREAM_RWND).into(),
            send_fairness: true,

            packet_threshold: 3,
            time_threshold: 9.0 / 8.0,
            initial_rtt: Duration::from_millis(333), // per spec, intentionally distinct from EXPECTED_RTT
            initial_mtu: INITIAL_MTU,
            min_mtu: INITIAL_MTU,
            mtu_discovery_config: Some(MtuDiscoveryConfig::default()),
            pad_to_mtu: false,
            ack_frequency_config: None,
            max_outgoing_bytes_per_second: None,

            persistent_congestion_threshold: 3,
            keep_alive_interval: None,
            crypto_buffer_size: 16 * 1024,
            allow_spin: true,
            datagram_receive_buffer_size: Some(STREAM_RWND as usize),
            datagram_send_buffer_size: 1024 * 1024,
            #[cfg(test)]
            deterministic_packet_numbers: false,

            congestion_controller_factory: Arc::new(congestion::CubicConfig::default()),

            enable_segmentation_offload: true,

            address_discovery_role: address_discovery::Role::default(),

            // disabled multipath by default
            max_concurrent_multipath_paths: None,
            default_path_max_idle_timeout: None,
            default_path_keep_alive_interval: None,

            // nat traversal disabled by default
            max_remote_nat_traversal_addresses: None,

            #[cfg(feature = "qlog")]
            qlog_factory: None,
        }
    }
}
600
impl fmt::Debug for TransportConfig {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Exhaustive destructuring (no `..`) so that adding a struct field without
        // updating this impl becomes a compile error.
        let Self {
            max_concurrent_bidi_streams,
            max_concurrent_uni_streams,
            max_idle_timeout,
            stream_receive_window,
            receive_window,
            send_window,
            send_fairness,
            packet_threshold,
            time_threshold,
            initial_rtt,
            initial_mtu,
            min_mtu,
            mtu_discovery_config,
            pad_to_mtu,
            ack_frequency_config,
            max_outgoing_bytes_per_second,
            persistent_congestion_threshold,
            keep_alive_interval,
            crypto_buffer_size,
            allow_spin,
            datagram_receive_buffer_size,
            datagram_send_buffer_size,
            // test-only field, intentionally omitted from output
            #[cfg(test)]
                deterministic_packet_numbers: _,
            congestion_controller_factory: _,
            enable_segmentation_offload,
            address_discovery_role,
            max_concurrent_multipath_paths,
            default_path_max_idle_timeout,
            default_path_keep_alive_interval,
            max_remote_nat_traversal_addresses,
            #[cfg(feature = "qlog")]
            qlog_factory,
        } = self;
        let mut s = fmt.debug_struct("TransportConfig");

        s.field("max_concurrent_bidi_streams", max_concurrent_bidi_streams)
            .field("max_concurrent_uni_streams", max_concurrent_uni_streams)
            .field("max_idle_timeout", max_idle_timeout)
            .field("stream_receive_window", stream_receive_window)
            .field("receive_window", receive_window)
            .field("send_window", send_window)
            .field("send_fairness", send_fairness)
            .field("packet_threshold", packet_threshold)
            .field("time_threshold", time_threshold)
            .field("initial_rtt", initial_rtt)
            .field("initial_mtu", initial_mtu)
            .field("min_mtu", min_mtu)
            .field("mtu_discovery_config", mtu_discovery_config)
            .field("pad_to_mtu", pad_to_mtu)
            .field("ack_frequency_config", ack_frequency_config)
            .field(
                "max_outgoing_bytes_per_second",
                max_outgoing_bytes_per_second,
            )
            .field(
                "persistent_congestion_threshold",
                persistent_congestion_threshold,
            )
            .field("keep_alive_interval", keep_alive_interval)
            .field("crypto_buffer_size", crypto_buffer_size)
            .field("allow_spin", allow_spin)
            .field("datagram_receive_buffer_size", datagram_receive_buffer_size)
            .field("datagram_send_buffer_size", datagram_send_buffer_size)
            // congestion_controller_factory not debug
            .field("enable_segmentation_offload", enable_segmentation_offload)
            .field("address_discovery_role", address_discovery_role)
            .field(
                "max_concurrent_multipath_paths",
                max_concurrent_multipath_paths,
            )
            .field(
                "default_path_max_idle_timeout",
                default_path_max_idle_timeout,
            )
            .field(
                "default_path_keep_alive_interval",
                default_path_keep_alive_interval,
            )
            .field(
                "max_remote_nat_traversal_addresses",
                max_remote_nat_traversal_addresses,
            );
        // Only report whether a factory is set; the trait object itself is not Debug.
        #[cfg(feature = "qlog")]
        s.field("qlog_factory", &qlog_factory.is_some());

        s.finish_non_exhaustive()
    }
}
693
694/// Parameters for controlling the peer's acknowledgement frequency
695///
696/// The parameters provided in this config will be sent to the peer at the beginning of the
697/// connection, so it can take them into account when sending acknowledgements (see each parameter's
698/// description for details on how it influences acknowledgement frequency).
699///
700/// noq's implementation follows the fourth draft of the
701/// [QUIC Acknowledgement Frequency extension](https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency-04).
702/// The defaults produce behavior slightly different than the behavior without this extension,
703/// because they change the way reordered packets are handled (see
704/// [`AckFrequencyConfig::reordering_threshold`] for details).
#[derive(Clone, Debug)]
pub struct AckFrequencyConfig {
    // Number of ack-eliciting packets the peer may receive before it must send an ACK (default: 1)
    pub(crate) ack_eliciting_threshold: VarInt,
    // Maximum delay we request before the peer sends an ACK; `None` leaves the peer's own
    // `max_ack_delay` transport parameter in effect
    pub(crate) max_ack_delay: Option<Duration>,
    // Number of out-of-order packets that triggers an immediate ACK from the peer (default: 2)
    pub(crate) reordering_threshold: VarInt,
}
711
712impl AckFrequencyConfig {
713    /// The ack-eliciting threshold we will request the peer to use
714    ///
715    /// This threshold represents the number of ack-eliciting packets an endpoint may receive
716    /// without immediately sending an ACK.
717    ///
718    /// The remote peer should send at least one ACK frame when more than this number of
719    /// ack-eliciting packets have been received. A value of 0 results in a receiver immediately
720    /// acknowledging every ack-eliciting packet.
721    ///
722    /// Defaults to 1, which sends ACK frames for every other ack-eliciting packet.
723    pub fn ack_eliciting_threshold(&mut self, value: VarInt) -> &mut Self {
724        self.ack_eliciting_threshold = value;
725        self
726    }
727
728    /// The `max_ack_delay` we will request the peer to use
729    ///
730    /// This parameter represents the maximum amount of time that an endpoint waits before sending
731    /// an ACK when the ack-eliciting threshold hasn't been reached.
732    ///
733    /// The effective `max_ack_delay` will be clamped to be at least the peer's `min_ack_delay`
734    /// transport parameter, and at most the greater of the current path RTT or 25ms.
735    ///
736    /// Defaults to `None`, in which case the peer's original `max_ack_delay` will be used, as
737    /// obtained from its transport parameters.
738    pub fn max_ack_delay(&mut self, value: Option<Duration>) -> &mut Self {
739        self.max_ack_delay = value;
740        self
741    }
742
743    /// The reordering threshold we will request the peer to use
744    ///
745    /// This threshold represents the amount of out-of-order packets that will trigger an endpoint
746    /// to send an ACK, without waiting for `ack_eliciting_threshold` to be exceeded or for
747    /// `max_ack_delay` to be elapsed.
748    ///
749    /// A value of 0 indicates out-of-order packets do not elicit an immediate ACK. A value of 1
750    /// immediately acknowledges any packets that are received out of order (this is also the
751    /// behavior when the extension is disabled).
752    ///
753    /// It is recommended to set this value to [`TransportConfig::packet_threshold`] minus one.
754    /// Since the default value for [`TransportConfig::packet_threshold`] is 3, this value defaults
755    /// to 2.
756    pub fn reordering_threshold(&mut self, value: VarInt) -> &mut Self {
757        self.reordering_threshold = value;
758        self
759    }
760}
761
762impl Default for AckFrequencyConfig {
763    fn default() -> Self {
764        Self {
765            ack_eliciting_threshold: VarInt(1),
766            max_ack_delay: None,
767            reordering_threshold: VarInt(2),
768        }
769    }
770}
771
772/// Parameters governing MTU discovery.
773///
774/// # The why of MTU discovery
775///
776/// By design, QUIC ensures during the handshake that the network path between the client and the
777/// server is able to transmit unfragmented UDP packets with a body of 1200 bytes. In other words,
778/// once the connection is established, we know that the network path's maximum transmission unit
779/// (MTU) is of at least 1200 bytes (plus IP and UDP headers). Because of this, a QUIC endpoint can
780/// split outgoing data in packets of 1200 bytes, with confidence that the network will be able to
781/// deliver them (if the endpoint were to send bigger packets, they could prove too big and end up
782/// being dropped).
783///
/// There is, however, a significant overhead associated with sending a packet. If the same
785/// information can be sent in fewer packets, that results in higher throughput. The amount of
786/// packets that need to be sent is inversely proportional to the MTU: the higher the MTU, the
787/// bigger the packets that can be sent, and the fewer packets that are needed to transmit a given
788/// amount of bytes.
789///
790/// Most networks have an MTU higher than 1200. Through MTU discovery, endpoints can detect the
791/// path's MTU and, if it turns out to be higher, start sending bigger packets.
792///
793/// # MTU discovery internals
794///
795/// noq implements MTU discovery through DPLPMTUD (Datagram Packetization Layer Path MTU
796/// Discovery), described in [section 14.3 of RFC
797/// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-14.3). This method consists of sending
798/// QUIC packets padded to a particular size (called PMTU probes), and waiting to see if the remote
799/// peer responds with an ACK. If an ACK is received, that means the probe arrived at the remote
800/// peer, which in turn means that the network path's MTU is of at least the packet's size. If the
801/// probe is lost, it is sent another 2 times before concluding that the MTU is lower than the
802/// packet's size.
803///
804/// MTU discovery runs on a schedule (e.g. every 600 seconds) specified through
805/// [`MtuDiscoveryConfig::interval`]. The first run happens right after the handshake, and
806/// subsequent discoveries are scheduled to run when the interval has elapsed, starting from the
807/// last time when MTU discovery completed.
808///
809/// Since the search space for MTUs is quite big (the smallest possible MTU is 1200, and the highest
810/// is 65527), noq performs a binary search to keep the number of probes as low as possible. The
811/// lower bound of the search is equal to [`TransportConfig::initial_mtu`] in the
812/// initial MTU discovery run, and is equal to the currently discovered MTU in subsequent runs. The
813/// upper bound is determined by the minimum of [`MtuDiscoveryConfig::upper_bound`] and the
814/// `max_udp_payload_size` transport parameter received from the peer during the handshake.
815///
816/// # Black hole detection
817///
818/// If, at some point, the network path no longer accepts packets of the detected size, packet loss
819/// will eventually trigger black hole detection and reset the detected MTU to 1200. In that case,
820/// MTU discovery will be triggered after [`MtuDiscoveryConfig::black_hole_cooldown`] (ignoring the
821/// timer that was set based on [`MtuDiscoveryConfig::interval`]).
822///
823/// # Interaction between peers
824///
825/// There is no guarantee that the MTU on the path between A and B is the same as the MTU of the
826/// path between B and A. Therefore, each peer in the connection needs to run MTU discovery
827/// independently in order to discover the path's MTU.
#[derive(Clone, Debug)]
pub struct MtuDiscoveryConfig {
    // Time between the completion of one discovery run and the start of the next (default: 600s)
    pub(crate) interval: Duration,
    // Largest max UDP payload size probed for; the setter clamps this to 65527 (default: 1452)
    pub(crate) upper_bound: u16,
    // Minimum MTU change required to keep the discovery phase going (default: 20)
    pub(crate) minimum_change: u16,
    // Wait after a detected black hole before probing again (default: 60s)
    pub(crate) black_hole_cooldown: Duration,
}
835
836impl MtuDiscoveryConfig {
837    /// Specifies the time to wait after completing MTU discovery before starting a new MTU
838    /// discovery run.
839    ///
840    /// Defaults to 600 seconds, as recommended by [RFC
841    /// 8899](https://www.rfc-editor.org/rfc/rfc8899).
842    pub fn interval(&mut self, value: Duration) -> &mut Self {
843        self.interval = value;
844        self
845    }
846
847    /// Specifies the upper bound to the max UDP payload size that MTU discovery will search for.
848    ///
849    /// Defaults to 1452, to stay within Ethernet's MTU when using IPv4 and IPv6. The highest
850    /// allowed value is 65527, which corresponds to the maximum permitted UDP payload on IPv6.
851    ///
852    /// It is safe to use an arbitrarily high upper bound, regardless of the network path's MTU. The
853    /// only drawback is that MTU discovery might take more time to finish.
854    pub fn upper_bound(&mut self, value: u16) -> &mut Self {
855        self.upper_bound = value.min(MAX_UDP_PAYLOAD);
856        self
857    }
858
859    /// Specifies the amount of time that MTU discovery should wait after a black hole was detected
860    /// before running again. Defaults to one minute.
861    ///
862    /// Black hole detection can be spuriously triggered in case of congestion, so it makes sense to
863    /// try MTU discovery again after a short period of time.
864    pub fn black_hole_cooldown(&mut self, value: Duration) -> &mut Self {
865        self.black_hole_cooldown = value;
866        self
867    }
868
869    /// Specifies the minimum MTU change to stop the MTU discovery phase.
870    /// Defaults to 20.
871    pub fn minimum_change(&mut self, value: u16) -> &mut Self {
872        self.minimum_change = value;
873        self
874    }
875}
876
877impl Default for MtuDiscoveryConfig {
878    fn default() -> Self {
879        Self {
880            interval: Duration::from_secs(600),
881            upper_bound: 1452,
882            black_hole_cooldown: Duration::from_secs(60),
883            minimum_change: 20,
884        }
885    }
886}
887
888/// Maximum duration of inactivity to accept before timing out the connection
889///
890/// This wraps an underlying [`VarInt`], representing the duration in milliseconds. Values can be
891/// constructed by converting directly from `VarInt`, or using `TryFrom<Duration>`.
892///
893/// ```
894/// # use std::{convert::TryFrom, time::Duration};
895/// # use noq_proto::{IdleTimeout, VarIntBoundsExceeded, VarInt};
896/// # fn main() -> Result<(), VarIntBoundsExceeded> {
897/// // A `VarInt`-encoded value in milliseconds
898/// let timeout = IdleTimeout::from(VarInt::from_u32(10_000));
899///
900/// // Try to convert a `Duration` into a `VarInt`-encoded timeout
901/// let timeout = IdleTimeout::try_from(Duration::from_secs(10))?;
902/// # Ok(())
903/// # }
904/// ```
#[derive(Default, Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct IdleTimeout(VarInt); // inner value is the timeout in milliseconds, `VarInt`-encoded
907
908impl From<VarInt> for IdleTimeout {
909    fn from(inner: VarInt) -> Self {
910        Self(inner)
911    }
912}
913
914impl std::convert::TryFrom<Duration> for IdleTimeout {
915    type Error = VarIntBoundsExceeded;
916
917    fn try_from(timeout: Duration) -> Result<Self, Self::Error> {
918        let inner = VarInt::try_from(timeout.as_millis())?;
919        Ok(Self(inner))
920    }
921}
922
923impl fmt::Debug for IdleTimeout {
924    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
925        self.0.fmt(f)
926    }
927}