iroh_quinn_proto/config/transport.rs
#[cfg(feature = "qlog")]
use std::path::Path;
use std::{
    fmt,
    net::SocketAddr,
    num::{NonZeroU8, NonZeroU32},
    sync::Arc,
};

use crate::{
    ConnectionId, Duration, INITIAL_MTU, Instant, MAX_UDP_PAYLOAD, Side, VarInt,
    VarIntBoundsExceeded, address_discovery, congestion, connection::qlog::QlogSink,
};
#[cfg(feature = "qlog")]
use crate::{QlogFactory, QlogFileFactory};

/// When multipath is required and has not been explicitly enabled, this value will be used for
/// [`TransportConfig::max_concurrent_multipath_paths`].
const DEFAULT_CONCURRENT_MULTIPATH_PATHS_WHEN_ENABLED_: NonZeroU32 = {
    match NonZeroU32::new(12) {
        Some(v) => v,
        None => panic!("to enable multipath this must be positive, which clearly it is"),
    }
};

/// When multipath is required and has not been explicitly enabled, this value will be used for
/// [`TransportConfig::max_concurrent_multipath_paths`].
#[cfg(doc)]
pub const DEFAULT_CONCURRENT_MULTIPATH_PATHS_WHEN_ENABLED: NonZeroU32 =
    DEFAULT_CONCURRENT_MULTIPATH_PATHS_WHEN_ENABLED_;

/// Parameters governing the core QUIC state machine
///
/// Default values should be suitable for most internet applications. Application protocols which
/// forbid remotely-initiated streams should set `max_concurrent_bidi_streams` and
/// `max_concurrent_uni_streams` to zero.
///
/// In some cases, performance or resource requirements can be improved by tuning these values to
/// suit a particular application and/or network connection. In particular, data window sizes can be
/// tuned for a particular expected round trip time, link capacity, and memory availability. Tuning
/// for higher bandwidths and latencies increases worst-case memory consumption, but does not impair
/// performance at lower bandwidths and latencies. The default configuration is tuned for a 100Mbps
/// link with a 100ms round trip time.
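///
/// As an illustrative sketch of such tuning (the link characteristics below are assumptions for
/// the example, not recommendations), window sizes can be derived from the bandwidth-delay
/// product:
///
/// ```
/// # use iroh_quinn_proto::{TransportConfig, VarInt};
/// // Assumed link characteristics for this example only
/// const BANDWIDTH: u32 = 125_000_000; // 1 Gbps, in bytes/s
/// const RTT_MS: u32 = 50;
/// // Bandwidth-delay product: the data in flight needed to keep the link busy
/// let bdp: u32 = BANDWIDTH / 1000 * RTT_MS;
///
/// let mut config = TransportConfig::default();
/// config
///     .stream_receive_window(VarInt::from_u32(bdp))
///     .send_window(u64::from(8 * bdp));
/// ```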
#[derive(Clone)]
pub struct TransportConfig {
    pub(crate) max_concurrent_bidi_streams: VarInt,
    pub(crate) max_concurrent_uni_streams: VarInt,
    pub(crate) max_idle_timeout: Option<VarInt>,
    pub(crate) stream_receive_window: VarInt,
    pub(crate) receive_window: VarInt,
    pub(crate) send_window: u64,
    pub(crate) send_fairness: bool,

    pub(crate) packet_threshold: u32,
    pub(crate) time_threshold: f32,
    pub(crate) initial_rtt: Duration,
    pub(crate) initial_mtu: u16,
    pub(crate) min_mtu: u16,
    pub(crate) mtu_discovery_config: Option<MtuDiscoveryConfig>,
    pub(crate) pad_to_mtu: bool,
    pub(crate) ack_frequency_config: Option<AckFrequencyConfig>,

    pub(crate) persistent_congestion_threshold: u32,
    pub(crate) keep_alive_interval: Option<Duration>,
    pub(crate) crypto_buffer_size: usize,
    pub(crate) allow_spin: bool,
    pub(crate) datagram_receive_buffer_size: Option<usize>,
    pub(crate) datagram_send_buffer_size: usize,
    #[cfg(test)]
    pub(crate) deterministic_packet_numbers: bool,

    pub(crate) congestion_controller_factory: Arc<dyn congestion::ControllerFactory + Send + Sync>,

    pub(crate) enable_segmentation_offload: bool,

    pub(crate) address_discovery_role: address_discovery::Role,

    pub(crate) max_concurrent_multipath_paths: Option<NonZeroU32>,

    pub(crate) default_path_max_idle_timeout: Option<Duration>,
    pub(crate) default_path_keep_alive_interval: Option<Duration>,

    pub(crate) max_remote_nat_traversal_addresses: Option<NonZeroU8>,

    #[cfg(feature = "qlog")]
    pub(crate) qlog_factory: Option<Arc<dyn QlogFactory>>,
}

impl TransportConfig {
    /// Maximum number of incoming bidirectional streams that may be open concurrently
    ///
    /// Must be nonzero for the peer to open any bidirectional streams.
    ///
    /// Worst-case memory use is directly proportional to `max_concurrent_bidi_streams *
    /// stream_receive_window`, with an upper bound proportional to `receive_window`.
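    ///
    /// For example, a protocol that never expects the peer to open streams might refuse all
    /// remotely-initiated streams (a minimal sketch):
    ///
    /// ```
    /// # use iroh_quinn_proto::{TransportConfig, VarInt};
    /// let mut config = TransportConfig::default();
    /// // Refuse any streams the peer tries to open
    /// config
    ///     .max_concurrent_bidi_streams(VarInt::from_u32(0))
    ///     .max_concurrent_uni_streams(VarInt::from_u32(0));
    /// ```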
    pub fn max_concurrent_bidi_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_bidi_streams = value;
        self
    }

    /// Variant of `max_concurrent_bidi_streams` affecting unidirectional streams
    pub fn max_concurrent_uni_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_uni_streams = value;
        self
    }

    /// Maximum duration of inactivity to accept before timing out the connection.
    ///
    /// The true idle timeout is the minimum of this and the peer's own max idle timeout. `None`
    /// represents an infinite timeout. Defaults to 30 seconds.
    ///
    /// **WARNING**: If a peer or its network path malfunctions or acts maliciously, an infinite
    /// idle timeout can result in permanently hung futures!
    ///
    /// ```
    /// # use std::{convert::TryInto, time::Duration};
    /// # use iroh_quinn_proto::{TransportConfig, VarInt, VarIntBoundsExceeded};
    /// # fn main() -> Result<(), VarIntBoundsExceeded> {
    /// let mut config = TransportConfig::default();
    ///
    /// // Set the idle timeout as `VarInt`-encoded milliseconds
    /// config.max_idle_timeout(Some(VarInt::from_u32(10_000).into()));
    ///
    /// // Set the idle timeout as a `Duration`
    /// config.max_idle_timeout(Some(Duration::from_secs(10).try_into()?));
    /// # Ok(())
    /// # }
    /// ```
    pub fn max_idle_timeout(&mut self, value: Option<IdleTimeout>) -> &mut Self {
        self.max_idle_timeout = value.map(|t| t.0);
        self
    }

    /// Maximum number of bytes the peer may transmit without acknowledgement on any one stream
    /// before becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Setting this smaller than `receive_window` helps ensure that a single
    /// stream doesn't monopolize receive buffers, which may otherwise occur if the application
    /// chooses not to read from a large stream for a time while still requiring data on other
    /// streams.
    pub fn stream_receive_window(&mut self, value: VarInt) -> &mut Self {
        self.stream_receive_window = value;
        self
    }

    /// Maximum number of bytes the peer may transmit across all streams of a connection before
    /// becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Larger values can be useful to allow maximum throughput within a
    /// stream while another is blocked.
    pub fn receive_window(&mut self, value: VarInt) -> &mut Self {
        self.receive_window = value;
        self
    }

    /// Maximum number of bytes to transmit to a peer without acknowledgment
    ///
    /// Provides an upper bound on memory when communicating with peers that issue large amounts of
    /// flow control credit. Endpoints that wish to handle large numbers of connections robustly
    /// should take care to set this low enough to guarantee memory exhaustion does not occur if
    /// every connection uses the entire window.
    pub fn send_window(&mut self, value: u64) -> &mut Self {
        self.send_window = value;
        self
    }

    /// Whether to implement fair queuing for send streams having the same priority.
    ///
    /// When enabled, connections schedule data from outgoing streams having the same priority in a
    /// round-robin fashion. When disabled, streams are scheduled in the order they are written to.
    ///
    /// Note that this only affects streams with the same priority. Higher priority streams always
    /// take precedence over lower priority streams.
    ///
    /// Disabling fairness can reduce fragmentation and protocol overhead for workloads that use
    /// many small streams.
    pub fn send_fairness(&mut self, value: bool) -> &mut Self {
        self.send_fairness = value;
        self
    }

    /// Maximum reordering in packet number space before FACK style loss detection considers a
    /// packet lost. Should not be less than 3, per RFC5681.
    pub fn packet_threshold(&mut self, value: u32) -> &mut Self {
        self.packet_threshold = value;
        self
    }

    /// Maximum reordering in time space before time based loss detection considers a packet lost,
    /// as a factor of RTT
    pub fn time_threshold(&mut self, value: f32) -> &mut Self {
        self.time_threshold = value;
        self
    }

    /// The RTT used before an RTT sample is taken
    pub fn initial_rtt(&mut self, value: Duration) -> &mut Self {
        self.initial_rtt = value;
        self
    }

    /// The initial value to be used as the maximum UDP payload size before running MTU discovery
    /// (see [`TransportConfig::mtu_discovery_config`]).
    ///
    /// Must be at least 1200, which is the default, and known to be safe for typical internet
    /// applications. Larger values are more efficient, but increase the risk of packet loss due to
    /// exceeding the network path's IP MTU. If the provided value is higher than what the network
    /// path actually supports, packet loss will eventually trigger black hole detection and bring
    /// it down to [`TransportConfig::min_mtu`].
    pub fn initial_mtu(&mut self, value: u16) -> &mut Self {
        self.initial_mtu = value.max(INITIAL_MTU);
        self
    }

    pub(crate) fn get_initial_mtu(&self) -> u16 {
        self.initial_mtu.max(self.min_mtu)
    }

    /// The maximum UDP payload size guaranteed to be supported by the network.
    ///
    /// Must be at least 1200, which is the default, and lower than or equal to
    /// [`TransportConfig::initial_mtu`].
    ///
    /// Real-world MTUs can vary according to ISP, VPN, and properties of intermediate network links
    /// outside of either endpoint's control. Extreme care should be used when raising this value
    /// outside of private networks where these factors are fully controlled. If the provided value
    /// is higher than what the network path actually supports, the result will be unpredictable and
    /// catastrophic packet loss, without a possibility of repair. Prefer
    /// [`TransportConfig::initial_mtu`] together with
    /// [`TransportConfig::mtu_discovery_config`] to set a maximum UDP payload size that robustly
    /// adapts to the network.
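    ///
    /// A sketch of that preferred approach (the 1450-byte value is an assumption about the
    /// deployment, not a general recommendation):
    ///
    /// ```
    /// # use iroh_quinn_proto::{MtuDiscoveryConfig, TransportConfig};
    /// let mut config = TransportConfig::default();
    /// // Start optimistically and let MTU discovery plus black hole detection correct us,
    /// // instead of raising `min_mtu` and losing the ability to recover.
    /// config
    ///     .initial_mtu(1450)
    ///     .mtu_discovery_config(Some(MtuDiscoveryConfig::default()));
    /// ```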
    pub fn min_mtu(&mut self, value: u16) -> &mut Self {
        self.min_mtu = value.max(INITIAL_MTU);
        self
    }

    /// Specifies the MTU discovery config (see [`MtuDiscoveryConfig`] for details).
    ///
    /// Enabled by default.
    pub fn mtu_discovery_config(&mut self, value: Option<MtuDiscoveryConfig>) -> &mut Self {
        self.mtu_discovery_config = value;
        self
    }

    /// Pad UDP datagrams carrying application data to current maximum UDP payload size
    ///
    /// Disabled by default. UDP datagrams containing loss probes are exempt from padding.
    ///
    /// Enabling this helps mitigate traffic analysis by network observers, but it increases
    /// bandwidth usage. Without this mitigation, the precise plaintext size of application
    /// datagrams, as well as the total size of stream write bursts, can be inferred by observers
    /// under certain conditions. This analysis requires either an uncongested connection or
    /// application datagrams too large to be coalesced.
    pub fn pad_to_mtu(&mut self, value: bool) -> &mut Self {
        self.pad_to_mtu = value;
        self
    }

    /// Specifies the ACK frequency config (see [`AckFrequencyConfig`] for details)
    ///
    /// The provided configuration will be ignored if the peer does not support the acknowledgement
    /// frequency QUIC extension.
    ///
    /// Defaults to `None`, which disables controlling the peer's acknowledgement frequency. Even
    /// if set to `None`, the local side still supports the acknowledgement frequency QUIC
    /// extension and may use it in other ways.
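    ///
    /// A minimal sketch of opting in with the extension's default parameters:
    ///
    /// ```
    /// # use iroh_quinn_proto::{AckFrequencyConfig, TransportConfig};
    /// let mut config = TransportConfig::default();
    /// // Ask the peer to batch acknowledgements using the default thresholds
    /// config.ack_frequency_config(Some(AckFrequencyConfig::default()));
    /// ```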
    pub fn ack_frequency_config(&mut self, value: Option<AckFrequencyConfig>) -> &mut Self {
        self.ack_frequency_config = value;
        self
    }

    /// Number of consecutive PTOs after which network is considered to be experiencing persistent congestion.
    pub fn persistent_congestion_threshold(&mut self, value: u32) -> &mut Self {
        self.persistent_congestion_threshold = value;
        self
    }

    /// Period of inactivity before sending a keep-alive packet
    ///
    /// Keep-alive packets prevent an inactive but otherwise healthy connection from timing out.
    ///
    /// `None` to disable, which is the default. Only one side of any given connection needs keep-alive
    /// enabled for the connection to be preserved. Must be set lower than the idle_timeout of both
    /// peers to be effective.
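    ///
    /// For instance, with the default 30 second idle timeout, a keep-alive well below that bound
    /// might look like this (the interval chosen here is only an example):
    ///
    /// ```
    /// # use std::time::Duration;
    /// # use iroh_quinn_proto::TransportConfig;
    /// let mut config = TransportConfig::default();
    /// // Send a keep-alive every 10 seconds, comfortably below the 30 second idle timeout
    /// config.keep_alive_interval(Some(Duration::from_secs(10)));
    /// ```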
    pub fn keep_alive_interval(&mut self, value: Option<Duration>) -> &mut Self {
        self.keep_alive_interval = value;
        self
    }

    /// Maximum quantity of out-of-order crypto layer data to buffer
    pub fn crypto_buffer_size(&mut self, value: usize) -> &mut Self {
        self.crypto_buffer_size = value;
        self
    }

    /// Whether the implementation is permitted to set the spin bit on this connection
    ///
    /// This allows passive observers to easily judge the round trip time of a connection, which can
    /// be useful for network administration but sacrifices a small amount of privacy.
    pub fn allow_spin(&mut self, value: bool) -> &mut Self {
        self.allow_spin = value;
        self
    }

    /// Maximum number of incoming application datagram bytes to buffer, or None to disable
    /// incoming datagrams
    ///
    /// The peer is forbidden to send single datagrams larger than this size. If the aggregate size
    /// of all datagrams that have been received from the peer but not consumed by the application
    /// exceeds this value, old datagrams are dropped until it is no longer exceeded.
    pub fn datagram_receive_buffer_size(&mut self, value: Option<usize>) -> &mut Self {
        self.datagram_receive_buffer_size = value;
        self
    }

    /// Maximum number of outgoing application datagram bytes to buffer
    ///
    /// While datagrams are sent ASAP, it is possible for an application to generate data faster
    /// than the link, or even the underlying hardware, can transmit them. This limits the amount of
    /// memory that may be consumed in that case. When the send buffer is full and a new datagram is
    /// sent, older datagrams are dropped until sufficient space is available.
    pub fn datagram_send_buffer_size(&mut self, value: usize) -> &mut Self {
        self.datagram_send_buffer_size = value;
        self
    }

    /// Whether to force every packet number to be used
    ///
    /// By default, packet numbers are occasionally skipped to ensure peers aren't ACKing packets
    /// before they see them.
    #[cfg(test)]
    pub(crate) fn deterministic_packet_numbers(&mut self, enabled: bool) -> &mut Self {
        self.deterministic_packet_numbers = enabled;
        self
    }

    /// How to construct new `congestion::Controller`s
    ///
    /// Typically the refcounted configuration of a `congestion::Controller`,
    /// e.g. a `congestion::NewRenoConfig`.
    ///
    /// # Example
    /// ```
    /// # use iroh_quinn_proto::*; use std::sync::Arc;
    /// let mut config = TransportConfig::default();
    /// config.congestion_controller_factory(Arc::new(congestion::NewRenoConfig::default()));
    /// ```
    pub fn congestion_controller_factory(
        &mut self,
        factory: Arc<dyn congestion::ControllerFactory + Send + Sync + 'static>,
    ) -> &mut Self {
        self.congestion_controller_factory = factory;
        self
    }

    /// Whether to use "Generic Segmentation Offload" to accelerate transmits, when supported by the
    /// environment
    ///
    /// Defaults to `true`.
    ///
    /// GSO dramatically reduces CPU consumption when sending large numbers of packets with the same
    /// headers, such as when transmitting bulk data on a connection. However, it is not supported
    /// by all network interface drivers or packet inspection tools. `quinn-udp` will attempt to
    /// disable GSO automatically when unavailable, but this can lead to spurious packet loss at
    /// startup, temporarily degrading performance.
    pub fn enable_segmentation_offload(&mut self, enabled: bool) -> &mut Self {
        self.enable_segmentation_offload = enabled;
        self
    }

    /// Whether to send observed address reports to peers.
    ///
    /// This will aid peers in inferring their reachable address, which in most NATd networks
    /// will not be easily available to them.
    pub fn send_observed_address_reports(&mut self, enabled: bool) -> &mut Self {
        self.address_discovery_role.send_reports_to_peers(enabled);
        self
    }

    /// Whether to receive observed address reports from other peers.
    ///
    /// Peers with the address discovery extension enabled that are willing to provide observed
    /// address reports will do so if this transport parameter is set. In general, observed address
    /// reports cannot be trusted. This, however, can aid the current endpoint in inferring its
    /// reachable address, which in most NATd networks will not be easily available.
    pub fn receive_observed_address_reports(&mut self, enabled: bool) -> &mut Self {
        self.address_discovery_role
            .receive_reports_from_peers(enabled);
        self
    }

    /// Enables the Multipath Extension for QUIC.
    ///
    /// Setting this to any nonzero value will enable the Multipath Extension for QUIC,
    /// <https://datatracker.ietf.org/doc/draft-ietf-quic-multipath/>.
    ///
    /// The value provided specifies the maximum number of paths this endpoint may open
    /// concurrently when multipath is negotiated. For any path to be opened, the remote must
    /// enable multipath as well.
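    ///
    /// A minimal sketch of opting in (the path count is an arbitrary example value):
    ///
    /// ```
    /// # use iroh_quinn_proto::TransportConfig;
    /// let mut config = TransportConfig::default();
    /// // Offer the peer up to 4 concurrent paths; passing 0 would leave multipath disabled
    /// config.max_concurrent_multipath_paths(4);
    /// ```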
    pub fn max_concurrent_multipath_paths(&mut self, max_concurrent: u32) -> &mut Self {
        self.max_concurrent_multipath_paths = NonZeroU32::new(max_concurrent);
        self
    }

    /// Sets a default per-path maximum idle timeout
    ///
    /// If a path is idle for this long it will be abandoned. Bear in mind that this interacts
    /// with [`TransportConfig::max_idle_timeout`]: if the last path is abandoned, the entire
    /// connection will be closed.
    ///
    /// You can also change this using [`Connection::set_path_max_idle_timeout`] for
    /// existing paths.
    ///
    /// [`Connection::set_path_max_idle_timeout`]: crate::Connection::set_path_max_idle_timeout
    pub fn default_path_max_idle_timeout(&mut self, timeout: Option<Duration>) -> &mut Self {
        self.default_path_max_idle_timeout = timeout;
        self
    }

    /// Sets a default per-path keep-alive interval
    ///
    /// Note that this does not interact with the connection-wide
    /// [`TransportConfig::keep_alive_interval`]: this setting keeps an individual path active,
    /// while [`TransportConfig::keep_alive_interval`] keeps the connection active without any
    /// control over which path is used for this.
    ///
    /// You can also change this using [`Connection::set_path_keep_alive_interval`] for
    /// existing paths.
    ///
    /// [`Connection::set_path_keep_alive_interval`]: crate::Connection::set_path_keep_alive_interval
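    ///
    /// An illustrative sketch combining both per-path settings (the durations are example values
    /// only):
    ///
    /// ```
    /// # use std::time::Duration;
    /// # use iroh_quinn_proto::TransportConfig;
    /// let mut config = TransportConfig::default();
    /// config
    ///     // Abandon paths that have been idle for a minute...
    ///     .default_path_max_idle_timeout(Some(Duration::from_secs(60)))
    ///     // ...unless a per-path keep-alive keeps them active
    ///     .default_path_keep_alive_interval(Some(Duration::from_secs(15)));
    /// ```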
    pub fn default_path_keep_alive_interval(&mut self, interval: Option<Duration>) -> &mut Self {
        self.default_path_keep_alive_interval = interval;
        self
    }

    /// Get the initial max [`crate::PathId`] this endpoint allows.
    ///
    /// Returns `None` if multipath is disabled.
    pub(crate) fn get_initial_max_path_id(&self) -> Option<crate::PathId> {
        self.max_concurrent_multipath_paths
            // a max_concurrent_multipath_paths value of 1 only allows the first path, which
            // has id 0
            .map(|nonzero_concurrent| nonzero_concurrent.get() - 1)
            .map(Into::into)
    }

    /// Sets the maximum number of NAT traversal addresses this endpoint allows the remote to
    /// advertise
    ///
    /// Setting this to any nonzero value will enable Iroh's holepunching, loosely based on the
    /// NAT Traversal Extension for QUIC, see
    /// <https://www.ietf.org/archive/id/draft-seemann-quic-nat-traversal-02.html>.
    ///
    /// This implementation expects the multipath extension to be enabled as well. If it has not
    /// yet been enabled via [`Self::max_concurrent_multipath_paths`], a default value of
    /// [`DEFAULT_CONCURRENT_MULTIPATH_PATHS_WHEN_ENABLED`] will be used.
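    ///
    /// A minimal sketch of enabling hole punching (the address count is an arbitrary example):
    ///
    /// ```
    /// # use iroh_quinn_proto::TransportConfig;
    /// let mut config = TransportConfig::default();
    /// // Accept up to 8 advertised candidate addresses from the remote; this also enables
    /// // multipath with the default number of concurrent paths if not already configured.
    /// config.set_max_remote_nat_traversal_addresses(8);
    /// ```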
    pub fn set_max_remote_nat_traversal_addresses(&mut self, max_addresses: u8) -> &mut Self {
        self.max_remote_nat_traversal_addresses = NonZeroU8::new(max_addresses);
        if max_addresses != 0 && self.max_concurrent_multipath_paths.is_none() {
            self.max_concurrent_multipath_paths(
                DEFAULT_CONCURRENT_MULTIPATH_PATHS_WHEN_ENABLED_.get(),
            );
        }
        self
    }

    /// Configures qlog capturing by setting a [`QlogFactory`].
    ///
    /// This assigns a [`QlogFactory`] that produces qlog capture configurations for
    /// individual connections.
    #[cfg(feature = "qlog")]
    pub fn qlog_factory(&mut self, factory: Arc<dyn QlogFactory>) -> &mut Self {
        self.qlog_factory = Some(factory);
        self
    }

    /// Configures qlog capturing through the `QLOGDIR` environment variable.
    ///
    /// This uses [`QlogFileFactory::from_env`] to create a factory to write qlog traces
    /// into the directory set through the `QLOGDIR` environment variable.
    ///
    /// If `QLOGDIR` is not set, no traces will be written. If `QLOGDIR` is set to a path
    /// that does not exist, it will be created.
    ///
    /// The files will be prefixed with `prefix`.
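    ///
    /// A minimal sketch (the `"client"` prefix is only an example value):
    ///
    /// ```no_run
    /// # use iroh_quinn_proto::TransportConfig;
    /// let mut config = TransportConfig::default();
    /// // Traces are written to the directory named by `QLOGDIR` (if set), with file names
    /// // starting with "client"
    /// config.qlog_from_env("client");
    /// ```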
    #[cfg(feature = "qlog")]
    pub fn qlog_from_env(&mut self, prefix: &str) -> &mut Self {
        self.qlog_factory(Arc::new(QlogFileFactory::from_env().with_prefix(prefix)))
    }

    /// Configures qlog capturing into a directory.
    ///
    /// This uses [`QlogFileFactory`] to create a factory to write qlog traces into
    /// the specified directory. The files will be prefixed with `prefix`.
    #[cfg(feature = "qlog")]
    pub fn qlog_from_path(&mut self, path: impl AsRef<Path>, prefix: &str) -> &mut Self {
        self.qlog_factory(Arc::new(
            QlogFileFactory::new(path.as_ref().to_owned()).with_prefix(prefix),
        ))
    }

    pub(crate) fn create_qlog_sink(
        &self,
        side: Side,
        remote: SocketAddr,
        initial_dst_cid: ConnectionId,
        now: Instant,
    ) -> QlogSink {
        #[cfg(not(feature = "qlog"))]
        let sink = {
            let _ = (side, remote, initial_dst_cid, now);
            QlogSink::default()
        };

        #[cfg(feature = "qlog")]
        let sink = {
            if let Some(config) = self
                .qlog_factory
                .as_ref()
                .and_then(|factory| factory.for_connection(side, remote, initial_dst_cid, now))
            {
                QlogSink::new(config, initial_dst_cid, side, now)
            } else {
                QlogSink::default()
            }
        };

        sink
    }
}

impl Default for TransportConfig {
    fn default() -> Self {
        const EXPECTED_RTT: u32 = 100; // ms
        const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s
        // Window size needed to avoid pipeline stalls
        const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT;

        Self {
            max_concurrent_bidi_streams: 100u32.into(),
            max_concurrent_uni_streams: 100u32.into(),
            // 30 second default recommended by RFC 9308 § 3.2
            max_idle_timeout: Some(VarInt(30_000)),
            stream_receive_window: STREAM_RWND.into(),
            receive_window: VarInt::MAX,
            send_window: (8 * STREAM_RWND).into(),
            send_fairness: true,

            packet_threshold: 3,
            time_threshold: 9.0 / 8.0,
            initial_rtt: Duration::from_millis(333), // per spec, intentionally distinct from EXPECTED_RTT
            initial_mtu: INITIAL_MTU,
            min_mtu: INITIAL_MTU,
            mtu_discovery_config: Some(MtuDiscoveryConfig::default()),
            pad_to_mtu: false,
            ack_frequency_config: None,

            persistent_congestion_threshold: 3,
            keep_alive_interval: None,
            crypto_buffer_size: 16 * 1024,
            allow_spin: true,
            datagram_receive_buffer_size: Some(STREAM_RWND as usize),
            datagram_send_buffer_size: 1024 * 1024,
            #[cfg(test)]
            deterministic_packet_numbers: false,

            congestion_controller_factory: Arc::new(congestion::CubicConfig::default()),

            enable_segmentation_offload: true,

            address_discovery_role: address_discovery::Role::default(),

            // multipath disabled by default
            max_concurrent_multipath_paths: None,
            default_path_max_idle_timeout: None,
            default_path_keep_alive_interval: None,

            // NAT traversal disabled by default
            max_remote_nat_traversal_addresses: None,

            #[cfg(feature = "qlog")]
            qlog_factory: None,
        }
    }
}

impl fmt::Debug for TransportConfig {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            max_concurrent_bidi_streams,
            max_concurrent_uni_streams,
            max_idle_timeout,
            stream_receive_window,
            receive_window,
            send_window,
            send_fairness,
            packet_threshold,
            time_threshold,
            initial_rtt,
            initial_mtu,
            min_mtu,
            mtu_discovery_config,
            pad_to_mtu,
            ack_frequency_config,
            persistent_congestion_threshold,
            keep_alive_interval,
            crypto_buffer_size,
            allow_spin,
            datagram_receive_buffer_size,
            datagram_send_buffer_size,
            #[cfg(test)]
            deterministic_packet_numbers: _,
            congestion_controller_factory: _,
            enable_segmentation_offload,
            address_discovery_role,
            max_concurrent_multipath_paths,
            default_path_max_idle_timeout,
            default_path_keep_alive_interval,
            max_remote_nat_traversal_addresses,
            #[cfg(feature = "qlog")]
            qlog_factory,
        } = self;
        let mut s = fmt.debug_struct("TransportConfig");

        s.field("max_concurrent_bidi_streams", max_concurrent_bidi_streams)
            .field("max_concurrent_uni_streams", max_concurrent_uni_streams)
            .field("max_idle_timeout", max_idle_timeout)
            .field("stream_receive_window", stream_receive_window)
            .field("receive_window", receive_window)
            .field("send_window", send_window)
            .field("send_fairness", send_fairness)
            .field("packet_threshold", packet_threshold)
            .field("time_threshold", time_threshold)
            .field("initial_rtt", initial_rtt)
            .field("initial_mtu", initial_mtu)
            .field("min_mtu", min_mtu)
            .field("mtu_discovery_config", mtu_discovery_config)
            .field("pad_to_mtu", pad_to_mtu)
            .field("ack_frequency_config", ack_frequency_config)
            .field(
                "persistent_congestion_threshold",
                persistent_congestion_threshold,
            )
            .field("keep_alive_interval", keep_alive_interval)
            .field("crypto_buffer_size", crypto_buffer_size)
            .field("allow_spin", allow_spin)
            .field("datagram_receive_buffer_size", datagram_receive_buffer_size)
            .field("datagram_send_buffer_size", datagram_send_buffer_size)
            // congestion_controller_factory not debug
            .field("enable_segmentation_offload", enable_segmentation_offload)
            .field("address_discovery_role", address_discovery_role)
            .field(
                "max_concurrent_multipath_paths",
                max_concurrent_multipath_paths,
            )
            .field(
                "default_path_max_idle_timeout",
                default_path_max_idle_timeout,
            )
            .field(
                "default_path_keep_alive_interval",
                default_path_keep_alive_interval,
            )
            .field(
                "max_remote_nat_traversal_addresses",
                max_remote_nat_traversal_addresses,
            );
        #[cfg(feature = "qlog")]
        s.field("qlog_factory", &qlog_factory.is_some());

        s.finish_non_exhaustive()
    }
}

/// Parameters for controlling the peer's acknowledgement frequency
///
/// The parameters provided in this config will be sent to the peer at the beginning of the
/// connection, so it can take them into account when sending acknowledgements (see each parameter's
/// description for details on how it influences acknowledgement frequency).
///
/// Quinn's implementation follows the fourth draft of the
/// [QUIC Acknowledgement Frequency extension](https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency-04).
/// The defaults produce behavior slightly different than the behavior without this extension,
/// because they change the way reordered packets are handled (see
/// [`AckFrequencyConfig::reordering_threshold`] for details).
#[derive(Clone, Debug)]
pub struct AckFrequencyConfig {
    pub(crate) ack_eliciting_threshold: VarInt,
    pub(crate) max_ack_delay: Option<Duration>,
    pub(crate) reordering_threshold: VarInt,
}

impl AckFrequencyConfig {
    /// The ack-eliciting threshold we will request the peer to use
    ///
    /// This threshold represents the number of ack-eliciting packets an endpoint may receive
    /// without immediately sending an ACK.
    ///
    /// The remote peer should send at least one ACK frame when more than this number of
    /// ack-eliciting packets have been received. A value of 0 results in a receiver immediately
    /// acknowledging every ack-eliciting packet.
    ///
    /// Defaults to 1, which sends ACK frames for every other ack-eliciting packet.
    pub fn ack_eliciting_threshold(&mut self, value: VarInt) -> &mut Self {
        self.ack_eliciting_threshold = value;
        self
    }

    /// The `max_ack_delay` we will request the peer to use
    ///
    /// This parameter represents the maximum amount of time that an endpoint waits before sending
    /// an ACK when the ack-eliciting threshold hasn't been reached.
    ///
    /// The effective `max_ack_delay` will be clamped to be at least the peer's `min_ack_delay`
    /// transport parameter, and at most the greater of the current path RTT or 25ms.
    ///
    /// Defaults to `None`, in which case the peer's original `max_ack_delay` will be used, as
    /// obtained from its transport parameters.
    pub fn max_ack_delay(&mut self, value: Option<Duration>) -> &mut Self {
        self.max_ack_delay = value;
        self
    }

    /// The reordering threshold we will request the peer to use
    ///
    /// This threshold represents the number of out-of-order packets that will trigger an endpoint
    /// to send an ACK, without waiting for `ack_eliciting_threshold` to be exceeded or for
    /// `max_ack_delay` to elapse.
    ///
    /// A value of 0 indicates out-of-order packets do not elicit an immediate ACK. A value of 1
    /// immediately acknowledges any packets that are received out of order (this is also the
    /// behavior when the extension is disabled).
    ///
    /// It is recommended to set this value to [`TransportConfig::packet_threshold`] minus one.
    /// Since the default value for [`TransportConfig::packet_threshold`] is 3, this value defaults
    /// to 2.
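    ///
    /// A sketch of keeping this consistent with a non-default
    /// [`TransportConfig::packet_threshold`] (the value 5 is an arbitrary example):
    ///
    /// ```
    /// # use iroh_quinn_proto::{AckFrequencyConfig, TransportConfig, VarInt};
    /// let packet_threshold = 5;
    /// let mut transport = TransportConfig::default();
    /// transport.packet_threshold(packet_threshold);
    ///
    /// // Follow the recommendation above: reordering_threshold = packet_threshold - 1
    /// let mut ack_frequency = AckFrequencyConfig::default();
    /// ack_frequency.reordering_threshold(VarInt::from_u32(packet_threshold - 1));
    /// transport.ack_frequency_config(Some(ack_frequency));
    /// ```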
    pub fn reordering_threshold(&mut self, value: VarInt) -> &mut Self {
        self.reordering_threshold = value;
        self
    }
}

impl Default for AckFrequencyConfig {
    fn default() -> Self {
        Self {
            ack_eliciting_threshold: VarInt(1),
            max_ack_delay: None,
            reordering_threshold: VarInt(2),
        }
    }
}

/// Parameters governing MTU discovery.
///
/// # The why of MTU discovery
///
/// By design, QUIC ensures during the handshake that the network path between the client and the
/// server is able to transmit unfragmented UDP packets with a body of 1200 bytes. In other words,
/// once the connection is established, we know that the network path's maximum transmission unit
/// (MTU) is of at least 1200 bytes (plus IP and UDP headers). Because of this, a QUIC endpoint can
/// split outgoing data in packets of 1200 bytes, with confidence that the network will be able to
/// deliver them (if the endpoint were to send bigger packets, they could prove too big and end up
/// being dropped).
///
/// There is, however, a significant overhead associated with sending a packet. If the same
/// information can be sent in fewer packets, that results in higher throughput. The number of
/// packets that need to be sent is inversely proportional to the MTU: the higher the MTU, the
/// bigger the packets that can be sent, and the fewer packets that are needed to transmit a given
/// amount of bytes.
///
/// Most networks have an MTU higher than 1200. Through MTU discovery, endpoints can detect the
/// path's MTU and, if it turns out to be higher, start sending bigger packets.
///
/// # MTU discovery internals
///
/// Quinn implements MTU discovery through DPLPMTUD (Datagram Packetization Layer Path MTU
/// Discovery), described in [section 14.3 of RFC
/// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-14.3). This method consists of sending
/// QUIC packets padded to a particular size (called PMTU probes), and waiting to see if the remote
/// peer responds with an ACK. If an ACK is received, that means the probe arrived at the remote
/// peer, which in turn means that the network path's MTU is of at least the packet's size. If the
/// probe is lost, it is sent another 2 times before concluding that the MTU is lower than the
/// packet's size.
///
/// MTU discovery runs on a schedule (e.g. every 600 seconds) specified through
/// [`MtuDiscoveryConfig::interval`]. The first run happens right after the handshake, and
/// subsequent discoveries are scheduled to run when the interval has elapsed, starting from the
/// last time when MTU discovery completed.
///
/// Since the search space for MTUs is quite big (the smallest possible MTU is 1200, and the highest
/// is 65527), Quinn performs a binary search to keep the number of probes as low as possible. The
/// lower bound of the search is equal to [`TransportConfig::initial_mtu`] in the
/// initial MTU discovery run, and is equal to the currently discovered MTU in subsequent runs. The
/// upper bound is determined by the minimum of [`MtuDiscoveryConfig::upper_bound`] and the
/// `max_udp_payload_size` transport parameter received from the peer during the handshake.
///
/// # Black hole detection
///
/// If, at some point, the network path no longer accepts packets of the detected size, packet loss
/// will eventually trigger black hole detection and reset the detected MTU to 1200. In that case,
/// MTU discovery will be triggered after [`MtuDiscoveryConfig::black_hole_cooldown`] (ignoring the
/// timer that was set based on [`MtuDiscoveryConfig::interval`]).
///
/// # Interaction between peers
///
/// There is no guarantee that the MTU on the path between A and B is the same as the MTU of the
/// path between B and A. Therefore, each peer in the connection needs to run MTU discovery
/// independently in order to discover the path's MTU.
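///
/// # Example
///
/// A sketch of adjusting the discovery schedule and search ceiling (the values below are
/// arbitrary examples, not recommendations):
///
/// ```
/// # use std::time::Duration;
/// # use iroh_quinn_proto::{MtuDiscoveryConfig, TransportConfig};
/// let mut mtu_config = MtuDiscoveryConfig::default();
/// mtu_config
///     // Re-probe the path every 10 minutes (the default)
///     .interval(Duration::from_secs(600))
///     // Never probe beyond a 1500-byte UDP payload
///     .upper_bound(1500);
///
/// let mut config = TransportConfig::default();
/// config.mtu_discovery_config(Some(mtu_config));
/// ```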
#[derive(Clone, Debug)]
pub struct MtuDiscoveryConfig {
    pub(crate) interval: Duration,
    pub(crate) upper_bound: u16,
    pub(crate) minimum_change: u16,
    pub(crate) black_hole_cooldown: Duration,
}

impl MtuDiscoveryConfig {
    /// Specifies the time to wait after completing MTU discovery before starting a new MTU
    /// discovery run.
    ///
    /// Defaults to 600 seconds, as recommended by [RFC
    /// 8899](https://www.rfc-editor.org/rfc/rfc8899).
    pub fn interval(&mut self, value: Duration) -> &mut Self {
        self.interval = value;
        self
    }

    /// Specifies the upper bound to the max UDP payload size that MTU discovery will search for.
    ///
    /// Defaults to 1452, to stay within Ethernet's MTU when using IPv4 and IPv6. The highest
    /// allowed value is 65527, which corresponds to the maximum permitted UDP payload on IPv6.
    ///
    /// It is safe to use an arbitrarily high upper bound, regardless of the network path's MTU. The
    /// only drawback is that MTU discovery might take more time to finish.
    pub fn upper_bound(&mut self, value: u16) -> &mut Self {
        self.upper_bound = value.min(MAX_UDP_PAYLOAD);
        self
    }

    /// Specifies the amount of time that MTU discovery should wait after a black hole was detected
    /// before running again. Defaults to one minute.
    ///
    /// Black hole detection can be spuriously triggered in case of congestion, so it makes sense to
    /// try MTU discovery again after a short period of time.
    pub fn black_hole_cooldown(&mut self, value: Duration) -> &mut Self {
        self.black_hole_cooldown = value;
        self
    }

    /// Specifies the minimum MTU change to stop the MTU discovery phase.
    /// Defaults to 20.
    pub fn minimum_change(&mut self, value: u16) -> &mut Self {
        self.minimum_change = value;
        self
    }
}

impl Default for MtuDiscoveryConfig {
    fn default() -> Self {
        Self {
            interval: Duration::from_secs(600),
            upper_bound: 1452,
            black_hole_cooldown: Duration::from_secs(60),
            minimum_change: 20,
        }
    }
}

/// Maximum duration of inactivity to accept before timing out the connection
///
/// This wraps an underlying [`VarInt`], representing the duration in milliseconds. Values can be
/// constructed by converting directly from `VarInt`, or using `TryFrom<Duration>`.
///
/// ```
/// # use std::{convert::TryFrom, time::Duration};
/// # use iroh_quinn_proto::{IdleTimeout, VarIntBoundsExceeded, VarInt};
/// # fn main() -> Result<(), VarIntBoundsExceeded> {
/// // A `VarInt`-encoded value in milliseconds
/// let timeout = IdleTimeout::from(VarInt::from_u32(10_000));
///
/// // Try to convert a `Duration` into a `VarInt`-encoded timeout
/// let timeout = IdleTimeout::try_from(Duration::from_secs(10))?;
/// # Ok(())
/// # }
/// ```
#[derive(Default, Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct IdleTimeout(VarInt);

impl From<VarInt> for IdleTimeout {
    fn from(inner: VarInt) -> Self {
        Self(inner)
    }
}

impl std::convert::TryFrom<Duration> for IdleTimeout {
    type Error = VarIntBoundsExceeded;

    fn try_from(timeout: Duration) -> Result<Self, Self::Error> {
        let inner = VarInt::try_from(timeout.as_millis())?;
        Ok(Self(inner))
    }
}

impl fmt::Debug for IdleTimeout {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}