// noq/lib.rs
//! QUIC transport protocol implementation
//!
//! [QUIC](https://en.wikipedia.org/wiki/QUIC) is a modern transport protocol addressing
//! shortcomings of TCP, such as head-of-line blocking, poor security, slow handshakes, and
//! inefficient congestion control. This crate provides a portable userspace implementation. It
//! builds on top of noq-proto, which implements protocol logic independent of any particular
//! runtime.
//!
//! The entry point of this crate is the [`Endpoint`].
//!
//! # About QUIC
//!
//! A QUIC connection is an association between two endpoints. The endpoint which initiates the
//! connection is termed the client, and the endpoint which accepts it is termed the server. A
//! single endpoint may function as both client and server for different connections, for example
//! in a peer-to-peer application. To communicate application data, each endpoint may open streams
//! up to a limit dictated by its peer. Typically, that limit is increased as old streams are
//! finished.
//!
//! Streams may be unidirectional or bidirectional, and are cheap to create and disposable. For
//! example, a traditionally datagram-oriented application could use a new stream for every
//! message it wants to send, no longer needing to worry about MTUs. Bidirectional streams behave
//! much like a traditional TCP connection, and are useful for sending messages that have an
//! immediate response, such as an HTTP request. Stream data is delivered reliably, and there is no
//! ordering enforced between data on different streams.
//!
//! By avoiding head-of-line blocking and providing unified congestion control across all streams
//! of a connection, QUIC is able to provide higher throughput and lower latency than one or
//! multiple TCP connections between the same two hosts, while providing more useful behavior than
//! raw UDP sockets.
//!
//! noq also exposes unreliable datagrams, which are a low-level primitive preferred when
//! automatic fragmentation and retransmission of certain data is not desired.
//!
//! QUIC uses encryption and identity verification built directly on TLS 1.3. Just as with a TLS
//! server, it is useful for a QUIC server to be identified by a certificate signed by a trusted
//! authority. If this is infeasible--for example, if servers are short-lived or not associated
//! with a domain name--then as with TLS, self-signed certificates can be used to provide
//! encryption alone.
#![warn(missing_docs)]
41
42use std::pin::Pin;
43use std::sync::Arc;
44
// Private implementation modules; selected items are re-exported further
// below to form the crate's public API.
mod connection;
mod endpoint;
mod event_stream;
mod incoming;
mod mutex;
mod path;
mod recv_stream;
mod runtime;
mod send_stream;
mod work_limiter;
55
56#[cfg(not(wasm_browser))]
57pub(crate) use std::time::{Duration, Instant};
58#[cfg(wasm_browser)]
59pub(crate) use web_time::{Duration, Instant};
60
61#[cfg(feature = "bloom")]
62pub use proto::BloomTokenLog;
63pub use proto::{
64 AckFrequencyConfig, ApplicationClose, Chunk, ClientConfig, ClosePathError, ClosedPath,
65 ClosedStream, ConfigError, ConnectError, ConnectionClose, ConnectionError, ConnectionId,
66 ConnectionIdGenerator, ConnectionStats, DecryptedInitial, Dir, EcnCodepoint, EndpointConfig,
67 FrameStats, FrameType, IdleTimeout, InvalidCid, MtuDiscoveryConfig, NetworkChangeHint,
68 NoneTokenLog, NoneTokenStore, PathError, PathEvent, PathId, PathStats, PathStatus,
69 ServerConfig, SetPathStatusError, Side, StdSystemTime, StreamId, TimeSource, TokenLog,
70 TokenMemoryCache, TokenReuseError, TokenStore, Transmit, TransportConfig, TransportErrorCode,
71 UdpStats, ValidationTokenConfig, VarInt, VarIntBoundsExceeded, Written, congestion, crypto,
72};
73#[cfg(feature = "qlog")]
74pub use proto::{QlogConfig, QlogFactory, QlogFileFactory};
75#[cfg(feature = "rustls")]
76pub use rustls;
77pub use udp;
78
79pub use crate::connection::{
80 AcceptBi, AcceptUni, Closed, Connecting, Connection, OnClosed, OpenBi, OpenUni, ReadDatagram,
81 SendDatagram, SendDatagramError, WeakConnectionHandle, ZeroRttAccepted,
82};
83pub use crate::endpoint::{Accept, Endpoint, EndpointStats};
84pub use crate::event_stream::{Lagged, NatTraversalUpdates, ObservedExternalAddr, PathEvents};
85pub use crate::incoming::{Incoming, IncomingFuture, RetryError};
86pub use crate::path::{AddressDiscovery, OpenPath, Path, WeakPathHandle};
87pub use crate::recv_stream::{
88 ReadError, ReadExactError, ReadToEndError, RecvStream, ResetError, UnorderedRecvStream,
89};
90#[cfg(feature = "runtime-smol")]
91pub use crate::runtime::SmolRuntime;
92#[cfg(feature = "runtime-tokio")]
93pub use crate::runtime::TokioRuntime;
94#[cfg(any(feature = "runtime-tokio", feature = "runtime-smol"))]
95pub use crate::runtime::default_runtime;
96pub use crate::runtime::{AsyncTimer, AsyncUdpSocket, Runtime, UdpSender};
97pub use crate::send_stream::{SendStream, Stopped, StoppedError, WriteError};
98
99#[cfg(test)]
100mod tests;
101
/// Crate-internal events targeting a single connection.
///
/// NOTE(review): the exact delivery path (presumably API handles -> the
/// connection's driver in the `connection` module) is not visible here —
/// confirm against `connection.rs`.
#[derive(Debug)]
enum ConnectionEvent {
    /// Close the connection, carrying the application error code and reason
    /// bytes to transmit to the peer.
    Close {
        error_code: VarInt,
        reason: bytes::Bytes,
    },
    /// An event produced by the protocol state machine in noq-proto.
    Proto(proto::ConnectionEvent),
    /// Swap in a new UDP sender (e.g. after the endpoint rebinds its socket).
    Rebind(Pin<Box<dyn UdpSender>>),
    /// The local address changed; optionally carries a hint describing the
    /// network change.
    LocalAddressChanged(Option<Arc<dyn NetworkChangeHint + Sync + Send>>),
}
112
113fn udp_transmit<'a>(t: &proto::Transmit, buffer: &'a [u8]) -> udp::Transmit<'a> {
114 udp::Transmit {
115 destination: t.destination,
116 ecn: t.ecn.map(udp_ecn),
117 contents: buffer,
118 segment_size: t.segment_size,
119 src_ip: t.src_ip,
120 }
121}
122
123fn udp_ecn(ecn: proto::EcnCodepoint) -> udp::EcnCodepoint {
124 match ecn {
125 proto::EcnCodepoint::Ect0 => udp::EcnCodepoint::Ect0,
126 proto::EcnCodepoint::Ect1 => udp::EcnCodepoint::Ect1,
127 proto::EcnCodepoint::Ce => udp::EcnCodepoint::Ce,
128 }
129}
130
/// Maximum number of datagrams processed in send/recv calls to make before moving on to other processing
///
/// This helps ensure we don't starve anything when the CPU is slower than the link.
/// Value is selected by picking a low number which didn't degrade throughput in benchmarks.
const IO_LOOP_BOUND: usize = 160;

/// The maximum amount of time that should be spent in `recvmsg()` calls per endpoint iteration
///
/// 50us are chosen so that an endpoint iteration with a 50us sendmsg limit blocks
/// the runtime for a maximum of about 100us (50us receiving + 50us sending).
/// Going much lower does not yield any noticeable difference, since a single `recvmmsg`
/// batch of size 32 was observed to take 30us on some systems.
// Uses the cfg-selected `Duration` (std or web_time) imported above.
const RECV_TIME_BOUND: Duration = Duration::from_micros(50);