use std::{
collections::{hash_map::Entry, BTreeSet, HashMap, HashSet, VecDeque},
net::SocketAddr,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use anyhow::Context as _;
use bytes::BytesMut;
use futures_concurrency::stream::{stream_group, StreamGroup};
use futures_util::FutureExt as _;
use iroh::{
endpoint::{Connection, DirectAddr},
protocol::ProtocolHandler,
Endpoint, NodeAddr, NodeId, PublicKey, RelayUrl,
};
use irpc::{channel::spsc, WithChannels};
use n0_future::{
boxed::BoxFuture,
task::{self, AbortOnDropHandle, JoinSet},
time::Instant,
Stream, StreamExt as _,
};
use rand::rngs::StdRng;
use rand_core::SeedableRng;
use serde::{Deserialize, Serialize};
use tokio::sync::{broadcast, mpsc, oneshot};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, error_span, trace, warn, Instrument};
use self::util::{read_message, write_message, Timers};
use crate::{
metrics::Metrics,
proto::{self, HyparviewConfig, PeerData, PlumtreeConfig, Scope, TopicId},
};
pub mod util;
use crate::api::{self, Command, Event, GossipApi, GossipEvent};
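/// The ALPN protocol name used for iroh-gossip connections.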
pub const GOSSIP_ALPN: &[u8] = b"/iroh-gossip/0";
const SEND_QUEUE_CAP: usize = 64;
const TO_ACTOR_CAP: usize = 64;
const IN_EVENT_CAP: usize = 1024;
const TOPIC_EVENT_CAP: usize = 256;
const SOURCE_NAME: &str = "gossip";
pub type ProtoEvent = proto::Event<PublicKey>;
pub type ProtoCommand = proto::Command<PublicKey>;
type InEvent = proto::InEvent<PublicKey>;
type OutEvent = proto::OutEvent<PublicKey>;
type Timer = proto::Timer<PublicKey>;
type ProtoMessage = proto::Message<PublicKey>;
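/// Errors emitted by the gossip actor and its API.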
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Actor closed")]
ActorClosed,
#[error("Joined event to be the first event received")]
UnexpectedEvent,
#[error("Receiver closed")]
ReceiverClosed,
#[error("Ser/De {0}")]
SerDe(#[from] postcard::Error),
#[error("empty peer data")]
EmptyPeerData,
#[error("write {0}")]
Write(#[from] util::WriteError),
#[error("read {0}")]
Read(#[from] util::ReadError),
#[error(transparent)]
WatchableDisconnected(#[from] iroh::watchable::Disconnected),
#[error(transparent)]
IrohConnection(#[from] iroh::endpoint::ConnectionError),
#[error(transparent)]
Iroh(#[from] anyhow::Error),
#[error("join")]
Join(#[from] task::JoinError),
#[error(transparent)]
RpcSend(#[from] irpc::channel::SendError),
#[error(transparent)]
RpcRecv(#[from] irpc::channel::RecvError),
}
impl<T> From<async_channel::SendError<T>> for Error {
fn from(_value: async_channel::SendError<T>) -> Self {
Error::ActorClosed
}
}
impl<T> From<mpsc::error::SendError<T>> for Error {
fn from(_value: mpsc::error::SendError<T>) -> Self {
Error::ActorClosed
}
}
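/// A clonable handle to the gossip protocol actor.
///
/// Dereferences to [`GossipApi`], which provides topic subscription and
/// broadcasting. A minimal usage sketch (assuming an already-bound iroh
/// [`Endpoint`] named `endpoint` and a `topic_id`/`bootstrap` of your choosing):
///
/// ```ignore
/// let gossip = Gossip::builder().spawn(endpoint).await?;
/// let topic = gossip.subscribe_and_join(topic_id, bootstrap).await?;
/// ```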
#[derive(Debug, Clone)]
pub struct Gossip {
pub(crate) inner: Arc<Inner>,
}
impl std::ops::Deref for Gossip {
type Target = GossipApi;
fn deref(&self) -> &Self::Target {
&self.inner.api
}
}
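/// In-process control messages from [`Gossip`] handles to the actor.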
#[derive(Debug)]
enum LocalActorMessage {
HandleConnection(Connection),
Shutdown { reply: oneshot::Sender<()> },
}
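/// State shared between all clones of a [`Gossip`] handle.
///
/// Dropping the last clone aborts the actor task via `_actor_handle`.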
#[derive(Debug)]
pub(crate) struct Inner {
api: GossipApi,
local_tx: mpsc::Sender<LocalActorMessage>,
_actor_handle: AbortOnDropHandle<()>,
max_message_size: usize,
metrics: Arc<Metrics>,
}
impl ProtocolHandler for Gossip {
fn accept(&self, conn: Connection) -> BoxFuture<anyhow::Result<()>> {
let this = self.clone();
Box::pin(async move {
this.handle_connection(conn).await?;
Ok(())
})
}
fn shutdown(&self) -> BoxFuture<()> {
let this = self.clone();
Box::pin(async move {
if let Err(err) = this.shutdown().await {
warn!("error while shutting down gossip: {err:#}");
}
})
}
}
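/// Builder to configure and spawn a [`Gossip`] instance.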
#[derive(Debug, Clone)]
pub struct Builder {
config: proto::Config,
}
impl Builder {
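/// Sets the maximum message size in bytes.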
pub fn max_message_size(mut self, size: usize) -> Self {
self.config.max_message_size = size;
self
}
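/// Sets the configuration for the membership protocol (HyParView).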
pub fn membership_config(mut self, config: HyparviewConfig) -> Self {
self.config.membership = config;
self
}
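/// Sets the configuration for the broadcast protocol (Plumtree).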
pub fn broadcast_config(mut self, config: PlumtreeConfig) -> Self {
self.config.broadcast = config;
self
}
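/// Spawns the gossip actor on the given endpoint and returns a [`Gossip`] handle.
///
/// Waits until the endpoint knows either a direct address or its home relay,
/// so the actor starts with non-empty peer data to advertise.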
pub async fn spawn(self, endpoint: Endpoint) -> Result<Gossip, Error> {
let metrics = Arc::new(Metrics::default());
let addr = {
n0_future::future::race(
endpoint.direct_addresses().initialized().map(|_| ()),
endpoint.home_relay().initialized().map(|_| ()),
)
.await;
let addrs = endpoint
.direct_addresses()
.get()
.expect("endpoint alive")
.unwrap_or_default()
.into_iter()
.map(|x| x.addr);
let home_relay = endpoint.home_relay().get().expect("endpoint alive");
NodeAddr::from_parts(endpoint.node_id(), home_relay, addrs)
};
let (actor, rpc_tx, local_tx) =
Actor::new(endpoint, self.config, metrics.clone(), &addr.into());
let me = actor.endpoint.node_id().fmt_short();
let max_message_size = actor.state.max_message_size();
let actor_handle = task::spawn(
async move {
if let Err(err) = actor.run().await {
warn!("gossip actor closed with error: {err:?}");
}
}
.instrument(error_span!("gossip", %me)),
);
let api = GossipApi::local(rpc_tx);
Ok(Gossip {
inner: Inner {
api,
local_tx,
_actor_handle: AbortOnDropHandle::new(actor_handle),
max_message_size,
metrics,
}
.into(),
})
}
}
impl Gossip {
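/// Returns a [`Builder`] for configuring and spawning a [`Gossip`] instance.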
pub fn builder() -> Builder {
Builder {
config: Default::default(),
}
}
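/// Serves the gossip API for remote (RPC) clients on the given `quinn` endpoint.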
#[cfg(feature = "rpc")]
pub async fn listen(self, endpoint: quinn::Endpoint) {
self.inner.api.listen(endpoint).await
}
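/// Returns the maximum message size configured for this gossip instance.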
pub fn max_message_size(&self) -> usize {
self.inner.max_message_size
}
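/// Hands an accepted gossip connection over to the actor.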
pub async fn handle_connection(&self, conn: Connection) -> Result<(), Error> {
self.inner
.local_tx
.send(LocalActorMessage::HandleConnection(conn))
.await?;
Ok(())
}
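/// Shuts down the gossip actor, quitting all topics, and waits for completion.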
pub async fn shutdown(&self) -> anyhow::Result<()> {
let (reply, reply_rx) = oneshot::channel();
self.inner
.local_tx
.send(LocalActorMessage::Shutdown { reply })
.await?;
reply_rx.await?;
Ok(())
}
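/// Returns the metrics tracked for this gossip instance.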
pub fn metrics(&self) -> &Arc<Metrics> {
&self.inner.metrics
}
}
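/// The gossip actor, owning the protocol state and all network IO.
///
/// Runs a single `tokio::select!` event loop that multiplexes RPC messages,
/// topic command streams, dial results, connection tasks, and protocol timers.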
struct Actor {
state: proto::State<PublicKey, StdRng>,
endpoint: Endpoint,
dialer: Dialer,
rpc_rx: mpsc::Receiver<api::RpcMessage>,
local_rx: mpsc::Receiver<LocalActorMessage>,
in_event_tx: mpsc::Sender<InEvent>,
in_event_rx: mpsc::Receiver<InEvent>,
timers: Timers<Timer>,
topics: HashMap<TopicId, TopicState>,
peers: HashMap<NodeId, PeerState>,
command_rx: stream_group::Keyed<TopicCommandStream>,
quit_queue: VecDeque<TopicId>,
connection_tasks: JoinSet<(NodeId, Connection, anyhow::Result<()>)>,
metrics: Arc<Metrics>,
topic_event_forwarders: JoinSet<TopicId>,
}
impl Actor {
fn new(
endpoint: Endpoint,
config: proto::Config,
metrics: Arc<Metrics>,
my_addr: &AddrInfo,
) -> (
Self,
mpsc::Sender<api::RpcMessage>,
mpsc::Sender<LocalActorMessage>,
) {
let peer_id = endpoint.node_id();
let dialer = Dialer::new(endpoint.clone());
let state = proto::State::new(
peer_id,
encode_peer_data(my_addr).unwrap(),
config,
rand::rngs::StdRng::from_entropy(),
);
let (rpc_tx, rpc_rx) = mpsc::channel(TO_ACTOR_CAP);
let (local_tx, local_rx) = mpsc::channel(16);
let (in_event_tx, in_event_rx) = mpsc::channel(IN_EVENT_CAP);
let actor = Actor {
endpoint,
state,
dialer,
rpc_rx,
in_event_rx,
in_event_tx,
timers: Timers::new(),
command_rx: StreamGroup::new().keyed(),
peers: Default::default(),
topics: Default::default(),
quit_queue: Default::default(),
connection_tasks: Default::default(),
metrics,
local_rx,
topic_event_forwarders: Default::default(),
};
(actor, rpc_tx, local_tx)
}
pub async fn run(mut self) -> Result<(), Error> {
let (mut current_addresses, mut home_relay_stream, mut direct_addresses_stream) =
self.setup().await?;
let mut i = 0;
while let Some(()) = self
.event_loop(
&mut current_addresses,
&mut home_relay_stream,
&mut direct_addresses_stream,
i,
)
.await?
{
i += 1;
}
Ok(())
}
async fn setup(
&mut self,
) -> Result<
(
BTreeSet<DirectAddr>,
impl Stream<Item = iroh::RelayUrl> + Unpin,
impl Stream<Item = BTreeSet<DirectAddr>> + Unpin,
),
Error,
> {
let direct_addresses_stream = self.endpoint.direct_addresses().stream().filter_map(|i| i);
let home_relay_stream = self.endpoint.home_relay().stream().filter_map(|i| i);
let current_addresses = self.endpoint.direct_addresses().get()?.unwrap_or_default();
self.handle_addr_update(&current_addresses).await?;
Ok((
current_addresses,
home_relay_stream,
direct_addresses_stream,
))
}
async fn event_loop(
&mut self,
current_addresses: &mut BTreeSet<DirectAddr>,
home_relay_stream: &mut (impl Stream<Item = iroh::RelayUrl> + Unpin),
direct_addresses_stream: &mut (impl Stream<Item = BTreeSet<DirectAddr>> + Unpin),
i: usize,
) -> Result<Option<()>, Error> {
self.metrics.actor_tick_main.inc();
tokio::select! {
biased;
conn = self.local_rx.recv() => {
match conn {
Some(LocalActorMessage::Shutdown { reply }) => {
debug!("received shutdown message, quit all topics");
self.quit_queue.extend(self.topics.keys().copied());
self.process_quit_queue().await.ok();
debug!("all topics quit, stop gossip actor");
reply.send(()).ok();
return Ok(None)
},
Some(LocalActorMessage::HandleConnection(conn)) => {
if let Ok(remote_node_id) = conn.remote_node_id() {
self.handle_connection(remote_node_id, ConnOrigin::Accept, conn);
}
}
None => {
debug!("all gossip handles dropped, stop gossip actor");
return Ok(None)
}
}
}
msg = self.rpc_rx.recv() => {
trace!(?i, "tick: to_actor_rx");
self.metrics.actor_tick_rx.inc();
match msg {
Some(msg) => {
self.handle_rpc_msg(msg, Instant::now()).await?;
}
None => {
debug!("all gossip handles dropped, stop gossip actor");
return Ok(None)
}
}
},
Some((key, (topic, command))) = self.command_rx.next(), if !self.command_rx.is_empty() => {
trace!(?i, "tick: command_rx");
self.handle_command(topic, key, command).await?;
},
Some(new_addresses) = direct_addresses_stream.next() => {
trace!(?i, "tick: new_endpoints");
self.metrics.actor_tick_endpoint.inc();
*current_addresses = new_addresses;
self.handle_addr_update(current_addresses).await?;
}
Some(_relay_url) = home_relay_stream.next() => {
trace!(?i, "tick: new_home_relay");
self.handle_addr_update(current_addresses).await?;
}
(peer_id, res) = self.dialer.next_conn() => {
trace!(?i, "tick: dialer");
self.metrics.actor_tick_dialer.inc();
match res {
Some(Ok(conn)) => {
debug!(peer = %peer_id.fmt_short(), "dial successful");
self.metrics.actor_tick_dialer_success.inc();
self.handle_connection(peer_id, ConnOrigin::Dial, conn);
}
Some(Err(err)) => {
warn!(peer = %peer_id.fmt_short(), "dial failed: {err}");
self.metrics.actor_tick_dialer_failure.inc();
let peer_state = self.peers.get(&peer_id);
let is_active = matches!(peer_state, Some(PeerState::Active { .. }));
if !is_active {
self.handle_in_event(InEvent::PeerDisconnected(peer_id), Instant::now())
.await?;
}
}
None => {
warn!(peer = %peer_id.fmt_short(), "dial disconnected");
self.metrics.actor_tick_dialer_failure.inc();
}
}
}
event = self.in_event_rx.recv() => {
trace!(?i, "tick: in_event_rx");
self.metrics.actor_tick_in_event_rx.inc();
let event = event.expect("unreachable: in_event_tx is never dropped before receiver");
self.handle_in_event(event, Instant::now()).await?;
}
_ = self.timers.wait_next() => {
trace!(?i, "tick: timers");
self.metrics.actor_tick_timers.inc();
let now = Instant::now();
while let Some((_instant, timer)) = self.timers.pop_before(now) {
self.handle_in_event(InEvent::TimerExpired(timer), now).await?;
}
}
Some(res) = self.connection_tasks.join_next(), if !self.connection_tasks.is_empty() => {
trace!(?i, "tick: connection_tasks");
let (peer_id, conn, result) = res.expect("connection task panicked");
self.handle_connection_task_finished(peer_id, conn, result).await?;
}
Some(res) = self.topic_event_forwarders.join_next(), if !self.topic_event_forwarders.is_empty() => {
let topic_id = res.expect("topic event forwarder panicked");
if let Some(state) = self.topics.get_mut(&topic_id) {
if !state.still_needed() {
self.quit_queue.push_back(topic_id);
self.process_quit_queue().await?;
}
}
}
}
Ok(Some(()))
}
async fn handle_addr_update(
&mut self,
current_addresses: &BTreeSet<DirectAddr>,
) -> Result<(), Error> {
let peer_data = our_peer_data(&self.endpoint, current_addresses)?;
self.handle_in_event(InEvent::UpdatePeerData(peer_data), Instant::now())
.await
}
async fn handle_command(
&mut self,
topic: TopicId,
key: stream_group::Key,
command: Option<Command>,
) -> Result<(), Error> {
debug!(?topic, ?key, ?command, "handle command");
let Some(state) = self.topics.get_mut(&topic) else {
warn!("received command for unknown topic");
return Ok(());
};
match command {
Some(command) => {
let command = match command {
Command::Broadcast(message) => ProtoCommand::Broadcast(message, Scope::Swarm),
Command::BroadcastNeighbors(message) => {
ProtoCommand::Broadcast(message, Scope::Neighbors)
}
Command::JoinPeers(peers) => ProtoCommand::Join(peers),
};
self.handle_in_event(proto::InEvent::Command(topic, command), Instant::now())
.await?;
}
None => {
state.command_rx_keys.remove(&key);
if !state.still_needed() {
self.quit_queue.push_back(topic);
self.process_quit_queue().await?;
}
}
}
Ok(())
}
fn handle_connection(&mut self, peer_id: NodeId, origin: ConnOrigin, conn: Connection) {
let (send_tx, send_rx) = mpsc::channel(SEND_QUEUE_CAP);
let conn_id = conn.stable_id();
let queue = match self.peers.entry(peer_id) {
Entry::Occupied(mut entry) => entry.get_mut().accept_conn(send_tx, conn_id),
Entry::Vacant(entry) => {
entry.insert(PeerState::Active {
active_send_tx: send_tx,
active_conn_id: conn_id,
other_conns: Vec::new(),
});
Vec::new()
}
};
let max_message_size = self.state.max_message_size();
let in_event_tx = self.in_event_tx.clone();
self.connection_tasks.spawn(
async move {
let res = connection_loop(
peer_id,
&conn,
origin,
send_rx,
&in_event_tx,
max_message_size,
queue,
)
.await;
(peer_id, conn, res)
}
.instrument(error_span!("conn", peer = %peer_id.fmt_short())),
);
}
#[tracing::instrument(name = "conn", skip_all, fields(peer = %peer_id.fmt_short()))]
async fn handle_connection_task_finished(
&mut self,
peer_id: NodeId,
conn: Connection,
task_result: anyhow::Result<()>,
) -> Result<(), Error> {
if conn.close_reason().is_none() {
conn.close(0u32.into(), b"close from disconnect");
}
let reason = conn.close_reason().expect("just closed");
let error = task_result.err();
debug!(%reason, ?error, "connection closed");
if let Some(PeerState::Active {
active_conn_id,
other_conns,
..
}) = self.peers.get_mut(&peer_id)
{
if conn.stable_id() == *active_conn_id {
debug!("active send connection closed, mark peer as disconnected");
self.handle_in_event(InEvent::PeerDisconnected(peer_id), Instant::now())
.await?;
} else {
other_conns.retain(|x| *x != conn.stable_id());
debug!("remaining {} other connections", other_conns.len() + 1);
}
} else {
debug!("peer already marked as disconnected");
}
Ok(())
}
async fn handle_rpc_msg(&mut self, msg: api::RpcMessage, now: Instant) -> Result<(), Error> {
trace!("handle to_actor {msg:?}");
match msg {
api::RpcMessage::Join(msg) => {
let WithChannels {
inner,
rx,
mut tx,
span: _,
} = msg;
let api::JoinRequest {
topic_id,
bootstrap,
} = inner;
let TopicState {
neighbors,
event_sender,
command_rx_keys,
} = self.topics.entry(topic_id).or_default();
let mut sender_dead = false;
if !neighbors.is_empty() {
for neighbor in neighbors.iter() {
if let Err(_err) = tx
.try_send(Event::Gossip(GossipEvent::NeighborUp(*neighbor)))
.await
{
sender_dead = true;
break;
}
}
}
if !sender_dead {
let fut =
topic_subscriber_loop(tx, event_sender.subscribe()).map(move |_| topic_id);
self.topic_event_forwarders.spawn(fut);
}
let command_rx = TopicCommandStream::new(topic_id, Box::pin(rx.into_stream()));
let key = self.command_rx.insert(command_rx);
command_rx_keys.insert(key);
self.handle_in_event(
InEvent::Command(
topic_id,
ProtoCommand::Join(bootstrap.into_iter().collect()),
),
now,
)
.await?;
}
}
Ok(())
}
async fn handle_in_event(&mut self, event: InEvent, now: Instant) -> Result<(), Error> {
self.handle_in_event_inner(event, now).await?;
self.process_quit_queue().await?;
Ok(())
}
async fn process_quit_queue(&mut self) -> Result<(), Error> {
while let Some(topic_id) = self.quit_queue.pop_front() {
self.handle_in_event_inner(
InEvent::Command(topic_id, ProtoCommand::Quit),
Instant::now(),
)
.await?;
if self.topics.remove(&topic_id).is_some() {
tracing::debug!(%topic_id, "publishers and subscribers gone; unsubscribing");
}
}
Ok(())
}
async fn handle_in_event_inner(&mut self, event: InEvent, now: Instant) -> Result<(), Error> {
if matches!(event, InEvent::TimerExpired(_)) {
trace!(?event, "handle in_event");
} else {
debug!(?event, "handle in_event");
};
let out = self.state.handle(event, now, Some(&self.metrics));
for event in out {
if matches!(event, OutEvent::ScheduleTimer(_, _)) {
trace!(?event, "handle out_event");
} else {
debug!(?event, "handle out_event");
};
match event {
OutEvent::SendMessage(peer_id, message) => {
let state = self.peers.entry(peer_id).or_default();
match state {
PeerState::Active { active_send_tx, .. } => {
if let Err(_err) = active_send_tx.send(message).await {
warn!(
peer = %peer_id.fmt_short(),
"failed to send: connection task send loop terminated",
);
}
}
PeerState::Pending { queue } => {
if queue.is_empty() {
debug!(peer = %peer_id.fmt_short(), "start to dial");
self.dialer.queue_dial(peer_id, GOSSIP_ALPN);
}
queue.push(message);
}
}
}
OutEvent::EmitEvent(topic_id, event) => {
let Some(state) = self.topics.get_mut(&topic_id) else {
warn!(?topic_id, "gossip state emitted event for unknown topic");
continue;
};
let TopicState {
neighbors,
event_sender,
..
} = state;
match &event {
ProtoEvent::NeighborUp(neighbor) => {
neighbors.insert(*neighbor);
}
ProtoEvent::NeighborDown(neighbor) => {
neighbors.remove(neighbor);
}
_ => {}
}
let event: GossipEvent = event.into();
event_sender.send(event).ok();
if !state.still_needed() {
self.quit_queue.push_back(topic_id);
}
}
OutEvent::ScheduleTimer(delay, timer) => {
self.timers.insert(now + delay, timer);
}
OutEvent::DisconnectPeer(peer_id) => {
debug!(peer=%peer_id.fmt_short(), "gossip state indicates disconnect: drop peer");
self.peers.remove(&peer_id);
}
OutEvent::PeerData(node_id, data) => match decode_peer_data(&data) {
Err(err) => warn!("Failed to decode {data:?} from {node_id}: {err}"),
Ok(info) => {
debug!(peer = ?node_id, "add known addrs: {info:?}");
let node_addr = NodeAddr {
node_id,
relay_url: info.relay_url,
direct_addresses: info.direct_addresses,
};
if let Err(err) = self
.endpoint
.add_node_addr_with_source(node_addr, SOURCE_NAME)
{
debug!(peer = ?node_id, "add known failed: {err:?}");
}
}
},
}
}
Ok(())
}
}
type ConnId = usize;
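/// Connection state of a remote peer.
///
/// A peer starts out `Pending` while we dial it, queueing outgoing messages;
/// once a connection is live it becomes `Active`. Additional simultaneous
/// connections are tracked in `other_conns` until they close.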
#[derive(Debug)]
enum PeerState {
Pending {
queue: Vec<ProtoMessage>,
},
Active {
active_send_tx: mpsc::Sender<ProtoMessage>,
active_conn_id: ConnId,
other_conns: Vec<ConnId>,
},
}
impl PeerState {
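/// Registers a new connection as the active send path, returning any messages
/// that were queued while the peer was pending.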
fn accept_conn(
&mut self,
send_tx: mpsc::Sender<ProtoMessage>,
conn_id: ConnId,
) -> Vec<ProtoMessage> {
match self {
PeerState::Pending { queue } => {
let queue = std::mem::take(queue);
*self = PeerState::Active {
active_send_tx: send_tx,
active_conn_id: conn_id,
other_conns: Vec::new(),
};
queue
}
PeerState::Active {
active_send_tx,
active_conn_id,
other_conns,
} => {
other_conns.push(*active_conn_id);
*active_send_tx = send_tx;
*active_conn_id = conn_id;
Vec::new()
}
}
}
}
impl Default for PeerState {
fn default() -> Self {
PeerState::Pending { queue: Vec::new() }
}
}
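/// Per-topic state: the current neighbor set, the broadcast channel feeding
/// event subscribers, and the keys of the active command streams.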
#[derive(Debug)]
struct TopicState {
neighbors: BTreeSet<NodeId>,
event_sender: broadcast::Sender<GossipEvent>,
command_rx_keys: HashSet<stream_group::Key>,
}
impl Default for TopicState {
fn default() -> Self {
let (event_sender, _) = broadcast::channel(TOPIC_EVENT_CAP);
Self {
neighbors: Default::default(),
command_rx_keys: Default::default(),
event_sender,
}
}
}
impl TopicState {
fn still_needed(&self) -> bool {
!self.command_rx_keys.is_empty() || self.event_sender.receiver_count() > 0
}
#[cfg(test)]
fn joined(&self) -> bool {
!self.neighbors.is_empty()
}
}
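/// Whether a connection was opened by us (`Dial`) or by the remote peer (`Accept`).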
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ConnOrigin {
Accept,
Dial,
}
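/// Drives a single connection: opens (or accepts) a bidirectional stream,
/// writes queued and newly submitted `ProtoMessage`s on one side, and forwards
/// received messages to the actor as `InEvent::RecvMessage` on the other.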
async fn connection_loop(
from: PublicKey,
conn: &Connection,
origin: ConnOrigin,
mut send_rx: mpsc::Receiver<ProtoMessage>,
in_event_tx: &mpsc::Sender<InEvent>,
max_message_size: usize,
queue: Vec<ProtoMessage>,
) -> anyhow::Result<()> {
let (mut send, mut recv) = match origin {
ConnOrigin::Accept => conn.accept_bi().await?,
ConnOrigin::Dial => conn.open_bi().await?,
};
debug!(?origin, "connection established");
let mut send_buf = BytesMut::new();
let mut recv_buf = BytesMut::new();
let send_loop = async {
for msg in queue {
write_message(&mut send, &mut send_buf, &msg, max_message_size).await?;
}
while let Some(msg) = send_rx.recv().await {
write_message(&mut send, &mut send_buf, &msg, max_message_size).await?;
}
let _ = send.finish();
let _ = send.stopped().await;
anyhow::Ok(())
};
let recv_loop = async {
loop {
let msg = read_message(&mut recv, &mut recv_buf, max_message_size).await?;
match msg {
None => break,
Some(msg) => in_event_tx.send(InEvent::RecvMessage(from, msg)).await?,
}
}
anyhow::Ok(())
};
let res = tokio::join!(send_loop, recv_loop);
res.0.context("send_loop").and(res.1.context("recv_loop"))
}
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
struct AddrInfo {
relay_url: Option<RelayUrl>,
direct_addresses: BTreeSet<SocketAddr>,
}
impl From<NodeAddr> for AddrInfo {
fn from(
NodeAddr {
relay_url,
direct_addresses,
..
}: NodeAddr,
) -> Self {
Self {
relay_url,
direct_addresses,
}
}
}
fn encode_peer_data(info: &AddrInfo) -> Result<PeerData, Error> {
let bytes = postcard::to_stdvec(info)?;
if bytes.is_empty() {
return Err(Error::EmptyPeerData);
}
Ok(PeerData::new(bytes))
}
fn decode_peer_data(peer_data: &PeerData) -> Result<AddrInfo, Error> {
let bytes = peer_data.as_bytes();
if bytes.is_empty() {
return Ok(AddrInfo::default());
}
let info = postcard::from_bytes(bytes)?;
Ok(info)
}
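/// Forwards events from a topic's broadcast channel to one subscriber,
/// translating channel lag into [`Event::Lagged`]; exits when either side closes.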
async fn topic_subscriber_loop(
mut sender: spsc::Sender<Event>,
mut topic_events: broadcast::Receiver<GossipEvent>,
) {
loop {
tokio::select! {
biased;
msg = topic_events.recv() => {
let event = match msg {
Err(broadcast::error::RecvError::Closed) => break,
Err(broadcast::error::RecvError::Lagged(_)) => Event::Lagged,
Ok(event) => Event::Gossip(event)
};
if sender.send(event).await.is_err() {
break;
}
}
_ = sender.closed() => break,
}
}
}
type BoxedCommandReceiver = n0_future::stream::Boxed<Result<Command, irpc::channel::RecvError>>;
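/// A command stream scoped to a single topic.
///
/// Yields `(topic_id, Some(command))` for each command and a final
/// `(topic_id, None)` when the underlying stream ends or errors, so the actor
/// can deregister it.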
#[derive(derive_more::Debug)]
struct TopicCommandStream {
topic_id: TopicId,
#[debug("CommandStream")]
stream: BoxedCommandReceiver,
closed: bool,
}
impl TopicCommandStream {
fn new(topic_id: TopicId, stream: BoxedCommandReceiver) -> Self {
Self {
topic_id,
stream,
closed: false,
}
}
}
impl Stream for TopicCommandStream {
type Item = (TopicId, Option<Command>);
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
if self.closed {
return Poll::Ready(None);
}
match Pin::new(&mut self.stream).poll_next(cx) {
Poll::Ready(Some(Ok(item))) => Poll::Ready(Some((self.topic_id, Some(item)))),
Poll::Ready(None) | Poll::Ready(Some(Err(_))) => {
self.closed = true;
Poll::Ready(Some((self.topic_id, None)))
}
Poll::Pending => Poll::Pending,
}
}
}
fn our_peer_data(
endpoint: &Endpoint,
direct_addresses: &BTreeSet<DirectAddr>,
) -> Result<PeerData, Error> {
encode_peer_data(&AddrInfo {
relay_url: endpoint.home_relay().get().ok().flatten(),
direct_addresses: direct_addresses.iter().map(|x| x.addr).collect(),
})
}
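/// Dials peers for the gossip actor, deduplicating concurrent dials to the
/// same node and supporting cancellation of pending dials.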
#[derive(Debug)]
struct Dialer {
endpoint: Endpoint,
pending: JoinSet<(NodeId, Option<Result<Connection, Error>>)>,
pending_dials: HashMap<NodeId, CancellationToken>,
}
impl Dialer {
fn new(endpoint: Endpoint) -> Self {
Self {
endpoint,
pending: Default::default(),
pending_dials: Default::default(),
}
}
fn queue_dial(&mut self, node_id: NodeId, alpn: &'static [u8]) {
if self.is_pending(node_id) {
return;
}
let cancel = CancellationToken::new();
self.pending_dials.insert(node_id, cancel.clone());
let endpoint = self.endpoint.clone();
self.pending.spawn(async move {
let res = tokio::select! {
biased;
_ = cancel.cancelled() => None,
res = endpoint.connect(node_id, alpn) => Some(res.map_err(Error::from)),
};
(node_id, res)
});
}
fn is_pending(&self, node: NodeId) -> bool {
self.pending_dials.contains_key(&node)
}
async fn next_conn(&mut self) -> (NodeId, Option<Result<Connection, Error>>) {
if self.pending_dials.is_empty() {
return std::future::pending().await;
}
loop {
match self.pending.join_next().await {
Some(Ok((node_id, res))) => {
self.pending_dials.remove(&node_id);
break (node_id, res);
}
Some(Err(e)) => {
error!("next conn error: {:?}", e);
}
None => {
error!("no more pending conns available");
std::future::pending().await
}
}
}
}
}
#[cfg(test)]
pub(crate) mod test {
use std::time::Duration;
use bytes::Bytes;
use futures_concurrency::future::TryJoin;
use iroh::{protocol::Router, RelayMap, RelayMode, SecretKey};
use rand::Rng;
use tokio::{spawn, time::timeout};
use tokio_util::sync::CancellationToken;
use tracing::{info, instrument};
use tracing_test::traced_test;
use super::*;
struct ManualActorLoop {
actor: Actor,
current_addresses: BTreeSet<DirectAddr>,
step: usize,
}
impl std::ops::Deref for ManualActorLoop {
type Target = Actor;
fn deref(&self) -> &Self::Target {
&self.actor
}
}
impl std::ops::DerefMut for ManualActorLoop {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.actor
}
}
type EndpointHandle = tokio::task::JoinHandle<Result<(), Error>>;
impl ManualActorLoop {
#[instrument(skip_all, fields(me = %actor.endpoint.node_id().fmt_short()))]
async fn new(mut actor: Actor) -> Result<Self, Error> {
let (current_addresses, _, _) = actor.setup().await?;
let test_rig = Self {
actor,
current_addresses,
step: 0,
};
Ok(test_rig)
}
#[instrument(skip_all, fields(me = %self.endpoint.node_id().fmt_short()))]
async fn step(&mut self) -> Result<Option<()>, Error> {
let ManualActorLoop {
actor,
current_addresses,
step,
} = self;
*step += 1;
let home_relay_stream = &mut futures_lite::stream::pending();
let direct_addresses_stream = &mut futures_lite::stream::pending();
actor
.event_loop(
current_addresses,
home_relay_stream,
direct_addresses_stream,
*step,
)
.await
}
async fn steps(&mut self, n: usize) -> Result<(), Error> {
for _ in 0..n {
self.step().await?;
}
Ok(())
}
async fn finish(mut self) -> Result<(), Error> {
while self.step().await?.is_some() {}
Ok(())
}
}
impl Gossip {
async fn t_new_with_actor(
rng: &mut rand_chacha::ChaCha12Rng,
config: proto::Config,
relay_map: RelayMap,
cancel: &CancellationToken,
) -> Result<(Self, Actor, EndpointHandle), Error> {
let my_addr = AddrInfo {
relay_url: relay_map.nodes().next().map(|relay| relay.url.clone()),
direct_addresses: Default::default(),
};
let endpoint = create_endpoint(rng, relay_map).await?;
let metrics = Arc::new(Metrics::default());
let (actor, to_actor_tx, conn_tx) =
Actor::new(endpoint, config, metrics.clone(), &my_addr);
let max_message_size = actor.state.max_message_size();
let _actor_handle =
AbortOnDropHandle::new(task::spawn(futures_lite::future::pending()));
let gossip = Self {
inner: Inner {
api: GossipApi::local(to_actor_tx),
local_tx: conn_tx,
_actor_handle,
max_message_size,
metrics,
}
.into(),
};
let endpoint_task = task::spawn(endpoint_loop(
actor.endpoint.clone(),
gossip.clone(),
cancel.child_token(),
));
Ok((gossip, actor, endpoint_task))
}
async fn t_new(
rng: &mut rand_chacha::ChaCha12Rng,
config: proto::Config,
relay_map: RelayMap,
cancel: &CancellationToken,
) -> Result<(Self, Endpoint, EndpointHandle, impl Drop), Error> {
let (g, actor, ep_handle) =
Gossip::t_new_with_actor(rng, config, relay_map, cancel).await?;
let ep = actor.endpoint.clone();
let me = ep.node_id().fmt_short();
let actor_handle = task::spawn(
async move {
if let Err(err) = actor.run().await {
warn!("gossip actor closed with error: {err:?}");
}
}
.instrument(tracing::error_span!("gossip", %me)),
);
Ok((g, ep, ep_handle, AbortOnDropHandle::new(actor_handle)))
}
}
pub(crate) async fn create_endpoint(
rng: &mut rand_chacha::ChaCha12Rng,
relay_map: RelayMap,
) -> Result<Endpoint, Error> {
let ep = Endpoint::builder()
.secret_key(SecretKey::generate(rng))
.alpns(vec![GOSSIP_ALPN.to_vec()])
.relay_mode(RelayMode::Custom(relay_map))
.insecure_skip_relay_cert_verify(true)
.bind()
.await?;
ep.home_relay().initialized().await?;
Ok(ep)
}
async fn endpoint_loop(
endpoint: Endpoint,
gossip: Gossip,
cancel: CancellationToken,
) -> Result<(), Error> {
loop {
tokio::select! {
biased;
_ = cancel.cancelled() => break,
incoming = endpoint.accept() => match incoming {
None => break,
Some(incoming) => {
let connecting = match incoming.accept() {
Ok(connecting) => connecting,
Err(err) => {
warn!("incoming connection failed: {err:#}");
continue;
}
};
gossip.handle_connection(connecting.await?).await?
}
}
}
}
Ok(())
}
#[tokio::test]
#[traced_test]
async fn gossip_net_smoke() {
let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1);
let (relay_map, relay_url, _guard) = iroh::test_utils::run_relay_server().await.unwrap();
let ep1 = create_endpoint(&mut rng, relay_map.clone()).await.unwrap();
let ep2 = create_endpoint(&mut rng, relay_map.clone()).await.unwrap();
let ep3 = create_endpoint(&mut rng, relay_map.clone()).await.unwrap();
let go1 = Gossip::builder().spawn(ep1.clone()).await.unwrap();
let go2 = Gossip::builder().spawn(ep2.clone()).await.unwrap();
let go3 = Gossip::builder().spawn(ep3.clone()).await.unwrap();
debug!("peer1 {:?}", ep1.node_id());
debug!("peer2 {:?}", ep2.node_id());
debug!("peer3 {:?}", ep3.node_id());
let pi1 = ep1.node_id();
let pi2 = ep2.node_id();
let cancel = CancellationToken::new();
let tasks = [
spawn(endpoint_loop(ep1.clone(), go1.clone(), cancel.clone())),
spawn(endpoint_loop(ep2.clone(), go2.clone(), cancel.clone())),
spawn(endpoint_loop(ep3.clone(), go3.clone(), cancel.clone())),
];
debug!("----- adding peers ----- ");
let topic: TopicId = blake3::hash(b"foobar").into();
let addr1 = NodeAddr::new(pi1).with_relay_url(relay_url.clone());
let addr2 = NodeAddr::new(pi2).with_relay_url(relay_url);
ep2.add_node_addr(addr1.clone()).unwrap();
ep3.add_node_addr(addr2).unwrap();
debug!("----- joining ----- ");
let [sub1, mut sub2, mut sub3] = [
go1.subscribe_and_join(topic, vec![]),
go2.subscribe_and_join(topic, vec![pi1]),
go3.subscribe_and_join(topic, vec![pi2]),
]
.try_join()
.await
.unwrap();
let (mut sink1, _stream1) = sub1.split();
let len = 2;
let pub1 = spawn(async move {
for i in 0..len {
let message = format!("hi{}", i);
info!("go1 broadcast: {message:?}");
sink1.broadcast(message.into_bytes().into()).await.unwrap();
tokio::time::sleep(Duration::from_micros(1)).await;
}
});
let sub2 = spawn(async move {
let mut recv = vec![];
loop {
let ev = sub2.next().await.unwrap().unwrap();
info!("go2 event: {ev:?}");
if let Event::Gossip(GossipEvent::Received(msg)) = ev {
recv.push(msg.content);
}
if recv.len() == len {
return recv;
}
}
});
let sub3 = spawn(async move {
let mut recv = vec![];
loop {
let ev = sub3.next().await.unwrap().unwrap();
info!("go3 event: {ev:?}");
if let Event::Gossip(GossipEvent::Received(msg)) = ev {
recv.push(msg.content);
}
if recv.len() == len {
return recv;
}
}
});
timeout(Duration::from_secs(10), pub1)
.await
.unwrap()
.unwrap();
let recv2 = timeout(Duration::from_secs(10), sub2)
.await
.unwrap()
.unwrap();
let recv3 = timeout(Duration::from_secs(10), sub3)
.await
.unwrap()
.unwrap();
let expected: Vec<Bytes> = (0..len)
.map(|i| Bytes::from(format!("hi{i}").into_bytes()))
.collect();
assert_eq!(recv2, expected);
assert_eq!(recv3, expected);
cancel.cancel();
for t in tasks {
timeout(Duration::from_secs(10), t)
.await
.unwrap()
.unwrap()
.unwrap();
}
}
#[tokio::test]
#[traced_test]
async fn subscription_cleanup() -> testresult::TestResult {
let rng = &mut rand_chacha::ChaCha12Rng::seed_from_u64(1);
let ct = CancellationToken::new();
let (relay_map, relay_url, _guard) = iroh::test_utils::run_relay_server().await.unwrap();
let (go1, actor, ep1_handle) =
Gossip::t_new_with_actor(rng, Default::default(), relay_map.clone(), &ct).await?;
let mut actor = ManualActorLoop::new(actor).await?;
let (go2, ep2, ep2_handle, _test_actor_handle) =
Gossip::t_new(rng, Default::default(), relay_map, &ct).await?;
let node_id1 = actor.endpoint.node_id();
let node_id2 = ep2.node_id();
tracing::info!(
node_1 = node_id1.fmt_short(),
node_2 = node_id2.fmt_short(),
"nodes ready"
);
let topic: TopicId = blake3::hash(b"subscription_cleanup").into();
tracing::info!(%topic, "joining");
let ct2 = ct.clone();
let go2_task = async move {
let (_pub_tx, mut sub_rx) = go2.subscribe_and_join(topic, vec![]).await?.split();
let subscribe_fut = async {
while let Some(ev) = sub_rx.try_next().await? {
match ev {
Event::Lagged => tracing::debug!("missed some messages :("),
Event::Gossip(gm) => match gm {
GossipEvent::Received(_) => unreachable!("test does not send messages"),
other => tracing::debug!(?other, "gs event"),
},
}
}
tracing::debug!("subscribe stream ended");
anyhow::Ok(())
};
tokio::select! {
_ = ct2.cancelled() => Ok(()),
res = subscribe_fut => res,
}
}
.instrument(tracing::debug_span!("node_2", %node_id2));
let go2_handle = task::spawn(go2_task);
let addr2 = NodeAddr::new(node_id2).with_relay_url(relay_url);
actor.endpoint.add_node_addr(addr2)?;
let (tx, mut rx) = mpsc::channel::<()>(1);
let ct1 = ct.clone();
let go1_task = async move {
tracing::info!("subscribing the first time");
let sub_1a = go1.subscribe_and_join(topic, vec![node_id2]).await?;
rx.recv().await.expect("signal for second subscribe");
tracing::info!("subscribing a second time");
let sub_1b = go1.subscribe_and_join(topic, vec![node_id2]).await?;
drop(sub_1a);
rx.recv().await.expect("signal for second subscribe");
tracing::info!("dropping all handles");
drop(sub_1b);
ct1.cancelled().await;
drop(go1);
anyhow::Ok(())
}
.instrument(tracing::debug_span!("node_1", %node_id1));
let go1_handle = task::spawn(go1_task);
actor.steps(3).await?;
let state = actor.topics.get(&topic).expect("get registered topic");
assert!(state.joined());
tx.send(()).await?;
actor.steps(3).await?;
let state = actor.topics.get(&topic).expect("get registered topic");
assert!(state.joined());
tx.send(()).await?;
actor.steps(2).await?;
assert!(!actor.topics.contains_key(&topic));
ct.cancel();
let wait = Duration::from_secs(2);
timeout(wait, ep1_handle).await???;
timeout(wait, ep2_handle).await???;
timeout(wait, go1_handle).await???;
timeout(wait, go2_handle).await???;
timeout(wait, actor.finish()).await??;
testresult::TestResult::Ok(())
}
#[tokio::test]
#[traced_test]
async fn can_reconnect() -> testresult::TestResult {
let rng = &mut rand_chacha::ChaCha12Rng::seed_from_u64(1);
let ct = CancellationToken::new();
let (relay_map, relay_url, _guard) = iroh::test_utils::run_relay_server().await.unwrap();
let (go1, ep1, ep1_handle, _test_actor_handle1) =
Gossip::t_new(rng, Default::default(), relay_map.clone(), &ct).await?;
let (go2, ep2, ep2_handle, _test_actor_handle2) =
Gossip::t_new(rng, Default::default(), relay_map, &ct).await?;
let node_id1 = ep1.node_id();
let node_id2 = ep2.node_id();
tracing::info!(
node_1 = node_id1.fmt_short(),
node_2 = node_id2.fmt_short(),
"nodes ready"
);
let topic: TopicId = blake3::hash(b"can_reconnect").into();
tracing::info!(%topic, "joining");
let ct2 = ct.child_token();
let (tx, mut rx) = mpsc::channel::<()>(1);
let addr1 = NodeAddr::new(node_id1).with_relay_url(relay_url.clone());
ep2.add_node_addr(addr1)?;
let go2_task = async move {
let mut sub = go2.subscribe(topic, Vec::new()).await?;
sub.joined().await?;
rx.recv().await.expect("signal to unsubscribe");
tracing::info!("unsubscribing");
drop(sub);
rx.recv().await.expect("signal to subscribe again");
tracing::info!("resubscribing");
let mut sub = go2.subscribe(topic, vec![node_id1]).await?;
sub.joined().await?;
tracing::info!("subscription successful!");
ct2.cancelled().await;
anyhow::Ok(())
}
.instrument(tracing::debug_span!("node_2", %node_id2));
let go2_handle = task::spawn(go2_task);
let addr2 = NodeAddr::new(node_id2).with_relay_url(relay_url);
ep1.add_node_addr(addr2)?;
let mut sub = go1.subscribe(topic, vec![node_id2]).await?;
sub.joined().await?;
tx.send(()).await?;
let conn_timeout = Duration::from_millis(500);
let ev = timeout(conn_timeout, sub.try_next()).await??;
assert_eq!(ev, Some(Event::Gossip(GossipEvent::NeighborDown(node_id2))));
tracing::info!("node 2 left");
tx.send(()).await?;
let conn_timeout = Duration::from_millis(500);
let ev = timeout(conn_timeout, sub.try_next()).await??;
assert_eq!(ev, Some(Event::Gossip(GossipEvent::NeighborUp(node_id2))));
tracing::info!("node 2 rejoined!");
ct.cancel();
let wait = Duration::from_secs(2);
timeout(wait, ep1_handle).await???;
timeout(wait, ep2_handle).await???;
timeout(wait, go2_handle).await???;
testresult::TestResult::Ok(())
}
#[tokio::test]
#[traced_test]
async fn can_die_and_reconnect() -> testresult::TestResult {
fn run_in_thread<T, F, Fut>(
cancel: CancellationToken,
f: F,
) -> std::thread::JoinHandle<Option<T>>
where
T: Send + 'static,
F: 'static + Send + FnOnce() -> Fut,
Fut: 'static + std::future::Future<Output = T>,
{
std::thread::spawn(move || {
let fut = f();
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
let res = rt.block_on(async move { cancel.run_until_cancelled(fut).await });
tracing::info!("run_in_thread terminate");
res
})
}
async fn spawn_gossip(
secret_key: SecretKey,
relay_map: RelayMap,
) -> anyhow::Result<(Router, Gossip)> {
let ep = Endpoint::builder()
.secret_key(secret_key)
.relay_mode(RelayMode::Custom(relay_map))
.insecure_skip_relay_cert_verify(true)
.bind()
.await?;
let gossip = Gossip::builder().spawn(ep.clone()).await?;
let router = Router::builder(ep.clone())
.accept(GOSSIP_ALPN, gossip.clone())
.spawn();
Ok((router, gossip))
}
async fn broadcast_once(
secret_key: SecretKey,
relay_map: RelayMap,
bootstrap_addr: NodeAddr,
topic_id: TopicId,
message: String,
) -> anyhow::Result<()> {
let (router, gossip) = spawn_gossip(secret_key, relay_map).await?;
info!(node_id = %router.endpoint().node_id().fmt_short(), "broadcast node spawned");
let bootstrap = vec![bootstrap_addr.node_id];
router.endpoint().add_node_addr(bootstrap_addr)?;
let mut topic = gossip.subscribe_and_join(topic_id, bootstrap).await?;
topic.broadcast(message.as_bytes().to_vec().into()).await?;
std::future::pending::<()>().await;
Ok(())
}
let (relay_map, _relay_url, _guard) = iroh::test_utils::run_relay_server().await.unwrap();
let mut rng = &mut rand_chacha::ChaCha12Rng::seed_from_u64(1);
let topic_id = TopicId::from_bytes(rng.gen());
let (addr_tx, addr_rx) = tokio::sync::oneshot::channel();
let (msgs_recv_tx, mut msgs_recv_rx) = tokio::sync::mpsc::channel(3);
let recv_task = tokio::task::spawn({
let relay_map = relay_map.clone();
let secret_key = SecretKey::generate(&mut rng);
async move {
let (router, gossip) = spawn_gossip(secret_key, relay_map).await?;
let addr = router.endpoint().node_addr().await?;
info!(node_id = %addr.node_id.fmt_short(), "recv node spawned");
addr_tx.send(addr).unwrap();
let mut topic = gossip.subscribe_and_join(topic_id, vec![]).await?;
while let Some(event) = topic.try_next().await.unwrap() {
if let Event::Gossip(GossipEvent::Received(message)) = event {
let message = std::str::from_utf8(&message.content)?.to_string();
msgs_recv_tx.send(message).await?;
}
}
anyhow::Ok(())
}
});
let node0_addr = addr_rx.await?;
let max_wait = Duration::from_secs(5);
let cancel = CancellationToken::new();
let secret = SecretKey::generate(&mut rng);
let join_handle_1 = run_in_thread(cancel.clone(), {
let secret = secret.clone();
let relay_map = relay_map.clone();
let node0_addr = node0_addr.clone();
move || broadcast_once(secret, relay_map, node0_addr, topic_id, "msg1".to_string())
});
let msg = timeout(max_wait, msgs_recv_rx.recv()).await?.unwrap();
assert_eq!(&msg, "msg1");
info!("kill broadcast node");
cancel.cancel();
let cancel = CancellationToken::new();
let join_handle_2 = run_in_thread(cancel.clone(), {
let secret = secret.clone();
let relay_map = relay_map.clone();
let node0_addr = node0_addr.clone();
move || broadcast_once(secret, relay_map, node0_addr, topic_id, "msg2".to_string())
});
let msg = timeout(max_wait, msgs_recv_rx.recv()).await?.unwrap();
assert_eq!(&msg, "msg2");
info!("kill broadcast node");
cancel.cancel();
info!("kill recv node");
recv_task.abort();
assert!(join_handle_1.join().unwrap().is_none());
assert!(join_handle_2.join().unwrap().is_none());
Ok(())
}
}