use std::{
collections::{hash_map::Entry, BTreeSet, HashMap, HashSet, VecDeque},
net::SocketAddr,
pin::Pin,
sync::{atomic::AtomicUsize, Arc},
task::{Context, Poll},
};
use anyhow::Context as _;
use bytes::BytesMut;
use discovery::GossipDiscovery;
use futures_concurrency::stream::{stream_group, StreamGroup};
use iroh::{
endpoint::Connection, node_info::NodeData, protocol::ProtocolHandler, Endpoint, NodeAddr,
NodeId, PublicKey, RelayUrl,
};
use iroh_metrics::inc;
use n0_future::{
boxed::BoxFuture,
task::{self, AbortOnDropHandle, JoinSet},
time::Instant,
Stream, StreamExt, TryFutureExt as _,
};
use rand::rngs::StdRng;
use rand_core::SeedableRng;
use serde::{Deserialize, Serialize};
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, error_span, trace, warn, Instrument};
use self::util::{read_message, write_message, Timers};
use crate::{
metrics::Metrics,
proto::{self, HyparviewConfig, PeerData, PlumtreeConfig, Scope, TopicId},
};
mod discovery;
mod handles;
pub mod util;
pub use self::handles::{
Command, CommandStream, Event, GossipEvent, GossipReceiver, GossipSender, GossipTopic,
JoinOptions, Message,
};
pub const GOSSIP_ALPN: &[u8] = b"/iroh-gossip/0";
const TOPIC_EVENTS_DEFAULT_CAP: usize = 2048;
const TOPIC_COMMANDS_DEFAULT_CAP: usize = 2048;
const SEND_QUEUE_CAP: usize = 64;
const TO_ACTOR_CAP: usize = 64;
const IN_EVENT_CAP: usize = 1024;
pub type ProtoEvent = proto::Event<PublicKey>;
pub type ProtoCommand = proto::Command<PublicKey>;
type InEvent = proto::InEvent<PublicKey>;
type OutEvent = proto::OutEvent<PublicKey>;
type Timer = proto::Timer<PublicKey>;
type ProtoMessage = proto::Message<PublicKey>;
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Actor closed")]
ActorClosed,
#[error("Joined event to be the first event received")]
UnexpectedEvent,
#[error("Receiver closed")]
ReceiverClosed,
#[error("Ser/De {0}")]
SerDe(#[from] postcard::Error),
#[error("empty peer data")]
EmptyPeerData,
#[error("write {0}")]
Write(#[from] util::WriteError),
#[error("read {0}")]
Read(#[from] util::ReadError),
#[error(transparent)]
WatchableDisconnected(#[from] iroh::watchable::Disconnected),
#[error(transparent)]
IrohConnection(#[from] iroh::endpoint::ConnectionError),
#[error(transparent)]
Iroh(#[from] anyhow::Error),
#[error("join")]
Join(#[from] task::JoinError),
}
impl<T> From<async_channel::SendError<T>> for Error {
    /// A failed send on a subscriber channel means the actor side is gone.
    fn from(_: async_channel::SendError<T>) -> Self {
        Self::ActorClosed
    }
}
impl<T> From<mpsc::error::SendError<T>> for Error {
fn from(_value: mpsc::error::SendError<T>) -> Self {
Error::ActorClosed
}
}
/// Handle to a running gossip instance.
///
/// Cheaply cloneable; the underlying actor task is aborted once the last
/// clone is dropped (via the `AbortOnDropHandle` stored in `Inner`).
#[derive(Debug, Clone)]
pub struct Gossip {
    pub(crate) inner: Arc<Inner>,
    // Lazily-initialized RPC handler, only with the `rpc` feature.
    #[cfg(feature = "rpc")]
    pub(crate) rpc_handler: Arc<std::sync::OnceLock<crate::rpc::RpcHandler>>,
}
/// Shared state behind all clones of a [`Gossip`] handle.
#[derive(Debug)]
pub(crate) struct Inner {
    /// Channel into the actor's main loop.
    to_actor_tx: mpsc::Sender<ToActor>,
    /// Aborts the actor task when the last `Gossip` clone is dropped.
    _actor_handle: AbortOnDropHandle<()>,
    /// Maximum message size, copied from the protocol config.
    max_message_size: usize,
    /// Monotonic counter used to allocate unique `ReceiverId`s.
    next_receiver_id: AtomicUsize,
}
impl ProtocolHandler for Gossip {
fn accept(&self, conn: Connection) -> BoxFuture<anyhow::Result<()>> {
let inner = self.inner.clone();
Box::pin(async move {
inner.handle_connection(conn).await?;
Ok(())
})
}
}
/// Configures and spawns a [`Gossip`] instance.
#[derive(Debug, Clone)]
pub struct Builder {
    /// Protocol configuration (message size, membership, broadcast).
    config: proto::Config,
    /// Optional address discovery integration.
    discovery: Option<GossipDiscovery>,
}
impl Builder {
    /// Sets the maximum allowed gossip message size in bytes.
    pub fn max_message_size(mut self, size: usize) -> Self {
        self.config.max_message_size = size;
        self
    }

    /// Sets the membership (HyParView) protocol configuration.
    pub fn membership_config(mut self, config: HyparviewConfig) -> Self {
        self.config.membership = config;
        self
    }

    /// Sets the broadcast (Plumtree) protocol configuration.
    pub fn broadcast_config(mut self, config: PlumtreeConfig) -> Self {
        self.config.broadcast = config;
        self
    }

    /// Enables exchanging node addresses as gossip peer data.
    pub fn use_gossip_for_discovery(mut self, discovery: GossipDiscovery) -> Self {
        self.discovery = Some(discovery);
        self
    }

    /// Spawns the gossip actor task and returns the [`Gossip`] handle.
    ///
    /// The actor runs until all handles are dropped (the handle stores an
    /// `AbortOnDropHandle`) or the actor loop exits on its own.
    pub fn spawn(self, endpoint: Endpoint) -> Gossip {
        // Short node id, used only for the tracing span.
        let me = endpoint.node_id().fmt_short();
        let max_message_size = self.config.max_message_size;
        let (actor, to_actor_tx) = Actor::new(endpoint, self.config, self.discovery);
        let actor_handle = task::spawn(
            async move {
                if let Err(err) = actor.run().await {
                    warn!("gossip actor closed with error: {err:?}");
                }
            }
            .instrument(error_span!("gossip", %me)),
        );
        Gossip {
            inner: Inner {
                to_actor_tx,
                _actor_handle: AbortOnDropHandle::new(actor_handle),
                max_message_size,
                next_receiver_id: Default::default(),
            }
            .into(),
            #[cfg(feature = "rpc")]
            rpc_handler: Default::default(),
        }
    }
}
impl Gossip {
    /// Returns a [`Builder`] with default protocol configuration.
    pub fn builder() -> Builder {
        Builder {
            config: Default::default(),
            discovery: None,
        }
    }

    /// Maximum allowed gossip message size, as configured at spawn time.
    pub fn max_message_size(&self) -> usize {
        self.inner.max_message_size
    }

    /// Hands an accepted connection over to the gossip actor.
    pub async fn handle_connection(&self, conn: Connection) -> Result<(), Error> {
        self.inner.handle_connection(conn).await
    }

    /// Subscribes to a topic and waits until the swarm is joined
    /// (i.e. the first `Joined` event was received).
    pub async fn subscribe_and_join(
        &self,
        topic_id: TopicId,
        bootstrap: Vec<NodeId>,
    ) -> Result<GossipTopic, Error> {
        let mut sub = self.subscribe_with_opts(topic_id, JoinOptions::with_bootstrap(bootstrap));
        sub.joined().await?;
        Ok(sub)
    }

    /// Subscribes to a topic without waiting for the join to complete.
    pub fn subscribe(
        &self,
        topic_id: TopicId,
        bootstrap: Vec<NodeId>,
    ) -> Result<GossipTopic, Error> {
        let sub = self.subscribe_with_opts(topic_id, JoinOptions::with_bootstrap(bootstrap));
        Ok(sub)
    }

    /// Subscribes with explicit [`JoinOptions`], wiring up a bounded
    /// command channel and returning the combined [`GossipTopic`].
    pub fn subscribe_with_opts(&self, topic_id: TopicId, opts: JoinOptions) -> GossipTopic {
        let (command_tx, command_rx) = async_channel::bounded(TOPIC_COMMANDS_DEFAULT_CAP);
        let command_rx: CommandStream = Box::pin(command_rx);
        let event_rx = self.subscribe_with_stream(topic_id, opts, command_rx);
        GossipTopic::new(command_tx, event_rx)
    }

    /// Subscribes using a caller-provided command stream and returns the
    /// raw [`EventStream`].
    pub fn subscribe_with_stream(
        &self,
        topic_id: TopicId,
        options: JoinOptions,
        updates: CommandStream,
    ) -> EventStream {
        self.inner.subscribe_with_stream(topic_id, options, updates)
    }
}
impl Inner {
    /// Registers a new subscriber with the actor and returns its event stream.
    ///
    /// The `ToActor::Join` message is sent from a spawned task; the returned
    /// stream first awaits that send, then yields events from the
    /// per-subscriber channel.
    pub(crate) fn subscribe_with_stream(
        &self,
        topic_id: TopicId,
        options: JoinOptions,
        updates: CommandStream,
    ) -> EventStream {
        let (event_tx, event_rx) = async_channel::bounded(options.subscription_capacity);
        let to_actor_tx = self.to_actor_tx.clone();
        // Unique id so the actor can remove exactly this receiver on drop.
        let receiver_id = ReceiverId(
            self.next_receiver_id
                .fetch_add(1, std::sync::atomic::Ordering::Relaxed),
        );
        let channels = SubscriberChannels {
            receiver_id,
            command_rx: updates,
            event_tx,
        };
        let task = task::spawn(async move {
            to_actor_tx
                .send(ToActor::Join {
                    topic_id,
                    bootstrap: options.bootstrap,
                    channels,
                })
                .await
                .map_err(Error::from)
        });
        // Await the join send, then flatten into the event channel.
        let stream = async move {
            task.await??;
            Ok(event_rx)
        }
        .try_flatten_stream();
        EventStream {
            inner: Box::pin(stream),
            to_actor_tx: self.to_actor_tx.clone(),
            topic: topic_id,
            receiver_id,
        }
    }

    /// Sends a message to the actor, mapping channel closure to `ActorClosed`.
    async fn send(&self, event: ToActor) -> Result<(), Error> {
        self.to_actor_tx.send(event).await?;
        Ok(())
    }

    /// Forwards an inbound (accepted) connection to the actor.
    async fn handle_connection(&self, conn: Connection) -> Result<(), Error> {
        let node_id = conn.remote_node_id()?;
        self.send(ToActor::HandleConnection(node_id, ConnOrigin::Accept, conn))
            .await?;
        Ok(())
    }
}
/// Stream of [`Event`]s for a single topic subscription.
///
/// On drop, notifies the actor so the topic can be torn down once no
/// subscribers remain.
#[derive(derive_more::Debug)]
pub struct EventStream {
    #[debug("Stream")]
    inner: Pin<Box<dyn Stream<Item = Result<Event, Error>> + Send + Sync + 'static>>,
    /// Used to send `ToActor::ReceiverGone` on drop.
    to_actor_tx: mpsc::Sender<ToActor>,
    topic: TopicId,
    receiver_id: ReceiverId,
}
impl Stream for EventStream {
    type Item = Result<Event, Error>;

    /// Delegates to the boxed inner stream.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.inner.as_mut().poll_next(cx)
    }
}
impl Drop for EventStream {
    /// Notifies the actor that this receiver is gone so the topic can be
    /// cleaned up once no subscribers remain.
    fn drop(&mut self) {
        if let Err(e) = self.to_actor_tx.try_send(ToActor::ReceiverGone {
            topic: self.topic,
            receiver_id: self.receiver_id,
        }) {
            match e {
                mpsc::error::TrySendError::Full(msg) => {
                    // Channel full: retry asynchronously if we are inside a
                    // tokio runtime (Drop itself cannot await).
                    if let Ok(handle) = tokio::runtime::Handle::try_current() {
                        let to_actor_tx = self.to_actor_tx.clone();
                        handle.spawn(async move {
                            let _ = to_actor_tx.send(msg).await;
                        });
                    } else {
                        // No runtime: the notification is dropped. Best
                        // effort — the actor prunes closed event senders
                        // on the next broadcast (see `EventSenders::send`).
                    }
                }
                mpsc::error::TrySendError::Closed(_) => {
                    // Actor already shut down; nothing left to notify.
                }
            }
        }
    }
}
/// Messages sent from `Gossip`/`EventStream` handles to the actor.
#[derive(derive_more::Debug)]
enum ToActor {
    /// Hand over a connection (accepted or dialed) for the given peer.
    HandleConnection(PublicKey, ConnOrigin, #[debug("Connection")] Connection),
    /// Register a new subscriber for a topic.
    Join {
        topic_id: TopicId,
        /// Peers to join through.
        bootstrap: BTreeSet<NodeId>,
        /// Event/command channel ends for the subscriber.
        channels: SubscriberChannels,
    },
    /// An `EventStream` was dropped; detach its receiver.
    ReceiverGone {
        topic: TopicId,
        receiver_id: ReceiverId,
    },
}
/// The gossip actor: owns the protocol state machine and multiplexes all
/// I/O — connections, dials, timers, and subscriber channels.
struct Actor {
    /// Protocol state machine (membership + broadcast).
    state: proto::State<PublicKey, StdRng>,
    /// Optional address discovery integration.
    discovery: Option<GossipDiscovery>,
    /// Dials peers on demand.
    dialer: Dialer,
    /// Messages from `Gossip` handles.
    to_actor_rx: mpsc::Receiver<ToActor>,
    /// Sender handed to connection tasks for received messages.
    in_event_tx: mpsc::Sender<InEvent>,
    /// Protocol input events (received messages, timers, commands).
    in_event_rx: mpsc::Receiver<InEvent>,
    /// Pending protocol timers.
    timers: Timers<Timer>,
    /// Per-topic subscriber bookkeeping.
    topics: HashMap<TopicId, TopicState>,
    /// Per-peer connection state.
    peers: HashMap<NodeId, PeerState>,
    /// All subscribers' command streams, merged into one keyed group.
    command_rx: stream_group::Keyed<TopicCommandStream>,
    /// Topics scheduled for teardown.
    quit_queue: VecDeque<TopicId>,
    /// Tasks driving the per-connection send/receive loops.
    connection_tasks: JoinSet<(NodeId, Connection, anyhow::Result<()>)>,
}
impl Actor {
/// Creates the actor plus the sender half used by `Gossip` handles.
fn new(
    endpoint: Endpoint,
    config: proto::Config,
    discovery: Option<GossipDiscovery>,
) -> (Self, mpsc::Sender<ToActor>) {
    let peer_id = endpoint.node_id();
    let dialer = Dialer::new(endpoint);
    // Seed the protocol state with our own address, if already known.
    // NOTE(review): the `unwrap` assumes encoding our own address never
    // fails (postcard error or zero-byte encoding) — confirm.
    let initial_peer_data = discovery
        .as_ref()
        .and_then(|discovery| discovery.our_addr.as_ref())
        .and_then(|our_addr| our_addr.get())
        .map(|addr_info| encode_peer_data(&addr_info).unwrap());
    let state = proto::State::new(
        peer_id,
        initial_peer_data,
        config,
        rand::rngs::StdRng::from_entropy(),
    );
    let (to_actor_tx, to_actor_rx) = mpsc::channel(TO_ACTOR_CAP);
    let (in_event_tx, in_event_rx) = mpsc::channel(IN_EVENT_CAP);
    let actor = Actor {
        state,
        dialer,
        to_actor_rx,
        in_event_rx,
        in_event_tx,
        timers: Timers::new(),
        command_rx: StreamGroup::new().keyed(),
        peers: Default::default(),
        topics: Default::default(),
        quit_queue: Default::default(),
        connection_tasks: Default::default(),
        discovery,
    };
    (actor, to_actor_tx)
}
/// Test-only accessor for the underlying endpoint.
#[cfg(test)]
fn endpoint(&self) -> &Endpoint {
    &self.dialer.endpoint
}
/// Test-only accessor for this node's id.
#[cfg(test)]
fn node_id(&self) -> NodeId {
    self.dialer.endpoint.node_id()
}
/// Drives the actor until all handles are dropped or an error occurs.
pub async fn run(mut self) -> Result<(), Error> {
    let mut addr_update_stream = self.setup_addr_stream().await?;
    let mut tick = 0;
    loop {
        match self.event_loop(tick, &mut addr_update_stream).await? {
            // `Some(())` means: keep going.
            Some(()) => tick += 1,
            // `None` means: all handles dropped, shut down cleanly.
            None => return Ok(()),
        }
    }
}
/// Builds the stream of our own address updates.
///
/// With discovery enabled, waits for the first concrete address and
/// applies it before returning the stream of subsequent updates;
/// otherwise returns a stream that never yields.
async fn setup_addr_stream(&mut self) -> Result<impl Stream<Item = AddrInfo>, Error> {
    match self.discovery.as_ref().and_then(|d| d.our_addr.as_ref()) {
        Some(our_addr) => {
            let watcher = our_addr.watch();
            // Skip `None` values; we only care about concrete addresses.
            let mut stream = watcher.stream().filter_map(|x| x).boxed();
            let Some(initial) = stream.next().await else {
                return Err(anyhow::anyhow!(
                    "Failed to retrieve initial address from endpoint"
                )
                .into());
            };
            // Publish our initial address into the protocol state.
            self.handle_addr_update(initial).await?;
            Ok(stream)
        }
        None => Ok(n0_future::stream::pending().boxed()),
    }
}
/// Runs one iteration of the actor's main select loop.
///
/// Returns `Ok(Some(()))` to continue, or `Ok(None)` once all `ToActor`
/// senders (i.e. all gossip handles) are dropped.
async fn event_loop(
    &mut self,
    i: usize,
    mut our_addr_updates: impl Stream<Item = AddrInfo> + Unpin,
) -> Result<Option<()>, Error> {
    inc!(Metrics, actor_tick_main);
    tokio::select! {
        // `biased` makes the polling order below deterministic.
        biased;
        // Messages from the public `Gossip` handles.
        msg = self.to_actor_rx.recv() => {
            trace!(?i, "tick: to_actor_rx");
            inc!(Metrics, actor_tick_rx);
            match msg {
                Some(msg) => self.handle_to_actor_msg(msg, Instant::now()).await?,
                None => {
                    debug!("all gossip handles dropped, stop gossip actor");
                    return Ok(None)
                }
            }
        },
        // Commands from subscribers, tagged with their topic and key.
        Some((key, (topic, command))) = self.command_rx.next(), if !self.command_rx.is_empty() => {
            trace!(?i, "tick: command_rx");
            self.handle_command(topic, key, command).await?;
        },
        // Updates to our own address (discovery only).
        Some(addr_info) = our_addr_updates.next() => {
            trace!(?i, "tick: new_addr_info");
            inc!(Metrics, actor_tick_endpoint);
            tracing::info!("addr update {addr_info:?}");
            self.handle_addr_update(addr_info).await?;
        }
        // Completed outbound dials.
        (peer_id, res) = self.dialer.next_conn() => {
            trace!(?i, "tick: dialer");
            inc!(Metrics, actor_tick_dialer);
            match res {
                Some(Ok(conn)) => {
                    debug!(peer = %peer_id.fmt_short(), "dial successful");
                    inc!(Metrics, actor_tick_dialer_success);
                    self.handle_connection(peer_id, ConnOrigin::Dial, conn);
                }
                Some(Err(err)) => {
                    warn!(peer = %peer_id.fmt_short(), "dial failed: {err}");
                    inc!(Metrics, actor_tick_dialer_failure);
                }
                // `None` marks a cancelled dial (see `Dialer::queue_dial`).
                None => {
                    warn!(peer = %peer_id.fmt_short(), "dial disconnected");
                    inc!(Metrics, actor_tick_dialer_failure);
                }
            }
        }
        // Protocol input events (received messages etc.).
        event = self.in_event_rx.recv() => {
            trace!(?i, "tick: in_event_rx");
            inc!(Metrics, actor_tick_in_event_rx);
            // The actor owns an `in_event_tx` clone, so the channel can
            // never be closed while the actor is alive.
            let event = event.expect("unreachable: in_event_tx is never dropped before receiver");
            self.handle_in_event(event, Instant::now()).await?;
        }
        // Expired protocol timers.
        drain = self.timers.wait_and_drain() => {
            trace!(?i, "tick: timers");
            inc!(Metrics, actor_tick_timers);
            let now = Instant::now();
            for (_instant, timer) in drain {
                self.handle_in_event(InEvent::TimerExpired(timer), now).await?;
            }
        }
        // Finished per-connection I/O tasks.
        Some(res) = self.connection_tasks.join_next(), if !self.connection_tasks.is_empty() => {
            trace!(?i, "tick: connection_tasks");
            let (peer_id, conn, result) = res.expect("connection task panicked");
            self.handle_connection_task_finished(peer_id, conn, result).await?;
        }
    }
    Ok(Some(()))
}
/// Publishes our updated address into the protocol state as peer data.
async fn handle_addr_update(&mut self, info: AddrInfo) -> Result<(), Error> {
    let encoded = encode_peer_data(&info)?;
    self.handle_in_event(InEvent::UpdatePeerData(encoded), Instant::now())
        .await
}
/// Handles one item from a subscriber's command stream.
///
/// `command == None` means that subscriber's stream ended; its key is
/// removed, and the topic is torn down once neither event receivers nor
/// command streams remain.
async fn handle_command(
    &mut self,
    topic: TopicId,
    key: stream_group::Key,
    command: Option<Command>,
) -> Result<(), Error> {
    debug!(?topic, ?key, ?command, "handle command");
    let Some(state) = self.topics.get_mut(&topic) else {
        warn!("received command for unknown topic");
        return Ok(());
    };
    match command {
        Some(command) => {
            // Translate the public command into a protocol command.
            let command = match command {
                Command::Broadcast(message) => ProtoCommand::Broadcast(message, Scope::Swarm),
                Command::BroadcastNeighbors(message) => {
                    ProtoCommand::Broadcast(message, Scope::Neighbors)
                }
                Command::JoinPeers(peers) => ProtoCommand::Join(peers),
            };
            self.handle_in_event(proto::InEvent::Command(topic, command), Instant::now())
                .await?;
        }
        None => {
            state.command_rx_keys.remove(&key);
            if !state.still_needed() {
                self.quit_queue.push_back(topic);
                self.process_quit_queue().await?;
            }
        }
    }
    Ok(())
}
/// Registers a connection for `peer_id` and spawns its I/O task.
///
/// If the peer was pending, messages queued while dialing are handed to
/// the task for flushing; if it already had an active connection, the new
/// one replaces it as the send path (see `PeerState::accept_conn`).
fn handle_connection(&mut self, peer_id: NodeId, origin: ConnOrigin, conn: Connection) {
    let (send_tx, send_rx) = mpsc::channel(SEND_QUEUE_CAP);
    let conn_id = conn.stable_id();
    let queue = match self.peers.entry(peer_id) {
        Entry::Occupied(mut entry) => entry.get_mut().accept_conn(send_tx, conn_id),
        Entry::Vacant(entry) => {
            entry.insert(PeerState::Active {
                active_send_tx: send_tx,
                active_conn_id: conn_id,
                other_conns: Vec::new(),
            });
            Vec::new()
        }
    };
    let max_message_size = self.state.max_message_size();
    let in_event_tx = self.in_event_tx.clone();
    // The task returns the connection so the actor can inspect the close
    // reason in `handle_connection_task_finished`.
    self.connection_tasks.spawn(
        async move {
            let res = connection_loop(
                peer_id,
                &conn,
                origin,
                send_rx,
                &in_event_tx,
                max_message_size,
                queue,
            )
            .await;
            (peer_id, conn, res)
        }
        .instrument(error_span!("conn", peer = %peer_id.fmt_short())),
    );
}
/// Cleans up peer state after a connection's I/O task terminated.
#[tracing::instrument(name = "conn", skip_all, fields(peer = %peer_id.fmt_short()))]
async fn handle_connection_task_finished(
    &mut self,
    peer_id: NodeId,
    conn: Connection,
    task_result: anyhow::Result<()>,
) -> Result<(), Error> {
    // Make sure the connection is closed before reading the close reason.
    if conn.close_reason().is_none() {
        conn.close(0u32.into(), b"close from disconnect");
    }
    let reason = conn.close_reason().expect("just closed");
    let error = task_result.err();
    debug!(%reason, ?error, "connection closed");
    if let Some(PeerState::Active {
        active_conn_id,
        other_conns,
        ..
    }) = self.peers.get_mut(&peer_id)
    {
        if conn.stable_id() == *active_conn_id {
            // The connection used for sending is gone: tell the protocol
            // state the peer is disconnected.
            debug!("active send connection closed, mark peer as disconnected");
            self.handle_in_event(InEvent::PeerDisconnected(peer_id), Instant::now())
                .await?;
        } else {
            // A secondary connection closed; drop it from the list.
            other_conns.retain(|x| *x != conn.stable_id());
            // NOTE(review): the `+ 1` presumably counts the active
            // connection too, but the message says "other connections" —
            // confirm the intended count.
            debug!("remaining {} other connections", other_conns.len() + 1);
        }
    } else {
        debug!("peer already marked as disconnected");
    }
    Ok(())
}
/// Dispatches a message received from a `Gossip` handle.
async fn handle_to_actor_msg(&mut self, msg: ToActor, now: Instant) -> Result<(), Error> {
    trace!("handle to_actor {msg:?}");
    match msg {
        ToActor::HandleConnection(peer_id, origin, conn) => {
            self.handle_connection(peer_id, origin, conn)
        }
        ToActor::Join {
            topic_id,
            bootstrap,
            channels,
        } => {
            let state = self.topics.entry(topic_id).or_default();
            let TopicState {
                neighbors,
                event_senders,
                command_rx_keys,
            } = state;
            // Replay the topic's current neighbors so a late subscriber
            // still learns about them. Best effort: `try_send` may drop
            // the event if the subscriber's channel is already full.
            if !neighbors.is_empty() {
                for neighbor in neighbors.iter() {
                    channels
                        .event_tx
                        .try_send(Ok(Event::Gossip(GossipEvent::NeighborUp(*neighbor))))
                        .ok();
                }
            }
            event_senders.push(channels.receiver_id, channels.event_tx);
            // Merge the subscriber's command stream into the keyed group.
            let command_rx = TopicCommandStream::new(topic_id, channels.command_rx);
            let key = self.command_rx.insert(command_rx);
            command_rx_keys.insert(key);
            self.handle_in_event(
                InEvent::Command(
                    topic_id,
                    ProtoCommand::Join(bootstrap.into_iter().collect()),
                ),
                now,
            )
            .await?;
        }
        ToActor::ReceiverGone { topic, receiver_id } => {
            self.handle_receiver_gone(topic, receiver_id).await?;
        }
    }
    Ok(())
}
/// Handles one protocol input event, then drains any pending topic quits.
async fn handle_in_event(&mut self, event: InEvent, now: Instant) -> Result<(), Error> {
    self.handle_in_event_inner(event, now).await?;
    self.process_quit_queue().await
}
/// Tears down every topic queued for removal: issues a `Quit` protocol
/// command and drops the topic's local state.
async fn process_quit_queue(&mut self) -> Result<(), Error> {
    loop {
        let Some(topic_id) = self.quit_queue.pop_front() else {
            return Ok(());
        };
        let quit = InEvent::Command(topic_id, ProtoCommand::Quit);
        self.handle_in_event_inner(quit, Instant::now()).await?;
        if self.topics.remove(&topic_id).is_some() {
            tracing::debug!(%topic_id, "publishers and subscribers gone; unsubscribing");
        }
    }
}
/// Feeds one event into the protocol state machine and executes the
/// resulting out-events (sends, subscriber events, timers, peer data).
async fn handle_in_event_inner(&mut self, event: InEvent, now: Instant) -> Result<(), Error> {
    // Timer expirations are frequent; keep them at trace level.
    if matches!(event, InEvent::TimerExpired(_)) {
        trace!(?event, "handle in_event");
    } else {
        debug!(?event, "handle in_event");
    };
    let out = self.state.handle(event, now);
    for event in out {
        if matches!(event, OutEvent::ScheduleTimer(_, _)) {
            trace!(?event, "handle out_event");
        } else {
            debug!(?event, "handle out_event");
        };
        match event {
            OutEvent::SendMessage(peer_id, message) => {
                let state = self.peers.entry(peer_id).or_default();
                match state {
                    PeerState::Active { active_send_tx, .. } => {
                        if let Err(_err) = active_send_tx.send(message).await {
                            // Connection task gone; the message is dropped.
                            warn!(
                                peer = %peer_id.fmt_short(),
                                "failed to send: connection task send loop terminated",
                            );
                        }
                    }
                    PeerState::Pending { queue } => {
                        // The first queued message triggers the dial;
                        // queued messages are flushed once connected.
                        if queue.is_empty() {
                            debug!(peer = %peer_id.fmt_short(), "start to dial");
                            self.dialer.queue_dial(peer_id, GOSSIP_ALPN);
                        }
                        queue.push(message);
                    }
                }
            }
            OutEvent::EmitEvent(topic_id, event) => {
                let Some(state) = self.topics.get_mut(&topic_id) else {
                    warn!(?topic_id, "gossip state emitted event for unknown topic");
                    continue;
                };
                let TopicState {
                    neighbors,
                    event_senders,
                    ..
                } = state;
                // Track neighbors so they can be replayed to late joiners.
                match &event {
                    ProtoEvent::NeighborUp(neighbor) => {
                        neighbors.insert(*neighbor);
                    }
                    ProtoEvent::NeighborDown(neighbor) => {
                        neighbors.remove(neighbor);
                    }
                    _ => {}
                }
                let event: GossipEvent = event.into();
                event_senders.send(&event);
                // Sending may have pruned closed receivers; schedule the
                // topic for teardown if nothing is attached anymore.
                if !state.still_needed() {
                    self.quit_queue.push_back(topic_id);
                }
            }
            OutEvent::ScheduleTimer(delay, timer) => {
                self.timers.insert(now + delay, timer);
            }
            OutEvent::DisconnectPeer(peer_id) => {
                debug!(peer=%peer_id.fmt_short(), "gossip state indicates disconnect: drop peer");
                self.peers.remove(&peer_id);
            }
            OutEvent::PeerData(node_id, data) => {
                // Decode a peer's self-reported address and feed it to
                // discovery so future dials can reach that peer.
                if let Some(discovery) = &self.discovery {
                    match decode_peer_data(&data) {
                        Ok(Some(info)) => {
                            debug!(peer = %node_id.fmt_short(), "add addr info to discovery: {info:?}");
                            let node_addr = NodeAddr {
                                node_id,
                                relay_url: info.relay_url,
                                direct_addresses: info.direct_addresses,
                            };
                            discovery.add_node_addr(node_addr);
                        }
                        Err(err) => warn!(
                            "Failed to decode peer data from {}: {err}",
                            node_id.fmt_short()
                        ),
                        // Empty peer data means "unset"; nothing to add.
                        Ok(None) => {}
                    }
                }
            }
        }
    }
    Ok(())
}
/// Detaches a dropped event receiver from its topic, tearing the topic
/// down when neither receivers nor command streams remain.
async fn handle_receiver_gone(
    &mut self,
    topic: TopicId,
    receiver_id: ReceiverId,
) -> Result<(), Error> {
    match self.topics.get_mut(&topic) {
        Some(state) => {
            state.event_senders.remove(&receiver_id);
            if !state.still_needed() {
                self.quit_queue.push_back(topic);
                self.process_quit_queue().await?;
            }
        }
        None => warn!(%topic, "receiver gone for missing topic"),
    }
    Ok(())
}
}
type ConnId = usize;
/// Connection state the actor tracks per peer.
#[derive(Debug)]
enum PeerState {
    /// No connection yet; messages queue up until a dial succeeds.
    Pending {
        queue: Vec<ProtoMessage>,
    },
    /// A live connection exists and is used for sending.
    Active {
        /// Send queue of the active connection's I/O task.
        active_send_tx: mpsc::Sender<ProtoMessage>,
        /// `Connection::stable_id` of the active connection.
        active_conn_id: ConnId,
        /// Ids of additional live connections no longer used for sending.
        other_conns: Vec<ConnId>,
    },
}
impl PeerState {
    /// Makes `conn_id` the active send connection for this peer.
    ///
    /// Returns any messages queued while the peer was pending so the
    /// caller can flush them on the new connection. A previously active
    /// connection is demoted into `other_conns`; its task keeps running
    /// until that connection closes.
    fn accept_conn(
        &mut self,
        send_tx: mpsc::Sender<ProtoMessage>,
        conn_id: ConnId,
    ) -> Vec<ProtoMessage> {
        match self {
            PeerState::Pending { queue } => {
                // Hand the queue to the caller; we become active.
                let queue = std::mem::take(queue);
                *self = PeerState::Active {
                    active_send_tx: send_tx,
                    active_conn_id: conn_id,
                    other_conns: Vec::new(),
                };
                queue
            }
            PeerState::Active {
                active_send_tx,
                active_conn_id,
                other_conns,
            } => {
                // Demote the old active connection and switch sending to
                // the new one. Dropping the old sender ends the old
                // task's send loop.
                other_conns.push(*active_conn_id);
                *active_send_tx = send_tx;
                *active_conn_id = conn_id;
                Vec::new()
            }
        }
    }
}
impl Default for PeerState {
fn default() -> Self {
PeerState::Pending { queue: Vec::new() }
}
}
/// Per-topic bookkeeping in the actor.
#[derive(Debug, Default)]
struct TopicState {
    /// Current gossip neighbors; replayed to late subscribers on join.
    neighbors: BTreeSet<NodeId>,
    /// Event channels of the topic's subscribers.
    event_senders: EventSenders,
    /// Stream-group keys of the subscribers' command streams.
    command_rx_keys: HashSet<stream_group::Key>,
}
impl TopicState {
    /// A topic stays alive while at least one event receiver or one
    /// command stream is still attached.
    fn still_needed(&self) -> bool {
        let no_receivers = self.event_senders.is_empty();
        let no_commands = self.command_rx_keys.is_empty();
        !(no_receivers && no_commands)
    }

    /// Whether this node currently has any gossip neighbors on the topic.
    #[cfg(test)]
    fn joined(&self) -> bool {
        !self.neighbors.is_empty()
    }
}
/// Whether a connection was accepted (inbound) or dialed (outbound).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ConnOrigin {
    Accept,
    Dial,
}
/// Channel ends handed to the actor for a new subscriber.
#[derive(derive_more::Debug)]
struct SubscriberChannels {
    /// Id used to deregister the event sender when the stream is dropped.
    receiver_id: ReceiverId,
    /// Where the actor delivers events for this subscriber.
    event_tx: async_channel::Sender<Result<Event, Error>>,
    /// The subscriber's command stream.
    #[debug("CommandStream")]
    command_rx: CommandStream,
}
/// Drives one gossip connection: writes queued and subsequent protocol
/// messages on the send stream and forwards received messages to the
/// actor, until both directions terminate.
async fn connection_loop(
    from: PublicKey,
    conn: &Connection,
    origin: ConnOrigin,
    mut send_rx: mpsc::Receiver<ProtoMessage>,
    in_event_tx: &mpsc::Sender<InEvent>,
    max_message_size: usize,
    queue: Vec<ProtoMessage>,
) -> anyhow::Result<()> {
    // The accepting side waits for the dialer to open the stream pair.
    let (mut send, mut recv) = match origin {
        ConnOrigin::Accept => conn.accept_bi().await?,
        ConnOrigin::Dial => conn.open_bi().await?,
    };
    debug!(?origin, "connection established");
    let mut send_buf = BytesMut::new();
    let mut recv_buf = BytesMut::new();
    let send_loop = async {
        // Flush messages queued while the peer was still pending.
        for msg in queue {
            write_message(&mut send, &mut send_buf, &msg, max_message_size).await?;
        }
        // Then forward messages until the actor drops the sender.
        while let Some(msg) = send_rx.recv().await {
            write_message(&mut send, &mut send_buf, &msg, max_message_size).await?;
        }
        // Best-effort graceful shutdown of the send stream.
        let _ = send.finish();
        let _ = send.stopped().await;
        anyhow::Ok(())
    };
    let recv_loop = async {
        // `read_message` returning `None` signals a clean end of stream.
        loop {
            let msg = read_message(&mut recv, &mut recv_buf, max_message_size).await?;
            match msg {
                None => break,
                Some(msg) => in_event_tx.send(InEvent::RecvMessage(from, msg)).await?,
            }
        }
        anyhow::Ok(())
    };
    // Run both directions concurrently; a send-side error takes
    // precedence in the combined result.
    let res = tokio::join!(send_loop, recv_loop);
    res.0.context("send_loop").and(res.1.context("recv_loop"))
}
/// Serializable subset of a node address, exchanged as gossip peer data.
#[derive(Default, Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
struct AddrInfo {
    relay_url: Option<RelayUrl>,
    direct_addresses: BTreeSet<SocketAddr>,
}
impl From<&NodeData> for AddrInfo {
    /// Extracts the relay url and direct addresses from discovery data.
    fn from(value: &NodeData) -> Self {
        Self {
            relay_url: value.relay_url().cloned(),
            direct_addresses: value.direct_addresses().clone(),
        }
    }
}
impl From<NodeAddr> for AddrInfo {
fn from(
NodeAddr {
relay_url,
direct_addresses,
..
}: NodeAddr,
) -> Self {
Self {
relay_url,
direct_addresses,
}
}
}
fn encode_peer_data(info: &AddrInfo) -> Result<PeerData, Error> {
let bytes = postcard::to_stdvec(info)?;
if bytes.is_empty() {
return Err(Error::EmptyPeerData);
}
Ok(PeerData::new(bytes))
}
/// Deserializes gossip peer data into an [`AddrInfo`].
///
/// Empty peer data means "unset" and decodes to `None`.
fn decode_peer_data(peer_data: &PeerData) -> Result<Option<AddrInfo>, Error> {
    if !peer_data.is_empty() {
        let info: AddrInfo = postcard::from_bytes(peer_data.as_bytes())?;
        return Ok(Some(info));
    }
    Ok(None)
}
/// Fan-out of topic events to all subscriber channels.
#[derive(Debug, Default)]
struct EventSenders {
    /// Sender per receiver id; the bool marks a currently-lagged receiver.
    senders: HashMap<ReceiverId, (async_channel::Sender<Result<Event, Error>>, bool)>,
}
/// Identifier for one subscriber's event receiver, allocated from
/// `Inner::next_receiver_id`; lets the actor detach exactly that
/// receiver when its `EventStream` is dropped.
#[derive(derive_more::Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Clone, Copy)]
struct ReceiverId(usize);
impl EventSenders {
    /// Whether no subscribers are registered.
    fn is_empty(&self) -> bool {
        self.senders.is_empty()
    }

    /// Registers a subscriber; it starts out not lagged.
    fn push(&mut self, id: ReceiverId, sender: async_channel::Sender<Result<Event, Error>>) {
        self.senders.insert(id, (sender, false));
    }

    /// Broadcasts an event to all subscribers, pruning closed ones.
    ///
    /// When a subscriber's channel is within one slot of capacity, a
    /// single `Event::Lagged` is sent instead (once per lag episode) and
    /// further events are skipped until the channel drains.
    fn send(&mut self, event: &GossipEvent) {
        let mut remove = Vec::new();
        for (&id, (send, lagged)) in self.senders.iter_mut() {
            if send.is_closed() {
                remove.push(id);
                continue;
            }
            // We only ever create bounded channels, so a capacity exists.
            let cap = send.capacity().expect("we only use bounded channels");
            // Leave one slot of headroom: emit `Lagged` before the
            // channel is actually full.
            let event = if send.len() >= cap - 1 {
                if *lagged {
                    // Already notified for this lag episode; drop event.
                    continue;
                }
                *lagged = true;
                Event::Lagged
            } else {
                *lagged = false;
                Event::Gossip(event.clone())
            };
            if let Err(async_channel::TrySendError::Closed(_)) = send.try_send(Ok(event)) {
                remove.push(id);
            }
        }
        for id in remove.into_iter() {
            self.senders.remove(&id);
        }
    }

    /// Removes the sender registered under `id`, if any.
    fn remove(&mut self, id: &ReceiverId) {
        self.senders.remove(id);
    }
}
/// A subscriber's command stream tagged with its topic; yields a final
/// `(topic, None)` marker item when the underlying stream terminates.
#[derive(derive_more::Debug)]
struct TopicCommandStream {
    topic_id: TopicId,
    #[debug("CommandStream")]
    stream: CommandStream,
    /// Set once the end marker has been emitted.
    closed: bool,
}
impl TopicCommandStream {
    /// Wraps `stream` so its items are tagged with `topic_id`.
    fn new(topic_id: TopicId, stream: CommandStream) -> Self {
        Self {
            stream,
            topic_id,
            closed: false,
        }
    }
}
impl Stream for TopicCommandStream {
    type Item = (TopicId, Option<Command>);

    /// Tags inner items with the topic id; when the inner stream ends,
    /// yields one final `(topic_id, None)` marker, then terminates.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if self.closed {
            return Poll::Ready(None);
        }
        match Pin::new(&mut self.stream).poll_next(cx) {
            Poll::Ready(Some(item)) => Poll::Ready(Some((self.topic_id, Some(item)))),
            Poll::Ready(None) => {
                self.closed = true;
                Poll::Ready(Some((self.topic_id, None)))
            }
            Poll::Pending => Poll::Pending,
        }
    }
}
/// Dials peers on demand, with at most one in-flight dial per peer.
#[derive(Debug)]
struct Dialer {
    endpoint: Endpoint,
    /// Tasks for in-flight dials; a task yields `(node, None)` when cancelled.
    pending: JoinSet<(NodeId, Option<Result<Connection, Error>>)>,
    /// Cancellation handles for in-flight dials, keyed by node.
    pending_dials: HashMap<NodeId, CancellationToken>,
}
impl Dialer {
    /// Creates a dialer for `endpoint` with no dials in flight.
    fn new(endpoint: Endpoint) -> Self {
        Self {
            endpoint,
            pending: Default::default(),
            pending_dials: Default::default(),
        }
    }

    /// Starts dialing `node_id` unless a dial is already in flight.
    fn queue_dial(&mut self, node_id: NodeId, alpn: &'static [u8]) {
        if self.is_pending(node_id) {
            return;
        }
        let cancel = CancellationToken::new();
        self.pending_dials.insert(node_id, cancel.clone());
        let endpoint = self.endpoint.clone();
        self.pending.spawn(async move {
            let res = tokio::select! {
                biased;
                // `None` marks a cancelled dial.
                _ = cancel.cancelled() => None,
                res = endpoint.connect(node_id, alpn) => Some(res.map_err(Error::from)),
            };
            (node_id, res)
        });
    }

    /// Checks whether a dial to `node` is currently in flight.
    fn is_pending(&self, node: NodeId) -> bool {
        self.pending_dials.contains_key(&node)
    }

    /// Waits for the next dial to finish.
    ///
    /// Pends forever while nothing is in flight, so this can be polled
    /// from a `select!` without firing spuriously.
    async fn next_conn(&mut self) -> (NodeId, Option<Result<Connection, Error>>) {
        match self.pending_dials.is_empty() {
            false => {
                let (node_id, res) = loop {
                    match self.pending.join_next().await {
                        Some(Ok((node_id, res))) => {
                            self.pending_dials.remove(&node_id);
                            break (node_id, res);
                        }
                        Some(Err(e)) => {
                            // A dial task panicked or was aborted; keep waiting.
                            error!("next conn error: {:?}", e);
                        }
                        None => {
                            // JoinSet drained even though `pending_dials`
                            // is non-empty; pend rather than busy-loop.
                            error!("no more pending conns available");
                            std::future::pending().await
                        }
                    }
                };
                (node_id, res)
            }
            true => std::future::pending().await,
        }
    }
}
#[cfg(test)]
mod test {
use std::time::Duration;
use bytes::Bytes;
use futures_concurrency::future::TryJoin;
use iroh::{protocol::Router, RelayMap, RelayMode, SecretKey};
use n0_future::{FuturesOrdered, StreamExt};
use rand::Rng;
use testresult::TestResult;
use tokio::{spawn, time::timeout};
use tokio_util::sync::CancellationToken;
use tracing::{info, instrument};
use tracing_test::traced_test;
use super::*;
struct ManualActorLoop {
actor: Actor,
step: usize,
}
impl std::ops::Deref for ManualActorLoop {
type Target = Actor;
fn deref(&self) -> &Self::Target {
&self.actor
}
}
impl std::ops::DerefMut for ManualActorLoop {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.actor
}
}
type EndpointHandle = tokio::task::JoinHandle<Result<(), Error>>;
impl ManualActorLoop {
#[instrument(skip_all, fields(me = %actor.node_id().fmt_short()))]
async fn new(actor: Actor) -> Result<Self, Error> {
let test_rig = Self { actor, step: 0 };
Ok(test_rig)
}
#[instrument(skip_all, fields(me = %self.node_id().fmt_short()))]
async fn step(&mut self) -> Result<Option<()>, Error> {
let ManualActorLoop { actor, step } = self;
*step += 1;
actor.event_loop(*step, n0_future::stream::pending()).await
}
async fn steps(&mut self, n: usize) -> Result<(), Error> {
for _ in 0..n {
self.step().await?;
}
Ok(())
}
async fn finish(mut self) -> Result<(), Error> {
while self.step().await?.is_some() {}
Ok(())
}
}
impl Gossip {
async fn t_new_with_actor(
rng: &mut rand_chacha::ChaCha12Rng,
config: proto::Config,
relay_map: RelayMap,
cancel: &CancellationToken,
) -> Result<(Self, Actor, EndpointHandle), Error> {
let endpoint = create_endpoint(rng, relay_map).await?;
let (actor, to_actor_tx) = Actor::new(endpoint, config, None);
let max_message_size = actor.state.max_message_size();
let _actor_handle =
AbortOnDropHandle::new(task::spawn(futures_lite::future::pending()));
let gossip = Self {
inner: Inner {
to_actor_tx,
_actor_handle,
max_message_size,
next_receiver_id: Default::default(),
}
.into(),
#[cfg(feature = "rpc")]
rpc_handler: Default::default(),
};
let endpoing_task = task::spawn(endpoint_loop(
actor.endpoint().clone(),
gossip.clone(),
cancel.child_token(),
));
Ok((gossip, actor, endpoing_task))
}
async fn t_new(
rng: &mut rand_chacha::ChaCha12Rng,
config: proto::Config,
relay_map: RelayMap,
cancel: &CancellationToken,
) -> Result<(Self, Endpoint, EndpointHandle, impl Drop), Error> {
let (g, actor, ep_handle) =
Gossip::t_new_with_actor(rng, config, relay_map, cancel).await?;
let ep = actor.endpoint().clone();
let me = ep.node_id().fmt_short();
let actor_handle = task::spawn(
async move {
if let Err(err) = actor.run().await {
warn!("gossip actor closed with error: {err:?}");
}
}
.instrument(tracing::error_span!("gossip", %me)),
);
Ok((g, ep, ep_handle, AbortOnDropHandle::new(actor_handle)))
}
}
async fn create_endpoint(
rng: &mut rand_chacha::ChaCha12Rng,
relay_map: RelayMap,
) -> Result<Endpoint, Error> {
let ep = Endpoint::builder()
.secret_key(SecretKey::generate(rng))
.alpns(vec![GOSSIP_ALPN.to_vec()])
.relay_mode(RelayMode::Custom(relay_map))
.insecure_skip_relay_cert_verify(true)
.bind()
.await?;
ep.home_relay().initialized().await?;
Ok(ep)
}
async fn endpoint_loop(
endpoint: Endpoint,
gossip: Gossip,
cancel: CancellationToken,
) -> Result<(), Error> {
loop {
tokio::select! {
biased;
_ = cancel.cancelled() => break,
incoming = endpoint.accept() => match incoming {
None => break,
Some(incoming) => {
let connecting = match incoming.accept() {
Ok(connecting) => connecting,
Err(err) => {
warn!("incoming connection failed: {err:#}");
continue;
}
};
gossip.handle_connection(connecting.await?).await?
}
}
}
}
Ok(())
}
#[tokio::test]
#[traced_test]
async fn gossip_net_smoke() {
let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1);
let (relay_map, relay_url, _guard) = iroh::test_utils::run_relay_server().await.unwrap();
let ep1 = create_endpoint(&mut rng, relay_map.clone()).await.unwrap();
let ep2 = create_endpoint(&mut rng, relay_map.clone()).await.unwrap();
let ep3 = create_endpoint(&mut rng, relay_map.clone()).await.unwrap();
let go1 = Gossip::builder().spawn(ep1.clone());
let go2 = Gossip::builder().spawn(ep2.clone());
let go3 = Gossip::builder().spawn(ep3.clone());
debug!("peer1 {:?}", ep1.node_id());
debug!("peer2 {:?}", ep2.node_id());
debug!("peer3 {:?}", ep3.node_id());
let pi1 = ep1.node_id();
let pi2 = ep2.node_id();
let cancel = CancellationToken::new();
let tasks = [
spawn(endpoint_loop(ep1.clone(), go1.clone(), cancel.clone())),
spawn(endpoint_loop(ep2.clone(), go2.clone(), cancel.clone())),
spawn(endpoint_loop(ep3.clone(), go3.clone(), cancel.clone())),
];
debug!("----- adding peers ----- ");
let topic: TopicId = blake3::hash(b"foobar").into();
let addr1 = NodeAddr::new(pi1).with_relay_url(relay_url.clone());
let addr2 = NodeAddr::new(pi2).with_relay_url(relay_url);
ep2.add_node_addr(addr1.clone()).unwrap();
ep3.add_node_addr(addr2).unwrap();
debug!("----- joining ----- ");
let [sub1, mut sub2, mut sub3] = [
go1.subscribe_and_join(topic, vec![]),
go2.subscribe_and_join(topic, vec![pi1]),
go3.subscribe_and_join(topic, vec![pi2]),
]
.try_join()
.await
.unwrap();
let (sink1, _stream1) = sub1.split();
let len = 2;
let pub1 = spawn(async move {
for i in 0..len {
let message = format!("hi{}", i);
info!("go1 broadcast: {message:?}");
sink1.broadcast(message.into_bytes().into()).await.unwrap();
tokio::time::sleep(Duration::from_micros(1)).await;
}
});
let sub2 = spawn(async move {
let mut recv = vec![];
loop {
let ev = sub2.next().await.unwrap().unwrap();
info!("go2 event: {ev:?}");
if let Event::Gossip(GossipEvent::Received(msg)) = ev {
recv.push(msg.content);
}
if recv.len() == len {
return recv;
}
}
});
let sub3 = spawn(async move {
let mut recv = vec![];
loop {
let ev = sub3.next().await.unwrap().unwrap();
info!("go3 event: {ev:?}");
if let Event::Gossip(GossipEvent::Received(msg)) = ev {
recv.push(msg.content);
}
if recv.len() == len {
return recv;
}
}
});
timeout(Duration::from_secs(10), pub1)
.await
.unwrap()
.unwrap();
let recv2 = timeout(Duration::from_secs(10), sub2)
.await
.unwrap()
.unwrap();
let recv3 = timeout(Duration::from_secs(10), sub3)
.await
.unwrap()
.unwrap();
let expected: Vec<Bytes> = (0..len)
.map(|i| Bytes::from(format!("hi{i}").into_bytes()))
.collect();
assert_eq!(recv2, expected);
assert_eq!(recv3, expected);
cancel.cancel();
for t in tasks {
timeout(Duration::from_secs(10), t)
.await
.unwrap()
.unwrap()
.unwrap();
}
}
#[tokio::test]
#[traced_test]
async fn subscription_cleanup() -> testresult::TestResult {
    // Checks that the actor keeps a topic registered while at least one
    // subscription handle is alive, and removes it once all handles drop.
    let rng = &mut rand_chacha::ChaCha12Rng::seed_from_u64(1);
    let ct = CancellationToken::new();
    let (relay_map, relay_url, _guard) = iroh::test_utils::run_relay_server().await.unwrap();
    // Node 1's actor is driven manually so the test can step it a
    // deterministic number of times and inspect its topic map in between.
    let (go1, actor, ep1_handle) =
        Gossip::t_new_with_actor(rng, Default::default(), relay_map.clone(), &ct).await?;
    let mut actor = ManualActorLoop::new(actor).await?;
    let (go2, ep2, ep2_handle, _test_actor_handle) =
        Gossip::t_new(rng, Default::default(), relay_map, &ct).await?;
    let node_id1 = actor.node_id();
    let node_id2 = ep2.node_id();
    tracing::info!(
        node_1 = node_id1.fmt_short(),
        node_2 = node_id2.fmt_short(),
        "nodes ready"
    );
    let topic: TopicId = blake3::hash(b"subscription_cleanup").into();
    tracing::info!(%topic, "joining");
    // Node 2 stays subscribed for the whole test; it only logs events and
    // must never receive a gossip message (the test broadcasts nothing).
    let ct2 = ct.clone();
    let go2_task = async move {
        let (_pub_tx, mut sub_rx) = go2.subscribe_and_join(topic, vec![]).await?.split();
        let subscribe_fut = async {
            while let Some(ev) = sub_rx.try_next().await? {
                match ev {
                    Event::Lagged => tracing::debug!("missed some messages :("),
                    Event::Gossip(gm) => match gm {
                        GossipEvent::Received(_) => unreachable!("test does not send messages"),
                        other => tracing::debug!(?other, "gs event"),
                    },
                }
            }
            tracing::debug!("subscribe stream ended");
            anyhow::Ok(())
        };
        tokio::select! {
            _ = ct2.cancelled() => Ok(()),
            res = subscribe_fut => res,
        }
    }
    .instrument(tracing::debug_span!("node_2", %node_id2));
    let go2_handle = task::spawn(go2_task);
    let addr2 = NodeAddr::new(node_id2).with_relay_url(relay_url);
    actor.endpoint().add_node_addr(addr2)?;
    // `tx`/`rx` sequence node 1 through its subscribe/drop phases in lockstep
    // with the manual actor steps below.
    let (tx, mut rx) = mpsc::channel::<()>(1);
    let ct1 = ct.clone();
    let go1_task = async move {
        tracing::info!("subscribing the first time");
        let sub_1a = go1.subscribe_and_join(topic, vec![node_id2]).await?;
        rx.recv().await.expect("signal for second subscribe");
        tracing::info!("subscribing a second time");
        let sub_1b = go1.subscribe_and_join(topic, vec![node_id2]).await?;
        drop(sub_1a);
        rx.recv().await.expect("signal to drop all handles");
        tracing::info!("dropping all handles");
        drop(sub_1b);
        ct1.cancelled().await;
        drop(go1);
        anyhow::Ok(())
    }
    .instrument(tracing::debug_span!("node_1", %node_id1));
    let go1_handle = task::spawn(go1_task);
    // After the first subscribe, the topic must be registered and joined.
    actor.steps(3).await?;
    let state = actor.topics.get(&topic).expect("get registered topic");
    assert!(state.joined());
    // Second subscribe while the first handle drops: topic stays joined.
    tx.send(()).await?;
    actor.steps(3).await?;
    let state = actor.topics.get(&topic).expect("get registered topic");
    assert!(state.joined());
    // Dropping the last handle must remove the topic from the actor.
    tx.send(()).await?;
    actor.steps(2).await?;
    assert!(!actor.topics.contains_key(&topic));
    ct.cancel();
    let wait = Duration::from_secs(2);
    timeout(wait, ep1_handle).await???;
    timeout(wait, ep2_handle).await???;
    timeout(wait, go1_handle).await???;
    timeout(wait, go2_handle).await???;
    timeout(wait, actor.finish()).await??;
    testresult::TestResult::Ok(())
}
#[tokio::test]
#[traced_test]
async fn can_reconnect() -> testresult::TestResult {
let rng = &mut rand_chacha::ChaCha12Rng::seed_from_u64(1);
let ct = CancellationToken::new();
let (relay_map, relay_url, _guard) = iroh::test_utils::run_relay_server().await.unwrap();
let (go1, ep1, ep1_handle, _test_actor_handle1) =
Gossip::t_new(rng, Default::default(), relay_map.clone(), &ct).await?;
let (go2, ep2, ep2_handle, _test_actor_handle2) =
Gossip::t_new(rng, Default::default(), relay_map, &ct).await?;
let node_id1 = ep1.node_id();
let node_id2 = ep2.node_id();
tracing::info!(
node_1 = node_id1.fmt_short(),
node_2 = node_id2.fmt_short(),
"nodes ready"
);
let topic: TopicId = blake3::hash(b"can_reconnect").into();
tracing::info!(%topic, "joining");
let ct2 = ct.child_token();
let (tx, mut rx) = mpsc::channel::<()>(1);
let addr1 = NodeAddr::new(node_id1).with_relay_url(relay_url.clone());
ep2.add_node_addr(addr1)?;
let go2_task = async move {
let mut sub = go2.subscribe(topic, Vec::new())?;
sub.joined().await?;
rx.recv().await.expect("signal to unsubscribe");
tracing::info!("unsubscribing");
drop(sub);
rx.recv().await.expect("signal to subscribe again");
tracing::info!("resubscribing");
let mut sub = go2.subscribe(topic, vec![node_id1])?;
sub.joined().await?;
tracing::info!("subscription successful!");
ct2.cancelled().await;
anyhow::Ok(())
}
.instrument(tracing::debug_span!("node_2", %node_id2));
let go2_handle = task::spawn(go2_task);
let addr2 = NodeAddr::new(node_id2).with_relay_url(relay_url);
ep1.add_node_addr(addr2)?;
let mut sub = go1.subscribe(topic, vec![node_id2])?;
sub.joined().await?;
tx.send(()).await?;
let conn_timeout = Duration::from_millis(500);
let ev = timeout(conn_timeout, sub.try_next()).await??;
assert_eq!(ev, Some(Event::Gossip(GossipEvent::NeighborDown(node_id2))));
tracing::info!("node 2 left");
tx.send(()).await?;
let conn_timeout = Duration::from_millis(500);
let ev = timeout(conn_timeout, sub.try_next()).await??;
assert_eq!(ev, Some(Event::Gossip(GossipEvent::NeighborUp(node_id2))));
tracing::info!("node 2 rejoined!");
ct.cancel();
let wait = Duration::from_secs(2);
timeout(wait, ep1_handle).await???;
timeout(wait, ep2_handle).await???;
timeout(wait, go2_handle).await???;
testresult::TestResult::Ok(())
}
#[tokio::test]
#[traced_test]
async fn can_die_and_reconnect() -> testresult::TestResult {
// Test that a node can broadcast, be killed abruptly (its runtime and OS
// thread torn down via cancellation), and later come back with the same
// secret key — and thus the same node id — to broadcast again.
// Runs `fut` on a dedicated current-thread tokio runtime in a new OS
// thread. The thread returns `None` if `cancel` fired before `fut`
// completed, `Some(output)` otherwise.
fn run_in_thread<T: Send + 'static>(
cancel: CancellationToken,
fut: impl std::future::Future<Output = T> + Send + 'static,
) -> std::thread::JoinHandle<Option<T>> {
std::thread::spawn(move || {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
rt.block_on(async move { cancel.run_until_cancelled(fut).await })
})
}
// Binds an endpoint with the given key against the custom test relay and
// wires gossip up behind a router that accepts the gossip ALPN.
async fn spawn_gossip(
secret_key: SecretKey,
relay_map: RelayMap,
) -> anyhow::Result<(Router, Gossip)> {
let ep = Endpoint::builder()
.secret_key(secret_key)
.relay_mode(RelayMode::Custom(relay_map))
.insecure_skip_relay_cert_verify(true)
.bind()
.await?;
let gossip = Gossip::builder().spawn(ep.clone());
let router = Router::builder(ep.clone())
.accept(GOSSIP_ALPN, gossip.clone())
.spawn()
.await?;
Ok((router, gossip))
}
// Spawns a node, joins `topic_id` via `bootstrap_addr`, broadcasts a
// single message, then parks forever — it only ever stops by being
// cancelled from the outside (see `run_in_thread`).
async fn broadcast_once(
secret_key: SecretKey,
relay_map: RelayMap,
bootstrap_addr: NodeAddr,
topic_id: TopicId,
message: String,
) -> anyhow::Result<()> {
let (router, gossip) = spawn_gossip(secret_key, relay_map).await?;
info!(node_id = %router.endpoint().node_id().fmt_short(), "broadcast node spawned");
let bootstrap = vec![bootstrap_addr.node_id];
router.endpoint().add_node_addr(bootstrap_addr)?;
let topic = gossip.subscribe_and_join(topic_id, bootstrap).await?;
topic.broadcast(message.as_bytes().to_vec().into()).await?;
std::future::pending::<()>().await;
Ok(())
}
let (relay_map, _relay_url, _guard) = iroh::test_utils::run_relay_server().await.unwrap();
let mut rng = &mut rand_chacha::ChaCha12Rng::seed_from_u64(183187);
let topic_id = TopicId::from_bytes(rng.gen());
let (addr_tx, addr_rx) = tokio::sync::oneshot::channel();
let (msgs_recv_tx, mut msgs_recv_rx) = tokio::sync::mpsc::channel(3);
// Long-lived receiver node: publishes its address once via `addr_tx`,
// then forwards the UTF-8 content of every received message to the test
// through `msgs_recv_tx`.
let recv_task = tokio::task::spawn({
let relay_map = relay_map.clone();
let secret_key = SecretKey::generate(&mut rng);
async move {
let (router, gossip) = spawn_gossip(secret_key, relay_map).await?;
let addr = router.endpoint().node_addr().await?;
info!(node_id = %addr.node_id.fmt_short(), "recv node spawned");
addr_tx.send(addr).unwrap();
let mut topic = gossip.subscribe_and_join(topic_id, vec![]).await?;
while let Some(event) = topic.try_next().await.unwrap() {
if let Event::Gossip(GossipEvent::Received(message)) = event {
let message = std::str::from_utf8(&message.content)?.to_string();
msgs_recv_tx.send(message).await?;
}
}
anyhow::Ok(())
}
});
let node0_addr = addr_rx.await?;
let max_wait = Duration::from_secs(5);
let cancel = CancellationToken::new();
// The same `secret` is reused below so the second broadcaster has the
// same node id as the one that was killed.
let secret = SecretKey::generate(&mut rng);
let join_handle_1 = run_in_thread(
cancel.clone(),
broadcast_once(
secret.clone(),
relay_map.clone(),
node0_addr.clone(),
topic_id,
"msg1".to_string(),
),
);
let msg = timeout(max_wait, msgs_recv_rx.recv()).await?.unwrap();
assert_eq!(&msg, "msg1");
info!("kill broadcast node");
cancel.cancel();
// `None` proves the broadcaster was cancelled rather than finishing.
assert!(join_handle_1.join().unwrap().is_none());
// Fresh token: the previous one is already cancelled and would kill the
// second broadcaster immediately.
let cancel = CancellationToken::new();
let join_handle_2 = run_in_thread(
cancel.clone(),
broadcast_once(
secret.clone(),
relay_map.clone(),
node0_addr.clone(),
topic_id,
"msg2".to_string(),
),
);
let msg = timeout(max_wait, msgs_recv_rx.recv()).await?.unwrap();
assert_eq!(&msg, "msg2");
info!("kill broadcast node");
cancel.cancel();
assert!(join_handle_2.join().unwrap().is_none());
info!("kill recv node");
recv_task.abort();
Ok(())
}
#[tokio::test]
#[traced_test]
async fn gossip_discovery() -> TestResult {
// Spawns endpoint + gossip + router. When `use_discovery` is set, a single
// `GossipDiscovery` instance is wired into both the endpoint builder and
// the gossip builder, so gossip traffic can feed node discovery.
async fn spawn_gossip(
secret_key: SecretKey,
relay_map: RelayMap,
use_discovery: bool,
) -> anyhow::Result<(Router, Gossip)> {
let discovery = use_discovery.then(GossipDiscovery::default);
let mut ep_builder = Endpoint::builder()
.secret_key(secret_key)
.relay_mode(RelayMode::Custom(relay_map))
.insecure_skip_relay_cert_verify(true);
if let Some(discovery) = &discovery {
ep_builder = ep_builder.discovery(Box::new(discovery.clone()));
}
let ep = ep_builder.bind().await?;
let mut gossip_builder = Gossip::builder();
if let Some(discovery) = discovery {
gossip_builder = gossip_builder.use_gossip_for_discovery(discovery)
}
let gossip = gossip_builder.spawn(ep.clone());
let router = Router::builder(ep.clone())
.accept(GOSSIP_ALPN, gossip.clone())
.spawn()
.await?;
Ok((router, gossip))
}
let (relay_map, relay_url, _guard) = iroh::test_utils::run_relay_server().await.unwrap();
let mut rng = &mut rand_chacha::ChaCha12Rng::seed_from_u64(5);
let topic_id = TopicId::from_bytes(rng.gen());
// Run the same scenario once without and once with gossip-based discovery.
let use_discovery_steps = [false, true];
for use_discovery in use_discovery_steps {
// Spawn three nodes; FuturesOrdered keeps the index order stable so
// routers[i] / gossips[i] / node_ids[i] line up.
let (routers, gossips): (Vec<_>, Vec<_>) =
FuturesOrdered::from_iter((0..3).map(|_i| {
let secret_key = SecretKey::generate(&mut rng);
spawn_gossip(secret_key, relay_map.clone(), use_discovery)
}))
.try_collect::<_, _, Vec<_>>()
.await?
.into_iter()
.unzip();
let node_ids: Vec<_> = routers.iter().map(|r| r.endpoint().node_id()).collect();
let node_addrs: Vec<_> = node_ids
.iter()
.map(|node_id| NodeAddr::new(*node_id).with_relay_url(relay_url.clone()))
.collect();
// Only nodes 1 and 2 are given node 0's address; they do not know
// each other's addresses directly.
routers[1].endpoint().add_node_addr(node_addrs[0].clone())?;
routers[2].endpoint().add_node_addr(node_addrs[0].clone())?;
let topics = vec![
gossips[0].subscribe(topic_id, vec![])?,
gossips[1].subscribe(topic_id, vec![node_ids[0]])?,
gossips[2].subscribe(topic_id, vec![node_ids[0]])?,
];
let futs = topics.into_iter().enumerate().map(|(i, mut topic)| {
async move {
// Node 0 bootstraps both others, so it always ends with 2
// neighbors. Without discovery, nodes 1 and 2 can only reach
// node 0 (1 neighbor each); with discovery enabled they also
// find each other (2 neighbors each).
let expected_neighbors = match (i, use_discovery) {
(0, _) => 2,
(1 | 2, false) => 1,
(1 | 2, true) => 2,
_ => unreachable!(),
};
// Drain events: any event other than NeighborUp fails the test.
let fut = async {
loop {
let event = topic.next().await;
assert!(
matches!(
event,
Some(Ok(Event::Gossip(GossipEvent::NeighborUp(_))))
),
"unexpected event on node {i}: {event:?}"
);
}
};
// The loop above never exits on its own, so the 2s timeout must
// elapse — i.e. nothing but NeighborUp arrived in that window.
assert!(n0_future::time::timeout(Duration::from_secs(2), fut)
.await
.is_err());
assert_eq!(topic.neighbors().count(), expected_neighbors);
topic
}
});
let fut = FuturesOrdered::from_iter(futs).collect::<Vec<_>>();
let topics = n0_future::time::timeout(Duration::from_secs(3), fut).await?;
// Drop all subscriptions before the next iteration reuses the topic id.
drop(topics);
}
Ok(())
}
}