use std::{collections::BTreeMap, num::NonZeroUsize, path::Path, sync::Arc, time::Duration};
use anyhow::Result;
use hickory_proto::rr::{Name, RecordSet, RecordType, RrKey};
use iroh_metrics::inc;
use lru::LruCache;
use pkarr::{mainline::dht::DhtSettings, PkarrClient, SignedPacket};
use tokio::sync::Mutex;
use tracing::{debug, trace};
use ttl_cache::TtlCache;
use self::signed_packets::SignedPacketStore;
use crate::{
config::BootstrapOption,
metrics::Metrics,
util::{signed_packet_to_hickory_records_without_origin, PublicKeyBytes},
};
mod signed_packets;
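
/// Maximum number of zones kept in each in-memory cache (1024 * 1024 entries).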
pub const DEFAULT_CACHE_CAPACITY: usize = 1024 * 1024;
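
/// Time-to-live for zones resolved via the mainline DHT (5 minutes).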
pub const DHT_CACHE_TTL: Duration = Duration::from_secs(300);
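
/// The source from which a signed packet was received.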
pub enum PacketSource {
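    /// The packet was received via a pkarr publish request.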
PkarrPublish,
}
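
/// A store for pkarr signed packets.
///
/// Packets are persisted in a [`SignedPacketStore`], cached in memory as parsed zones for
/// resolving DNS queries, and optionally resolved from the mainline DHT as a fallback.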
#[derive(Debug, Clone)]
pub struct ZoneStore {
cache: Arc<Mutex<ZoneCache>>,
store: Arc<SignedPacketStore>,
pkarr: Option<Arc<PkarrClient>>,
}
impl ZoneStore {
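    /// Create a zone store that persists signed packets at the given path.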
pub fn persistent(path: impl AsRef<Path>) -> Result<Self> {
let packet_store = SignedPacketStore::persistent(path)?;
Ok(Self::new(packet_store))
}
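
    /// Create a zone store that keeps signed packets in memory only.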
pub fn in_memory() -> Result<Self> {
let packet_store = SignedPacketStore::in_memory()?;
Ok(Self::new(packet_store))
}
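
    /// Enable falling back to the mainline DHT (via a [`PkarrClient`]) when a zone
    /// cannot be resolved from the local store.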
pub fn with_mainline_fallback(self, bootstrap: BootstrapOption) -> Self {
let pkarr_client = match bootstrap {
            BootstrapOption::Default => PkarrClient::builder()
                .build()
                .expect("failed to build pkarr client with default settings"),
BootstrapOption::Custom(bootstrap) => PkarrClient::builder()
.dht_settings(DhtSettings {
bootstrap: Some(bootstrap),
..Default::default()
})
                .build()
                .expect("failed to build pkarr client with custom bootstrap nodes"),
};
Self {
pkarr: Some(Arc::new(pkarr_client)),
..self
}
}
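
    /// Create a zone store around an existing [`SignedPacketStore`], with an empty
    /// zone cache and no DHT fallback.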
pub fn new(store: SignedPacketStore) -> Self {
let zone_cache = ZoneCache::new(DEFAULT_CACHE_CAPACITY);
Self {
store: Arc::new(store),
cache: Arc::new(Mutex::new(zone_cache)),
pkarr: None,
}
}
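
    /// Resolve a DNS [`RecordSet`] for `name` and `record_type` in the zone identified by `pubkey`.
    ///
    /// Lookup order: the in-memory zone cache, then the signed packet store, and finally
    /// (if configured) the mainline DHT via pkarr.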
#[allow(clippy::unused_async)]
pub async fn resolve(
&self,
pubkey: &PublicKeyBytes,
name: &Name,
record_type: RecordType,
) -> Result<Option<Arc<RecordSet>>> {
        trace!("resolve {} {}", name, record_type);
if let Some(rset) = self.cache.lock().await.resolve(pubkey, name, record_type) {
return Ok(Some(rset));
}
if let Some(packet) = self.store.get(pubkey).await? {
return self
.cache
.lock()
.await
.insert_and_resolve(&packet, name, record_type);
};
if let Some(pkarr) = self.pkarr.as_ref() {
let key = pkarr::PublicKey::try_from(pubkey.as_bytes()).expect("valid public key");
debug!("DHT resolve {}", key.to_z32());
let packet_opt = pkarr.as_ref().clone().as_async().resolve(&key).await?;
if let Some(packet) = packet_opt {
debug!("DHT resolve successful {:?}", packet.packet());
return self
.cache
.lock()
.await
.insert_and_resolve_dht(&packet, name, record_type);
} else {
debug!("DHT resolve failed");
}
}
Ok(None)
}
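
    /// Get the stored [`SignedPacket`] for `pubkey`, if any.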
#[allow(clippy::unused_async)]
pub async fn get_signed_packet(&self, pubkey: &PublicKeyBytes) -> Result<Option<SignedPacket>> {
self.store.get(pubkey).await
}
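
    /// Insert a signed packet into the persistent store, invalidating the in-memory
    /// cache entry for the zone on update.
    ///
    /// Returns `true` if the stored packet for this zone was created or updated,
    /// `false` if the store was left unchanged.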
#[allow(clippy::unused_async)]
pub async fn insert(&self, signed_packet: SignedPacket, _source: PacketSource) -> Result<bool> {
let pubkey = PublicKeyBytes::from_signed_packet(&signed_packet);
if self.store.upsert(signed_packet).await? {
inc!(Metrics, pkarr_publish_update);
self.cache.lock().await.remove(&pubkey);
Ok(true)
} else {
inc!(Metrics, pkarr_publish_noop);
Ok(false)
}
}
}
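
/// In-memory cache of zones parsed from signed packets.
///
/// Locally stored zones live in an LRU cache; zones resolved via the DHT live in a
/// separate TTL cache so that they expire after [`DHT_CACHE_TTL`].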
#[derive(derive_more::Debug)]
struct ZoneCache {
cache: LruCache<PublicKeyBytes, CachedZone>,
#[debug("dht_cache")]
dht_cache: TtlCache<PublicKeyBytes, CachedZone>,
}
impl ZoneCache {
fn new(cap: usize) -> Self {
let cache = LruCache::new(NonZeroUsize::new(cap).expect("capacity must be larger than 0"));
let dht_cache = TtlCache::new(cap);
Self { cache, dht_cache }
}
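
    /// Look up a record set, checking the LRU cache first and the DHT cache second.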
fn resolve(
&mut self,
pubkey: &PublicKeyBytes,
name: &Name,
record_type: RecordType,
) -> Option<Arc<RecordSet>> {
let zone = if let Some(zone) = self.cache.get(pubkey) {
trace!("cache hit {}", pubkey.to_z32());
zone
} else if let Some(zone) = self.dht_cache.get(pubkey) {
trace!("dht cache hit {}", pubkey.to_z32());
zone
} else {
return None;
};
zone.resolve(name, record_type)
}
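
    /// Insert the packet's zone into the LRU cache and resolve the query from the cache.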
fn insert_and_resolve(
&mut self,
signed_packet: &SignedPacket,
name: &Name,
record_type: RecordType,
) -> Result<Option<Arc<RecordSet>>> {
let pubkey = PublicKeyBytes::from_signed_packet(signed_packet);
self.insert(signed_packet)?;
Ok(self.resolve(&pubkey, name, record_type))
}
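
    /// Parse the packet into a zone, resolve the query from it, and cache the zone in
    /// the TTL-bounded DHT cache.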
fn insert_and_resolve_dht(
&mut self,
signed_packet: &SignedPacket,
name: &Name,
record_type: RecordType,
) -> Result<Option<Arc<RecordSet>>> {
let pubkey = PublicKeyBytes::from_signed_packet(signed_packet);
let zone = CachedZone::from_signed_packet(signed_packet)?;
let res = zone.resolve(name, record_type);
self.dht_cache.insert(pubkey, zone, DHT_CACHE_TTL);
Ok(res)
}
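
    /// Insert a zone parsed from `signed_packet` into the LRU cache, unless a newer
    /// zone is already cached for the same key.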
fn insert(&mut self, signed_packet: &SignedPacket) -> Result<()> {
let pubkey = PublicKeyBytes::from_signed_packet(signed_packet);
if self
.cache
.peek(&pubkey)
.map(|old| old.is_newer_than(signed_packet))
.unwrap_or(false)
{
return Ok(());
}
self.cache
.put(pubkey, CachedZone::from_signed_packet(signed_packet)?);
Ok(())
}
fn remove(&mut self, pubkey: &PublicKeyBytes) {
self.cache.pop(pubkey);
self.dht_cache.remove(pubkey);
}
}
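
/// A zone parsed from a signed packet: the packet's timestamp plus its records,
/// keyed for hickory-dns lookups.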
#[derive(Debug)]
struct CachedZone {
timestamp: u64,
records: BTreeMap<RrKey, Arc<RecordSet>>,
}
impl CachedZone {
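    /// Parse a cached zone from a signed pkarr packet, converting its records into
    /// hickory-dns record sets.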
fn from_signed_packet(signed_packet: &SignedPacket) -> Result<Self> {
let (_label, records) =
signed_packet_to_hickory_records_without_origin(signed_packet, |_| true)?;
Ok(Self {
records,
timestamp: signed_packet.timestamp(),
})
}
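
    /// Whether this cached zone was created from a packet newer than `signed_packet`.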
fn is_newer_than(&self, signed_packet: &SignedPacket) -> bool {
self.timestamp > signed_packet.timestamp()
}
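
    /// Look up the record set for `name` and `record_type` in this zone.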
fn resolve(&self, name: &Name, record_type: RecordType) -> Option<Arc<RecordSet>> {
let key = RrKey::new(name.into(), record_type);
for record in self.records.keys() {
            trace!("record {:?}", record);
}
self.records.get(&key).cloned()
}
}