From 9ecc9031ac34f6ae0f8e5b996999277b02b3038e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 22 Apr 2024 17:01:57 +0200 Subject: [PATCH 1/9] Peers and clients adminspace (#960) * Peers,clients adminspace reports under @/peer,@/client keys * Only report linkstate graphs when they exist * Remove useless zid_str --------- Co-authored-by: Luca Cominardi --- zenoh/src/net/runtime/adminspace.rs | 155 +++++++++++++++++----------- 1 file changed, 93 insertions(+), 62 deletions(-) diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index f6e15ef113..d62379b862 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -28,7 +28,7 @@ use std::sync::Arc; use std::sync::Mutex; use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; -use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; +use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI}; #[cfg(all(feature = "unstable", feature = "plugins"))] use zenoh_plugin_trait::{PluginControl, PluginStatus}; #[cfg(all(feature = "unstable", feature = "plugins"))] @@ -47,7 +47,6 @@ use zenoh_transport::unicast::TransportUnicast; pub struct AdminContext { runtime: Runtime, - zid_str: String, version: String, metadata: serde_json::Value, } @@ -141,35 +140,45 @@ impl AdminSpace { pub async fn start(runtime: &Runtime, version: String) { let zid_str = runtime.state.zid.to_string(); + let whatami_str = runtime.state.whatami.to_str(); + let mut config = runtime.config().lock(); let metadata = runtime.state.metadata.clone(); - let root_key: OwnedKeyExpr = format!("@/router/{zid_str}").try_into().unwrap(); + let root_key: OwnedKeyExpr = format!("@/{whatami_str}/{zid_str}").try_into().unwrap(); let mut handlers: HashMap<_, Handler> = HashMap::new(); - handlers.insert(root_key.clone(), Arc::new(router_data)); + handlers.insert(root_key.clone(), Arc::new(local_data)); handlers.insert( - 
format!("@/router/{zid_str}/metrics").try_into().unwrap(), - Arc::new(router_metrics), - ); - handlers.insert( - format!("@/router/{zid_str}/linkstate/routers") - .try_into() - .unwrap(), - Arc::new(routers_linkstate_data), - ); - handlers.insert( - format!("@/router/{zid_str}/linkstate/peers") + format!("@/{whatami_str}/{zid_str}/metrics") .try_into() .unwrap(), - Arc::new(peers_linkstate_data), + Arc::new(metrics), ); + if runtime.state.whatami == WhatAmI::Router { + handlers.insert( + format!("@/{whatami_str}/{zid_str}/linkstate/routers") + .try_into() + .unwrap(), + Arc::new(routers_linkstate_data), + ); + } + if runtime.state.whatami != WhatAmI::Client + && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate" + { + handlers.insert( + format!("@/{whatami_str}/{zid_str}/linkstate/peers") + .try_into() + .unwrap(), + Arc::new(peers_linkstate_data), + ); + } handlers.insert( - format!("@/router/{zid_str}/subscriber/**") + format!("@/{whatami_str}/{zid_str}/subscriber/**") .try_into() .unwrap(), Arc::new(subscribers_data), ); handlers.insert( - format!("@/router/{zid_str}/queryable/**") + format!("@/{whatami_str}/{zid_str}/queryable/**") .try_into() .unwrap(), Arc::new(queryables_data), @@ -177,13 +186,15 @@ impl AdminSpace { #[cfg(all(feature = "unstable", feature = "plugins"))] handlers.insert( - format!("@/router/{zid_str}/plugins/**").try_into().unwrap(), + format!("@/{whatami_str}/{zid_str}/plugins/**") + .try_into() + .unwrap(), Arc::new(plugins_data), ); #[cfg(all(feature = "unstable", feature = "plugins"))] handlers.insert( - format!("@/router/{zid_str}/status/plugins/**") + format!("@/{whatami_str}/{zid_str}/status/plugins/**") .try_into() .unwrap(), Arc::new(plugins_status), @@ -198,25 +209,18 @@ impl AdminSpace { let context = Arc::new(AdminContext { runtime: runtime.clone(), - zid_str, version, metadata, }); let admin = Arc::new(AdminSpace { - zid: runtime.zid(), + zid: runtime.state.zid, primitives: Mutex::new(None), mappings: 
Mutex::new(HashMap::new()), handlers, context, }); - admin - .context - .runtime - .state - .config - .lock() - .set_plugin_validator(Arc::downgrade(&admin)); + config.set_plugin_validator(Arc::downgrade(&admin)); #[cfg(all(feature = "unstable", feature = "plugins"))] { @@ -362,37 +366,42 @@ impl Primitives for AdminSpace { } } - if let Some(key) = msg - .wire_expr - .as_str() - .strip_prefix(&format!("@/router/{}/config/", &self.context.zid_str)) - { + if let Some(key) = msg.wire_expr.as_str().strip_prefix(&format!( + "@/{}/{}/config/", + self.context.runtime.state.whatami, self.context.runtime.state.zid + )) { match msg.payload { PushBody::Put(put) => match std::str::from_utf8(&put.payload.contiguous()) { Ok(json) => { tracing::trace!( - "Insert conf value /@/router/{}/config/{} : {}", - &self.context.zid_str, + "Insert conf value /@/{}/{}/config/{} : {}", + self.context.runtime.state.whatami, + self.context.runtime.state.zid, key, json ); if let Err(e) = (&self.context.runtime.state.config).insert_json5(key, json) { error!( - "Error inserting conf value /@/router/{}/config/{} : {} - {}", - &self.context.zid_str, key, json, e + "Error inserting conf value /@/{}/{}/config/{} : {} - {}", + self.context.runtime.state.whatami, + self.context.runtime.state.zid, + key, + json, + e ); } } Err(e) => error!( - "Received non utf8 conf value on /@/router/{}/config/{} : {}", - &self.context.zid_str, key, e + "Received non utf8 conf value on /@/{}/{}/config/{} : {}", + self.context.runtime.state.whatami, self.context.runtime.state.zid, key, e ), }, PushBody::Del(_) => { tracing::trace!( - "Deleting conf value /@/router/{}/config/{}", - &self.context.zid_str, + "Deleting conf value /@/{}/{}/config/{}", + self.context.runtime.state.whatami, + self.context.runtime.state.zid, key ); if let Err(e) = self.context.runtime.state.config.remove(key) { @@ -510,8 +519,13 @@ impl crate::net::primitives::EPrimitives for AdminSpace { } } -fn router_data(context: &AdminContext, query: Query) 
{ - let reply_key: OwnedKeyExpr = format!("@/router/{}", context.zid_str).try_into().unwrap(); +fn local_data(context: &AdminContext, query: Query) { + let reply_key: OwnedKeyExpr = format!( + "@/{}/{}", + context.runtime.state.whatami, context.runtime.state.zid + ) + .try_into() + .unwrap(); let transport_mgr = context.runtime.manager().clone(); @@ -568,7 +582,7 @@ fn router_data(context: &AdminContext, query: Query) { #[allow(unused_mut)] let mut json = json!({ - "zid": context.zid_str, + "zid": context.runtime.state.zid, "version": context.version, "metadata": context.metadata, "locators": locators, @@ -601,10 +615,13 @@ fn router_data(context: &AdminContext, query: Query) { } } -fn router_metrics(context: &AdminContext, query: Query) { - let reply_key: OwnedKeyExpr = format!("@/router/{}/metrics", context.zid_str) - .try_into() - .unwrap(); +fn metrics(context: &AdminContext, query: Query) { + let reply_key: OwnedKeyExpr = format!( + "@/{}/{}/metrics", + context.runtime.state.whatami, context.runtime.state.zid + ) + .try_into() + .unwrap(); #[allow(unused_mut)] let mut metrics = format!( r#"# HELP zenoh_build Informations about zenoh. 
@@ -636,9 +653,12 @@ zenoh_build{{version="{}"}} 1 } fn routers_linkstate_data(context: &AdminContext, query: Query) { - let reply_key: OwnedKeyExpr = format!("@/router/{}/linkstate/routers", context.zid_str) - .try_into() - .unwrap(); + let reply_key: OwnedKeyExpr = format!( + "@/{}/{}/linkstate/routers", + context.runtime.state.whatami, context.runtime.state.zid + ) + .try_into() + .unwrap(); let tables = zread!(context.runtime.state.router.tables.tables); @@ -661,9 +681,12 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { } fn peers_linkstate_data(context: &AdminContext, query: Query) { - let reply_key: OwnedKeyExpr = format!("@/router/{}/linkstate/peers", context.zid_str) - .try_into() - .unwrap(); + let reply_key: OwnedKeyExpr = format!( + "@/{}/{}/linkstate/peers", + context.runtime.state.whatami, context.runtime.state.zid + ) + .try_into() + .unwrap(); let tables = zread!(context.runtime.state.router.tables.tables); @@ -689,8 +712,9 @@ fn subscribers_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.state.router.tables.tables); for sub in tables.hat_code.get_subscriptions(&tables) { let key = KeyExpr::try_from(format!( - "@/router/{}/subscriber/{}", - context.zid_str, + "@/{}/{}/subscriber/{}", + context.runtime.state.whatami, + context.runtime.state.zid, sub.expr() )) .unwrap(); @@ -706,8 +730,9 @@ fn queryables_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.state.router.tables.tables); for qabl in tables.hat_code.get_queryables(&tables) { let key = KeyExpr::try_from(format!( - "@/router/{}/queryable/{}", - context.zid_str, + "@/{}/{}/queryable/{}", + context.runtime.state.whatami, + context.runtime.state.zid, qabl.expr() )) .unwrap(); @@ -722,7 +747,10 @@ fn queryables_data(context: &AdminContext, query: Query) { #[cfg(all(feature = "unstable", feature = "plugins"))] fn plugins_data(context: &AdminContext, query: Query) { let guard = context.runtime.plugins_manager(); - let 
root_key = format!("@/router/{}/plugins", &context.zid_str); + let root_key = format!( + "@/{}/{}/plugins", + context.runtime.state.whatami, &context.runtime.state.zid + ); let root_key = unsafe { keyexpr::from_str_unchecked(&root_key) }; tracing::debug!("requested plugins status {:?}", query.key_expr()); if let [names, ..] = query.key_expr().strip_prefix(root_key)[..] { @@ -740,9 +768,12 @@ fn plugins_data(context: &AdminContext, query: Query) { #[cfg(all(feature = "unstable", feature = "plugins"))] fn plugins_status(context: &AdminContext, query: Query) { - let selector: crate::Selector<'_> = query.selector(); + let selector = query.selector(); let guard = context.runtime.plugins_manager(); - let mut root_key = format!("@/router/{}/status/plugins/", &context.zid_str); + let mut root_key = format!( + "@/{}/{}/status/plugins/", + context.runtime.state.whatami, &context.runtime.state.zid + ); for plugin in guard.started_plugins_iter() { with_extended_string(&mut root_key, &[plugin.name()], |plugin_key| { From 81217c779274f6a67ac645ef8082b09a0a4a5320 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 22 Apr 2024 18:13:55 +0200 Subject: [PATCH 2/9] Admin space: Show known origins of Subscribers and Queryables. 
(#959) * Subscribers are reported with known sources in adminspace * Return valid empty json in case of serialisation failure * Queryables are reported with known sources in adminspace * Address review comments --- zenoh/src/net/routing/hat/client/pubsub.rs | 18 ++++++--- zenoh/src/net/routing/hat/client/queries.rs | 18 ++++++--- .../net/routing/hat/linkstate_peer/pubsub.rs | 29 ++++++++++++-- .../net/routing/hat/linkstate_peer/queries.rs | 29 ++++++++++++-- zenoh/src/net/routing/hat/mod.rs | 23 +++++++++-- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 18 ++++++--- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 18 ++++++--- zenoh/src/net/routing/hat/router/pubsub.rs | 39 +++++++++++++++++-- zenoh/src/net/routing/hat/router/queries.rs | 39 +++++++++++++++++-- zenoh/src/net/runtime/adminspace.rs | 24 ++++++++++-- 10 files changed, 216 insertions(+), 39 deletions(-) diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 3845917240..fb92ae614d 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -17,11 +17,11 @@ use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::HatPubSubTrait; +use crate::net::routing::hat::{HatPubSubTrait, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ @@ -274,11 +274,19 @@ impl HatPubSubTrait for HatCode { forget_client_subscription(tables, face, res); } - fn get_subscriptions(&self, tables: &Tables) -> Vec> { - let mut subs = HashSet::new(); + fn 
get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { + // Compute the list of known suscriptions (keys) + let mut subs = HashMap::new(); for src_face in tables.faces.values() { for sub in &face_hat!(src_face).remote_subs { - subs.insert(sub.clone()); + // Insert the key in the list of known suscriptions + let srcs = subs.entry(sub.clone()).or_insert_with(Sources::empty); + // Append src_face as a suscription source in the proper list + match src_face.whatami { + WhatAmI::Router => srcs.routers.push(src_face.zid), + WhatAmI::Peer => srcs.peers.push(src_face.zid), + WhatAmI::Client => srcs.clients.push(src_face.zid), + } } } Vec::from_iter(subs) diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 609d6e0b04..3576148aaf 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -17,12 +17,12 @@ use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::HatQueriesTrait; +use crate::net::routing::hat::{HatQueriesTrait, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; @@ -272,11 +272,19 @@ impl HatQueriesTrait for HatCode { forget_client_queryable(tables, face, res); } - fn get_queryables(&self, tables: &Tables) -> Vec> { - let mut qabls = HashSet::new(); + fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { + // Compute the list of known queryables (keys) + let mut 
qabls = HashMap::new(); for src_face in tables.faces.values() { for qabl in &face_hat!(src_face).remote_qabls { - qabls.insert(qabl.clone()); + // Insert the key in the list of known queryables + let srcs = qabls.entry(qabl.clone()).or_insert_with(Sources::empty); + // Append src_face as a queryable source in the proper list + match src_face.whatami { + WhatAmI::Router => srcs.routers.push(src_face.zid), + WhatAmI::Peer => srcs.peers.push(src_face.zid), + WhatAmI::Client => srcs.clients.push(src_face.zid), + } } } Vec::from_iter(qabls) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 02b86de6b0..f0f8b77111 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -19,7 +19,7 @@ use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::HatPubSubTrait; +use crate::net::routing::hat::{HatPubSubTrait, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; @@ -605,8 +605,31 @@ impl HatPubSubTrait for HatCode { } } - fn get_subscriptions(&self, tables: &Tables) -> Vec> { - hat!(tables).peer_subs.iter().cloned().collect() + fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { + // Compute the list of known suscriptions (keys) + hat!(tables) + .peer_subs + .iter() + .map(|s| { + ( + s.clone(), + // Compute the list of routers, peers and clients that are known + // sources of those subscriptions + Sources { + routers: vec![], + peers: Vec::from_iter(res_hat!(s).peer_subs.iter().cloned()), + clients: s + .session_ctxs + .values() + .filter_map(|f| { + (f.face.whatami == WhatAmI::Client && f.subs.is_some()) 
+ .then_some(f.face.zid) + }) + .collect(), + }, + ) + }) + .collect() } fn compute_data_route( diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index ba9b7bc02d..fa48a66ee5 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -19,7 +19,7 @@ use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::HatQueriesTrait; +use crate::net::routing::hat::{HatQueriesTrait, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; @@ -670,8 +670,31 @@ impl HatQueriesTrait for HatCode { } } - fn get_queryables(&self, tables: &Tables) -> Vec> { - hat!(tables).peer_qabls.iter().cloned().collect() + fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { + // Compute the list of known queryables (keys) + hat!(tables) + .peer_qabls + .iter() + .map(|s| { + ( + s.clone(), + // Compute the list of routers, peers and clients that are known + // sources of those queryables + Sources { + routers: vec![], + peers: Vec::from_iter(res_hat!(s).peer_qabls.keys().cloned()), + clients: s + .session_ctxs + .values() + .filter_map(|f| { + (f.face.whatami == WhatAmI::Client && f.qabl.is_some()) + .then_some(f.face.zid) + }) + .collect(), + }, + ) + }) + .collect() } fn compute_query_route( diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 4fbf9c9e5d..2752a80959 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -27,7 +27,7 @@ use super::{ use crate::runtime::Runtime; use std::{any::Any, sync::Arc}; use zenoh_buffers::ZBuf; -use 
zenoh_config::{unwrap_or_default, Config, WhatAmI}; +use zenoh_config::{unwrap_or_default, Config, WhatAmI, ZenohId}; use zenoh_protocol::{ core::WireExpr, network::{ @@ -47,6 +47,23 @@ zconfigurable! { pub static ref TREES_COMPUTATION_DELAY_MS: u64 = 100; } +#[derive(serde::Serialize)] +pub(crate) struct Sources { + routers: Vec, + peers: Vec, + clients: Vec, +} + +impl Sources { + pub(crate) fn empty() -> Self { + Self { + routers: vec![], + peers: vec![], + clients: vec![], + } + } +} + pub(crate) trait HatTrait: HatBaseTrait + HatPubSubTrait + HatQueriesTrait {} pub(crate) trait HatBaseTrait { @@ -129,7 +146,7 @@ pub(crate) trait HatPubSubTrait { node_id: NodeId, ); - fn get_subscriptions(&self, tables: &Tables) -> Vec>; + fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)>; fn compute_data_route( &self, @@ -159,7 +176,7 @@ pub(crate) trait HatQueriesTrait { node_id: NodeId, ); - fn get_queryables(&self, tables: &Tables) -> Vec>; + fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)>; fn compute_query_route( &self, diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 432b8e137e..bbaf0f5bac 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -17,11 +17,11 @@ use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::HatPubSubTrait; +use crate::net::routing::hat::{HatPubSubTrait, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ @@ -275,11 
+275,19 @@ impl HatPubSubTrait for HatCode { forget_client_subscription(tables, face, res); } - fn get_subscriptions(&self, tables: &Tables) -> Vec> { - let mut subs = HashSet::new(); + fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { + // Compute the list of known suscriptions (keys) + let mut subs = HashMap::new(); for src_face in tables.faces.values() { for sub in &face_hat!(src_face).remote_subs { - subs.insert(sub.clone()); + // Insert the key in the list of known suscriptions + let srcs = subs.entry(sub.clone()).or_insert_with(Sources::empty); + // Append src_face as a suscription source in the proper list + match src_face.whatami { + WhatAmI::Router => srcs.routers.push(src_face.zid), + WhatAmI::Peer => srcs.peers.push(src_face.zid), + WhatAmI::Client => srcs.clients.push(src_face.zid), + } } } Vec::from_iter(subs) diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 0937e22a65..aeaee21409 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -17,12 +17,12 @@ use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::HatQueriesTrait; +use crate::net::routing::hat::{HatQueriesTrait, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; @@ -272,11 +272,19 @@ impl HatQueriesTrait for HatCode { forget_client_queryable(tables, face, res); } - fn 
get_queryables(&self, tables: &Tables) -> Vec> { - let mut qabls = HashSet::new(); + fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { + // Compute the list of known queryables (keys) + let mut qabls = HashMap::new(); for src_face in tables.faces.values() { for qabl in &face_hat!(src_face).remote_qabls { - qabls.insert(qabl.clone()); + // Insert the key in the list of known queryables + let srcs = qabls.entry(qabl.clone()).or_insert_with(Sources::empty); + // Append src_face as a queryable source in the proper list + match src_face.whatami { + WhatAmI::Router => srcs.routers.push(src_face.zid), + WhatAmI::Peer => srcs.peers.push(src_face.zid), + WhatAmI::Client => srcs.clients.push(src_face.zid), + } } } Vec::from_iter(qabls) diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 6bf91a0605..b7d00227c0 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -19,7 +19,7 @@ use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::HatPubSubTrait; +use crate::net::routing::hat::{HatPubSubTrait, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; @@ -925,8 +925,41 @@ impl HatPubSubTrait for HatCode { } } - fn get_subscriptions(&self, tables: &Tables) -> Vec> { - hat!(tables).router_subs.iter().cloned().collect() + fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { + // Compute the list of known suscriptions (keys) + hat!(tables) + .router_subs + .iter() + .map(|s| { + ( + s.clone(), + // Compute the list of routers, peers and clients that are known + // sources of those subscriptions + Sources { + routers: 
Vec::from_iter(res_hat!(s).router_subs.iter().cloned()), + peers: if hat!(tables).full_net(WhatAmI::Peer) { + Vec::from_iter(res_hat!(s).peer_subs.iter().cloned()) + } else { + s.session_ctxs + .values() + .filter_map(|f| { + (f.face.whatami == WhatAmI::Peer && f.subs.is_some()) + .then_some(f.face.zid) + }) + .collect() + }, + clients: s + .session_ctxs + .values() + .filter_map(|f| { + (f.face.whatami == WhatAmI::Client && f.subs.is_some()) + .then_some(f.face.zid) + }) + .collect(), + }, + ) + }) + .collect() } fn compute_data_route( diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 2451b8c2b6..28ff0800db 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -19,7 +19,7 @@ use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::HatQueriesTrait; +use crate::net::routing::hat::{HatQueriesTrait, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; @@ -1073,8 +1073,41 @@ impl HatQueriesTrait for HatCode { } } - fn get_queryables(&self, tables: &Tables) -> Vec> { - hat!(tables).router_qabls.iter().cloned().collect() + fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { + // Compute the list of known queryables (keys) + hat!(tables) + .router_qabls + .iter() + .map(|s| { + ( + s.clone(), + // Compute the list of routers, peers and clients that are known + // sources of those queryables + Sources { + routers: Vec::from_iter(res_hat!(s).router_qabls.keys().cloned()), + peers: if hat!(tables).full_net(WhatAmI::Peer) { + Vec::from_iter(res_hat!(s).peer_qabls.keys().cloned()) + } else { + 
s.session_ctxs + .values() + .filter_map(|f| { + (f.face.whatami == WhatAmI::Peer && f.qabl.is_some()) + .then_some(f.face.zid) + }) + .collect() + }, + clients: s + .session_ctxs + .values() + .filter_map(|f| { + (f.face.whatami == WhatAmI::Client && f.qabl.is_some()) + .then_some(f.face.zid) + }) + .collect(), + }, + ) + }) + .collect() } fn compute_query_route( diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index d62379b862..0040c96666 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -715,11 +715,18 @@ fn subscribers_data(context: &AdminContext, query: Query) { "@/{}/{}/subscriber/{}", context.runtime.state.whatami, context.runtime.state.zid, - sub.expr() + sub.0.expr() )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { + if let Err(e) = query + .reply(Ok(Sample::new( + key, + Value::from(serde_json::to_string(&sub.1).unwrap_or_else(|_| "{}".to_string())) + .encoding(KnownEncoding::AppJson.into()), + ))) + .res() + { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -733,11 +740,20 @@ fn queryables_data(context: &AdminContext, query: Query) { "@/{}/{}/queryable/{}", context.runtime.state.whatami, context.runtime.state.zid, - qabl.expr() + qabl.0.expr() )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { + if let Err(e) = query + .reply(Ok(Sample::new( + key, + Value::from( + serde_json::to_string(&qabl.1).unwrap_or_else(|_| "{}".to_string()), + ) + .encoding(KnownEncoding::AppJson.into()), + ))) + .res() + { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } From 2fdddae537c3038c9222dadc2423d9fb82b324f8 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Tue, 23 Apr 2024 13:34:17 +0200 Subject: [PATCH 3/9] fix: Enable `unstable` and `plugins` features in plugins (#965) --- 
plugins/zenoh-plugin-example/Cargo.toml | 2 +- plugins/zenoh-plugin-rest/Cargo.toml | 2 +- plugins/zenoh-plugin-storage-manager/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/zenoh-plugin-example/Cargo.toml b/plugins/zenoh-plugin-example/Cargo.toml index 8e6814590f..ce12dbf18e 100644 --- a/plugins/zenoh-plugin-example/Cargo.toml +++ b/plugins/zenoh-plugin-example/Cargo.toml @@ -20,7 +20,7 @@ edition = { workspace = true } publish = false [features] -default = ["no_mangle", "zenoh/default"] +default = ["no_mangle", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] no_mangle = [] [lib] diff --git a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index 19fa9eafdc..8d88368643 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -24,7 +24,7 @@ categories = ["network-programming", "web-programming::http-server"] description = "The zenoh REST plugin" [features] -default = ["no_mangle", "zenoh/default"] +default = ["no_mangle", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] no_mangle = [] [lib] diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 35a5232cf0..1e37093a78 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -24,7 +24,7 @@ categories = { workspace = true } description = "The zenoh storages plugin." 
[features] -default = ["no_mangle", "zenoh/default"] +default = ["no_mangle", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] no_mangle = [] [lib] From 9a9832a407300763af6e30652ac33bcaab2c94e4 Mon Sep 17 00:00:00 2001 From: Gabriele Baldoni Date: Wed, 24 Apr 2024 14:23:49 +0000 Subject: [PATCH 4/9] fix(771): Adding mTLS support in QUIC (#899) * refactor(tls-quic): moving shared code into zenoh-link-commons::tls Signed-off-by: gabrik * fix(mtls-quic): adding support for mTLS in QUIC [no ci] - broken Signed-off-by: gabrik * fix(mtls-quic): using current release of quinn at the cost of some duplicated code Signed-off-by: gabrik * test(quic-mlts): added tests for QUIC with mTLS, using rustls 0.22 to workaround the default CryptoProvider panic Signed-off-by: gabrik * chore: addressing comments Signed-off-by: gabrik * Apply suggestions from code review --------- Signed-off-by: gabrik Co-authored-by: Luca Cominardi --- Cargo.lock | 22 +- io/zenoh-link-commons/Cargo.toml | 35 +- io/zenoh-links/zenoh-link-quic/Cargo.toml | 33 +- io/zenoh-links/zenoh-link-quic/src/lib.rs | 122 +---- io/zenoh-links/zenoh-link-quic/src/unicast.rs | 129 +---- io/zenoh-links/zenoh-link-quic/src/utils.rs | 509 ++++++++++++++++++ io/zenoh-links/zenoh-link-tls/Cargo.toml | 20 +- io/zenoh-links/zenoh-link-tls/src/lib.rs | 157 +----- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 326 +---------- io/zenoh-links/zenoh-link-tls/src/utils.rs | 480 +++++++++++++++++ io/zenoh-transport/Cargo.toml | 1 + io/zenoh-transport/tests/unicast_transport.rs | 234 +++++++- 12 files changed, 1334 insertions(+), 734 deletions(-) create mode 100644 io/zenoh-links/zenoh-link-quic/src/utils.rs create mode 100644 io/zenoh-links/zenoh-link-tls/src/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 16f7b4d1a0..36078d0238 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3118,9 +3118,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.2" +version = "0.22.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", "ring 0.17.6", @@ -4041,7 +4041,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.22.2", + "rustls 0.22.4", "rustls-pki-types", "tokio", ] @@ -5109,16 +5109,19 @@ name = "zenoh-link-commons" version = "0.11.0-dev" dependencies = [ "async-trait", + "base64 0.21.4", "flume", "futures", - "rustls 0.22.2", + "rustls 0.22.4", "rustls-webpki 0.102.2", "serde", "tokio", "tokio-util", "tracing", + "webpki-roots", "zenoh-buffers", "zenoh-codec", + "zenoh-config", "zenoh-core", "zenoh-protocol", "zenoh-result", @@ -5136,13 +5139,15 @@ dependencies = [ "quinn", "rustls 0.21.7", "rustls-native-certs 0.7.0", - "rustls-pemfile 2.0.0", + "rustls-pemfile 1.0.3", + "rustls-pki-types", "rustls-webpki 0.102.2", "secrecy", "tokio", "tokio-rustls 0.24.1", "tokio-util", "tracing", + "webpki-roots", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -5198,7 +5203,7 @@ dependencies = [ "async-trait", "base64 0.21.4", "futures", - "rustls 0.22.2", + "rustls 0.22.4", "rustls-pemfile 2.0.0", "rustls-pki-types", "rustls-webpki 0.102.2", @@ -5516,6 +5521,7 @@ dependencies = [ "zenoh-core", "zenoh-crypto", "zenoh-link", + "zenoh-link-commons", "zenoh-protocol", "zenoh-result", "zenoh-runtime", @@ -5605,6 +5611,6 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" diff --git a/io/zenoh-link-commons/Cargo.toml b/io/zenoh-link-commons/Cargo.toml index 
f2e10616c1..12b70cad6d 100644 --- a/io/zenoh-link-commons/Cargo.toml +++ b/io/zenoh-link-commons/Cargo.toml @@ -12,16 +12,16 @@ # ZettaScale Zenoh Team, # [package] -rust-version = { workspace = true } -name = "zenoh-link-commons" -version = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } authors = { workspace = true } -edition = { workspace = true } -license = { workspace = true } categories = { workspace = true } description = "Internal crate for zenoh." +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +name = "zenoh-link-commons" +repository = { workspace = true } +rust-version = { workspace = true } +version = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] @@ -29,18 +29,27 @@ compression = [] [dependencies] async-trait = { workspace = true } +base64 = { workspace = true, optional = true } +flume = { workspace = true } +futures = { workspace = true } rustls = { workspace = true } rustls-webpki = { workspace = true } -flume = { workspace = true } -tracing = {workspace = true} serde = { workspace = true, features = ["default"] } +tokio = { workspace = true, features = [ + "fs", + "io-util", + "net", + "sync", + "time", +] } +tokio-util = { workspace = true, features = ["rt"] } +tracing = { workspace = true } +webpki-roots = { workspace = true, optional = true } zenoh-buffers = { workspace = true } zenoh-codec = { workspace = true } +zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } -zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } -tokio = { workspace = true, features = ["io-util", "net", "fs", "sync", "time"] } -tokio-util = { workspace = true, features = ["rt"] } -futures = { workspace = true } +zenoh-util = { workspace = true } diff --git 
a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index a10e18fd43..0e1c720d78 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -12,39 +12,46 @@ # ZettaScale Zenoh Team, # [package] -rust-version = { workspace = true } -name = "zenoh-link-quic" -version = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } authors = { workspace = true } -edition = { workspace = true } -license = { workspace = true } categories = { workspace = true } description = "Internal crate for zenoh." +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +name = "zenoh-link-quic" +repository = { workspace = true } +rust-version = { workspace = true } +version = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] async-trait = { workspace = true } base64 = { workspace = true } futures = { workspace = true } -tracing = {workspace = true} quinn = { workspace = true } rustls-native-certs = { workspace = true } -rustls-pemfile = { workspace = true } +rustls-pki-types = { workspace = true } rustls-webpki = { workspace = true } -secrecy = {workspace = true } -tokio = { workspace = true, features = ["io-util", "net", "fs", "sync", "time"] } +secrecy = { workspace = true } +tokio = { workspace = true, features = [ + "fs", + "io-util", + "net", + "sync", + "time", +] } tokio-util = { workspace = true, features = ["rt"] } +tracing = { workspace = true } +webpki-roots = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } +zenoh-runtime = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } -zenoh-runtime = { workspace = true } - # Lock due to quinn not supporting 
rustls 0.22 yet rustls = { version = "0.21", features = ["dangerous_configuration", "quic"] } tokio-rustls = "0.24.1" +rustls-pemfile = { version = "1" } diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index c6d7e16087..0c9bc7365e 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -18,25 +18,17 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use async_trait::async_trait; -use config::{ - TLS_ROOT_CA_CERTIFICATE_BASE64, TLS_ROOT_CA_CERTIFICATE_FILE, TLS_SERVER_CERTIFICATE_BASE64, - TLS_SERVER_CERTIFICATE_FILE, TLS_SERVER_NAME_VERIFICATION, TLS_SERVER_PRIVATE_KEY_BASE64, - TLS_SERVER_PRIVATE_KEY_FILE, -}; -use secrecy::ExposeSecret; -use std::net::SocketAddr; -use zenoh_config::Config; + use zenoh_core::zconfigurable; -use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{ - endpoint::{Address, Parameters}, - Locator, -}; -use zenoh_result::{bail, zerror, ZResult}; +use zenoh_link_commons::LocatorInspector; +use zenoh_protocol::core::Locator; +use zenoh_result::ZResult; mod unicast; +mod utils; mod verify; pub use unicast::*; +pub use utils::TlsConfigurator as QuicConfigurator; // Default ALPN protocol pub const ALPN_QUIC_HTTP: &[&[u8]] = &[b"hq-29"]; @@ -64,77 +56,6 @@ impl LocatorInspector for QuicLocatorInspector { } } -#[derive(Default, Clone, Copy, Debug)] -pub struct QuicConfigurator; - -impl ConfigurationInspector for QuicConfigurator { - fn inspect_config(&self, config: &Config) -> ZResult { - let mut ps: Vec<(&str, &str)> = vec![]; - - let c = config.transport().link().tls(); - - match (c.root_ca_certificate(), c.root_ca_certificate_base64()) { - (Some(_), Some(_)) => { - bail!("Only one between 'root_ca_certificate' and 'root_ca_certificate_base64' can be present!") - } - (Some(ca_certificate), None) => { - ps.push((TLS_ROOT_CA_CERTIFICATE_FILE, ca_certificate)); - } - (None, 
Some(ca_certificate)) => { - ps.push(( - TLS_ROOT_CA_CERTIFICATE_BASE64, - ca_certificate.expose_secret(), - )); - } - _ => {} - } - - match (c.server_private_key(), c.server_private_key_base64()) { - (Some(_), Some(_)) => { - bail!("Only one between 'server_private_key' and 'server_private_key_base64' can be present!") - } - (Some(server_private_key), None) => { - ps.push((TLS_SERVER_PRIVATE_KEY_FILE, server_private_key)); - } - (None, Some(server_private_key)) => { - ps.push(( - TLS_SERVER_PRIVATE_KEY_BASE64, - server_private_key.expose_secret(), - )); - } - _ => {} - } - - match (c.server_certificate(), c.server_certificate_base64()) { - (Some(_), Some(_)) => { - bail!("Only one between 'server_certificate' and 'server_certificate_base64' can be present!") - } - (Some(server_certificate), None) => { - ps.push((TLS_SERVER_CERTIFICATE_FILE, server_certificate)); - } - (None, Some(server_certificate)) => { - ps.push(( - TLS_SERVER_CERTIFICATE_BASE64, - server_certificate.expose_secret(), - )); - } - _ => {} - } - - if let Some(server_name_verification) = c.server_name_verification() { - match server_name_verification { - true => ps.push((TLS_SERVER_NAME_VERIFICATION, "true")), - false => ps.push((TLS_SERVER_NAME_VERIFICATION, "false")), - }; - } - - let mut s = String::new(); - Parameters::extend(ps.drain(..), &mut s); - - Ok(s) - } -} - zconfigurable! { // Default MTU (QUIC PDU) in bytes. 
static ref QUIC_DEFAULT_MTU: u16 = QUIC_MAX_MTU; @@ -157,25 +78,20 @@ pub mod config { pub const TLS_SERVER_PRIVATE_KEY_RAW: &str = "server_private_key_raw"; pub const TLS_SERVER_PRIVATE_KEY_BASE64: &str = "server_private_key_base64"; - pub const TLS_SERVER_CERTIFICATE_FILE: &str = "tls_server_certificate_file"; - pub const TLS_SERVER_CERTIFICATE_RAW: &str = "tls_server_certificate_raw"; - pub const TLS_SERVER_CERTIFICATE_BASE64: &str = "tls_server_certificate_base64"; + pub const TLS_SERVER_CERTIFICATE_FILE: &str = "server_certificate_file"; + pub const TLS_SERVER_CERTIFICATE_RAW: &str = "server_certificate_raw"; + pub const TLS_SERVER_CERTIFICATE_BASE64: &str = "server_certificate_base64"; - pub const TLS_SERVER_NAME_VERIFICATION: &str = "server_name_verification"; - pub const TLS_SERVER_NAME_VERIFICATION_DEFAULT: &str = "true"; -} + pub const TLS_CLIENT_PRIVATE_KEY_FILE: &str = "client_private_key_file"; + pub const TLS_CLIENT_PRIVATE_KEY_RAW: &str = "client_private_key_raw"; + pub const TLS_CLIENT_PRIVATE_KEY_BASE64: &str = "client_private_key_base64"; -async fn get_quic_addr(address: &Address<'_>) -> ZResult { - match tokio::net::lookup_host(address.as_str()).await?.next() { - Some(addr) => Ok(addr), - None => bail!("Couldn't resolve QUIC locator address: {}", address), - } -} + pub const TLS_CLIENT_CERTIFICATE_FILE: &str = "client_certificate_file"; + pub const TLS_CLIENT_CERTIFICATE_RAW: &str = "client_certificate_raw"; + pub const TLS_CLIENT_CERTIFICATE_BASE64: &str = "client_certificate_base64"; -pub fn base64_decode(data: &str) -> ZResult> { - use base64::engine::general_purpose; - use base64::Engine; - Ok(general_purpose::STANDARD - .decode(data) - .map_err(|e| zerror!("Unable to perform base64 decoding: {e:?}"))?) 
+ pub const TLS_CLIENT_AUTH: &str = "client_auth"; + + pub const TLS_SERVER_NAME_VERIFICATION: &str = "server_name_verification"; + pub const TLS_SERVER_NAME_VERIFICATION_DEFAULT: &str = "true"; } diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 8fd7777137..452fd8a122 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -12,16 +12,13 @@ // ZettaScale Zenoh Team, // -use crate::base64_decode; use crate::{ - config::*, get_quic_addr, verify::WebPkiVerifierAnyServerName, ALPN_QUIC_HTTP, - QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX, + config::*, + utils::{get_quic_addr, TlsClientConfig, TlsServerConfig}, + ALPN_QUIC_HTTP, QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX, }; use async_trait::async_trait; -use rustls::{Certificate, PrivateKey}; -use rustls_pemfile::Item; use std::fmt; -use std::io::BufReader; use std::net::IpAddr; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; @@ -34,7 +31,7 @@ use zenoh_link_commons::{ ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_result::{bail, zerror, ZError, ZResult}; +use zenoh_result::{bail, zerror, ZResult}; pub struct LinkUnicastQuic { connection: quinn::Connection, @@ -219,55 +216,12 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { } // Initialize the QUIC connection - let mut root_cert_store = rustls::RootCertStore::empty(); - - // Read the certificates - let f = if let Some(value) = epconf.get(TLS_ROOT_CA_CERTIFICATE_RAW) { - value.as_bytes().to_vec() - } else if let Some(b64_certificate) = epconf.get(TLS_ROOT_CA_CERTIFICATE_BASE64) { - base64_decode(b64_certificate)? - } else if let Some(value) = epconf.get(TLS_ROOT_CA_CERTIFICATE_FILE) { - tokio::fs::read(value) - .await - .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? 
- } else { - vec![] - }; - - let certificates = if f.is_empty() { - rustls_native_certs::load_native_certs() - .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? - .drain(..) - .map(|x| rustls::Certificate(x.to_vec())) - .collect::>() - } else { - rustls_pemfile::certs(&mut BufReader::new(f.as_slice())) - .map(|result| { - result - .map_err(|err| zerror!("Invalid QUIC CA certificate file: {}", err)) - .map(|der| Certificate(der.to_vec())) - }) - .collect::, ZError>>()? - }; - for c in certificates.iter() { - root_cert_store.add(c).map_err(|e| zerror!("{}", e))?; - } - - let client_crypto = rustls::ClientConfig::builder().with_safe_defaults(); - - let mut client_crypto = if server_name_verification { - client_crypto - .with_root_certificates(root_cert_store) - .with_no_client_auth() - } else { - client_crypto - .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( - root_cert_store, - ))) - .with_no_client_auth() - }; + let mut client_crypto = TlsClientConfig::new(&epconf) + .await + .map_err(|e| zerror!("Cannot create a new QUIC client on {addr}: {e}"))?; - client_crypto.alpn_protocols = ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); + client_crypto.client_config.alpn_protocols = + ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); let ip_addr: IpAddr = if addr.is_ipv4() { Ipv4Addr::UNSPECIFIED.into() @@ -276,7 +230,9 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { }; let mut quic_endpoint = quinn::Endpoint::client(SocketAddr::new(ip_addr, 0)) .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; - quic_endpoint.set_default_client_config(quinn::ClientConfig::new(Arc::new(client_crypto))); + quic_endpoint.set_default_client_config(quinn::ClientConfig::new(Arc::new( + client_crypto.client_config, + ))); let src_addr = quic_endpoint .local_addr() @@ -314,61 +270,14 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { let addr = get_quic_addr(&epaddr).await?; - let f = if let 
Some(value) = epconf.get(TLS_SERVER_CERTIFICATE_RAW) { - value.as_bytes().to_vec() - } else if let Some(b64_certificate) = epconf.get(TLS_SERVER_CERTIFICATE_BASE64) { - base64_decode(b64_certificate)? - } else if let Some(value) = epconf.get(TLS_SERVER_CERTIFICATE_FILE) { - tokio::fs::read(value) - .await - .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? - } else { - bail!("No QUIC CA certificate has been provided."); - }; - let certificates = rustls_pemfile::certs(&mut BufReader::new(f.as_slice())) - .map(|result| { - result - .map_err(|err| zerror!("Invalid QUIC CA certificate file: {}", err)) - .map(|der| Certificate(der.to_vec())) - }) - .collect::, ZError>>()?; - - // Private keys - let f = if let Some(value) = epconf.get(TLS_SERVER_PRIVATE_KEY_RAW) { - value.as_bytes().to_vec() - } else if let Some(b64_key) = epconf.get(TLS_SERVER_PRIVATE_KEY_BASE64) { - base64_decode(b64_key)? - } else if let Some(value) = epconf.get(TLS_SERVER_PRIVATE_KEY_FILE) { - tokio::fs::read(value) - .await - .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? 
- } else { - bail!("No QUIC CA private key has been provided."); - }; - let items: Vec = rustls_pemfile::read_all(&mut BufReader::new(f.as_slice())) - .collect::>() - .map_err(|err| zerror!("Invalid QUIC CA private key file: {}", err))?; - - let private_key = items - .into_iter() - .filter_map(|x| match x { - rustls_pemfile::Item::Pkcs1Key(k) => Some(k.secret_pkcs1_der().to_vec()), - rustls_pemfile::Item::Pkcs8Key(k) => Some(k.secret_pkcs8_der().to_vec()), - rustls_pemfile::Item::Sec1Key(k) => Some(k.secret_sec1_der().to_vec()), - _ => None, - }) - .take(1) - .next() - .ok_or_else(|| zerror!("No QUIC CA private key has been provided.")) - .map(PrivateKey)?; - // Server config - let mut server_crypto = rustls::ServerConfig::builder() - .with_safe_defaults() - .with_no_client_auth() - .with_single_cert(certificates, private_key)?; - server_crypto.alpn_protocols = ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); - let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(server_crypto)); + let mut server_crypto = TlsServerConfig::new(&epconf) + .await + .map_err(|e| zerror!("Cannot create a new QUIC listener on {addr}: {e}"))?; + server_crypto.server_config.alpn_protocols = + ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); + let mut server_config = + quinn::ServerConfig::with_crypto(Arc::new(server_crypto.server_config)); // We do not accept unidireactional streams. 
Arc::get_mut(&mut server_config.transport) diff --git a/io/zenoh-links/zenoh-link-quic/src/utils.rs b/io/zenoh-links/zenoh-link-quic/src/utils.rs new file mode 100644 index 0000000000..40367599cb --- /dev/null +++ b/io/zenoh-links/zenoh-link-quic/src/utils.rs @@ -0,0 +1,509 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::config::*; +use crate::verify::WebPkiVerifierAnyServerName; +use rustls::OwnedTrustAnchor; +use rustls::{ + server::AllowAnyAuthenticatedClient, version::TLS13, Certificate, ClientConfig, PrivateKey, + RootCertStore, ServerConfig, +}; +use rustls_pki_types::{CertificateDer, TrustAnchor}; +use secrecy::ExposeSecret; +use zenoh_link_commons::ConfigurationInspector; +// use rustls_pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}; +use std::fs::File; +use std::io; +use std::net::SocketAddr; +use std::{ + io::{BufReader, Cursor}, + sync::Arc, +}; +use webpki::anchor_from_trusted_cert; +use zenoh_config::Config as ZenohConfig; +use zenoh_protocol::core::endpoint::Config; +use zenoh_protocol::core::endpoint::{self, Address}; +use zenoh_result::{bail, zerror, ZError, ZResult}; + +#[derive(Default, Clone, Copy, Debug)] +pub struct TlsConfigurator; + +impl ConfigurationInspector for TlsConfigurator { + fn inspect_config(&self, config: &ZenohConfig) -> ZResult { + let mut ps: Vec<(&str, &str)> = vec![]; + + let c = config.transport().link().tls(); + + match (c.root_ca_certificate(), c.root_ca_certificate_base64()) { + (Some(_), Some(_)) => { + bail!("Only one between 'root_ca_certificate' and 'root_ca_certificate_base64' can be present!") + 
} + (Some(ca_certificate), None) => { + ps.push((TLS_ROOT_CA_CERTIFICATE_FILE, ca_certificate)); + } + (None, Some(ca_certificate)) => { + ps.push(( + TLS_ROOT_CA_CERTIFICATE_BASE64, + ca_certificate.expose_secret(), + )); + } + _ => {} + } + + match (c.server_private_key(), c.server_private_key_base64()) { + (Some(_), Some(_)) => { + bail!("Only one between 'server_private_key' and 'server_private_key_base64' can be present!") + } + (Some(server_private_key), None) => { + ps.push((TLS_SERVER_PRIVATE_KEY_FILE, server_private_key)); + } + (None, Some(server_private_key)) => { + ps.push(( + TLS_SERVER_PRIVATE_KEY_BASE64, + server_private_key.expose_secret(), + )); + } + _ => {} + } + + match (c.server_certificate(), c.server_certificate_base64()) { + (Some(_), Some(_)) => { + bail!("Only one between 'server_certificate' and 'server_certificate_base64' can be present!") + } + (Some(server_certificate), None) => { + ps.push((TLS_SERVER_CERTIFICATE_FILE, server_certificate)); + } + (None, Some(server_certificate)) => { + ps.push(( + TLS_SERVER_CERTIFICATE_BASE64, + server_certificate.expose_secret(), + )); + } + _ => {} + } + + if let Some(client_auth) = c.client_auth() { + match client_auth { + true => ps.push((TLS_CLIENT_AUTH, "true")), + false => ps.push((TLS_CLIENT_AUTH, "false")), + }; + } + + match (c.client_private_key(), c.client_private_key_base64()) { + (Some(_), Some(_)) => { + bail!("Only one between 'client_private_key' and 'client_private_key_base64' can be present!") + } + (Some(client_private_key), None) => { + ps.push((TLS_CLIENT_PRIVATE_KEY_FILE, client_private_key)); + } + (None, Some(client_private_key)) => { + ps.push(( + TLS_CLIENT_PRIVATE_KEY_BASE64, + client_private_key.expose_secret(), + )); + } + _ => {} + } + + match (c.client_certificate(), c.client_certificate_base64()) { + (Some(_), Some(_)) => { + bail!("Only one between 'client_certificate' and 'client_certificate_base64' can be present!") + } + (Some(client_certificate), None) => { + 
ps.push((TLS_CLIENT_CERTIFICATE_FILE, client_certificate)); + } + (None, Some(client_certificate)) => { + ps.push(( + TLS_CLIENT_CERTIFICATE_BASE64, + client_certificate.expose_secret(), + )); + } + _ => {} + } + + if let Some(server_name_verification) = c.server_name_verification() { + match server_name_verification { + true => ps.push((TLS_SERVER_NAME_VERIFICATION, "true")), + false => ps.push((TLS_SERVER_NAME_VERIFICATION, "false")), + }; + } + + let mut s = String::new(); + endpoint::Parameters::extend(ps.drain(..), &mut s); + + Ok(s) + } +} + +pub(crate) struct TlsServerConfig { + pub(crate) server_config: ServerConfig, +} + +impl TlsServerConfig { + pub async fn new(config: &Config<'_>) -> ZResult { + let tls_server_client_auth: bool = match config.get(TLS_CLIENT_AUTH) { + Some(s) => s + .parse() + .map_err(|_| zerror!("Unknown client auth argument: {}", s))?, + None => false, + }; + let tls_server_private_key = TlsServerConfig::load_tls_private_key(config).await?; + let tls_server_certificate = TlsServerConfig::load_tls_certificate(config).await?; + + let certs: Vec = + rustls_pemfile::certs(&mut Cursor::new(&tls_server_certificate)) + .map_err(|err| zerror!("Error processing server certificate: {err}."))? + .into_iter() + .map(Certificate) + .collect(); + + let mut keys: Vec = + rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_server_private_key)) + .map_err(|err| zerror!("Error processing server key: {err}."))? + .into_iter() + .map(PrivateKey) + .collect(); + + if keys.is_empty() { + keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_server_private_key)) + .map_err(|err| zerror!("Error processing server key: {err}."))? + .into_iter() + .map(PrivateKey) + .collect(); + } + + if keys.is_empty() { + keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_server_private_key)) + .map_err(|err| zerror!("Error processing server key: {err}."))? 
+ .into_iter() + .map(PrivateKey) + .collect(); + } + + if keys.is_empty() { + bail!("No private key found for TLS server."); + } + + let sc = if tls_server_client_auth { + let root_cert_store = load_trust_anchors(config)?.map_or_else( + || { + Err(zerror!( + "Missing root certificates while client authentication is enabled." + )) + }, + Ok, + )?; + let client_auth = AllowAnyAuthenticatedClient::new(root_cert_store); + ServerConfig::builder() + .with_safe_default_cipher_suites() + .with_safe_default_kx_groups() + .with_protocol_versions(&[&TLS13])? + .with_client_cert_verifier(Arc::new(client_auth)) + .with_single_cert(certs, keys.remove(0)) + .map_err(|e| zerror!(e))? + } else { + ServerConfig::builder() + .with_safe_defaults() + .with_no_client_auth() + .with_single_cert(certs, keys.remove(0)) + .map_err(|e| zerror!(e))? + }; + Ok(TlsServerConfig { server_config: sc }) + } + + async fn load_tls_private_key(config: &Config<'_>) -> ZResult> { + load_tls_key( + config, + TLS_SERVER_PRIVATE_KEY_RAW, + TLS_SERVER_PRIVATE_KEY_FILE, + TLS_SERVER_PRIVATE_KEY_BASE64, + ) + .await + } + + async fn load_tls_certificate(config: &Config<'_>) -> ZResult> { + load_tls_certificate( + config, + TLS_SERVER_CERTIFICATE_RAW, + TLS_SERVER_CERTIFICATE_FILE, + TLS_SERVER_CERTIFICATE_BASE64, + ) + .await + } +} + +pub(crate) struct TlsClientConfig { + pub(crate) client_config: ClientConfig, +} + +impl TlsClientConfig { + pub async fn new(config: &Config<'_>) -> ZResult { + let tls_client_server_auth: bool = match config.get(TLS_CLIENT_AUTH) { + Some(s) => s + .parse() + .map_err(|_| zerror!("Unknown client auth argument: {}", s))?, + None => false, + }; + + let tls_server_name_verification: bool = match config.get(TLS_SERVER_NAME_VERIFICATION) { + Some(s) => { + let s: bool = s + .parse() + .map_err(|_| zerror!("Unknown server name verification argument: {}", s))?; + if s { + tracing::warn!("Skipping name verification of servers"); + } + s + } + None => false, + }; + + // Allows mixed 
user-generated CA and webPKI CA + tracing::debug!("Loading default Web PKI certificates."); + let mut root_cert_store = RootCertStore { + roots: webpki_roots::TLS_SERVER_ROOTS + .iter() + .map(|ta| ta.to_owned()) + .map(|ta| { + OwnedTrustAnchor::from_subject_spki_name_constraints( + ta.subject.to_vec(), + ta.subject_public_key_info.to_vec(), + ta.name_constraints.map(|nc| nc.to_vec()), + ) + }) + .collect(), + }; + + if let Some(custom_root_cert) = load_trust_anchors(config)? { + tracing::debug!("Loading user-generated certificates."); + root_cert_store.roots.extend(custom_root_cert.roots); + } + + let cc = if tls_client_server_auth { + tracing::debug!("Loading client authentication key and certificate..."); + let tls_client_private_key = TlsClientConfig::load_tls_private_key(config).await?; + let tls_client_certificate = TlsClientConfig::load_tls_certificate(config).await?; + + let certs: Vec = + rustls_pemfile::certs(&mut Cursor::new(&tls_client_certificate)) + .map_err(|err| zerror!("Error processing client certificate: {err}."))? + .into_iter() + .map(Certificate) + .collect(); + + let mut keys: Vec = + rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_client_private_key)) + .map_err(|err| zerror!("Error processing client key: {err}."))? + .into_iter() + .map(PrivateKey) + .collect(); + + if keys.is_empty() { + keys = + rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_client_private_key)) + .map_err(|err| zerror!("Error processing client key: {err}."))? + .into_iter() + .map(PrivateKey) + .collect(); + } + + if keys.is_empty() { + keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_client_private_key)) + .map_err(|err| zerror!("Error processing client key: {err}."))? 
+ .into_iter() + .map(PrivateKey) + .collect(); + } + + if keys.is_empty() { + bail!("No private key found for TLS client."); + } + + let builder = ClientConfig::builder() + .with_safe_default_cipher_suites() + .with_safe_default_kx_groups() + .with_protocol_versions(&[&TLS13])?; + + if tls_server_name_verification { + builder + .with_root_certificates(root_cert_store) + .with_client_auth_cert(certs, keys.remove(0)) + } else { + builder + .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( + root_cert_store, + ))) + .with_client_auth_cert(certs, keys.remove(0)) + } + .map_err(|e| zerror!("Bad certificate/key: {}", e))? + } else { + let builder = ClientConfig::builder() + .with_safe_default_cipher_suites() + .with_safe_default_kx_groups() + .with_protocol_versions(&[&TLS13])?; + + if tls_server_name_verification { + builder + .with_root_certificates(root_cert_store) + .with_no_client_auth() + } else { + builder + .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( + root_cert_store, + ))) + .with_no_client_auth() + } + }; + Ok(TlsClientConfig { client_config: cc }) + } + + async fn load_tls_private_key(config: &Config<'_>) -> ZResult> { + load_tls_key( + config, + TLS_CLIENT_PRIVATE_KEY_RAW, + TLS_CLIENT_PRIVATE_KEY_FILE, + TLS_CLIENT_PRIVATE_KEY_BASE64, + ) + .await + } + + async fn load_tls_certificate(config: &Config<'_>) -> ZResult> { + load_tls_certificate( + config, + TLS_CLIENT_CERTIFICATE_RAW, + TLS_CLIENT_CERTIFICATE_FILE, + TLS_CLIENT_CERTIFICATE_BASE64, + ) + .await + } +} + +fn process_pem(pem: &mut dyn io::BufRead) -> ZResult> { + let certs: Vec = rustls_pemfile::certs(pem) + .map_err(|err| zerror!("Error processing PEM certificates: {err}."))? 
+ .into_iter() + .map(CertificateDer::from) + .collect(); + + let trust_anchors: Vec = certs + .into_iter() + .map(|cert| { + anchor_from_trusted_cert(&cert) + .map_err(|err| zerror!("Error processing trust anchor: {err}.")) + .map(|trust_anchor| trust_anchor.to_owned()) + }) + .collect::, ZError>>()? + .into_iter() + .map(|ta| { + OwnedTrustAnchor::from_subject_spki_name_constraints( + ta.subject.to_vec(), + ta.subject_public_key_info.to_vec(), + ta.name_constraints.map(|nc| nc.to_vec()), + ) + }) + .collect(); + + Ok(trust_anchors) +} + +async fn load_tls_key( + config: &Config<'_>, + tls_private_key_raw_config_key: &str, + tls_private_key_file_config_key: &str, + tls_private_key_base64_config_key: &str, +) -> ZResult> { + if let Some(value) = config.get(tls_private_key_raw_config_key) { + return Ok(value.as_bytes().to_vec()); + } + + if let Some(b64_key) = config.get(tls_private_key_base64_config_key) { + return base64_decode(b64_key); + } + + if let Some(value) = config.get(tls_private_key_file_config_key) { + return Ok(tokio::fs::read(value) + .await + .map_err(|e| zerror!("Invalid TLS private key file: {}", e))?) 
+ .and_then(|result| { + if result.is_empty() { + Err(zerror!("Empty TLS key.").into()) + } else { + Ok(result) + } + }); + } + Err(zerror!("Missing TLS private key.").into()) +} + +async fn load_tls_certificate( + config: &Config<'_>, + tls_certificate_raw_config_key: &str, + tls_certificate_file_config_key: &str, + tls_certificate_base64_config_key: &str, +) -> ZResult> { + if let Some(value) = config.get(tls_certificate_raw_config_key) { + return Ok(value.as_bytes().to_vec()); + } + + if let Some(b64_certificate) = config.get(tls_certificate_base64_config_key) { + return base64_decode(b64_certificate); + } + + if let Some(value) = config.get(tls_certificate_file_config_key) { + return Ok(tokio::fs::read(value) + .await + .map_err(|e| zerror!("Invalid TLS certificate file: {}", e))?); + } + Err(zerror!("Missing tls certificates.").into()) +} + +fn load_trust_anchors(config: &Config<'_>) -> ZResult> { + let mut root_cert_store = RootCertStore::empty(); + if let Some(value) = config.get(TLS_ROOT_CA_CERTIFICATE_RAW) { + let mut pem = BufReader::new(value.as_bytes()); + let trust_anchors = process_pem(&mut pem)?; + root_cert_store.roots.extend(trust_anchors); + return Ok(Some(root_cert_store)); + } + + if let Some(b64_certificate) = config.get(TLS_ROOT_CA_CERTIFICATE_BASE64) { + let certificate_pem = base64_decode(b64_certificate)?; + let mut pem = BufReader::new(certificate_pem.as_slice()); + let trust_anchors = process_pem(&mut pem)?; + root_cert_store.roots.extend(trust_anchors); + return Ok(Some(root_cert_store)); + } + + if let Some(filename) = config.get(TLS_ROOT_CA_CERTIFICATE_FILE) { + let mut pem = BufReader::new(File::open(filename)?); + let trust_anchors = process_pem(&mut pem)?; + root_cert_store.roots.extend(trust_anchors); + return Ok(Some(root_cert_store)); + } + Ok(None) +} + +pub async fn get_quic_addr(address: &Address<'_>) -> ZResult { + match tokio::net::lookup_host(address.as_str()).await?.next() { + Some(addr) => Ok(addr), + None => 
bail!("Couldn't resolve QUIC locator address: {}", address), + } +} + +pub fn base64_decode(data: &str) -> ZResult> { + use base64::engine::general_purpose; + use base64::Engine; + Ok(general_purpose::STANDARD + .decode(data) + .map_err(|e| zerror!("Unable to perform base64 decoding: {e:?}"))?) +} diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index 11d00d96d8..91fb72787e 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -12,31 +12,31 @@ # ZettaScale Zenoh Team, # [package] -rust-version = { workspace = true } -name = "zenoh-link-tls" -version = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } authors = { workspace = true } -edition = { workspace = true } -license = { workspace = true } categories = { workspace = true } description = "Internal crate for zenoh." +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +name = "zenoh-link-tls" +repository = { workspace = true } +rust-version = { workspace = true } +version = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] async-trait = { workspace = true } base64 = { workspace = true } futures = { workspace = true } -tracing = {workspace = true} rustls = { workspace = true } rustls-pemfile = { workspace = true } rustls-pki-types = { workspace = true } rustls-webpki = { workspace = true } -secrecy = {workspace = true } -tokio = { workspace = true, features = ["io-util", "net", "fs", "sync"] } +secrecy = { workspace = true } +tokio = { workspace = true, features = ["fs", "io-util", "net", "sync"] } tokio-rustls = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } +tracing = { workspace = true } webpki-roots = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } diff --git 
a/io/zenoh-links/zenoh-link-tls/src/lib.rs b/io/zenoh-links/zenoh-link-tls/src/lib.rs index 95d59104b4..b9002cc397 100644 --- a/io/zenoh-links/zenoh-link-tls/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tls/src/lib.rs @@ -18,26 +18,15 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use async_trait::async_trait; -use config::{ - TLS_CLIENT_AUTH, TLS_CLIENT_CERTIFICATE_BASE64, TLS_CLIENT_CERTIFICATE_FILE, - TLS_CLIENT_PRIVATE_KEY_BASE64, TLS_CLIENT_PRIVATE_KEY_FILE, TLS_ROOT_CA_CERTIFICATE_BASE64, - TLS_ROOT_CA_CERTIFICATE_FILE, TLS_SERVER_CERTIFICATE_BASE64, TLS_SERVER_CERTIFICATE_FILE, - TLS_SERVER_NAME_VERIFICATION, TLS_SERVER_PRIVATE_KEY_BASE_64, TLS_SERVER_PRIVATE_KEY_FILE, -}; -use rustls_pki_types::ServerName; -use secrecy::ExposeSecret; -use std::{convert::TryFrom, net::SocketAddr}; -use zenoh_config::Config; use zenoh_core::zconfigurable; -use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{ - endpoint::{self, Address}, - Locator, -}; -use zenoh_result::{bail, zerror, ZResult}; +use zenoh_link_commons::LocatorInspector; +use zenoh_protocol::core::Locator; +use zenoh_result::ZResult; mod unicast; +mod utils; pub use unicast::*; +pub use utils::TlsConfigurator; // Default MTU (TLS PDU) in bytes. 
// NOTE: Since TLS is a byte-stream oriented transport, theoretically it has @@ -60,115 +49,6 @@ impl LocatorInspector for TlsLocatorInspector { Ok(false) } } -#[derive(Default, Clone, Copy, Debug)] -pub struct TlsConfigurator; - -impl ConfigurationInspector for TlsConfigurator { - fn inspect_config(&self, config: &Config) -> ZResult { - let mut ps: Vec<(&str, &str)> = vec![]; - - let c = config.transport().link().tls(); - - match (c.root_ca_certificate(), c.root_ca_certificate_base64()) { - (Some(_), Some(_)) => { - bail!("Only one between 'root_ca_certificate' and 'root_ca_certificate_base64' can be present!") - } - (Some(ca_certificate), None) => { - ps.push((TLS_ROOT_CA_CERTIFICATE_FILE, ca_certificate)); - } - (None, Some(ca_certificate)) => { - ps.push(( - TLS_ROOT_CA_CERTIFICATE_BASE64, - ca_certificate.expose_secret(), - )); - } - _ => {} - } - - match (c.server_private_key(), c.server_private_key_base64()) { - (Some(_), Some(_)) => { - bail!("Only one between 'server_private_key' and 'server_private_key_base64' can be present!") - } - (Some(server_private_key), None) => { - ps.push((TLS_SERVER_PRIVATE_KEY_FILE, server_private_key)); - } - (None, Some(server_private_key)) => { - ps.push(( - TLS_SERVER_PRIVATE_KEY_BASE_64, - server_private_key.expose_secret(), - )); - } - _ => {} - } - - match (c.server_certificate(), c.server_certificate_base64()) { - (Some(_), Some(_)) => { - bail!("Only one between 'server_certificate' and 'server_certificate_base64' can be present!") - } - (Some(server_certificate), None) => { - ps.push((TLS_SERVER_CERTIFICATE_FILE, server_certificate)); - } - (None, Some(server_certificate)) => { - ps.push(( - TLS_SERVER_CERTIFICATE_BASE64, - server_certificate.expose_secret(), - )); - } - _ => {} - } - - if let Some(client_auth) = c.client_auth() { - match client_auth { - true => ps.push((TLS_CLIENT_AUTH, "true")), - false => ps.push((TLS_CLIENT_AUTH, "false")), - }; - } - - match (c.client_private_key(), c.client_private_key_base64()) 
{ - (Some(_), Some(_)) => { - bail!("Only one between 'client_private_key' and 'client_private_key_base64' can be present!") - } - (Some(client_private_key), None) => { - ps.push((TLS_CLIENT_PRIVATE_KEY_FILE, client_private_key)); - } - (None, Some(client_private_key)) => { - ps.push(( - TLS_CLIENT_PRIVATE_KEY_BASE64, - client_private_key.expose_secret(), - )); - } - _ => {} - } - - match (c.client_certificate(), c.client_certificate_base64()) { - (Some(_), Some(_)) => { - bail!("Only one between 'client_certificate' and 'client_certificate_base64' can be present!") - } - (Some(client_certificate), None) => { - ps.push((TLS_CLIENT_CERTIFICATE_FILE, client_certificate)); - } - (None, Some(client_certificate)) => { - ps.push(( - TLS_CLIENT_CERTIFICATE_BASE64, - client_certificate.expose_secret(), - )); - } - _ => {} - } - - if let Some(server_name_verification) = c.server_name_verification() { - match server_name_verification { - true => ps.push((TLS_SERVER_NAME_VERIFICATION, "true")), - false => ps.push((TLS_SERVER_NAME_VERIFICATION, "false")), - }; - } - - let mut s = String::new(); - endpoint::Parameters::extend(ps.drain(..), &mut s); - - Ok(s) - } -} zconfigurable! { // Default MTU (TLS PDU) in bytes. @@ -208,30 +88,3 @@ pub mod config { pub const TLS_SERVER_NAME_VERIFICATION: &str = "server_name_verification"; } - -pub async fn get_tls_addr(address: &Address<'_>) -> ZResult { - match tokio::net::lookup_host(address.as_str()).await?.next() { - Some(addr) => Ok(addr), - None => bail!("Couldn't resolve TLS locator address: {}", address), - } -} - -pub fn get_tls_host<'a>(address: &'a Address<'a>) -> ZResult<&'a str> { - address - .as_str() - .split(':') - .next() - .ok_or_else(|| zerror!("Invalid TLS address").into()) -} - -pub fn get_tls_server_name<'a>(address: &'a Address<'a>) -> ZResult> { - Ok(ServerName::try_from(get_tls_host(address)?).map_err(|e| zerror!(e))?) 
-} - -pub fn base64_decode(data: &str) -> ZResult> { - use base64::engine::general_purpose; - use base64::Engine; - Ok(general_purpose::STANDARD - .decode(data) - .map_err(|e| zerror!("Unable to perform base64 decoding: {e:?}"))?) -} diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 9eec2feb2a..b12608354e 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -12,39 +12,29 @@ // ZettaScale Zenoh Team, // use crate::{ - base64_decode, config::*, get_tls_addr, get_tls_host, get_tls_server_name, + utils::{get_tls_addr, get_tls_host, get_tls_server_name, TlsClientConfig, TlsServerConfig}, TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, }; + use async_trait::async_trait; -use rustls::{ - pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}, - server::WebPkiClientVerifier, - version::TLS13, - ClientConfig, RootCertStore, ServerConfig, -}; +use std::cell::UnsafeCell; use std::convert::TryInto; use std::fmt; -use std::fs::File; -use std::io::{BufReader, Cursor}; use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; -use std::{cell::UnsafeCell, io}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::{TcpListener, TcpStream}; use tokio::sync::Mutex as AsyncMutex; use tokio_rustls::{TlsAcceptor, TlsConnector, TlsStream}; use tokio_util::sync::CancellationToken; -use webpki::anchor_from_trusted_cert; use zenoh_core::zasynclock; -use zenoh_link_commons::tls::WebPkiVerifierAnyServerName; use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; -use zenoh_protocol::core::endpoint::Config; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_result::{bail, zerror, ZError, ZResult}; +use zenoh_result::{zerror, ZResult}; pub struct LinkUnicastTls { // The underlying socket as returned from the 
async-rustls library @@ -418,311 +408,3 @@ async fn accept_task( Ok(()) } - -struct TlsServerConfig { - server_config: ServerConfig, -} - -impl TlsServerConfig { - pub async fn new(config: &Config<'_>) -> ZResult { - let tls_server_client_auth: bool = match config.get(TLS_CLIENT_AUTH) { - Some(s) => s - .parse() - .map_err(|_| zerror!("Unknown client auth argument: {}", s))?, - None => false, - }; - let tls_server_private_key = TlsServerConfig::load_tls_private_key(config).await?; - let tls_server_certificate = TlsServerConfig::load_tls_certificate(config).await?; - - let certs: Vec = - rustls_pemfile::certs(&mut Cursor::new(&tls_server_certificate)) - .collect::>() - .map_err(|err| zerror!("Error processing server certificate: {err}."))?; - - let mut keys: Vec = - rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map(|x| x.map(PrivateKeyDer::from)) - .collect::>() - .map_err(|err| zerror!("Error processing server key: {err}."))?; - - if keys.is_empty() { - keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map(|x| x.map(PrivateKeyDer::from)) - .collect::>() - .map_err(|err| zerror!("Error processing server key: {err}."))?; - } - - if keys.is_empty() { - keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map(|x| x.map(PrivateKeyDer::from)) - .collect::>() - .map_err(|err| zerror!("Error processing server key: {err}."))?; - } - - if keys.is_empty() { - bail!("No private key found for TLS server."); - } - - let sc = if tls_server_client_auth { - let root_cert_store = load_trust_anchors(config)?.map_or_else( - || { - Err(zerror!( - "Missing root certificates while client authentication is enabled." - )) - }, - Ok, - )?; - let client_auth = WebPkiClientVerifier::builder(root_cert_store.into()).build()?; - ServerConfig::builder_with_protocol_versions(&[&TLS13]) - .with_client_cert_verifier(client_auth) - .with_single_cert(certs, keys.remove(0)) - .map_err(|e| zerror!(e))? 
- } else { - ServerConfig::builder() - .with_no_client_auth() - .with_single_cert(certs, keys.remove(0)) - .map_err(|e| zerror!(e))? - }; - Ok(TlsServerConfig { server_config: sc }) - } - - async fn load_tls_private_key(config: &Config<'_>) -> ZResult> { - load_tls_key( - config, - TLS_SERVER_PRIVATE_KEY_RAW, - TLS_SERVER_PRIVATE_KEY_FILE, - TLS_SERVER_PRIVATE_KEY_BASE_64, - ) - .await - } - - async fn load_tls_certificate(config: &Config<'_>) -> ZResult> { - load_tls_certificate( - config, - TLS_SERVER_CERTIFICATE_RAW, - TLS_SERVER_CERTIFICATE_FILE, - TLS_SERVER_CERTIFICATE_BASE64, - ) - .await - } -} - -struct TlsClientConfig { - client_config: ClientConfig, -} - -impl TlsClientConfig { - pub async fn new(config: &Config<'_>) -> ZResult { - let tls_client_server_auth: bool = match config.get(TLS_CLIENT_AUTH) { - Some(s) => s - .parse() - .map_err(|_| zerror!("Unknown client auth argument: {}", s))?, - None => false, - }; - - let tls_server_name_verification: bool = match config.get(TLS_SERVER_NAME_VERIFICATION) { - Some(s) => { - let s: bool = s - .parse() - .map_err(|_| zerror!("Unknown server name verification argument: {}", s))?; - if s { - tracing::warn!("Skipping name verification of servers"); - } - s - } - None => false, - }; - - // Allows mixed user-generated CA and webPKI CA - tracing::debug!("Loading default Web PKI certificates."); - let mut root_cert_store = RootCertStore { - roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(), - }; - - if let Some(custom_root_cert) = load_trust_anchors(config)? 
{ - tracing::debug!("Loading user-generated certificates."); - root_cert_store.extend(custom_root_cert.roots); - } - - let cc = if tls_client_server_auth { - tracing::debug!("Loading client authentication key and certificate..."); - let tls_client_private_key = TlsClientConfig::load_tls_private_key(config).await?; - let tls_client_certificate = TlsClientConfig::load_tls_certificate(config).await?; - - let certs: Vec = - rustls_pemfile::certs(&mut Cursor::new(&tls_client_certificate)) - .collect::>() - .map_err(|err| zerror!("Error processing client certificate: {err}."))?; - - let mut keys: Vec = - rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map(|x| x.map(PrivateKeyDer::from)) - .collect::>() - .map_err(|err| zerror!("Error processing client key: {err}."))?; - - if keys.is_empty() { - keys = - rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map(|x| x.map(PrivateKeyDer::from)) - .collect::>() - .map_err(|err| zerror!("Error processing client key: {err}."))?; - } - - if keys.is_empty() { - keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map(|x| x.map(PrivateKeyDer::from)) - .collect::>() - .map_err(|err| zerror!("Error processing client key: {err}."))?; - } - - if keys.is_empty() { - bail!("No private key found for TLS client."); - } - - let builder = ClientConfig::builder_with_protocol_versions(&[&TLS13]); - - if tls_server_name_verification { - builder - .with_root_certificates(root_cert_store) - .with_client_auth_cert(certs, keys.remove(0)) - } else { - builder - .dangerous() - .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( - root_cert_store, - ))) - .with_client_auth_cert(certs, keys.remove(0)) - } - .map_err(|e| zerror!("Bad certificate/key: {}", e))? 
- } else { - let builder = ClientConfig::builder(); - if tls_server_name_verification { - builder - .with_root_certificates(root_cert_store) - .with_no_client_auth() - } else { - builder - .dangerous() - .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( - root_cert_store, - ))) - .with_no_client_auth() - } - }; - Ok(TlsClientConfig { client_config: cc }) - } - - async fn load_tls_private_key(config: &Config<'_>) -> ZResult> { - load_tls_key( - config, - TLS_CLIENT_PRIVATE_KEY_RAW, - TLS_CLIENT_PRIVATE_KEY_FILE, - TLS_CLIENT_PRIVATE_KEY_BASE64, - ) - .await - } - - async fn load_tls_certificate(config: &Config<'_>) -> ZResult> { - load_tls_certificate( - config, - TLS_CLIENT_CERTIFICATE_RAW, - TLS_CLIENT_CERTIFICATE_FILE, - TLS_CLIENT_CERTIFICATE_BASE64, - ) - .await - } -} - -async fn load_tls_key( - config: &Config<'_>, - tls_private_key_raw_config_key: &str, - tls_private_key_file_config_key: &str, - tls_private_key_base64_config_key: &str, -) -> ZResult> { - if let Some(value) = config.get(tls_private_key_raw_config_key) { - return Ok(value.as_bytes().to_vec()); - } - - if let Some(b64_key) = config.get(tls_private_key_base64_config_key) { - return base64_decode(b64_key); - } - - if let Some(value) = config.get(tls_private_key_file_config_key) { - return Ok(tokio::fs::read(value) - .await - .map_err(|e| zerror!("Invalid TLS private key file: {}", e))?) 
- .and_then(|result| { - if result.is_empty() { - Err(zerror!("Empty TLS key.").into()) - } else { - Ok(result) - } - }); - } - Err(zerror!("Missing TLS private key.").into()) -} - -async fn load_tls_certificate( - config: &Config<'_>, - tls_certificate_raw_config_key: &str, - tls_certificate_file_config_key: &str, - tls_certificate_base64_config_key: &str, -) -> ZResult> { - if let Some(value) = config.get(tls_certificate_raw_config_key) { - return Ok(value.as_bytes().to_vec()); - } - - if let Some(b64_certificate) = config.get(tls_certificate_base64_config_key) { - return base64_decode(b64_certificate); - } - - if let Some(value) = config.get(tls_certificate_file_config_key) { - return Ok(tokio::fs::read(value) - .await - .map_err(|e| zerror!("Invalid TLS certificate file: {}", e))?); - } - Err(zerror!("Missing tls certificates.").into()) -} - -fn load_trust_anchors(config: &Config<'_>) -> ZResult> { - let mut root_cert_store = RootCertStore::empty(); - if let Some(value) = config.get(TLS_ROOT_CA_CERTIFICATE_RAW) { - let mut pem = BufReader::new(value.as_bytes()); - let trust_anchors = process_pem(&mut pem)?; - root_cert_store.extend(trust_anchors); - return Ok(Some(root_cert_store)); - } - - if let Some(b64_certificate) = config.get(TLS_ROOT_CA_CERTIFICATE_BASE64) { - let certificate_pem = base64_decode(b64_certificate)?; - let mut pem = BufReader::new(certificate_pem.as_slice()); - let trust_anchors = process_pem(&mut pem)?; - root_cert_store.extend(trust_anchors); - return Ok(Some(root_cert_store)); - } - - if let Some(filename) = config.get(TLS_ROOT_CA_CERTIFICATE_FILE) { - let mut pem = BufReader::new(File::open(filename)?); - let trust_anchors = process_pem(&mut pem)?; - root_cert_store.extend(trust_anchors); - return Ok(Some(root_cert_store)); - } - Ok(None) -} - -fn process_pem(pem: &mut dyn io::BufRead) -> ZResult>> { - let certs: Vec = rustls_pemfile::certs(pem) - .map(|result| result.map_err(|err| zerror!("Error processing PEM certificates: {err}."))) 
- .collect::, ZError>>()?; - - let trust_anchors: Vec = certs - .into_iter() - .map(|cert| { - anchor_from_trusted_cert(&cert) - .map_err(|err| zerror!("Error processing trust anchor: {err}.")) - .map(|trust_anchor| trust_anchor.to_owned()) - }) - .collect::, ZError>>()?; - - Ok(trust_anchors) -} diff --git a/io/zenoh-links/zenoh-link-tls/src/utils.rs b/io/zenoh-links/zenoh-link-tls/src/utils.rs new file mode 100644 index 0000000000..f62757523c --- /dev/null +++ b/io/zenoh-links/zenoh-link-tls/src/utils.rs @@ -0,0 +1,480 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::config::*; +use rustls::{ + pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}, + server::WebPkiClientVerifier, + version::TLS13, + ClientConfig, RootCertStore, ServerConfig, +}; +use rustls_pki_types::ServerName; +use secrecy::ExposeSecret; +use std::fs::File; +use std::io; +use std::{convert::TryFrom, net::SocketAddr}; +use std::{ + io::{BufReader, Cursor}, + sync::Arc, +}; +use webpki::anchor_from_trusted_cert; +use zenoh_config::Config as ZenohConfig; +use zenoh_link_commons::{tls::WebPkiVerifierAnyServerName, ConfigurationInspector}; +use zenoh_protocol::core::endpoint::Config; +use zenoh_protocol::core::endpoint::{self, Address}; +use zenoh_result::{bail, zerror, ZError, ZResult}; + +#[derive(Default, Clone, Copy, Debug)] +pub struct TlsConfigurator; + +impl ConfigurationInspector for TlsConfigurator { + fn inspect_config(&self, config: &ZenohConfig) -> ZResult { + let mut ps: Vec<(&str, &str)> = vec![]; + + let c = config.transport().link().tls(); + + match 
(c.root_ca_certificate(), c.root_ca_certificate_base64()) { + (Some(_), Some(_)) => { + bail!("Only one between 'root_ca_certificate' and 'root_ca_certificate_base64' can be present!") + } + (Some(ca_certificate), None) => { + ps.push((TLS_ROOT_CA_CERTIFICATE_FILE, ca_certificate)); + } + (None, Some(ca_certificate)) => { + ps.push(( + TLS_ROOT_CA_CERTIFICATE_BASE64, + ca_certificate.expose_secret(), + )); + } + _ => {} + } + + match (c.server_private_key(), c.server_private_key_base64()) { + (Some(_), Some(_)) => { + bail!("Only one between 'server_private_key' and 'server_private_key_base64' can be present!") + } + (Some(server_private_key), None) => { + ps.push((TLS_SERVER_PRIVATE_KEY_FILE, server_private_key)); + } + (None, Some(server_private_key)) => { + ps.push(( + TLS_SERVER_PRIVATE_KEY_BASE_64, + server_private_key.expose_secret(), + )); + } + _ => {} + } + + match (c.server_certificate(), c.server_certificate_base64()) { + (Some(_), Some(_)) => { + bail!("Only one between 'server_certificate' and 'server_certificate_base64' can be present!") + } + (Some(server_certificate), None) => { + ps.push((TLS_SERVER_CERTIFICATE_FILE, server_certificate)); + } + (None, Some(server_certificate)) => { + ps.push(( + TLS_SERVER_CERTIFICATE_BASE64, + server_certificate.expose_secret(), + )); + } + _ => {} + } + + if let Some(client_auth) = c.client_auth() { + match client_auth { + true => ps.push((TLS_CLIENT_AUTH, "true")), + false => ps.push((TLS_CLIENT_AUTH, "false")), + }; + } + + match (c.client_private_key(), c.client_private_key_base64()) { + (Some(_), Some(_)) => { + bail!("Only one between 'client_private_key' and 'client_private_key_base64' can be present!") + } + (Some(client_private_key), None) => { + ps.push((TLS_CLIENT_PRIVATE_KEY_FILE, client_private_key)); + } + (None, Some(client_private_key)) => { + ps.push(( + TLS_CLIENT_PRIVATE_KEY_BASE64, + client_private_key.expose_secret(), + )); + } + _ => {} + } + + match (c.client_certificate(), 
c.client_certificate_base64()) { + (Some(_), Some(_)) => { + bail!("Only one between 'client_certificate' and 'client_certificate_base64' can be present!") + } + (Some(client_certificate), None) => { + ps.push((TLS_CLIENT_CERTIFICATE_FILE, client_certificate)); + } + (None, Some(client_certificate)) => { + ps.push(( + TLS_CLIENT_CERTIFICATE_BASE64, + client_certificate.expose_secret(), + )); + } + _ => {} + } + + if let Some(server_name_verification) = c.server_name_verification() { + match server_name_verification { + true => ps.push((TLS_SERVER_NAME_VERIFICATION, "true")), + false => ps.push((TLS_SERVER_NAME_VERIFICATION, "false")), + }; + } + + let mut s = String::new(); + endpoint::Parameters::extend(ps.drain(..), &mut s); + + Ok(s) + } +} + +pub(crate) struct TlsServerConfig { + pub(crate) server_config: ServerConfig, +} + +impl TlsServerConfig { + pub async fn new(config: &Config<'_>) -> ZResult { + let tls_server_client_auth: bool = match config.get(TLS_CLIENT_AUTH) { + Some(s) => s + .parse() + .map_err(|_| zerror!("Unknown client auth argument: {}", s))?, + None => false, + }; + let tls_server_private_key = TlsServerConfig::load_tls_private_key(config).await?; + let tls_server_certificate = TlsServerConfig::load_tls_certificate(config).await?; + + let certs: Vec = + rustls_pemfile::certs(&mut Cursor::new(&tls_server_certificate)) + .collect::>() + .map_err(|err| zerror!("Error processing server certificate: {err}."))?; + + let mut keys: Vec = + rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_server_private_key)) + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; + + if keys.is_empty() { + keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_server_private_key)) + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; + } + + if keys.is_empty() { + keys = rustls_pemfile::ec_private_keys(&mut 
Cursor::new(&tls_server_private_key)) + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; + } + + if keys.is_empty() { + bail!("No private key found for TLS server."); + } + + let sc = if tls_server_client_auth { + let root_cert_store = load_trust_anchors(config)?.map_or_else( + || { + Err(zerror!( + "Missing root certificates while client authentication is enabled." + )) + }, + Ok, + )?; + let client_auth = WebPkiClientVerifier::builder(root_cert_store.into()).build()?; + ServerConfig::builder_with_protocol_versions(&[&TLS13]) + .with_client_cert_verifier(client_auth) + .with_single_cert(certs, keys.remove(0)) + .map_err(|e| zerror!(e))? + } else { + ServerConfig::builder() + .with_no_client_auth() + .with_single_cert(certs, keys.remove(0)) + .map_err(|e| zerror!(e))? + }; + Ok(TlsServerConfig { server_config: sc }) + } + + async fn load_tls_private_key(config: &Config<'_>) -> ZResult> { + load_tls_key( + config, + TLS_SERVER_PRIVATE_KEY_RAW, + TLS_SERVER_PRIVATE_KEY_FILE, + TLS_SERVER_PRIVATE_KEY_BASE_64, + ) + .await + } + + async fn load_tls_certificate(config: &Config<'_>) -> ZResult> { + load_tls_certificate( + config, + TLS_SERVER_CERTIFICATE_RAW, + TLS_SERVER_CERTIFICATE_FILE, + TLS_SERVER_CERTIFICATE_BASE64, + ) + .await + } +} + +pub(crate) struct TlsClientConfig { + pub(crate) client_config: ClientConfig, +} + +impl TlsClientConfig { + pub async fn new(config: &Config<'_>) -> ZResult { + let tls_client_server_auth: bool = match config.get(TLS_CLIENT_AUTH) { + Some(s) => s + .parse() + .map_err(|_| zerror!("Unknown client auth argument: {}", s))?, + None => false, + }; + + let tls_server_name_verification: bool = match config.get(TLS_SERVER_NAME_VERIFICATION) { + Some(s) => { + let s: bool = s + .parse() + .map_err(|_| zerror!("Unknown server name verification argument: {}", s))?; + if s { + tracing::warn!("Skipping name verification of servers"); + } + s + } + None => false, + }; + + 
// Allows mixed user-generated CA and webPKI CA + tracing::debug!("Loading default Web PKI certificates."); + let mut root_cert_store = RootCertStore { + roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(), + }; + + if let Some(custom_root_cert) = load_trust_anchors(config)? { + tracing::debug!("Loading user-generated certificates."); + root_cert_store.extend(custom_root_cert.roots); + } + + let cc = if tls_client_server_auth { + tracing::debug!("Loading client authentication key and certificate..."); + let tls_client_private_key = TlsClientConfig::load_tls_private_key(config).await?; + let tls_client_certificate = TlsClientConfig::load_tls_certificate(config).await?; + + let certs: Vec = + rustls_pemfile::certs(&mut Cursor::new(&tls_client_certificate)) + .collect::>() + .map_err(|err| zerror!("Error processing client certificate: {err}."))?; + + let mut keys: Vec = + rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_client_private_key)) + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; + + if keys.is_empty() { + keys = + rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_client_private_key)) + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; + } + + if keys.is_empty() { + keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_client_private_key)) + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; + } + + if keys.is_empty() { + bail!("No private key found for TLS client."); + } + + let builder = ClientConfig::builder_with_protocol_versions(&[&TLS13]); + + if tls_server_name_verification { + builder + .with_root_certificates(root_cert_store) + .with_client_auth_cert(certs, keys.remove(0)) + } else { + builder + .dangerous() + .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( + root_cert_store, + ))) + 
.with_client_auth_cert(certs, keys.remove(0)) + } + .map_err(|e| zerror!("Bad certificate/key: {}", e))? + } else { + let builder = ClientConfig::builder(); + if tls_server_name_verification { + builder + .with_root_certificates(root_cert_store) + .with_no_client_auth() + } else { + builder + .dangerous() + .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( + root_cert_store, + ))) + .with_no_client_auth() + } + }; + Ok(TlsClientConfig { client_config: cc }) + } + + async fn load_tls_private_key(config: &Config<'_>) -> ZResult> { + load_tls_key( + config, + TLS_CLIENT_PRIVATE_KEY_RAW, + TLS_CLIENT_PRIVATE_KEY_FILE, + TLS_CLIENT_PRIVATE_KEY_BASE64, + ) + .await + } + + async fn load_tls_certificate(config: &Config<'_>) -> ZResult> { + load_tls_certificate( + config, + TLS_CLIENT_CERTIFICATE_RAW, + TLS_CLIENT_CERTIFICATE_FILE, + TLS_CLIENT_CERTIFICATE_BASE64, + ) + .await + } +} + +fn process_pem(pem: &mut dyn io::BufRead) -> ZResult>> { + let certs: Vec = rustls_pemfile::certs(pem) + .map(|result| result.map_err(|err| zerror!("Error processing PEM certificates: {err}."))) + .collect::, ZError>>()?; + + let trust_anchors: Vec = certs + .into_iter() + .map(|cert| { + anchor_from_trusted_cert(&cert) + .map_err(|err| zerror!("Error processing trust anchor: {err}.")) + .map(|trust_anchor| trust_anchor.to_owned()) + }) + .collect::, ZError>>()?; + + Ok(trust_anchors) +} + +async fn load_tls_key( + config: &Config<'_>, + tls_private_key_raw_config_key: &str, + tls_private_key_file_config_key: &str, + tls_private_key_base64_config_key: &str, +) -> ZResult> { + if let Some(value) = config.get(tls_private_key_raw_config_key) { + return Ok(value.as_bytes().to_vec()); + } + + if let Some(b64_key) = config.get(tls_private_key_base64_config_key) { + return base64_decode(b64_key); + } + + if let Some(value) = config.get(tls_private_key_file_config_key) { + return Ok(tokio::fs::read(value) + .await + .map_err(|e| zerror!("Invalid TLS private key file: {}", 
e))?) + .and_then(|result| { + if result.is_empty() { + Err(zerror!("Empty TLS key.").into()) + } else { + Ok(result) + } + }); + } + Err(zerror!("Missing TLS private key.").into()) +} + +async fn load_tls_certificate( + config: &Config<'_>, + tls_certificate_raw_config_key: &str, + tls_certificate_file_config_key: &str, + tls_certificate_base64_config_key: &str, +) -> ZResult> { + if let Some(value) = config.get(tls_certificate_raw_config_key) { + return Ok(value.as_bytes().to_vec()); + } + + if let Some(b64_certificate) = config.get(tls_certificate_base64_config_key) { + return base64_decode(b64_certificate); + } + + if let Some(value) = config.get(tls_certificate_file_config_key) { + return Ok(tokio::fs::read(value) + .await + .map_err(|e| zerror!("Invalid TLS certificate file: {}", e))?); + } + Err(zerror!("Missing tls certificates.").into()) +} + +fn load_trust_anchors(config: &Config<'_>) -> ZResult> { + let mut root_cert_store = RootCertStore::empty(); + if let Some(value) = config.get(TLS_ROOT_CA_CERTIFICATE_RAW) { + let mut pem = BufReader::new(value.as_bytes()); + let trust_anchors = process_pem(&mut pem)?; + root_cert_store.extend(trust_anchors); + return Ok(Some(root_cert_store)); + } + + if let Some(b64_certificate) = config.get(TLS_ROOT_CA_CERTIFICATE_BASE64) { + let certificate_pem = base64_decode(b64_certificate)?; + let mut pem = BufReader::new(certificate_pem.as_slice()); + let trust_anchors = process_pem(&mut pem)?; + root_cert_store.extend(trust_anchors); + return Ok(Some(root_cert_store)); + } + + if let Some(filename) = config.get(TLS_ROOT_CA_CERTIFICATE_FILE) { + let mut pem = BufReader::new(File::open(filename)?); + let trust_anchors = process_pem(&mut pem)?; + root_cert_store.extend(trust_anchors); + return Ok(Some(root_cert_store)); + } + Ok(None) +} + +pub fn base64_decode(data: &str) -> ZResult> { + use base64::engine::general_purpose; + use base64::Engine; + Ok(general_purpose::STANDARD + .decode(data) + .map_err(|e| zerror!("Unable to 
perform base64 decoding: {e:?}"))?) +} + +pub async fn get_tls_addr(address: &Address<'_>) -> ZResult { + match tokio::net::lookup_host(address.as_str()).await?.next() { + Some(addr) => Ok(addr), + None => bail!("Couldn't resolve TLS locator address: {}", address), + } +} + +pub fn get_tls_host<'a>(address: &'a Address<'a>) -> ZResult<&'a str> { + address + .as_str() + .split(':') + .next() + .ok_or_else(|| zerror!("Invalid TLS address").into()) +} + +pub fn get_tls_server_name<'a>(address: &'a Address<'a>) -> ZResult> { + Ok(ServerName::try_from(get_tls_host(address)?).map_err(|e| zerror!(e))?) +} diff --git a/io/zenoh-transport/Cargo.toml b/io/zenoh-transport/Cargo.toml index b3a299e8be..9f6594761e 100644 --- a/io/zenoh-transport/Cargo.toml +++ b/io/zenoh-transport/Cargo.toml @@ -92,3 +92,4 @@ futures-util = { workspace = true } zenoh-util = {workspace = true } zenoh-protocol = { workspace = true, features = ["test"] } futures = { workspace = true } +zenoh-link-commons = { workspace = true } diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index af1dedfbce..33cfbceb17 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -69,7 +69,10 @@ use zenoh_transport::{ // the key and certificate brought in by the client. Similarly the server's certificate authority // will validate the key and certificate brought in by the server in front of the client. 
// -#[cfg(all(feature = "transport_tls", target_family = "unix"))] +#[cfg(all( + any(feature = "transport_tls", feature = "transport_quic"), + target_family = "unix" +))] const CLIENT_KEY: &str = "-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy @@ -98,7 +101,10 @@ tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== -----END RSA PRIVATE KEY-----"; -#[cfg(all(feature = "transport_tls", target_family = "unix"))] +#[cfg(all( + any(feature = "transport_tls", feature = "transport_quic"), + target_family = "unix" +))] const CLIENT_CERT: &str = "-----BEGIN CERTIFICATE----- MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw @@ -120,7 +126,10 @@ p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ abY= -----END CERTIFICATE-----"; -#[cfg(all(feature = "transport_tls", target_family = "unix"))] +#[cfg(all( + any(feature = "transport_tls", feature = "transport_quic"), + target_family = "unix" +))] const CLIENT_CA: &str = "-----BEGIN CERTIFICATE----- MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw @@ -1298,6 +1307,225 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { assert!(result.is_err()); } +#[cfg(all(feature = "transport_quic", target_family = "unix"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_quic_only_mutual_success() { + use zenoh_link::quic::config::*; + + zenoh_util::try_init_log_from_env(); + + let client_auth = "true"; + + // Define the locator + let mut client_endpoint: EndPoint = ("quic/localhost:10461").parse().unwrap(); + client_endpoint + .config_mut() + .extend( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), + (TLS_CLIENT_CERTIFICATE_RAW, 
CLIENT_CERT), + (TLS_CLIENT_PRIVATE_KEY_RAW, CLIENT_KEY), + (TLS_CLIENT_AUTH, client_auth), + ] + .iter() + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + ) + .unwrap(); + + // Define the locator + let mut server_endpoint: EndPoint = ("quic/localhost:10461").parse().unwrap(); + server_endpoint + .config_mut() + .extend( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), + (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), + (TLS_SERVER_PRIVATE_KEY_RAW, SERVER_KEY), + (TLS_CLIENT_AUTH, client_auth), + ] + .iter() + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + ) + .unwrap(); + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::default(), + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + let client_endpoints = vec![client_endpoint]; + let server_endpoints = vec![server_endpoint]; + run_with_universal_transport( + &client_endpoints, + &server_endpoints, + &channel, + &MSG_SIZE_ALL, + ) + .await; +} + +#[cfg(all(feature = "transport_quic", target_family = "unix"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_quic_only_mutual_no_client_certs_failure() { + use std::vec; + use zenoh_link::quic::config::*; + + zenoh_util::try_init_log_from_env(); + + // Define the locator + let mut client_endpoint: EndPoint = ("quic/localhost:10462").parse().unwrap(); + client_endpoint + .config_mut() + .extend( + [(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)] + .iter() + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + ) + .unwrap(); + + // Define the locator + let mut server_endpoint: EndPoint = ("quic/localhost:10462").parse().unwrap(); + server_endpoint + .config_mut() + .extend( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, 
CLIENT_CA), + (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), + (TLS_SERVER_PRIVATE_KEY_RAW, SERVER_KEY), + (TLS_CLIENT_AUTH, "true"), + ] + .iter() + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + ) + .unwrap(); + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::default(), + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + let client_endpoints = vec![client_endpoint]; + let server_endpoints = vec![server_endpoint]; + let result = std::panic::catch_unwind(|| { + tokio::runtime::Runtime::new() + .unwrap() + .block_on(run_with_universal_transport( + &client_endpoints, + &server_endpoints, + &channel, + &MSG_SIZE_ALL, + )) + }); + assert!(result.is_err()); +} + +#[cfg(all(feature = "transport_quic", target_family = "unix"))] +#[test] +fn transport_unicast_quic_only_mutual_wrong_client_certs_failure() { + use zenoh_link::quic::config::*; + + zenoh_util::try_init_log_from_env(); + + let client_auth = "true"; + + // Define the locator + let mut client_endpoint: EndPoint = ("quic/localhost:10463").parse().unwrap(); + client_endpoint + .config_mut() + .extend( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), + // Using the SERVER_CERT and SERVER_KEY in the client to simulate the case the client has + // wrong certificates and keys. The SERVER_CA (cetificate authority) will not recognize + // these certificates as it is expecting to receive CLIENT_CERT and CLIENT_KEY from the + // client. 
+ (TLS_CLIENT_CERTIFICATE_RAW, SERVER_CERT), + (TLS_CLIENT_PRIVATE_KEY_RAW, SERVER_KEY), + (TLS_CLIENT_AUTH, client_auth), + ] + .iter() + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + ) + .unwrap(); + + // Define the locator + let mut server_endpoint: EndPoint = ("quic/localhost:10463").parse().unwrap(); + server_endpoint + .config_mut() + .extend( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), + (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), + (TLS_SERVER_PRIVATE_KEY_RAW, SERVER_KEY), + (TLS_CLIENT_AUTH, client_auth), + ] + .iter() + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + ) + .unwrap(); + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::default(), + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + let client_endpoints = vec![client_endpoint]; + let server_endpoints = vec![server_endpoint]; + let result = std::panic::catch_unwind(|| { + tokio::runtime::Runtime::new() + .unwrap() + .block_on(run_with_universal_transport( + &client_endpoints, + &server_endpoints, + &channel, + &MSG_SIZE_ALL, + )) + }); + assert!(result.is_err()); +} + #[test] fn transport_unicast_qos_and_lowlatency_failure() { struct TestPeer; From cb5083947542d35e4e4142d9c5f977302102251e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 25 Apr 2024 15:53:59 +0200 Subject: [PATCH 5/9] Fix gossip deadlock (#976) --- .../net/routing/hat/linkstate_peer/network.rs | 50 ++++++++++--------- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 23 +++++---- zenoh/src/net/routing/hat/router/network.rs | 48 ++++++++++-------- 3 files changed, 64 insertions(+), 57 deletions(-) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs 
b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index d5f37e3733..a4a6841644 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -486,26 +486,25 @@ impl Network { ); } - if !self.autoconnect.is_empty() { + if !self.autoconnect.is_empty() && self.autoconnect.matches(whatami) { // Connect discovered peers - if zenoh_runtime::ZRuntime::Net - .block_in_place( - strong_runtime.manager().get_transport_unicast(&zid), - ) - .is_none() - && self.autoconnect.matches(whatami) - { - if let Some(locators) = locators { - let runtime = strong_runtime.clone(); - strong_runtime.spawn(async move { + if let Some(locators) = locators { + let runtime = strong_runtime.clone(); + strong_runtime.spawn(async move { + if runtime + .manager() + .get_transport_unicast(&zid) + .await + .is_none() + { // random backoff tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, )) .await; runtime.connect_peer(&zid, &locators).await; - }); - } + } + }); } } } @@ -610,22 +609,25 @@ impl Network { for (_, idx, _) in &link_states { let node = &self.graph[*idx]; if let Some(whatami) = node.whatami { - if zenoh_runtime::ZRuntime::Net - .block_in_place(strong_runtime.manager().get_transport_unicast(&node.zid)) - .is_none() - && self.autoconnect.matches(whatami) - { + if self.autoconnect.matches(whatami) { if let Some(locators) = &node.locators { let runtime = strong_runtime.clone(); let zid = node.zid; let locators = locators.clone(); strong_runtime.spawn(async move { - // random backoff - tokio::time::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; - runtime.connect_peer(&zid, &locators).await; + if runtime + .manager() + .get_transport_unicast(&zid) + .await + .is_none() + { + // random backoff + tokio::time::sleep(std::time::Duration::from_millis( + rand::random::() % 100, + )) + .await; + runtime.connect_peer(&zid, &locators).await; + } }); } } diff --git 
a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index a5b72a73eb..537f29aeec 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -406,24 +406,25 @@ impl Network { ); } - if !self.autoconnect.is_empty() { + if !self.autoconnect.is_empty() && self.autoconnect.matches(whatami) { // Connect discovered peers - if zenoh_runtime::ZRuntime::Acceptor - .block_in_place(strong_runtime.manager().get_transport_unicast(&zid)) - .is_none() - && self.autoconnect.matches(whatami) - { - if let Some(locators) = locators { - let runtime = strong_runtime.clone(); - strong_runtime.spawn(async move { + if let Some(locators) = locators { + let runtime = strong_runtime.clone(); + strong_runtime.spawn(async move { + if runtime + .manager() + .get_transport_unicast(&zid) + .await + .is_none() + { // random backoff tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, )) .await; runtime.connect_peer(&zid, &locators).await; - }); - } + } + }); } } } diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index 727eb6763e..1ee77ae8e2 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -489,24 +489,25 @@ impl Network { ); } - if !self.autoconnect.is_empty() { + if !self.autoconnect.is_empty() && self.autoconnect.matches(whatami) { // Connect discovered peers - if zenoh_runtime::ZRuntime::Net - .block_in_place(self.runtime.manager().get_transport_unicast(&zid)) - .is_none() - && self.autoconnect.matches(whatami) - { - if let Some(locators) = locators { - let runtime = self.runtime.clone(); - self.runtime.spawn(async move { + if let Some(locators) = locators { + let runtime = self.runtime.clone(); + self.runtime.spawn(async move { + if runtime + .manager() + .get_transport_unicast(&zid) + .await + .is_none() + { // random backoff 
tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, )) .await; runtime.connect_peer(&zid, &locators).await; - }); - } + } + }); } } } @@ -611,22 +612,25 @@ impl Network { for (_, idx, _) in &link_states { let node = &self.graph[*idx]; if let Some(whatami) = node.whatami { - if zenoh_runtime::ZRuntime::Net - .block_in_place(self.runtime.manager().get_transport_unicast(&node.zid)) - .is_none() - && self.autoconnect.matches(whatami) - { + if self.autoconnect.matches(whatami) { if let Some(locators) = &node.locators { let runtime = self.runtime.clone(); let zid = node.zid; let locators = locators.clone(); self.runtime.spawn(async move { - // random backoff - tokio::time::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; - runtime.connect_peer(&zid, &locators).await; + if runtime + .manager() + .get_transport_unicast(&zid) + .await + .is_none() + { + // random backoff + tokio::time::sleep(std::time::Duration::from_millis( + rand::random::() % 100, + )) + .await; + runtime.connect_peer(&zid, &locators).await; + } }); } } From ac6bbf4676949677887e96e9bb38519cab69ad28 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 25 Apr 2024 16:21:21 +0200 Subject: [PATCH 6/9] Use gen_range rather than modulo (#978) --- .../net/routing/hat/linkstate_peer/network.rs | 17 +++++++++-------- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 9 +++++---- zenoh/src/net/routing/hat/router/network.rs | 17 +++++++++-------- 3 files changed, 23 insertions(+), 20 deletions(-) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index a4a6841644..3fd9f53420 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -18,6 +18,7 @@ use crate::net::runtime::Runtime; use crate::runtime::WeakRuntime; use petgraph::graph::NodeIndex; use petgraph::visit::{VisitMap, Visitable}; +use rand::Rng; use 
std::convert::TryInto; use vec_map::VecMap; use zenoh_buffers::writer::{DidntWrite, HasWriter}; @@ -498,10 +499,10 @@ impl Network { .is_none() { // random backoff - tokio::time::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; + let sleep_time = std::time::Duration::from_millis( + rand::thread_rng().gen_range(0..100), + ); + tokio::time::sleep(sleep_time).await; runtime.connect_peer(&zid, &locators).await; } }); @@ -622,10 +623,10 @@ impl Network { .is_none() { // random backoff - tokio::time::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; + let sleep_time = std::time::Duration::from_millis( + rand::thread_rng().gen_range(0..100), + ); + tokio::time::sleep(sleep_time).await; runtime.connect_peer(&zid, &locators).await; } }); diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index 537f29aeec..8ce3bb4792 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -16,6 +16,7 @@ use crate::net::protocol::linkstate::{LinkState, LinkStateList}; use crate::net::runtime::Runtime; use crate::runtime::WeakRuntime; use petgraph::graph::NodeIndex; +use rand::Rng; use std::convert::TryInto; use vec_map::VecMap; use zenoh_buffers::writer::{DidntWrite, HasWriter}; @@ -418,10 +419,10 @@ impl Network { .is_none() { // random backoff - tokio::time::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; + let sleep_time = std::time::Duration::from_millis( + rand::thread_rng().gen_range(0..100), + ); + tokio::time::sleep(sleep_time).await; runtime.connect_peer(&zid, &locators).await; } }); diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index 1ee77ae8e2..486e0456ab 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -17,6 +17,7 @@ use 
crate::net::routing::dispatcher::tables::NodeId; use crate::net::runtime::Runtime; use petgraph::graph::NodeIndex; use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; +use rand::Rng; use std::convert::TryInto; use vec_map::VecMap; use zenoh_buffers::writer::{DidntWrite, HasWriter}; @@ -501,10 +502,10 @@ impl Network { .is_none() { // random backoff - tokio::time::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; + let sleep_time = std::time::Duration::from_millis( + rand::thread_rng().gen_range(0..100), + ); + tokio::time::sleep(sleep_time).await; runtime.connect_peer(&zid, &locators).await; } }); @@ -625,10 +626,10 @@ impl Network { .is_none() { // random backoff - tokio::time::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; + let sleep_time = std::time::Duration::from_millis( + rand::thread_rng().gen_range(0..100), + ); + tokio::time::sleep(sleep_time).await; runtime.connect_peer(&zid, &locators).await; } }); From 274166d778945be0bb9250944f1374e3c0dfc892 Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Thu, 25 Apr 2024 22:41:25 +0800 Subject: [PATCH 7/9] fix(zenoh-runtime): zenoh-c DLL crash in `libc::atexit` handler (#972) * fix(zenoh-runtime): zenoh-c DLL crash in libc::atexit handler * fix(zenoh-runtime): properly handle and report the status of `ZRuntime` drop * fix: add `set_hook` to suppress the panic error. 
* Trigger CI * Trigger CI --- commons/zenoh-runtime/Cargo.toml | 1 + commons/zenoh-runtime/src/lib.rs | 31 ++++++++++++++++++++++++++++--- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/commons/zenoh-runtime/Cargo.toml b/commons/zenoh-runtime/Cargo.toml index e3f0c7a3c0..cfb63b7e60 100644 --- a/commons/zenoh-runtime/Cargo.toml +++ b/commons/zenoh-runtime/Cargo.toml @@ -22,3 +22,4 @@ zenoh-result = { workspace = true, features = ["std"] } zenoh-collections = { workspace = true, features = ["std"] } zenoh-macros = { workspace = true } tokio = { workspace = true, features = ["fs", "io-util", "macros", "net", "rt-multi-thread", "sync", "time"] } +tracing = { workspace = true } diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs index 1a9d765420..cb58cac570 100644 --- a/commons/zenoh-runtime/src/lib.rs +++ b/commons/zenoh-runtime/src/lib.rs @@ -184,17 +184,42 @@ impl ZRuntimePool { // If there are any blocking tasks spawned by ZRuntimes, the function will block until they return. impl Drop for ZRuntimePool { fn drop(&mut self) { + std::panic::set_hook(Box::new(|_| { + // To suppress the panic error caught in the following `catch_unwind`. + })); + let handles: Vec<_> = self .0 .drain() .filter_map(|(_name, mut rt)| { - rt.take() - .map(|r| std::thread::spawn(move || r.shutdown_timeout(Duration::from_secs(1)))) + rt.take().map(|r| { + // NOTE: The error of the atexit handler in DLL (static lib is fine) + // failing to spawn a new thread in `cleanup` has been identified. + std::panic::catch_unwind(|| { + std::thread::spawn(move || r.shutdown_timeout(Duration::from_secs(1))) + }) + }) }) .collect(); for hd in handles { - let _ = hd.join(); + match hd { + Ok(handle) => { + if let Err(err) = handle.join() { + tracing::error!( + "The handle failed to join during `ZRuntimePool` drop due to {err:?}" + ); + } + } + Err(err) => { + // WARN: Windows with DLL is expected to panic for the time being. + // Otherwise, report the error. 
+ #[cfg(not(target_os = "windows"))] + tracing::error!("`ZRuntimePool` failed to drop due to {err:?}"); + #[cfg(target_os = "windows")] + tracing::trace!("`ZRuntimePool` failed to drop due to {err:?}"); + } + } } } } From 7c64d99e9deb75070b8c3845e0a29b7027fb48e9 Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Fri, 26 Apr 2024 00:21:25 +0800 Subject: [PATCH 8/9] ci: rearrange doc test to shorten the testing time (#975) * feat(ci): rearrange doc test to shorten the testing time * doc(ci): improve the names of jobs * ci: use shorter name for better visualization --- .github/workflows/ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9d9e3067a7..33432f827a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,7 +27,7 @@ env: jobs: check: - name: Run checks on ${{ matrix.os }} + name: Lints and doc tests on ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -66,8 +66,11 @@ jobs: - name: Perform no_std checks run: cargo check --bin nostd_check --target x86_64-unknown-none --manifest-path ci/nostd-check/Cargo.toml + - name: Run doctests + run: cargo test --doc + test: - name: Run tests on ${{ matrix.os }} + name: Unit tests on ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -105,9 +108,6 @@ jobs: if: ${{ matrix.os == 'ubuntu-latest' }} run: cargo nextest run -p zenohd --no-default-features - - name: Run doctests - run: cargo test --doc - valgrind: name: Memory leak checks runs-on: ubuntu-latest From 7ba77b6e04546a1910080ed899f1a2cce8ea56e4 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 13 Mar 2024 12:01:01 +0100 Subject: [PATCH 9/9] adding test files --- ca.crt | 21 +++ myserver.crt | 22 +++ myserver.key | 28 +++ pub_client.json5 | 431 +++++++++++++++++++++++++++++++++++++++++++++++ router.json5 | 431 +++++++++++++++++++++++++++++++++++++++++++++++ sub_client.json5 | 431 
+++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 1364 insertions(+) create mode 100644 ca.crt create mode 100644 myserver.crt create mode 100644 myserver.key create mode 100644 pub_client.json5 create mode 100644 router.json5 create mode 100644 sub_client.json5 diff --git a/ca.crt b/ca.crt new file mode 100644 index 0000000000..50b4d876ca --- /dev/null +++ b/ca.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDiTCCAnGgAwIBAgIUO1x6LAlICgKs5+pYUTo4CughfKEwDQYJKoZIhvcNAQEL +BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTExNDM0MjNaFw0yNTAzMTExNDM0MjNaMFQxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRgwFgYDVQQDDA96 +c190ZXN0X3Jvb3RfY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3 +pFWM+IJNsRCYHt1v/TliecppwVZV+ZHfFw9JKN9ev4K/fWHUiAOwp91MOLxbaYKd +C6dxW28YVGltoGz3kUZJZcJRQVso1jXv24Op4muOsiYXukLc4TU2F6dG1XqkLt5t +svsYAQFf1uK3//QZFVRBosJEn+jjiJ4XCvt49mnPRolp1pNKX0z31mZO6bSly6c9 +OVlJMjWpDCYSOuf6qZZ36fa9eSut2bRJIPY0QCsgnqYBTnIEhksS+3jy6Qt+QpLz +95pFdLbW/MW4XKpaDltyYkO6QrBekF6uWRlvyAHU+NqvXZ4F/3Z5l26qLuBcsLPJ +kyawkO+yNIDxORmQgMczAgMBAAGjUzBRMB0GA1UdDgQWBBThgotd9ws2ryEEaKp2 ++RMOWV8D7jAfBgNVHSMEGDAWgBThgotd9ws2ryEEaKp2+RMOWV8D7jAPBgNVHRMB +Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQA9QoPv78hGmvmqF4GZeqrOBKQB +N/H5wL7f8H6BXU/wpNo2nnWOJn3u37lT+zivAdGEv+x+GeKekcugKBCSluhBLpVb +VNXe4WwMm5FBuO2NRBN2nblTMm1kEO00nVk1/yNo4hI8mj7d4YLU62d7324osNpF +wHqu6B0/c99JeKRvODGswyff1i8rJ1jpcgk/JmHg7UQBHEIkn0cRR0f9W3Mxv6b5 +ZeowRe81neWNkC6IMiMmzA0iHGkhoUMA15qG1ZKOr1XR364LH5BfNNpzAWYwkvJs +0JFrrdw+rm+cRJWs55yiyCCs7pyg1IJkY/o8bifdCOUgIyonzffwREk3+kZR +-----END CERTIFICATE----- diff --git a/myserver.crt b/myserver.crt new file mode 100644 index 0000000000..a68071aa18 --- /dev/null +++ b/myserver.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDmDCCAoCgAwIBAgIUFMs3tKqT0Cvz3r0aSN9KSVPCsfkwDQYJKoZIhvcNAQEL 
+BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTExNDQ0MzZaFw0yNTAzMTExNDQ0MzZaMFQxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRgwFgYDVQQDDA90 +ZXN0X3Rsc19zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCh +86dsAI7FzJxhKykW5uzHuz9NGbmzq8G9ndUdIwTHYmawTTgr3NCBAYEF1+iOo6y9 +8yUUsTyN3bqx3biFVQHWVP6iHI7WPBazFOZOyyjc3gcRD6M5LVPBIc5Ar+zcKNzL +b8ZTW4G1T4fye5XXPS+Zu2IHjIBAPoXQVhKZjWfmpPmloF+hphF5l8L7ilDfsj3o +1qo88XzGVUjkR5fF5UE/6iuiiipXsLRtEvsYSYMvvLuKWGN+e0t3JwvfH0JnAURK +/KKOixhqnbGcnrwVY1bzgFo3u9NSQjjYREvu6QBEthuLtPkc+PCR+DxjBmdh1der +7Bwwnfa3AgKbbtoZhlkPAgMBAAGjYjBgMB4GA1UdEQQXMBWCE25ld190ZXN0X3Rs +c19zZXJ2ZXIwHQYDVR0OBBYEFG2WT0EOXqPY2QiWTxtb/detOQUDMB8GA1UdIwQY +MBaAFOGCi133CzavIQRoqnb5Ew5ZXwPuMA0GCSqGSIb3DQEBCwUAA4IBAQBKvVh0 +uzdlPkGrkU56hVOvNe2QqKXHbz0xRVeNn/rXItUnV3YbzuiNpyjkHGPBMsDtgri2 +YUf0dKfVr8+Zyr0Yc/Nhbe2gWezGMnoOo9dw6An0r4vSYmJdSaO/s5hH7/orHQxS +zCRN+6iwURT6r1quJDxJLcsA6nzOvLdQnMxTKcak/V6A7eBpoUINdFVNhvPoXMDd +PVaju1U00SEbun8Qgeh/yWz4CPQYgQqKUORkPf0ToK5V3jrbIuW9VfQi8VcOzCn9 +YPihAEzkhh+PG8FYwK3vc6u2qKNlcbEuMu6rOQTUDWAi6+PJY5ClHQYdnb4/ThjT +vcP3w3j3YhSd/9iA +-----END CERTIFICATE----- diff --git a/myserver.key b/myserver.key new file mode 100644 index 0000000000..3cad67bdc9 --- /dev/null +++ b/myserver.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCh86dsAI7FzJxh +KykW5uzHuz9NGbmzq8G9ndUdIwTHYmawTTgr3NCBAYEF1+iOo6y98yUUsTyN3bqx +3biFVQHWVP6iHI7WPBazFOZOyyjc3gcRD6M5LVPBIc5Ar+zcKNzLb8ZTW4G1T4fy +e5XXPS+Zu2IHjIBAPoXQVhKZjWfmpPmloF+hphF5l8L7ilDfsj3o1qo88XzGVUjk +R5fF5UE/6iuiiipXsLRtEvsYSYMvvLuKWGN+e0t3JwvfH0JnAURK/KKOixhqnbGc +nrwVY1bzgFo3u9NSQjjYREvu6QBEthuLtPkc+PCR+DxjBmdh1der7Bwwnfa3AgKb +btoZhlkPAgMBAAECggEAP5vQA8L6UKUrPJzzoAumL1KTq8gxYGjTCRMvS6jf7SHw +fElwCQZLHIhHMVDahf+yTs7rnwN36a6Pb+HKYg//zzuF4Y0+6tUiA0dvp73yuEE6 +XFCchs4PSdlpxY1zhgtEoWCu8DmOKfTpS+uPcEEXa5WmDJn6G4GTFD9iQc5A410D 
+oBf0ONw7X8nE1ZBZr6dpJBdsP68pRJC8BfhTH/dS3d4I4JYb2BgLER1ZbMqfFeW/ +sAZ3FKKETdYvCgLb380/Xpb08FRAHlQ1MowEpfe2sNBqsnkHjESExMIP8Ne7O+ts +9IUIGHZkKIl9u/B/RHCve8Db3GM9F/lMjJ9p84FEXQKBgQDTzYX+9FyAZt5NGPwW +5mTqlh5EHLZzgnVGo2DySu0Zi7MN/YYKV1wAT3i6cTATMjjdSYu2u6L1VYhfIYYq +43MIcsHe7XMAQxbQ6l6oULUa77huMzC0Js0l08kV/ERkH0/nUS9JRp5FJUKR7mkH +Am2dz040MceQMITzCewwskf+jQKBgQDDvxgxBTNJYF3tN3uNLBPRcib0Kk+p2LfW +oDv43++MiyqkTejJJqMDHtYsXNivH6T7CE2U0Qf+2MonAzoVnNsEAfonS19okn8c +LqkMlTZmiT9Tld+h+pcAsf7lYYXSuZv10lgXSN2nj8LBm/EM130ShzyrM58vCGRC +/fDPu9ZNCwKBgQCnuWdVILlnzQ5ZS2HF2Kktw7cwBPTOwA6S46pP9NmRkzk16QAO +jGOEs2pNanjBmtHBGw6SpEBFu3gErY2LxRZBKG8yVCLvoDEfO5m9/DuOmysXyV3W +K6vlOrNQv7aA+vLRoU6q3ktTQlBXM87kCB46DAJH/uuj2WhO9hqd7XBpuQKBgFCG +/9vCyOuJ0noxVgmotWp3rKDL+0PjXRXVi3aCIZlO8zbuujJuS6eP+wn7FEVPHl8L +dmcfa0ujQd60zCNyCQPoEFI0BscNZW9hnrgHdn7OPZgUUxDe91oY38TbzuL26rtB +Um4Z0t4JHVTq40qmJ9UEf6fqr7T4nc6Vi4jaPHorAoGAL1hVy8oYAKtJCTlRQalw +apM3ZJUlTK7qfkjajPEvmhuHntplHDkGEe5ZROUu3zluDiS7MHzOYtunitoMaZbG +cRMO34aDO/UXoLdUWabuk81e3/AWgs6wHVFBOpRAYKAQzigrmXanMclwiL0V5T9K +IgP5i6aUi4zduiV1YLHj4UA= +-----END PRIVATE KEY----- diff --git a/pub_client.json5 b/pub_client.json5 new file mode 100644 index 0000000000..b75f75fa16 --- /dev/null +++ b/pub_client.json5 @@ -0,0 +1,431 @@ +/// This file attempts to list and document available configuration elements. +/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. +/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. +{ + /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) + /// that zenoh runtime will use. + /// If not set, a random unsigned 128bit integer will be used. + /// WARNING: this id must be unique in your zenoh network. 
+ // id: "1234567890abcdef", + /// The node's mode (router, peer or client) + mode: "client", + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + metadata: { + name: "strawberry", + location: "Penny Lane" + }, + /// Which endpoints to connect to. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: + /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 + connect: { + endpoints: [ + // "/
" + "quic/127.0.0.1:7447" + + ], + }, + /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, + /// peers, or client can use to establish a zenoh session. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: + /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 + listen: { + endpoints: [ + // "/
" + ], + }, + /// Configure the scouting mechanisms and their behaviours + scouting: { + /// In client mode, the period dedicated to scouting for a router before failing + timeout: 3000, + /// In peer mode, the period dedicated to scouting remote peers before attempting other operations + delay: 200, + /// The multicast scouting configuration. + multicast: { + /// Whether multicast scouting is enabled or not + enabled: true, + /// The socket which should be used for multicast scouting + address: "224.0.0.224:7446", + /// The network interface which should be used for multicast scouting + interface: "auto", // If not set or set to "auto" the interface if picked automatically + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". + autoconnect: { router: "", peer: "router|peer" + }, + /// Whether or not to listen for scout messages on UDP multicast and reply to them. + listen: true, + }, + /// The gossip scouting configuration. + gossip: { + /// Whether gossip scouting is enabled or not + enabled: true, + /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting informations are only propagated to the next hop. + /// Activating multihop gossip implies more scouting traffic and a lower scalability. + /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have + /// direct connectivity with each other. + multihop: false, + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". 
+ autoconnect: { router: "", peer: "router|peer" + }, + }, + }, + /// Configuration of data messages timestamps management. + timestamping: { + /// Whether data messages should be timestamped if not already. + /// Accepts a single boolean value or different values for router, peer and client. + enabled: { router: true, peer: false, client: false + }, + /// Whether data messages with timestamps in the future should be dropped or not. + /// If set to false (default), messages with timestamps in the future are retimestamped. + /// Timestamps are ignored if timestamping is disabled. + drop_future_timestamp: false, + }, + /// The default timeout to apply to queries in milliseconds. + queries_default_timeout: 10000, + /// The routing strategy to use and it's configuration. + routing: { + /// The routing strategy to use in routers and it's configuration. + router: { + /// When set to true a router will forward data between two peers + /// directly connected to it if it detects that those peers are not + /// connected to each other. + /// The failover brokering only works if gossip discovery is enabled. + peers_failover_brokering: true, + }, + /// The routing strategy to use in peers and it's configuration. + peer: { + /// The routing strategy to use in peers. ("peer_to_peer" or "linkstate"). + mode: "peer_to_peer", + }, + }, + // /// The declarations aggregation strategy. + // aggregation: { + // /// A list of key-expressions for which all included subscribers will be aggregated into. + // subscribers: [ + // // key_expression + // ], + // /// A list of key-expressions for which all included publishers will be aggregated into. + // publishers: [ + // // key_expression + // ], + // }, + // /// The downsampling declaration. + // downsampling: [ + // { + // /// A list of network interfaces messages will be processed on, the rest will be passed as is. + // interfaces: [ "wlan0" ], + // /// Data flow messages will be processed on. 
("egress" or "ingress") + // flow: "egress", + // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz + // rules: [ + // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, + // ], + // }, + // ], + /// Configure internal transport parameters + transport: { + unicast: { + /// Timeout in milliseconds when opening a link + accept_timeout: 10000, + /// Maximum number of zenoh session in pending state while accepting + accept_pending: 100, + /// Maximum number of sessions that can be simultaneously alive + max_sessions: 1000, + /// Maximum number of incoming links that are admitted per session + max_links: 1, + /// Enables the LowLatency transport + /// This option does not make LowLatency transport mandatory, the actual implementation of transport + /// used will depend on Establish procedure and other party's settings + /// + /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to + /// enable 'lowlatency' you need to explicitly disable 'qos'. + lowlatency: false, + /// Enables QoS on unicast communications. + qos: { + enabled: true, + }, + /// Enables compression on unicast communications. + /// Compression capabilities are negotiated during session establishment. + /// If both Zenoh nodes support compression, then compression is activated. + compression: { + enabled: false, + }, + }, + multicast: { + /// Enables QoS on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + qos: { + enabled: false, + }, + /// Enables compression on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + compression: { + enabled: false, + }, + }, + link: { + /// An optional whitelist of protocols to be used for accepting and opening sessions. + /// If not configured, all the supported protocols are automatically whitelisted. 
+ /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"]
+ /// For example, to only enable "tls" and "quic":
+ // protocols: ["tls", "quic"],
+ /// Configure the zenoh TX parameters of a link
+ tx: {
+ /// The resolution in bits to be used for the message sequence numbers.
+ /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used.
+ /// Accepted values: 8bit, 16bit, 32bit, 64bit.
+ sequence_number_resolution: "32bit",
+ /// Link lease duration in milliseconds to announce to other zenoh nodes
+ lease: 10000,
+ /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive
+ /// messages will be sent at the configured time interval.
+ /// NOTE: In order to consider eventual packet loss and transmission latency and jitter,
+ /// set the actual keep_alive timeout to one fourth of the lease time.
+ /// This is in-line with the ITU-T G.8013/Y.1731 specification on continuous connectivity
+ /// check which considers a link as failed when no messages are received in 3.5 times the
+ /// target interval.
+ keep_alive: 4,
+ /// Batch size in bytes is expressed as a 16bit unsigned integer.
+ /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535).
+ /// The default batch size value is the maximum batch size: 65535.
+ batch_size: 65535,
+ /// Each zenoh link has a transmission queue that can be configured
+ queue: {
+ /// The size of each priority queue indicates the number of batches a given queue can contain.
+ /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE.
+ /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE,
+ /// the amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU.
+ /// If qos is false, then only the DATA priority will be allocated.
+ size: {
+ control: 1,
+ real_time: 1,
+ interactive_high: 1,
+ interactive_low: 1,
+ data_high: 2,
+ data: 4,
+ data_low: 4,
+ background: 4,
+ },
+ /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress.
+ /// Higher values lead to a more aggressive batching but it will introduce additional latency.
+ backoff: 100,
+ },
+ // Number of threads dedicated to transmission
+ // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4)
+ // threads: 4,
+ },
+ /// Configure the zenoh RX parameters of a link
+ rx: {
+ /// Receiving buffer size in bytes for each link
+ /// The default rx_buffer_size value is the same as the default batch size: 65535.
+ /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate
+ /// more in-flight data. This is particularly relevant when dealing with large messages.
+ /// E.g. for 16MiB rx_buffer_size set the value to: 16777216.
+ buffer_size: 65535,
+ /// Maximum size of the defragmentation buffer at receiver end.
+ /// Fragmented messages that are larger than the configured size will be dropped.
+ /// The default value is 1GiB. This would work in most scenarios.
+ /// NOTE: reduce the value if you are operating on a memory constrained device.
+ max_message_size: 1073741824,
+ },
+ /// Configure TLS specific parameters
+ tls: {
+ root_ca_certificate: "ca.crt"
+ }
+ // tls: {
+ // /// Path to the certificate of the certificate authority used to validate either the server
+ // /// or the client's keys and certificates, depending on the node's mode. If not specified
+ // /// on router mode then the default WebPKI certificates are used instead.
+ // root_ca_certificate: null, + // /// Path to the TLS server private key + // server_private_key: null, + // /// Path to the TLS server public certificate + // server_certificate: null, + // /// Client authentication, if true enables mTLS (mutual authentication) + // client_auth: false, + // /// Path to the TLS client private key + // client_private_key: null, + // /// Path to the TLS client public certificate + // client_certificate: null, + // // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. + // // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your + // // ca to verify that the server at baz.com is actually baz.com, let this be true (default). + // server_name_verification: null, + // }, + }, + /// Shared memory configuration + shared_memory: { + enabled: false, + }, + /// Access control configuration + auth: { + /// The configuration of authentification. + /// A password implies a username is required. + usrpwd: { + user: null, + password: null, + /// The path to a file containing the user password dictionary + dictionary_file: null, + }, + pubkey: { + public_key_pem: null, + private_key_pem: null, + public_key_file: null, + private_key_file: null, + key_size: null, + known_keys_file: null, + }, + }, + }, + /// Configure the Admin Space + /// Unstable: this configuration part works as advertised, but may change in a future release + adminspace: { + // read and/or write permissions on the admin space + permissions: { + read: true, + write: false, + }, + }, + /// + /// Plugins configurations + /// + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // plugins_search_dirs: [], + // /// Plugins are only loaded if present in the configuration. 
When starting
+ // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace.
+ // plugins: {
+ // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux)
+ //
+ // /// Plugin settings may contain field `__config__`
+ // /// - If `__config__` is specified, its content is merged into plugin configuration
+ // /// - Properties loaded from `__config__` file override existing properties
+ // /// - If json objects in loaded file contain `__config__` properties, they are processed recursively
+ // /// This is used in the 'storage_manager' which supports subplugins, each with its own config
+ // ///
+ // /// See below example of plugin configuration using `__config__` property
+ //
+ // /// Configure the REST API plugin
+ // rest: {
+ // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic.
+ // __required__: true, // defaults to false
+ // /// load configuration from the file
+ // __config__: "./plugins/zenoh-plugin-rest/config.json5",
+ // /// http port to answer to rest requests
+ // http_port: 8000,
+ // },
+ //
+ // /// Configure the storage manager plugin
+ // storage_manager: {
+ // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load.
+ // __path__: [
+ // "./target/release/libzenoh_plugin_storage_manager.so",
+ // "./target/release/libzenoh_plugin_storage_manager.dylib",
+ // ],
+ // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup
+ // backend_search_dirs: [],
+ // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing.
+ // volumes: { + // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb + // influxdb: { + // url: "https://myinfluxdb.example", + // /// Some plugins may need passwords in their configuration. + // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. + // /// any value held at the key "private" will not be shown in the adminspace. + // private: { + // username: "user1", + // password: "pw1", + // }, + // }, + // influxdb2: { + // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. + // backend: "influxdb", + // private: { + // username: "user2", + // password: "pw2", + // }, + // url: "https://localhost:8086", + // }, + // }, + // + // /// Configure the storages supported by the volumes + // storages: { + // demo: { + // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. + // key_expr: "demo/memory/**", + // /// Storages also need to know which volume will be used to actually store their key-value pairs. + // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. + // volume: "memory", + // }, + // demo2: { + // key_expr: "demo/memory2/**", + // volume: "memory", + // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. + // /// Metadata includes the set of wild card updates and deletions (tombstones). + // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. + // garbage_collection: { + // /// The garbage collection event will be periodic with this duration. + // /// The duration is specified in seconds. + // period: 30, + // /// Metadata older than this parameter will be garbage collected. + // /// The duration is specified in seconds. 
+ // lifespan: 86400, + // }, + // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. + // /// In the absence of this configuration, a normal storage is initialized + // /// Note: all the samples to be stored in replicas should be timestamped + // replica_config: { + // /// Specifying the parameters is optional, by default the values provided will be used. + // /// Time interval between different synchronization attempts in seconds + // publication_interval: 5, + // /// Expected propagation delay of the network in milliseconds + // propagation_delay: 200, + // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. + // /// Higher the frequency of updates, lower the delta should be chosen + // /// To be efficient, delta should be the time containing no more than 100,000 samples + // delta: 1000, + // } + // }, + // demo3: { + // key_expr: "demo/memory3/**", + // volume: "memory", + // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. + // /// If not configured, complete defaults to false. + // complete: "true", + // }, + // influx_demo: { + // key_expr: "demo/influxdb/**", + // /// This prefix will be stripped of the received keys when storing. 
+ // strip_prefix: "demo/influxdb", + // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: + // volume: { + // id: "influxdb", + // db: "example", + // }, + // }, + // influx_demo2: { + // key_expr: "demo/influxdb2/**", + // strip_prefix: "demo/influxdb2", + // volume: { + // id: "influxdb2", + // db: "example", + // }, + // }, + // }, + // }, + // }, + // /// Plugin configuration example using `__config__` property + // plugins: { + // rest: { + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // }, + // storage_manager: { + // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", + // } + // }, +} \ No newline at end of file diff --git a/router.json5 b/router.json5 new file mode 100644 index 0000000000..46817ee4b4 --- /dev/null +++ b/router.json5 @@ -0,0 +1,431 @@ +/// This file attempts to list and document available configuration elements. +/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. +/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. +{ + /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) + /// that zenoh runtime will use. + /// If not set, a random unsigned 128bit integer will be used. + /// WARNING: this id must be unique in your zenoh network. + // id: "1234567890abcdef", + /// The node's mode (router, peer or client) + mode: "router", + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + metadata: { + name: "strawberry", + location: "Penny Lane" + }, + /// Which endpoints to connect to. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. 
+ /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: + /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 + connect: { + endpoints: [ + // "/
" + ], + }, + /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, + /// peers, or client can use to establish a zenoh session. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: + /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 + listen: { + endpoints: [ + // "/
" + "quic/127.0.0.1:7447" + ], + }, + /// Configure the scouting mechanisms and their behaviours + scouting: { + /// In client mode, the period dedicated to scouting for a router before failing + timeout: 3000, + /// In peer mode, the period dedicated to scouting remote peers before attempting other operations + delay: 200, + /// The multicast scouting configuration. + multicast: { + /// Whether multicast scouting is enabled or not + enabled: true, + /// The socket which should be used for multicast scouting + address: "224.0.0.224:7446", + /// The network interface which should be used for multicast scouting + interface: "auto", // If not set or set to "auto" the interface if picked automatically + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". + autoconnect: { router: "", peer: "router|peer" + }, + /// Whether or not to listen for scout messages on UDP multicast and reply to them. + listen: true, + }, + /// The gossip scouting configuration. + gossip: { + /// Whether gossip scouting is enabled or not + enabled: true, + /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting informations are only propagated to the next hop. + /// Activating multihop gossip implies more scouting traffic and a lower scalability. + /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have + /// direct connectivity with each other. + multihop: false, + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". 
+ autoconnect: { router: "", peer: "router|peer" + }, + }, + }, + /// Configuration of data messages timestamps management. + timestamping: { + /// Whether data messages should be timestamped if not already. + /// Accepts a single boolean value or different values for router, peer and client. + enabled: { router: true, peer: false, client: false + }, + /// Whether data messages with timestamps in the future should be dropped or not. + /// If set to false (default), messages with timestamps in the future are retimestamped. + /// Timestamps are ignored if timestamping is disabled. + drop_future_timestamp: false, + }, + /// The default timeout to apply to queries in milliseconds. + queries_default_timeout: 10000, + /// The routing strategy to use and it's configuration. + routing: { + /// The routing strategy to use in routers and it's configuration. + router: { + /// When set to true a router will forward data between two peers + /// directly connected to it if it detects that those peers are not + /// connected to each other. + /// The failover brokering only works if gossip discovery is enabled. + peers_failover_brokering: true, + }, + /// The routing strategy to use in peers and it's configuration. + peer: { + /// The routing strategy to use in peers. ("peer_to_peer" or "linkstate"). + mode: "peer_to_peer", + }, + }, + // /// The declarations aggregation strategy. + // aggregation: { + // /// A list of key-expressions for which all included subscribers will be aggregated into. + // subscribers: [ + // // key_expression + // ], + // /// A list of key-expressions for which all included publishers will be aggregated into. + // publishers: [ + // // key_expression + // ], + // }, + // /// The downsampling declaration. + // downsampling: [ + // { + // /// A list of network interfaces messages will be processed on, the rest will be passed as is. + // interfaces: [ "wlan0" ], + // /// Data flow messages will be processed on. 
("egress" or "ingress") + // flow: "egress", + // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz + // rules: [ + // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, + // ], + // }, + // ], + /// Configure internal transport parameters + transport: { + unicast: { + /// Timeout in milliseconds when opening a link + accept_timeout: 10000, + /// Maximum number of zenoh session in pending state while accepting + accept_pending: 100, + /// Maximum number of sessions that can be simultaneously alive + max_sessions: 1000, + /// Maximum number of incoming links that are admitted per session + max_links: 1, + /// Enables the LowLatency transport + /// This option does not make LowLatency transport mandatory, the actual implementation of transport + /// used will depend on Establish procedure and other party's settings + /// + /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to + /// enable 'lowlatency' you need to explicitly disable 'qos'. + lowlatency: false, + /// Enables QoS on unicast communications. + qos: { + enabled: true, + }, + /// Enables compression on unicast communications. + /// Compression capabilities are negotiated during session establishment. + /// If both Zenoh nodes support compression, then compression is activated. + compression: { + enabled: false, + }, + }, + multicast: { + /// Enables QoS on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + qos: { + enabled: false, + }, + /// Enables compression on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + compression: { + enabled: false, + }, + }, + link: { + /// An optional whitelist of protocols to be used for accepting and opening sessions. + /// If not configured, all the supported protocols are automatically whitelisted. 
+ /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"]
+ /// For example, to only enable "tls" and "quic":
+ // protocols: ["tls", "quic"],
+ /// Configure the zenoh TX parameters of a link
+ tx: {
+ /// The resolution in bits to be used for the message sequence numbers.
+ /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used.
+ /// Accepted values: 8bit, 16bit, 32bit, 64bit.
+ sequence_number_resolution: "32bit",
+ /// Link lease duration in milliseconds to announce to other zenoh nodes
+ lease: 10000,
+ /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive
+ /// messages will be sent at the configured time interval.
+ /// NOTE: In order to consider eventual packet loss and transmission latency and jitter,
+ /// set the actual keep_alive timeout to one fourth of the lease time.
+ /// This is in-line with the ITU-T G.8013/Y.1731 specification on continuous connectivity
+ /// check which considers a link as failed when no messages are received in 3.5 times the
+ /// target interval.
+ keep_alive: 4,
+ /// Batch size in bytes is expressed as a 16bit unsigned integer.
+ /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535).
+ /// The default batch size value is the maximum batch size: 65535.
+ batch_size: 65535,
+ /// Each zenoh link has a transmission queue that can be configured
+ queue: {
+ /// The size of each priority queue indicates the number of batches a given queue can contain.
+ /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE.
+ /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE,
+ /// the amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU.
+ /// If qos is false, then only the DATA priority will be allocated.
+ size: {
+ control: 1,
+ real_time: 1,
+ interactive_high: 1,
+ interactive_low: 1,
+ data_high: 2,
+ data: 4,
+ data_low: 4,
+ background: 4,
+ },
+ /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress.
+ /// Higher values lead to a more aggressive batching but it will introduce additional latency.
+ backoff: 100,
+ },
+ // Number of threads dedicated to transmission
+ // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4)
+ // threads: 4,
+ },
+ /// Configure the zenoh RX parameters of a link
+ rx: {
+ /// Receiving buffer size in bytes for each link
+ /// The default rx_buffer_size value is the same as the default batch size: 65535.
+ /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate
+ /// more in-flight data. This is particularly relevant when dealing with large messages.
+ /// E.g. for 16MiB rx_buffer_size set the value to: 16777216.
+ buffer_size: 65535,
+ /// Maximum size of the defragmentation buffer at receiver end.
+ /// Fragmented messages that are larger than the configured size will be dropped.
+ /// The default value is 1GiB. This would work in most scenarios.
+ /// NOTE: reduce the value if you are operating on a memory constrained device.
+ max_message_size: 1073741824,
+ },
+ /// Configure TLS specific parameters
+ tls: {
+ server_private_key: "myserver.key",
+ server_certificate: "myserver.crt"
+ }
+ // tls: {
+ // /// Path to the certificate of the certificate authority used to validate either the server
+ // /// or the client's keys and certificates, depending on the node's mode. If not specified
+ // /// on router mode then the default WebPKI certificates are used instead.
+ // root_ca_certificate: null, + // /// Path to the TLS server private key + // server_private_key: null, + // /// Path to the TLS server public certificate + // server_certificate: null, + // /// Client authentication, if true enables mTLS (mutual authentication) + // client_auth: false, + // /// Path to the TLS client private key + // client_private_key: null, + // /// Path to the TLS client public certificate + // client_certificate: null, + // // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. + // // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your + // // ca to verify that the server at baz.com is actually baz.com, let this be true (default). + // server_name_verification: null, + // }, + }, + /// Shared memory configuration + shared_memory: { + enabled: false, + }, + /// Access control configuration + auth: { + /// The configuration of authentication. + /// A password implies a username is required. + usrpwd: { + user: null, + password: null, + /// The path to a file containing the user password dictionary + dictionary_file: null, + }, + pubkey: { + public_key_pem: null, + private_key_pem: null, + public_key_file: null, + private_key_file: null, + key_size: null, + known_keys_file: null, + }, + }, + }, + /// Configure the Admin Space + /// Unstable: this configuration part works as advertised, but may change in a future release + adminspace: { + // read and/or write permissions on the admin space + permissions: { + read: true, + write: false, + }, + }, + /// + /// Plugins configurations + /// + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // plugins_search_dirs: [], + // /// Plugins are only loaded if present in the configuration. 
When starting
+ // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace.
+ // plugins: {
+ // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux)
+ //
+ // /// Plugin settings may contain field `__config__`
+ // /// - If `__config__` is specified, its content is merged into plugin configuration
+ // /// - Properties loaded from `__config__` file override existing properties
+ // /// - If json objects in loaded file contain `__config__` properties, they are processed recursively
+ // /// This is used in the 'storage_manager' which supports subplugins, each with its own config
+ // ///
+ // /// See below example of plugin configuration using `__config__` property
+ //
+ // /// Configure the REST API plugin
+ // rest: {
+ // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic.
+ // __required__: true, // defaults to false
+ // /// load configuration from the file
+ // __config__: "./plugins/zenoh-plugin-rest/config.json5",
+ // /// http port to answer to rest requests
+ // http_port: 8000,
+ // },
+ //
+ // /// Configure the storage manager plugin
+ // storage_manager: {
+ // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load.
+ // __path__: [
+ // "./target/release/libzenoh_plugin_storage_manager.so",
+ // "./target/release/libzenoh_plugin_storage_manager.dylib",
+ // ],
+ // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup
+ // backend_search_dirs: [],
+ // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing.
+ // volumes: { + // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb + // influxdb: { + // url: "https://myinfluxdb.example", + // /// Some plugins may need passwords in their configuration. + // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. + // /// any value held at the key "private" will not be shown in the adminspace. + // private: { + // username: "user1", + // password: "pw1", + // }, + // }, + // influxdb2: { + // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. + // backend: "influxdb", + // private: { + // username: "user2", + // password: "pw2", + // }, + // url: "https://localhost:8086", + // }, + // }, + // + // /// Configure the storages supported by the volumes + // storages: { + // demo: { + // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. + // key_expr: "demo/memory/**", + // /// Storages also need to know which volume will be used to actually store their key-value pairs. + // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. + // volume: "memory", + // }, + // demo2: { + // key_expr: "demo/memory2/**", + // volume: "memory", + // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. + // /// Metadata includes the set of wild card updates and deletions (tombstones). + // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. + // garbage_collection: { + // /// The garbage collection event will be periodic with this duration. + // /// The duration is specified in seconds. + // period: 30, + // /// Metadata older than this parameter will be garbage collected. + // /// The duration is specified in seconds. 
+ // lifespan: 86400, + // }, + // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. + // /// In the absence of this configuration, a normal storage is initialized + // /// Note: all the samples to be stored in replicas should be timestamped + // replica_config: { + // /// Specifying the parameters is optional, by default the values provided will be used. + // /// Time interval between different synchronization attempts in seconds + // publication_interval: 5, + // /// Expected propagation delay of the network in milliseconds + // propagation_delay: 200, + // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. + // /// Higher the frequency of updates, lower the delta should be chosen + // /// To be efficient, delta should be the time containing no more than 100,000 samples + // delta: 1000, + // } + // }, + // demo3: { + // key_expr: "demo/memory3/**", + // volume: "memory", + // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. + // /// If not configured, complete defaults to false. + // complete: "true", + // }, + // influx_demo: { + // key_expr: "demo/influxdb/**", + // /// This prefix will be stripped of the received keys when storing. 
+ // strip_prefix: "demo/influxdb", + // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: + // volume: { + // id: "influxdb", + // db: "example", + // }, + // }, + // influx_demo2: { + // key_expr: "demo/influxdb2/**", + // strip_prefix: "demo/influxdb2", + // volume: { + // id: "influxdb2", + // db: "example", + // }, + // }, + // }, + // }, + // }, + // /// Plugin configuration example using `__config__` property + // plugins: { + // rest: { + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // }, + // storage_manager: { + // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", + // } + // }, +} \ No newline at end of file diff --git a/sub_client.json5 b/sub_client.json5 new file mode 100644 index 0000000000..b75f75fa16 --- /dev/null +++ b/sub_client.json5 @@ -0,0 +1,431 @@ +/// This file attempts to list and document available configuration elements. +/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. +/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. +{ + /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) + /// that zenoh runtime will use. + /// If not set, a random unsigned 128bit integer will be used. + /// WARNING: this id must be unique in your zenoh network. + // id: "1234567890abcdef", + /// The node's mode (router, peer or client) + mode: "client", + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + metadata: { + name: "strawberry", + location: "Penny Lane" + }, + /// Which endpoints to connect to. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. 
+ /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: + /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 + connect: { + endpoints: [ + // "/
" + "quic/127.0.0.1:7447" + + ], + }, + /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, + /// peers, or client can use to establish a zenoh session. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: + /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 + listen: { + endpoints: [ + // "/
"
+    ],
+  },
+  /// Configure the scouting mechanisms and their behaviours
+  scouting: {
+    /// In client mode, the period dedicated to scouting for a router before failing
+    timeout: 3000,
+    /// In peer mode, the period dedicated to scouting remote peers before attempting other operations
+    delay: 200,
+    /// The multicast scouting configuration.
+    multicast: {
+      /// Whether multicast scouting is enabled or not
+      enabled: true,
+      /// The socket which should be used for multicast scouting
+      address: "224.0.0.224:7446",
+      /// The network interface which should be used for multicast scouting
+      interface: "auto", // If not set or set to "auto" the interface is picked automatically
+      /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast.
+      /// Accepts a single value or different values for router, peer and client.
+      /// Each value is bit-or-like combinations of "peer", "router" and "client".
+      autoconnect: { router: "", peer: "router|peer"
+      },
+      /// Whether or not to listen for scout messages on UDP multicast and reply to them.
+      listen: true,
+    },
+    /// The gossip scouting configuration.
+    gossip: {
+      /// Whether gossip scouting is enabled or not
+      enabled: true,
+      /// When true, gossip scouting information is propagated multiple hops to all nodes in the local network.
+      /// When false, gossip scouting information is only propagated to the next hop.
+      /// Activating multihop gossip implies more scouting traffic and a lower scalability.
+      /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have
+      /// direct connectivity with each other.
+      multihop: false,
+      /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip.
+      /// Accepts a single value or different values for router, peer and client.
+      /// Each value is bit-or-like combinations of "peer", "router" and "client".
+      autoconnect: { router: "", peer: "router|peer"
+      },
+    },
+  },
+  /// Configuration of data messages timestamps management.
+  timestamping: {
+    /// Whether data messages should be timestamped if not already.
+    /// Accepts a single boolean value or different values for router, peer and client.
+    enabled: { router: true, peer: false, client: false
+    },
+    /// Whether data messages with timestamps in the future should be dropped or not.
+    /// If set to false (default), messages with timestamps in the future are retimestamped.
+    /// Timestamps are ignored if timestamping is disabled.
+    drop_future_timestamp: false,
+  },
+  /// The default timeout to apply to queries in milliseconds.
+  queries_default_timeout: 10000,
+  /// The routing strategy to use and its configuration.
+  routing: {
+    /// The routing strategy to use in routers and its configuration.
+    router: {
+      /// When set to true a router will forward data between two peers
+      /// directly connected to it if it detects that those peers are not
+      /// connected to each other.
+      /// The failover brokering only works if gossip discovery is enabled.
+      peers_failover_brokering: true,
+    },
+    /// The routing strategy to use in peers and its configuration.
+    peer: {
+      /// The routing strategy to use in peers. ("peer_to_peer" or "linkstate").
+      mode: "peer_to_peer",
+    },
+  },
+  // /// The declarations aggregation strategy.
+  // aggregation: {
+  //   /// A list of key-expressions for which all included subscribers will be aggregated into.
+  //   subscribers: [
+  //     // key_expression
+  //   ],
+  //   /// A list of key-expressions for which all included publishers will be aggregated into.
+  //   publishers: [
+  //     // key_expression
+  //   ],
+  // },
+  // /// The downsampling declaration.
+  // downsampling: [
+  //   {
+  //     /// A list of network interfaces messages will be processed on, the rest will be passed as is.
+  //     interfaces: [ "wlan0" ],
+  //     /// Data flow messages will be processed on.
("egress" or "ingress") + // flow: "egress", + // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz + // rules: [ + // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, + // ], + // }, + // ], + /// Configure internal transport parameters + transport: { + unicast: { + /// Timeout in milliseconds when opening a link + accept_timeout: 10000, + /// Maximum number of zenoh session in pending state while accepting + accept_pending: 100, + /// Maximum number of sessions that can be simultaneously alive + max_sessions: 1000, + /// Maximum number of incoming links that are admitted per session + max_links: 1, + /// Enables the LowLatency transport + /// This option does not make LowLatency transport mandatory, the actual implementation of transport + /// used will depend on Establish procedure and other party's settings + /// + /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to + /// enable 'lowlatency' you need to explicitly disable 'qos'. + lowlatency: false, + /// Enables QoS on unicast communications. + qos: { + enabled: true, + }, + /// Enables compression on unicast communications. + /// Compression capabilities are negotiated during session establishment. + /// If both Zenoh nodes support compression, then compression is activated. + compression: { + enabled: false, + }, + }, + multicast: { + /// Enables QoS on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + qos: { + enabled: false, + }, + /// Enables compression on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + compression: { + enabled: false, + }, + }, + link: { + /// An optional whitelist of protocols to be used for accepting and opening sessions. + /// If not configured, all the supported protocols are automatically whitelisted. 
+      /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"]
+      /// For example, to only enable "tls" and "quic":
+      //   protocols: ["tls", "quic"],
+      /// Configure the zenoh TX parameters of a link
+      tx: {
+        /// The resolution in bits to be used for the message sequence numbers.
+        /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used.
+        /// Accepted values: 8bit, 16bit, 32bit, 64bit.
+        sequence_number_resolution: "32bit",
+        /// Link lease duration in milliseconds to announce to other zenoh nodes
+        lease: 10000,
+        /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive
+        /// messages will be sent at the configured time interval.
+        /// NOTE: In order to consider eventual packet loss and transmission latency and jitter,
+        ///       set the actual keep_alive timeout to one fourth of the lease time.
+        ///       This is in-line with the ITU-T G.8013/Y.1731 specification on continuous connectivity
+        ///       check which considers a link as failed when no messages are received in 3.5 times the
+        ///       target interval.
+        keep_alive: 4,
+        /// Batch size in bytes is expressed as a 16bit unsigned integer.
+        /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535).
+        /// The default batch size value is the maximum batch size: 65535.
+        batch_size: 65535,
+        /// Each zenoh link has a transmission queue that can be configured
+        queue: {
+          /// The size of each priority queue indicates the number of batches a given queue can contain.
+          /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE.
+          /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE,
+          /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU.
+          /// If qos is false, then only the DATA priority will be allocated.
+          size: {
+            control: 1,
+            real_time: 1,
+            interactive_high: 1,
+            interactive_low: 1,
+            data_high: 2,
+            data: 4,
+            data_low: 4,
+            background: 4,
+          },
+          /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress.
+          /// Higher values lead to a more aggressive batching but it will introduce additional latency.
+          backoff: 100,
+        },
+        // Number of threads dedicated to transmission
+        // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4)
+        // threads: 4,
+      },
+      /// Configure the zenoh RX parameters of a link
+      rx: {
+        /// Receiving buffer size in bytes for each link
+        /// The default rx_buffer_size value is the same as the default batch size: 65535.
+        /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate
+        /// more in-flight data. This is particularly relevant when dealing with large messages.
+        /// E.g. for 16MiB rx_buffer_size set the value to: 16777216.
+        buffer_size: 65535,
+        /// Maximum size of the defragmentation buffer at receiver end.
+        /// Fragmented messages that are larger than the configured size will be dropped.
+        /// The default value is 1GiB. This would work in most scenarios.
+        /// NOTE: reduce the value if you are operating on a memory constrained device.
+        max_message_size: 1073741824,
+      },
+      /// Configure TLS specific parameters
+      tls: {
+        root_ca_certificate: "ca.crt"
+      }
+      // tls: {
+      //   /// Path to the certificate of the certificate authority used to validate either the server
+      //   /// or the client's keys and certificates, depending on the node's mode. If not specified
+      //   /// on router mode then the default WebPKI certificates are used instead.
+      //   root_ca_certificate: null,
+      //   /// Path to the TLS server private key
+      //   server_private_key: null,
+      //   /// Path to the TLS server public certificate
+      //   server_certificate: null,
+      //   /// Client authentication, if true enables mTLS (mutual authentication)
+      //   client_auth: false,
+      //   /// Path to the TLS client private key
+      //   client_private_key: null,
+      //   /// Path to the TLS client public certificate
+      //   client_certificate: null,
+      //   // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers.
+      //   // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you want your
+      //   // ca to verify that the server at baz.com is actually baz.com, let this be true (default).
+      //   server_name_verification: null,
+      // },
+    },
+    /// Shared memory configuration
+    shared_memory: {
+      enabled: false,
+    },
+    /// Access control configuration
+    auth: {
+      /// The configuration of authentication.
+      /// A password implies a username is required.
+      usrpwd: {
+        user: null,
+        password: null,
+        /// The path to a file containing the user password dictionary
+        dictionary_file: null,
+      },
+      pubkey: {
+        public_key_pem: null,
+        private_key_pem: null,
+        public_key_file: null,
+        private_key_file: null,
+        key_size: null,
+        known_keys_file: null,
+      },
+    },
+  },
+  /// Configure the Admin Space
+  /// Unstable: this configuration part works as advertised, but may change in a future release
+  adminspace: {
+    // read and/or write permissions on the admin space
+    permissions: {
+      read: true,
+      write: false,
+    },
+  },
+  ///
+  /// Plugins configurations
+  ///
+  // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup
+  // plugins_search_dirs: [],
+  // /// Plugins are only loaded if present in the configuration.
When starting
+  // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace.
+  // plugins: {
+  //   /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux)
+  //
+  //   /// Plugin settings may contain field `__config__`
+  //   /// - If `__config__` is specified, its content is merged into plugin configuration
+  //   /// - Properties loaded from `__config__` file override existing properties
+  //   /// - If json objects in loaded file contains `__config__` properties, they are processed recursively
+  //   ///   This is used in the 'storage_manager' which supports subplugins, each with its own config
+  //   ///
+  //   /// See below example of plugin configuration using `__config__` property
+  //
+  //   /// Configure the REST API plugin
+  //   rest: {
+  //     /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic.
+  //     __required__: true, // defaults to false
+  //     /// load configuration from the file
+  //     __config__: "./plugins/zenoh-plugin-rest/config.json5",
+  //     /// http port to answer to rest requests
+  //     http_port: 8000,
+  //   },
+  //
+  //   /// Configure the storage manager plugin
+  //   storage_manager: {
+  //     /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load.
+  //     __path__: [
+  //       "./target/release/libzenoh_plugin_storage_manager.so",
+  //       "./target/release/libzenoh_plugin_storage_manager.dylib",
+  //     ],
+  //     /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup
+  //     backend_search_dirs: [],
+  //     /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing.
+ // volumes: { + // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb + // influxdb: { + // url: "https://myinfluxdb.example", + // /// Some plugins may need passwords in their configuration. + // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. + // /// any value held at the key "private" will not be shown in the adminspace. + // private: { + // username: "user1", + // password: "pw1", + // }, + // }, + // influxdb2: { + // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. + // backend: "influxdb", + // private: { + // username: "user2", + // password: "pw2", + // }, + // url: "https://localhost:8086", + // }, + // }, + // + // /// Configure the storages supported by the volumes + // storages: { + // demo: { + // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. + // key_expr: "demo/memory/**", + // /// Storages also need to know which volume will be used to actually store their key-value pairs. + // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. + // volume: "memory", + // }, + // demo2: { + // key_expr: "demo/memory2/**", + // volume: "memory", + // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. + // /// Metadata includes the set of wild card updates and deletions (tombstones). + // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. + // garbage_collection: { + // /// The garbage collection event will be periodic with this duration. + // /// The duration is specified in seconds. + // period: 30, + // /// Metadata older than this parameter will be garbage collected. + // /// The duration is specified in seconds. 
+ // lifespan: 86400, + // }, + // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. + // /// In the absence of this configuration, a normal storage is initialized + // /// Note: all the samples to be stored in replicas should be timestamped + // replica_config: { + // /// Specifying the parameters is optional, by default the values provided will be used. + // /// Time interval between different synchronization attempts in seconds + // publication_interval: 5, + // /// Expected propagation delay of the network in milliseconds + // propagation_delay: 200, + // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. + // /// Higher the frequency of updates, lower the delta should be chosen + // /// To be efficient, delta should be the time containing no more than 100,000 samples + // delta: 1000, + // } + // }, + // demo3: { + // key_expr: "demo/memory3/**", + // volume: "memory", + // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. + // /// If not configured, complete defaults to false. + // complete: "true", + // }, + // influx_demo: { + // key_expr: "demo/influxdb/**", + // /// This prefix will be stripped of the received keys when storing. 
+ // strip_prefix: "demo/influxdb", + // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: + // volume: { + // id: "influxdb", + // db: "example", + // }, + // }, + // influx_demo2: { + // key_expr: "demo/influxdb2/**", + // strip_prefix: "demo/influxdb2", + // volume: { + // id: "influxdb2", + // db: "example", + // }, + // }, + // }, + // }, + // }, + // /// Plugin configuration example using `__config__` property + // plugins: { + // rest: { + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // }, + // storage_manager: { + // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", + // } + // }, +} \ No newline at end of file