From 7295201a547c894e86bc0d412c936a90bc67bd5e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 22 Nov 2023 16:54:22 +0100 Subject: [PATCH 01/29] Matching status (#565) Co-authored-by: Pierre Avital Co-authored-by: Julien Enoch --- zenoh/src/net/routing/face.rs | 11 +- zenoh/src/net/routing/pubsub.rs | 43 ++- zenoh/src/net/routing/router.rs | 4 + zenoh/src/prelude.rs | 2 + zenoh/src/publication.rs | 571 ++++++++++++++++++++++++++++++++ zenoh/src/session.rs | 168 ++++++++++ zenoh/tests/matching.rs | 225 +++++++++++++ 7 files changed, 1005 insertions(+), 19 deletions(-) create mode 100644 zenoh/tests/matching.rs diff --git a/zenoh/src/net/routing/face.rs b/zenoh/src/net/routing/face.rs index d84f173d26..cb01f3ea6e 100644 --- a/zenoh/src/net/routing/face.rs +++ b/zenoh/src/net/routing/face.rs @@ -31,6 +31,7 @@ pub struct FaceState { pub(super) id: usize, pub(super) zid: ZenohId, pub(super) whatami: WhatAmI, + pub(super) local: bool, #[cfg(feature = "stats")] pub(super) stats: Option>, pub(super) primitives: Arc, @@ -47,10 +48,12 @@ pub struct FaceState { } impl FaceState { - pub(super) fn new( + #[allow(clippy::too_many_arguments)] + pub(crate) fn new( id: usize, zid: ZenohId, whatami: WhatAmI, + local: bool, #[cfg(feature = "stats")] stats: Option>, primitives: Arc, link_id: usize, @@ -60,6 +63,7 @@ impl FaceState { id, zid, whatami, + local, #[cfg(feature = "stats")] stats, primitives, @@ -76,6 +80,11 @@ impl FaceState { }) } + #[inline] + pub fn is_local(&self) -> bool { + self.local + } + #[inline] #[allow(clippy::trivially_copy_pass_by_ref)] pub(super) fn get_mapping( diff --git a/zenoh/src/net/routing/pubsub.rs b/zenoh/src/net/routing/pubsub.rs index 0cad9cac80..75a49f1137 100644 --- a/zenoh/src/net/routing/pubsub.rs +++ b/zenoh/src/net/routing/pubsub.rs @@ -1598,75 +1598,75 @@ macro_rules! 
treat_timestamp { } #[inline] -fn get_data_route( +pub(crate) fn get_data_route( tables: &Tables, - face: &FaceState, + whatami: WhatAmI, + link_id: usize, res: &Option>, expr: &mut RoutingExpr, routing_context: u64, ) -> Arc { match tables.whatami { - WhatAmI::Router => match face.whatami { + WhatAmI::Router => match whatami { WhatAmI::Router => { let routers_net = tables.routers_net.as_ref().unwrap(); - let local_context = routers_net.get_local_context(routing_context, face.link_id); + let local_context = routers_net.get_local_context(routing_context, link_id); res.as_ref() .and_then(|res| res.routers_data_route(local_context)) .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), face.whatami) + compute_data_route(tables, expr, Some(local_context), whatami) }) } WhatAmI::Peer => { if tables.full_net(WhatAmI::Peer) { let peers_net = tables.peers_net.as_ref().unwrap(); - let local_context = peers_net.get_local_context(routing_context, face.link_id); + let local_context = peers_net.get_local_context(routing_context, link_id); res.as_ref() .and_then(|res| res.peers_data_route(local_context)) .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), face.whatami) + compute_data_route(tables, expr, Some(local_context), whatami) }) } else { res.as_ref() .and_then(|res| res.peer_data_route()) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)) + .unwrap_or_else(|| compute_data_route(tables, expr, None, whatami)) } } _ => res .as_ref() .and_then(|res| res.routers_data_route(0)) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), + .unwrap_or_else(|| compute_data_route(tables, expr, None, whatami)), }, WhatAmI::Peer => { if tables.full_net(WhatAmI::Peer) { - match face.whatami { + match whatami { WhatAmI::Router | WhatAmI::Peer => { let peers_net = tables.peers_net.as_ref().unwrap(); - let local_context = - peers_net.get_local_context(routing_context, face.link_id); + let 
local_context = peers_net.get_local_context(routing_context, link_id); res.as_ref() .and_then(|res| res.peers_data_route(local_context)) .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), face.whatami) + compute_data_route(tables, expr, Some(local_context), whatami) }) } _ => res .as_ref() .and_then(|res| res.peers_data_route(0)) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), + .unwrap_or_else(|| compute_data_route(tables, expr, None, whatami)), } } else { res.as_ref() - .and_then(|res| match face.whatami { + .and_then(|res| match whatami { WhatAmI::Client => res.client_data_route(), _ => res.peer_data_route(), }) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)) + .unwrap_or_else(|| compute_data_route(tables, expr, None, whatami)) } } _ => res .as_ref() .and_then(|res| res.client_data_route()) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), + .unwrap_or_else(|| compute_data_route(tables, expr, None, whatami)), } } @@ -1784,7 +1784,14 @@ pub fn full_reentrant_route_data( == *tables.elect_router(expr.full_expr(), tables.get_router_links(face.zid)) { let res = Resource::get_resource(&prefix, expr.suffix); - let route = get_data_route(&tables, face, &res, &mut expr, routing_context); + let route = get_data_route( + &tables, + face.whatami, + face.link_id, + &res, + &mut expr, + routing_context, + ); let matching_pulls = get_matching_pulls(&tables, &res, &mut expr); if !(route.is_empty() && matching_pulls.is_empty()) { diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index dbf687ba79..60012a48eb 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -272,6 +272,7 @@ impl Tables { fid, zid, whatami, + false, #[cfg(feature = "stats")] Some(stats), primitives.clone(), @@ -304,6 +305,7 @@ impl Tables { fid, zid, whatami, + true, #[cfg(feature = "stats")] None, primitives.clone(), @@ -649,6 +651,7 @@ impl 
Router { fid, ZenohId::from_str("1").unwrap(), WhatAmI::Peer, + false, #[cfg(feature = "stats")] None, Arc::new(McastMux::new(transport.clone())), @@ -674,6 +677,7 @@ impl Router { fid, peer.zid, WhatAmI::Client, // Quick hack + false, #[cfg(feature = "stats")] Some(transport.get_stats().unwrap()), Arc::new(DummyPrimitives), diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index baf7439244..2f2e7650a0 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -51,6 +51,8 @@ pub(crate) mod common { pub use zenoh_protocol::core::SampleKind; pub use crate::publication::Priority; + #[zenoh_macros::unstable] + pub use crate::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; /// A [`Locator`] contains a choice of protocol, an address and port, as well as optional additional properties to work with. diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 3a69c19f8f..ac1d6bf55a 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -14,6 +14,10 @@ //! Publishing primitives. 
+#[zenoh_macros::unstable] +use crate::handlers::Callback; +#[zenoh_macros::unstable] +use crate::handlers::DefaultHandler; use crate::net::transport::Primitives; use crate::prelude::*; use crate::sample::DataInfo; @@ -199,6 +203,35 @@ use std::pin::Pin; use std::task::{Context, Poll}; use zenoh_result::Error; +#[zenoh_macros::unstable] +#[derive(Clone)] +pub enum PublisherRef<'a> { + Borrow(&'a Publisher<'a>), + Shared(std::sync::Arc>), +} + +#[zenoh_macros::unstable] +impl<'a> std::ops::Deref for PublisherRef<'a> { + type Target = Publisher<'a>; + + fn deref(&self) -> &Self::Target { + match self { + PublisherRef::Borrow(b) => b, + PublisherRef::Shared(s) => s, + } + } +} + +#[zenoh_macros::unstable] +impl std::fmt::Debug for PublisherRef<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PublisherRef::Borrow(b) => Publisher::fmt(b, f), + PublisherRef::Shared(s) => Publisher::fmt(s, f), + } + } +} + /// A publisher that allows to send data through a stream. /// /// Publishers are automatically undeclared when dropped. @@ -265,6 +298,41 @@ impl<'a> Publisher<'a> { self } + /// Consumes the given `Publisher`, returning a thread-safe reference-counting + /// pointer to it (`Arc`). This is equivalent to `Arc::new(Publisher)`. + /// + /// This is useful to share ownership of the `Publisher` between several threads + /// and tasks. It also alows to create [`MatchingListener`] with static + /// lifetime that can be moved to several threads and tasks. + /// + /// Note: the given zenoh `Publisher` will be undeclared when the last reference to + /// it is dropped. 
+ /// + /// # Examples + /// ```no_run + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// + /// async_std::task::spawn(async move { + /// while let Ok(matching_status) = matching_listener.recv_async().await { + /// if matching_status.matching_subscribers() { + /// println!("Publisher has matching subscribers."); + /// } else { + /// println!("Publisher has NO MORE matching subscribers."); + /// } + /// } + /// }).await; + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn into_arc(self) -> std::sync::Arc { + std::sync::Arc::new(self) + } + fn _write(&self, kind: SampleKind, value: Value) -> Publication { Publication { publisher: self, @@ -328,6 +396,64 @@ impl<'a> Publisher<'a> { self._write(SampleKind::Delete, Value::empty()) } + /// Return the [`MatchingStatus`] of the publisher. + /// + /// [`MatchingStatus::matching_subscribers`] will return true if there exist Subscribers + /// matching the Publisher's key expression and false otherwise. + /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let matching_subscribers: bool = publisher + /// .matching_status() + /// .res() + /// .await + /// .unwrap() + /// .matching_subscribers(); + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn matching_status(&self) -> impl Resolve> + '_ { + zenoh_core::ResolveFuture::new(async move { + self.session + .matching_status(self.key_expr(), self.destination) + }) + } + + /// Return a [`MatchingListener`] for this Publisher. 
+ /// + /// The [`MatchingListener`] that will send a notification each time the [`MatchingStatus`] of + /// the Publisher changes. + /// + /// # Examples + /// ```no_run + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// while let Ok(matching_status) = matching_listener.recv_async().await { + /// if matching_status.matching_subscribers() { + /// println!("Publisher has matching subscribers."); + /// } else { + /// println!("Publisher has NO MORE matching subscribers."); + /// } + /// } + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn matching_listener(&self) -> MatchingListenerBuilder<'_, DefaultHandler> { + MatchingListenerBuilder { + publisher: PublisherRef::Borrow(self), + handler: DefaultHandler, + } + } + /// Undeclares the [`Publisher`], informing the network that it needn't optimize publications for its key expression anymore. /// /// # Examples @@ -345,6 +471,91 @@ impl<'a> Publisher<'a> { } } +/// Functions to create zenoh entities with `'static` lifetime. +/// +/// This trait contains functions to create zenoh entities like +/// [`MatchingListener`] with a `'static` lifetime. +/// This is useful to move zenoh entities to several threads and tasks. +/// +/// This trait is implemented for `Arc`. 
+/// +/// # Examples +/// ```no_run +/// # async_std::task::block_on(async { +/// use zenoh::prelude::r#async::*; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); +/// let matching_listener = publisher.matching_listener().res().await.unwrap(); +/// +/// async_std::task::spawn(async move { +/// while let Ok(matching_status) = matching_listener.recv_async().await { +/// if matching_status.matching_subscribers() { +/// println!("Publisher has matching subscribers."); +/// } else { +/// println!("Publisher has NO MORE matching subscribers."); +/// } +/// } +/// }).await; +/// # }) +/// ``` +#[zenoh_macros::unstable] +pub trait PublisherDeclarations { + /// # Examples + /// ```no_run + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// + /// async_std::task::spawn(async move { + /// while let Ok(matching_status) = matching_listener.recv_async().await { + /// if matching_status.matching_subscribers() { + /// println!("Publisher has matching subscribers."); + /// } else { + /// println!("Publisher has NO MORE matching subscribers."); + /// } + /// } + /// }).await; + /// # }) + /// ``` + #[zenoh_macros::unstable] + fn matching_listener(&self) -> MatchingListenerBuilder<'static, DefaultHandler>; +} + +#[zenoh_macros::unstable] +impl PublisherDeclarations for std::sync::Arc> { + /// # Examples + /// ```no_run + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let publisher = 
session.declare_publisher("key/expression").res().await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// + /// async_std::task::spawn(async move { + /// while let Ok(matching_status) = matching_listener.recv_async().await { + /// if matching_status.matching_subscribers() { + /// println!("Publisher has matching subscribers."); + /// } else { + /// println!("Publisher has NO MORE matching subscribers."); + /// } + /// } + /// }).await; + /// # }) + /// ``` + #[zenoh_macros::unstable] + fn matching_listener(&self) -> MatchingListenerBuilder<'static, DefaultHandler> { + MatchingListenerBuilder { + publisher: PublisherRef::Shared(self.clone()), + handler: DefaultHandler, + } + } +} + impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { fn undeclare_inner(self, _: ()) -> PublisherUndeclaration<'a> { PublisherUndeclaration { publisher: self } @@ -700,6 +911,366 @@ impl From for zenoh_protocol::core::Priority { } } +/// A struct that indicates if there exist Subscribers matching the Publisher's key expression. +/// +/// # Examples +/// ``` +/// # async_std::task::block_on(async { +/// use zenoh::prelude::r#async::*; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); +/// let matching_status = publisher.matching_status().res().await.unwrap(); +/// # }) +/// ``` +#[zenoh_macros::unstable] +#[derive(Copy, Clone, Debug)] +pub struct MatchingStatus { + pub(crate) matching: bool, +} + +#[zenoh_macros::unstable] +impl MatchingStatus { + /// Return true if there exist Subscribers matching the Publisher's key expression. 
+ /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let matching_subscribers: bool = publisher + /// .matching_status() + /// .res() + /// .await + /// .unwrap() + /// .matching_subscribers(); + /// # }) + /// ``` + pub fn matching_subscribers(&self) -> bool { + self.matching + } +} + +/// A builder for initializing a [`MatchingListener`]. +#[zenoh_macros::unstable] +#[derive(Debug)] +pub struct MatchingListenerBuilder<'a, Handler> { + pub(crate) publisher: PublisherRef<'a>, + pub handler: Handler, +} + +#[zenoh_macros::unstable] +impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { + /// Receive the MatchingStatuses for this listener with a callback. + /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let matching_listener = publisher + /// .matching_listener() + /// .callback(|matching_status| { + /// if matching_status.matching_subscribers() { + /// println!("Publisher has matching subscribers."); + /// } else { + /// println!("Publisher has NO MORE matching subscribers."); + /// } + /// }) + /// .res() + /// .await + /// .unwrap(); + /// # }) + /// ``` + #[inline] + #[zenoh_macros::unstable] + pub fn callback(self, callback: Callback) -> MatchingListenerBuilder<'a, Callback> + where + Callback: Fn(MatchingStatus) + Send + Sync + 'static, + { + let MatchingListenerBuilder { + publisher, + handler: _, + } = self; + MatchingListenerBuilder { + publisher, + handler: callback, + } + } + + /// Receive the MatchingStatuses for this listener with a mutable callback. 
+ /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let mut n = 0; + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let matching_listener = publisher + /// .matching_listener() + /// .callback_mut(move |_matching_status| { n += 1; }) + /// .res() + /// .await + /// .unwrap(); + /// # }) + /// ``` + #[inline] + #[zenoh_macros::unstable] + pub fn callback_mut( + self, + callback: CallbackMut, + ) -> MatchingListenerBuilder<'a, impl Fn(MatchingStatus) + Send + Sync + 'static> + where + CallbackMut: FnMut(MatchingStatus) + Send + Sync + 'static, + { + self.callback(crate::handlers::locked(callback)) + } + + /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// + /// # Examples + /// ```no_run + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let matching_listener = publisher + /// .matching_listener() + /// .with(flume::bounded(32)) + /// .res() + /// .await + /// .unwrap(); + /// while let Ok(matching_status) = matching_listener.recv_async().await { + /// if matching_status.matching_subscribers() { + /// println!("Publisher has matching subscribers."); + /// } else { + /// println!("Publisher has NO MORE matching subscribers."); + /// } + /// } + /// # }) + /// ``` + #[inline] + #[zenoh_macros::unstable] + pub fn with(self, handler: Handler) -> MatchingListenerBuilder<'a, Handler> + where + Handler: crate::prelude::IntoCallbackReceiverPair<'static, MatchingStatus>, + { + let MatchingListenerBuilder { + publisher, + handler: _, + } = self; + MatchingListenerBuilder { publisher, handler } + } +} + +#[zenoh_macros::unstable] 
+impl<'a, Handler> Resolvable for MatchingListenerBuilder<'a, Handler> +where + Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, + Handler::Receiver: Send, +{ + type To = ZResult>; +} + +#[zenoh_macros::unstable] +impl<'a, Handler> SyncResolve for MatchingListenerBuilder<'a, Handler> +where + Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, + Handler::Receiver: Send, +{ + #[zenoh_macros::unstable] + fn res_sync(self) -> ::To { + let (callback, receiver) = self.handler.into_cb_receiver_pair(); + self.publisher + .session + .declare_matches_listener_inner(&self.publisher, callback) + .map(|listener_state| MatchingListener { + listener: MatchingListenerInner { + publisher: self.publisher, + state: listener_state, + alive: true, + }, + receiver, + }) + } +} + +#[zenoh_macros::unstable] +impl<'a, Handler> AsyncResolve for MatchingListenerBuilder<'a, Handler> +where + Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, + Handler::Receiver: Send, +{ + type Future = Ready; + + #[zenoh_macros::unstable] + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +#[zenoh_macros::unstable] +pub(crate) struct MatchingListenerState { + pub(crate) id: Id, + pub(crate) current: std::sync::Mutex, + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) destination: Locality, + pub(crate) callback: Callback<'static, MatchingStatus>, +} + +#[zenoh_macros::unstable] +impl std::fmt::Debug for MatchingListenerState { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.debug_struct("MatchingListener") + .field("id", &self.id) + .field("key_expr", &self.key_expr) + .finish() + } +} + +#[zenoh_macros::unstable] +pub(crate) struct MatchingListenerInner<'a> { + pub(crate) publisher: PublisherRef<'a>, + pub(crate) state: std::sync::Arc, + pub(crate) alive: bool, +} + +#[zenoh_macros::unstable] +impl<'a> MatchingListenerInner<'a> { + #[inline] + pub fn undeclare(self) -> 
MatchingListenerUndeclaration<'a> { + Undeclarable::undeclare_inner(self, ()) + } +} + +#[zenoh_macros::unstable] +impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListenerInner<'a> { + fn undeclare_inner(self, _: ()) -> MatchingListenerUndeclaration<'a> { + MatchingListenerUndeclaration { subscriber: self } + } +} + +/// A listener that sends notifications when the [`MatchingStatus`] of a +/// publisher changes. +/// +/// # Examples +/// ```no_run +/// # async_std::task::block_on(async { +/// use zenoh::prelude::r#async::*; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); +/// let matching_listener = publisher.matching_listener().res().await.unwrap(); +/// while let Ok(matching_status) = matching_listener.recv_async().await { +/// if matching_status.matching_subscribers() { +/// println!("Publisher has matching subscribers."); +/// } else { +/// println!("Publisher has NO MORE matching subscribers."); +/// } +/// } +/// # }) +/// ``` +#[zenoh_macros::unstable] +pub struct MatchingListener<'a, Receiver> { + pub(crate) listener: MatchingListenerInner<'a>, + pub receiver: Receiver, +} + +#[zenoh_macros::unstable] +impl<'a, Receiver> MatchingListener<'a, Receiver> { + /// Close a [`MatchingListener`]. + /// + /// MatchingListeners are automatically closed when dropped, but you may want to use this function to handle errors or + /// close the MatchingListener asynchronously. 
+ /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// matching_listener.undeclare().res().await.unwrap(); + /// # }) + /// ``` + #[inline] + pub fn undeclare(self) -> MatchingListenerUndeclaration<'a> { + self.listener.undeclare() + } +} + +#[zenoh_macros::unstable] +impl<'a, T> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListener<'a, T> { + fn undeclare_inner(self, _: ()) -> MatchingListenerUndeclaration<'a> { + Undeclarable::undeclare_inner(self.listener, ()) + } +} + +#[zenoh_macros::unstable] +impl std::ops::Deref for MatchingListener<'_, Receiver> { + type Target = Receiver; + + fn deref(&self) -> &Self::Target { + &self.receiver + } +} +#[zenoh_macros::unstable] +impl std::ops::DerefMut for MatchingListener<'_, Receiver> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.receiver + } +} + +#[zenoh_macros::unstable] +pub struct MatchingListenerUndeclaration<'a> { + subscriber: MatchingListenerInner<'a>, +} + +#[zenoh_macros::unstable] +impl Resolvable for MatchingListenerUndeclaration<'_> { + type To = ZResult<()>; +} + +#[zenoh_macros::unstable] +impl SyncResolve for MatchingListenerUndeclaration<'_> { + fn res_sync(mut self) -> ::To { + self.subscriber.alive = false; + self.subscriber + .publisher + .session + .undeclare_matches_listener_inner(self.subscriber.state.id) + } +} + +#[zenoh_macros::unstable] +impl AsyncResolve for MatchingListenerUndeclaration<'_> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +#[zenoh_macros::unstable] +impl Drop for MatchingListenerInner<'_> { + fn drop(&mut self) { + if self.alive { + let _ = self + .publisher + .session + 
.undeclare_matches_listener_inner(self.state.id); + } + } +} + mod tests { #[test] fn priority_from() { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 806041b6d5..e8314c9cc6 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -101,6 +101,8 @@ pub(crate) struct SessionState { pub(crate) queryables: HashMap>, #[cfg(feature = "unstable")] pub(crate) tokens: HashMap>, + #[cfg(feature = "unstable")] + pub(crate) matching_listeners: HashMap>, pub(crate) queries: HashMap, pub(crate) aggregated_subscribers: Vec, //pub(crate) aggregated_publishers: Vec, @@ -123,6 +125,8 @@ impl SessionState { queryables: HashMap::new(), #[cfg(feature = "unstable")] tokens: HashMap::new(), + #[cfg(feature = "unstable")] + matching_listeners: HashMap::new(), queries: HashMap::new(), aggregated_subscribers, //aggregated_publishers, @@ -1099,6 +1103,12 @@ impl Session { ext_info: *info, }), }); + + #[cfg(feature = "unstable")] + { + let state = zread!(self.state); + self.update_status_up(&state, &key_expr) + } } Ok(sub_state) @@ -1157,6 +1167,12 @@ impl Session { ext_wire_expr: WireExprType { wire_expr }, }), }); + + #[cfg(feature = "unstable")] + { + let state = zread!(self.state); + self.update_status_down(&state, &sub_state.key_expr) + } } } None => { @@ -1178,6 +1194,12 @@ impl Session { }, }), }); + + #[cfg(feature = "unstable")] + { + let state = zread!(self.state); + self.update_status_down(&state, &sub_state.key_expr) + } } } }; @@ -1423,6 +1445,148 @@ impl Session { } } + #[zenoh_macros::unstable] + pub(crate) fn declare_matches_listener_inner( + &self, + publisher: &Publisher, + callback: Callback<'static, MatchingStatus>, + ) -> ZResult> { + let mut state = zwrite!(self.state); + + let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + log::trace!("matches_listener({:?}) => {id}", publisher.key_expr); + let listener_state = Arc::new(MatchingListenerState { + id, + current: std::sync::Mutex::new(false), + destination: publisher.destination, + 
key_expr: publisher.key_expr.clone().into_owned(), + callback, + }); + state.matching_listeners.insert(id, listener_state.clone()); + drop(state); + match listener_state.current.lock() { + Ok(mut current) => { + if self + .matching_status(&publisher.key_expr, listener_state.destination) + .map(|s| s.matching_subscribers()) + .unwrap_or(true) + { + *current = true; + (listener_state.callback)(MatchingStatus { matching: true }); + } + } + Err(e) => log::error!("Error trying to acquire MathginListener lock: {}", e), + } + Ok(listener_state) + } + + #[zenoh_macros::unstable] + pub(crate) fn matching_status( + &self, + key_expr: &KeyExpr, + destination: Locality, + ) -> ZResult { + use crate::net::routing::router::RoutingExpr; + use zenoh_protocol::core::WhatAmI; + let tables = zread!(self.runtime.router.tables.tables); + let res = crate::net::routing::resource::Resource::get_resource( + &tables.root_res, + key_expr.as_str(), + ); + + let route = crate::net::routing::pubsub::get_data_route( + &tables, + WhatAmI::Client, + 0, + &res, + &mut RoutingExpr::new(&tables.root_res, key_expr.as_str()), + 0, + ); + let matching = match destination { + Locality::Any => !route.is_empty(), + Locality::Remote => route.values().any(|dir| !dir.0.is_local()), + Locality::SessionLocal => route.values().any(|dir| dir.0.is_local()), + }; + Ok(MatchingStatus { matching }) + } + + #[zenoh_macros::unstable] + pub(crate) fn update_status_up(&self, state: &SessionState, key_expr: &KeyExpr) { + for msub in state.matching_listeners.values() { + if key_expr.intersects(&msub.key_expr) { + // Cannot hold session lock when calling tables (matching_status()) + async_std::task::spawn({ + let session = self.clone(); + let msub = msub.clone(); + async move { + match msub.current.lock() { + Ok(mut current) => { + if !*current { + if let Ok(status) = + session.matching_status(&msub.key_expr, msub.destination) + { + if status.matching_subscribers() { + *current = true; + let callback = 
msub.callback.clone(); + (callback)(status) + } + } + } + } + Err(e) => { + log::error!("Error trying to acquire MathginListener lock: {}", e); + } + } + } + }); + } + } + } + + #[zenoh_macros::unstable] + pub(crate) fn update_status_down(&self, state: &SessionState, key_expr: &KeyExpr) { + for msub in state.matching_listeners.values() { + if key_expr.intersects(&msub.key_expr) { + // Cannot hold session lock when calling tables (matching_status()) + async_std::task::spawn({ + let session = self.clone(); + let msub = msub.clone(); + async move { + match msub.current.lock() { + Ok(mut current) => { + if *current { + if let Ok(status) = + session.matching_status(&msub.key_expr, msub.destination) + { + if !status.matching_subscribers() { + *current = false; + let callback = msub.callback.clone(); + (callback)(status) + } + } + } + } + Err(e) => { + log::error!("Error trying to acquire MathginListener lock: {}", e); + } + } + } + }); + } + } + } + + #[zenoh_macros::unstable] + pub(crate) fn undeclare_matches_listener_inner(&self, sid: usize) -> ZResult<()> { + let mut state = zwrite!(self.state); + if let Some(state) = state.matching_listeners.remove(&sid) { + trace!("undeclare_matches_listener_inner({:?})", state); + Ok(()) + } else { + Err(zerror!("Unable to find MatchingListener").into()) + } + } + pub(crate) fn handle_data( &self, local: bool, @@ -1939,6 +2103,8 @@ impl Primitives for Session { let state = zread!(self.state); match state.wireexpr_to_keyexpr(&m.wire_expr, false) { Ok(expr) => { + self.update_status_up(&state, &expr); + if expr .as_str() .starts_with(crate::liveliness::PREFIX_LIVELINESS) @@ -1960,6 +2126,8 @@ impl Primitives for Session { let state = zread!(self.state); match state.wireexpr_to_keyexpr(&m.ext_wire_expr.wire_expr, false) { Ok(expr) => { + self.update_status_down(&state, &expr); + if expr .as_str() .starts_with(crate::liveliness::PREFIX_LIVELINESS) diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs new file mode 100644 index 
0000000000..cf637ee625 --- /dev/null +++ b/zenoh/tests/matching.rs @@ -0,0 +1,225 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use async_std::prelude::FutureExt; +use async_std::task; +use std::time::Duration; +use zenoh::prelude::r#async::*; +use zenoh_core::zasync_executor_init; + +const TIMEOUT: Duration = Duration::from_secs(60); +const RECV_TIMEOUT: Duration = Duration::from_secs(1); + +macro_rules! ztimeout { + ($f:expr) => { + $f.timeout(TIMEOUT).await.unwrap() + }; +} + +#[cfg(feature = "unstable")] +#[test] +fn zenoh_matching_status_any() { + use flume::RecvTimeoutError; + + task::block_on(async { + zasync_executor_init!(); + + let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + + let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + + let publisher1 = ztimeout!(session1 + .declare_publisher("zenoh_matching_status_any_test") + .allowed_destination(Locality::Any) + .res_async()) + .unwrap(); + + let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + + let sub = ztimeout!(session1 + .declare_subscriber("zenoh_matching_status_any_test") + .res_async()) + .unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| 
s.matching_subscribers()) == Some(true)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(matching_status.matching_subscribers()); + + ztimeout!(sub.undeclare().res_async()).unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + + let sub = ztimeout!(session2 + .declare_subscriber("zenoh_matching_status_any_test") + .res_async()) + .unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(matching_status.matching_subscribers()); + + ztimeout!(sub.undeclare().res_async()).unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + }); +} + +#[cfg(feature = "unstable")] +#[test] +fn zenoh_matching_status_remote() { + use flume::RecvTimeoutError; + + task::block_on(async { + zasync_executor_init!(); + + let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + + let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + + let publisher1 = ztimeout!(session1 + .declare_publisher("zenoh_matching_status_remote_test") + .allowed_destination(Locality::Remote) + .res_async()) + .unwrap(); + + let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == 
Some(RecvTimeoutError::Timeout)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + + let sub = ztimeout!(session1 + .declare_subscriber("zenoh_matching_status_remote_test") + .res_async()) + .unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + + ztimeout!(sub.undeclare().res_async()).unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + + let sub = ztimeout!(session2 + .declare_subscriber("zenoh_matching_status_remote_test") + .res_async()) + .unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(matching_status.matching_subscribers()); + + ztimeout!(sub.undeclare().res_async()).unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + }); +} + +#[cfg(feature = "unstable")] +#[test] +fn zenoh_matching_status_local() { + use flume::RecvTimeoutError; + + task::block_on(async { + zasync_executor_init!(); + + let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + + let session2 = 
ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + + let publisher1 = ztimeout!(session1 + .declare_publisher("zenoh_matching_status_local_test") + .allowed_destination(Locality::SessionLocal) + .res_async()) + .unwrap(); + + let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + + let sub = ztimeout!(session1 + .declare_subscriber("zenoh_matching_status_local_test") + .res_async()) + .unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(matching_status.matching_subscribers()); + + ztimeout!(sub.undeclare().res_async()).unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + + let sub = ztimeout!(session2 + .declare_subscriber("zenoh_matching_status_local_test") + .res_async()) + .unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + + ztimeout!(sub.undeclare().res_async()).unwrap(); + + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + + let matching_status = 
ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + }); +} From 8954a81cb55002698b441b94fe777c7955e8bd89 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 22 Nov 2023 16:57:24 +0100 Subject: [PATCH 02/29] Fix bug causing duplicates in querying subscriber (#597) --- zenoh-ext/src/querying_subscriber.rs | 59 ++++++++++++++++++---------- 1 file changed, 39 insertions(+), 20 deletions(-) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 33f3bc6153..082d00e10f 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -677,7 +677,9 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { let key_expr = conf.key_expr?; - // declare subscriber at first + // register fetch handler + let handler = register_handler(state.clone(), callback.clone()); + // declare subscriber let subscriber = match conf.session.clone() { SessionRef::Borrow(session) => match conf.key_space.into() { crate::KeySpace::User => session @@ -707,15 +709,15 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { }, }; - let mut fetch_subscriber = FetchingSubscriber { + let fetch_subscriber = FetchingSubscriber { subscriber, callback, state, receiver, }; - // start fetch - fetch_subscriber.fetch(conf.fetch).res_sync()?; + // run fetch + run_fetch(conf.fetch, handler)?; Ok(fetch_subscriber) } @@ -882,22 +884,8 @@ where >::Error: Into, { fn res_sync(self) -> ::To { - zlock!(self.state).pending_fetches += 1; - // pending fetches will be decremented in RepliesHandler drop() - let handler = RepliesHandler { - state: self.state, - callback: self.callback, - }; - - log::debug!("Fetch"); - (self.fetch)(Box::new(move |s: TryIntoSample| match s.try_into() { - Ok(s) => { - let mut state = zlock!(handler.state); - log::trace!("Fetched sample received: push it to merge_queue"); - state.merge_queue.push(s); - } - Err(e) => log::debug!("Received error fetching data: 
{}", e.into()), - })) + let handler = register_handler(self.state, self.callback); + run_fetch(self.fetch, handler) } } @@ -913,3 +901,34 @@ where std::future::ready(self.res_sync()) } } + +fn register_handler( + state: Arc>, + callback: Arc, +) -> RepliesHandler { + zlock!(state).pending_fetches += 1; + // pending fetches will be decremented in RepliesHandler drop() + RepliesHandler { state, callback } +} + +fn run_fetch< + Fetch: FnOnce(Box) -> ZResult<()>, + TryIntoSample, +>( + fetch: Fetch, + handler: RepliesHandler, +) -> ZResult<()> +where + TryIntoSample: TryInto, + >::Error: Into, +{ + log::debug!("Fetch data for FetchingSubscriber"); + (fetch)(Box::new(move |s: TryIntoSample| match s.try_into() { + Ok(s) => { + let mut state = zlock!(handler.state); + log::trace!("Fetched sample received: push it to merge_queue"); + state.merge_queue.push(s); + } + Err(e) => log::debug!("Received error fetching data: {}", e.into()), + })) +} From 85c6f49ff26aeca6f4f60bd13eb64136181b1821 Mon Sep 17 00:00:00 2001 From: Julien Enoch Date: Thu, 23 Nov 2023 14:31:35 +0100 Subject: [PATCH 03/29] Add 'shared_memory' config in shm examples (#598) --- examples/README.md | 10 ++++++---- examples/examples/z_pub_shm.rs | 9 +++++++-- examples/examples/z_pub_shm_thr.rs | 7 ++++++- examples/examples/z_sub.rs | 7 ++++++- examples/examples/z_sub_thr.rs | 7 ++++++- 5 files changed, 31 insertions(+), 9 deletions(-) diff --git a/examples/README.md b/examples/README.md index c83b2735d2..fd25e4322d 100644 --- a/examples/README.md +++ b/examples/README.md @@ -139,13 +139,14 @@ z_storage -k demo/** ``` -### z_pub_shm & z_sub_shm +### z_pub_shm & z_sub A pub/sub example involving the shared-memory feature. + Note that on subscriber side, the same `z_sub` example than for non-shared-memory example is used. 
Typical Subscriber usage: ```bash - z_sub_shm + z_sub ``` Typical Publisher usage: @@ -188,16 +189,17 @@ z_ping 1024 ``` -### z_pub_shm_thr & z_sub_shm_thr +### z_pub_shm_thr & z_sub_thr Pub/Sub throughput test involving the shared-memory feature. This example allows performing throughput measurements between a publisher performing put operations with the shared-memory feature and a subscriber receiving notifications of those puts. + Note that on subscriber side, the same `z_sub_thr` example than for non-shared-memory example is used. Typical Subscriber usage: ```bash - z_sub_shm_thr + z_sub_thr ``` Typical Publisher usage: diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 0545fb23f8..335fc5dbe0 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -26,7 +26,12 @@ async fn main() -> Result<(), zenoh::Error> { // Initiate logging env_logger::init(); - let (config, path, value) = parse_args(); + let (mut config, path, value) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
+ config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); @@ -39,6 +44,7 @@ async fn main() -> Result<(), zenoh::Error> { let publisher = session.declare_publisher(&path).res().await.unwrap(); for idx in 0..(K * N as u32) { + sleep(Duration::from_secs(1)).await; let mut sbuf = match shm.alloc(1024) { Ok(buf) => buf, Err(_) => { @@ -88,7 +94,6 @@ async fn main() -> Result<(), zenoh::Error> { let defrag = shm.defragment(); println!("De-framented {defrag} bytes"); } - // sleep(Duration::from_millis(100)).await; // Dropping the SharedMemoryBuf means to free it. drop(sbuf); } diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 2da0cf102c..f72a7b46b7 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -21,7 +21,12 @@ use zenoh::shm::SharedMemoryManager; async fn main() { // initiate logging env_logger::init(); - let (config, sm_size, size) = parse_args(); + let (mut config, sm_size, size) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm_thr` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); let z = zenoh::open(config).res().await.unwrap(); let id = z.zid(); diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index dfd2669348..af23760e9d 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -25,7 +25,12 @@ async fn main() { // Initiate logging env_logger::init(); - let (config, key_expr) = parse_args(); + let (mut config, key_expr) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. 
To enable `z_pub_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 3db5a82203..c6c1846f56 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -70,7 +70,12 @@ fn main() { // initiate logging env_logger::init(); - let (config, m, n) = parse_args(); + let (mut config, m, n) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm_thr` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
+ config.transport.shared_memory.set_enabled(true).unwrap(); let session = zenoh::open(config).res().unwrap(); From 8ebf8b8d11abe8bba594258a7e14920f5c03c41f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 23 Nov 2023 15:31:38 +0100 Subject: [PATCH 04/29] FetchingSubscriber::fetch don't need mutable reference (#599) --- zenoh-ext/src/querying_subscriber.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 082d00e10f..1083c111c4 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -778,7 +778,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { Fetch: FnOnce(Box) -> ZResult<()> + Send + Sync, TryIntoSample, >( - &mut self, + &self, fetch: Fetch, ) -> impl Resolve> where From bbd12e201e05fca30696a3f8cc2dda70abe80bc4 Mon Sep 17 00:00:00 2001 From: Darius Maitia Date: Mon, 27 Nov 2023 15:53:30 +0100 Subject: [PATCH 05/29] Sifting plugin private fields on debug logs. (#592) --- commons/zenoh-config/src/lib.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 9c07bd16e7..53ac033506 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -623,9 +623,15 @@ impl Config { impl std::fmt::Display for Config { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut json = serde_json::to_value(self).unwrap(); - sift_privates(&mut json); - write!(f, "{json}") + serde_json::to_value(self) + .map(|mut json| { + sift_privates(&mut json); + write!(f, "{json}") + }) + .map_err(|e| { + _ = write!(f, "{e:?}"); + fmt::Error + })? 
} } @@ -1030,9 +1036,12 @@ impl<'a> serde::Deserialize<'a> for PluginsConfig { }) } } + impl std::fmt::Debug for PluginsConfig { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", &self.values) + let mut values: Value = self.values.clone(); + sift_privates(&mut values); + write!(f, "{:?}", values) } } From 8904d16b431de934cd2c5091069a0e75c3f7c15d Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 28 Nov 2023 09:49:58 +0100 Subject: [PATCH 06/29] Queryables/Subscribers/tokens declaration order (#588) (#589) --- zenoh/src/net/routing/router.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 60012a48eb..444730e24d 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -283,8 +283,8 @@ impl Tables { .clone(); log::debug!("New {}", newface); - pubsub_new_face(self, &mut newface); queries_new_face(self, &mut newface); + pubsub_new_face(self, &mut newface); Arc::downgrade(&newface) } @@ -316,8 +316,8 @@ impl Tables { .clone(); log::debug!("New {}", newface); - pubsub_new_face(self, &mut newface); queries_new_face(self, &mut newface); + pubsub_new_face(self, &mut newface); Arc::downgrade(&newface) } @@ -362,8 +362,8 @@ impl Tables { }; log::trace!("Compute routes"); - pubsub_tree_change(&mut tables, &new_childs, net_type); queries_tree_change(&mut tables, &new_childs, net_type); + pubsub_tree_change(&mut tables, &new_childs, net_type); log::trace!("Computations completed"); match net_type { @@ -740,12 +740,12 @@ impl TransportPeerEventHandler for LinkStateInterceptor { .link_states(list.link_states, zid) .removed_nodes { - pubsub_remove_node( + queries_remove_node( &mut tables, &removed_node.zid, WhatAmI::Router, ); - queries_remove_node( + pubsub_remove_node( &mut tables, &removed_node.zid, WhatAmI::Router, @@ -796,12 +796,12 @@ impl TransportPeerEventHandler for LinkStateInterceptor { ); } 
else { for (_, updated_node) in changes.updated_nodes { - pubsub_linkstate_change( + queries_linkstate_change( &mut tables, &updated_node.zid, &updated_node.links, ); - queries_linkstate_change( + pubsub_linkstate_change( &mut tables, &updated_node.zid, &updated_node.links, @@ -840,8 +840,8 @@ impl TransportPeerEventHandler for LinkStateInterceptor { for (_, removed_node) in tables.routers_net.as_mut().unwrap().remove_link(&zid) { - pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); + pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); } if tables.full_net(WhatAmI::Peer) { @@ -860,8 +860,8 @@ impl TransportPeerEventHandler for LinkStateInterceptor { for (_, removed_node) in tables.peers_net.as_mut().unwrap().remove_link(&zid) { - pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); + pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); } if tables.whatami == WhatAmI::Router { From fa02cf0640a9a3c26ed00bc0c052ba3cc5d42060 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 28 Nov 2023 15:47:46 +0100 Subject: [PATCH 07/29] Impl Display for TransportMessage and NetworkMessage (#603) --- commons/zenoh-protocol/src/network/mod.rs | 10 ++++++- commons/zenoh-protocol/src/transport/mod.rs | 30 +++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 44464c4b13..1be58db5cc 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -141,7 +141,15 @@ impl NetworkMessage { impl fmt::Display for NetworkMessage { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(self, f) + use NetworkBody::*; + match &self.body { + OAM(_) => write!(f, "OAM"), + Push(_) => write!(f, "Push"), + Request(_) => 
write!(f, "Request"), + Response(_) => write!(f, "Response"), + ResponseFinal(_) => write!(f, "ResponseFinal"), + Declare(_) => write!(f, "Declare"), + } } } diff --git a/commons/zenoh-protocol/src/transport/mod.rs b/commons/zenoh-protocol/src/transport/mod.rs index 301fde3343..cdf994e5dd 100644 --- a/commons/zenoh-protocol/src/transport/mod.rs +++ b/commons/zenoh-protocol/src/transport/mod.rs @@ -20,6 +20,8 @@ pub mod keepalive; pub mod oam; pub mod open; +use core::fmt; + pub use close::Close; pub use fragment::{Fragment, FragmentHeader}; pub use frame::{Frame, FrameHeader}; @@ -207,6 +209,34 @@ impl From for TransportMessage { } } +impl fmt::Display for TransportMessage { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use TransportBody::*; + match &self.body { + OAM(_) => write!(f, "OAM"), + InitSyn(_) => write!(f, "InitSyn"), + InitAck(_) => write!(f, "InitAck"), + OpenSyn(_) => write!(f, "OpenSyn"), + OpenAck(_) => write!(f, "OpenAck"), + Close(_) => write!(f, "Close"), + KeepAlive(_) => write!(f, "KeepAlive"), + Frame(m) => { + write!(f, "Frame[")?; + let mut netmsgs = m.payload.iter().peekable(); + while let Some(m) = netmsgs.next() { + m.fmt(f)?; + if netmsgs.peek().is_some() { + write!(f, ", ")?; + } + } + write!(f, "]") + } + Fragment(_) => write!(f, "Fragment"), + Join(_) => write!(f, "Join"), + } + } +} + pub mod ext { use crate::{common::ZExtZ64, core::Priority}; From 7fa7d6c631e7f98957291d29193c6d437a66d72e Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 29 Nov 2023 09:27:19 +0100 Subject: [PATCH 08/29] featue leak fixed, test added (#604) --- .github/workflows/ci.yml | 7 +++++ Cargo.lock | 2 ++ Cargo.toml | 2 +- commons/zenoh-util/Cargo.toml | 1 + commons/zenoh-util/src/lib.rs | 12 ++++++++ io/zenoh-link/src/lib.rs | 27 ++++++++++-------- zenoh-ext/Cargo.toml | 2 +- zenoh/Cargo.toml | 1 + zenoh/src/lib.rs | 23 ++++++++++++++++ zenohd/Cargo.toml | 1 + zenohd/src/main.rs | 52 +++++++++++++++++++++++++++++++++++ 11 
files changed, 116 insertions(+), 14 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5e24616f6e..60fb69a576 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -112,6 +112,13 @@ jobs: CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse ASYNC_STD_THREAD_COUNT: 4 + - name: Check for feature leaks + if: ${{ matrix.os == 'ubuntu-latest' }} + uses: actions-rs/cargo@v1 + with: + command: nextest + args: run -p zenohd --no-default-features + - name: Run doctests uses: actions-rs/cargo@v1 with: diff --git a/Cargo.lock b/Cargo.lock index b01f2d25e9..1cc18137ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4483,6 +4483,7 @@ dependencies = [ "async-std", "async-trait", "base64 0.21.4", + "const_format", "env_logger", "event-listener", "flume", @@ -5028,6 +5029,7 @@ dependencies = [ "async-std", "async-trait", "clap 3.2.25", + "const_format", "flume", "futures", "hex", diff --git a/Cargo.toml b/Cargo.toml index a707ab390c..55797ab0d3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -185,7 +185,7 @@ zenoh-link-unixpipe = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-lin zenoh-link-serial = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-serial" } zenoh-link = { version = "0.11.0-dev", path = "io/zenoh-link" } zenoh-link-commons = { version = "0.11.0-dev", path = "io/zenoh-link-commons" } -zenoh = { version = "0.11.0-dev", path = "zenoh" } +zenoh = { version = "0.11.0-dev", path = "zenoh", default-features = false } [profile.dev] debug = true diff --git a/commons/zenoh-util/Cargo.toml b/commons/zenoh-util/Cargo.toml index 8225ab534e..803645fb8a 100644 --- a/commons/zenoh-util/Cargo.toml +++ b/commons/zenoh-util/Cargo.toml @@ -39,6 +39,7 @@ default = ["std"] async-std = { workspace = true, features = ["default", "unstable"] } async-trait = { workspace = true } clap = { workspace = true } +const_format = { workspace = true } flume = { workspace = true } futures = { workspace = true } hex = { workspace = true, features 
= ["default"] } diff --git a/commons/zenoh-util/src/lib.rs b/commons/zenoh-util/src/lib.rs index 9739074bb3..7e02096ebb 100644 --- a/commons/zenoh-util/src/lib.rs +++ b/commons/zenoh-util/src/lib.rs @@ -22,6 +22,18 @@ extern crate alloc; #[cfg_attr(feature = "std", macro_use)] extern crate lazy_static; +#[macro_export] +macro_rules! concat_enabled_features { + (prefix = $prefix:literal, features = [$($feature:literal),*]) => { + { + use const_format::concatcp; + concatcp!("" $(, + if cfg!(feature = $feature) { concatcp!(" ", concatcp!($prefix, "/", $feature)) } else { "" } + )*) + } + }; +} + #[deprecated = "This module is now a separate crate. Use the `zenoh_core` crate directly for shorter compile-times. You may disable this re-export by disabling `zenoh-util`'s default features."] pub use zenoh_core as core; diff --git a/io/zenoh-link/src/lib.rs b/io/zenoh-link/src/lib.rs index 9d49853501..18a464cb93 100644 --- a/io/zenoh-link/src/lib.rs +++ b/io/zenoh-link/src/lib.rs @@ -18,7 +18,6 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) use std::collections::HashMap; -use std::sync::Arc; use zenoh_config::Config; use zenoh_result::{bail, ZResult}; @@ -206,23 +205,27 @@ impl LinkManagerBuilderUnicast { pub fn make(_manager: NewLinkChannelSender, protocol: &str) -> ZResult { match protocol { #[cfg(feature = "transport_tcp")] - TCP_LOCATOR_PREFIX => Ok(Arc::new(LinkManagerUnicastTcp::new(_manager))), + TCP_LOCATOR_PREFIX => Ok(std::sync::Arc::new(LinkManagerUnicastTcp::new(_manager))), #[cfg(feature = "transport_udp")] - UDP_LOCATOR_PREFIX => Ok(Arc::new(LinkManagerUnicastUdp::new(_manager))), + UDP_LOCATOR_PREFIX => Ok(std::sync::Arc::new(LinkManagerUnicastUdp::new(_manager))), #[cfg(feature = "transport_tls")] - TLS_LOCATOR_PREFIX => Ok(Arc::new(LinkManagerUnicastTls::new(_manager))), + TLS_LOCATOR_PREFIX => Ok(std::sync::Arc::new(LinkManagerUnicastTls::new(_manager))), #[cfg(feature = "transport_quic")] - QUIC_LOCATOR_PREFIX => Ok(Arc::new(LinkManagerUnicastQuic::new(_manager))), + QUIC_LOCATOR_PREFIX => Ok(std::sync::Arc::new(LinkManagerUnicastQuic::new(_manager))), #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] - UNIXSOCKSTREAM_LOCATOR_PREFIX => { - Ok(Arc::new(LinkManagerUnicastUnixSocketStream::new(_manager))) - } + UNIXSOCKSTREAM_LOCATOR_PREFIX => Ok(std::sync::Arc::new( + LinkManagerUnicastUnixSocketStream::new(_manager), + )), #[cfg(feature = "transport_ws")] - WS_LOCATOR_PREFIX => Ok(Arc::new(LinkManagerUnicastWs::new(_manager))), + WS_LOCATOR_PREFIX => Ok(std::sync::Arc::new(LinkManagerUnicastWs::new(_manager))), #[cfg(feature = "transport_serial")] - SERIAL_LOCATOR_PREFIX => Ok(Arc::new(LinkManagerUnicastSerial::new(_manager))), + SERIAL_LOCATOR_PREFIX => { + Ok(std::sync::Arc::new(LinkManagerUnicastSerial::new(_manager))) + } #[cfg(feature = "transport_unixpipe")] - UNIXPIPE_LOCATOR_PREFIX => Ok(Arc::new(LinkManagerUnicastPipe::new(_manager))), + UNIXPIPE_LOCATOR_PREFIX => { + 
Ok(std::sync::Arc::new(LinkManagerUnicastPipe::new(_manager))) + } _ => bail!("Unicast not supported for {} protocol", protocol), } } @@ -238,7 +241,7 @@ impl LinkManagerBuilderMulticast { pub fn make(protocol: &str) -> ZResult { match protocol { #[cfg(feature = "transport_udp")] - UDP_LOCATOR_PREFIX => Ok(Arc::new(LinkManagerMulticastUdp)), + UDP_LOCATOR_PREFIX => Ok(std::sync::Arc::new(LinkManagerMulticastUdp)), _ => bail!("Multicast not supported for {} protocol", protocol), } } diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 26164040db..84c6baf83c 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -38,7 +38,7 @@ flume = { workspace = true } futures = { workspace = true } log = { workspace = true } serde = { workspace = true, features = ["default"] } -zenoh = { workspace = true, features = ["unstable"] } +zenoh = { workspace = true, features = ["unstable"], default-features = false } zenoh-core = { workspace = true } zenoh-macros = { workspace = true } zenoh-result = { workspace = true } diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 1fa34eab32..053bb7e285 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -65,6 +65,7 @@ async-global-executor = { workspace = true } async-std = { workspace = true, features = ["attributes"] } async-trait = { workspace = true } base64 = { workspace = true } +const_format = { workspace = true } env_logger = { workspace = true } event-listener = { workspace = true } flume = { workspace = true } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index d77a205d50..5c3b938e5b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -90,6 +90,7 @@ use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; pub use zenoh_macros::{kedefine, keformat, kewrite}; use zenoh_protocol::core::WhatAmIMatcher; use zenoh_result::{zerror, ZResult}; +use zenoh_util::concat_enabled_features; /// A zenoh error. 
pub use zenoh_result::Error; @@ -98,6 +99,28 @@ pub use zenoh_result::ZResult as Result; const GIT_VERSION: &str = git_version!(prefix = "v", cargo_prefix = "v"); +pub const FEATURES: &str = concat_enabled_features!( + prefix = "zenoh", + features = [ + "auth_pubkey", + "auth_usrpwd", + "complete_n", + "shared-memory", + "stats", + "transport_multilink", + "transport_quic", + "transport_serial", + "transport_unixpipe", + "transport_tcp", + "transport_tls", + "transport_udp", + "transport_unixsock-stream", + "transport_ws", + "unstable", + "default" + ] +); + mod admin; #[macro_use] mod session; diff --git a/zenohd/Cargo.toml b/zenohd/Cargo.toml index e589d1a888..754198dc73 100644 --- a/zenohd/Cargo.toml +++ b/zenohd/Cargo.toml @@ -26,6 +26,7 @@ readme = "README.md" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] +default = ["zenoh/default"] shared-memory = ["zenoh/shared-memory"] [dependencies] diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 2b23604c83..56c56bc538 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -307,3 +307,55 @@ fn config_from_args(args: &ArgMatches) -> Config { log::debug!("Config: {:?}", &config); config } + +#[test] +#[cfg(feature = "default")] +fn test_default_features() { + assert_eq!( + zenoh::FEATURES, + concat!( + " zenoh/auth_pubkey", + " zenoh/auth_usrpwd", + // " zenoh/complete_n", + // " zenoh/shared-memory", + // " zenoh/stats", + " zenoh/transport_multilink", + " zenoh/transport_quic", + // " zenoh/transport_serial", + // " zenoh/transport_unixpipe", + " zenoh/transport_tcp", + " zenoh/transport_tls", + " zenoh/transport_udp", + " zenoh/transport_unixsock-stream", + " zenoh/transport_ws", + " zenoh/unstable", + " zenoh/default", + ) + ); +} + +#[test] +#[cfg(not(feature = "default"))] +fn test_no_default_features() { + assert_eq!( + zenoh::FEATURES, + concat!( + // " zenoh/auth_pubkey", + // " zenoh/auth_usrpwd", + // " zenoh/complete_n", + // " 
zenoh/shared-memory", + // " zenoh/stats", + // " zenoh/transport_multilink", + // " zenoh/transport_quic", + // " zenoh/transport_serial", + // " zenoh/transport_unixpipe", + // " zenoh/transport_tcp", + // " zenoh/transport_tls", + // " zenoh/transport_udp", + // " zenoh/transport_unixsock-stream", + // " zenoh/transport_ws", + " zenoh/unstable", + // " zenoh/default", + ) + ); +} From 1dc31d47f86396f6817ba34125446d001960de51 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 30 Nov 2023 15:29:06 +0100 Subject: [PATCH 09/29] Hop-to-hop compression (#585) --- Cargo.lock | 1 + DEFAULT_CONFIG.json5 | 27 +- commons/zenoh-buffers/src/bbuf.rs | 65 +- commons/zenoh-buffers/src/lib.rs | 74 +-- commons/zenoh-buffers/src/slice.rs | 33 +- commons/zenoh-buffers/src/vec.rs | 31 +- commons/zenoh-buffers/src/zbuf.rs | 26 +- commons/zenoh-buffers/src/zslice.rs | 34 +- commons/zenoh-codec/src/core/zbuf.rs | 3 +- commons/zenoh-codec/src/transport/batch.rs | 255 ++++++++ commons/zenoh-codec/src/transport/init.rs | 127 ++-- commons/zenoh-codec/src/transport/mod.rs | 1 + commons/zenoh-codec/src/transport/open.rs | 105 +++- commons/zenoh-config/src/defaults.rs | 27 +- commons/zenoh-config/src/lib.rs | 40 +- commons/zenoh-protocol/src/transport/frame.rs | 2 +- commons/zenoh-protocol/src/transport/init.rs | 10 + commons/zenoh-protocol/src/transport/open.rs | 10 + io/zenoh-link-commons/Cargo.toml | 4 + io/zenoh-link-commons/src/unicast.rs | 83 +-- io/zenoh-transport/src/common/batch.rs | 564 +++++++++++------- .../src/common/defragmentation.rs | 2 +- io/zenoh-transport/src/common/mod.rs | 2 +- io/zenoh-transport/src/common/pipeline.rs | 55 +- io/zenoh-transport/src/lib.rs | 12 +- io/zenoh-transport/src/manager.rs | 4 +- .../src/multicast/establishment.rs | 15 +- io/zenoh-transport/src/multicast/link.rs | 345 +++++++++-- io/zenoh-transport/src/multicast/manager.rs | 22 +- io/zenoh-transport/src/multicast/mod.rs | 8 +- io/zenoh-transport/src/multicast/rx.rs | 43 +- 
io/zenoh-transport/src/multicast/transport.rs | 24 +- .../src/unicast/establishment/accept.rs | 304 ++++++---- .../src/unicast/establishment/cookie.rs | 10 + .../src/unicast/establishment/ext/auth/mod.rs | 150 +---- .../unicast/establishment/ext/auth/pubkey.rs | 20 +- .../unicast/establishment/ext/auth/usrpwd.rs | 20 +- .../unicast/establishment/ext/compression.rs | 196 ++++++ .../unicast/establishment/ext/lowlatency.rs | 20 +- .../src/unicast/establishment/ext/mod.rs | 2 + .../unicast/establishment/ext/multilink.rs | 20 +- .../src/unicast/establishment/ext/qos.rs | 20 +- .../src/unicast/establishment/ext/shm.rs | 20 +- .../src/unicast/establishment/mod.rs | 50 +- .../src/unicast/establishment/open.rs | 326 ++++++---- io/zenoh-transport/src/unicast/link.rs | 275 +++++++++ .../src/unicast/lowlatency/link.rs | 65 +- .../src/unicast/lowlatency/transport.rs | 45 +- io/zenoh-transport/src/unicast/manager.rs | 56 +- io/zenoh-transport/src/unicast/mod.rs | 3 +- .../src/unicast/test_helpers.rs | 4 +- .../src/unicast/transport_unicast_inner.rs | 21 +- .../src/unicast/universal/link.rs | 535 ++--------------- .../src/unicast/universal/rx.rs | 32 +- .../src/unicast/universal/transport.rs | 67 ++- .../src/unicast/universal/tx.rs | 2 +- io/zenoh-transport/tests/endpoints.rs | 4 +- .../tests/multicast_compression.rs | 376 ++++++++++++ .../tests/multicast_transport.rs | 8 +- .../tests/transport_whitelist.rs | 4 +- .../tests/unicast_authenticator.rs | 27 +- .../tests/unicast_compression.rs | 553 +++++++++++++++++ .../tests/unicast_concurrent.rs | 4 +- .../tests/unicast_intermittent.rs | 7 +- io/zenoh-transport/tests/unicast_multilink.rs | 6 +- io/zenoh-transport/tests/unicast_openclose.rs | 7 +- .../tests/unicast_priorities.rs | 8 +- io/zenoh-transport/tests/unicast_shm.rs | 6 +- .../tests/unicast_simultaneous.rs | 4 +- io/zenoh-transport/tests/unicast_transport.rs | 31 +- zenoh/Cargo.toml | 2 + zenoh/src/admin.rs | 4 +- zenoh/src/key_expr.rs | 2 +- zenoh/src/net/routing/face.rs | 
2 +- zenoh/src/net/routing/network.rs | 2 +- zenoh/src/net/routing/router.rs | 6 +- zenoh/src/net/runtime/adminspace.rs | 4 +- zenoh/src/net/runtime/mod.rs | 5 +- zenoh/src/net/tests/tables.rs | 2 +- zenoh/src/prelude.rs | 6 +- zenoh/src/publication.rs | 3 +- zenoh/src/queryable.rs | 2 +- zenoh/src/session.rs | 2 +- zenoh/tests/liveliness.rs | 15 +- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 3 + 86 files changed, 3641 insertions(+), 1783 deletions(-) create mode 100644 commons/zenoh-codec/src/transport/batch.rs create mode 100644 io/zenoh-transport/src/unicast/establishment/ext/compression.rs create mode 100644 io/zenoh-transport/src/unicast/link.rs create mode 100644 io/zenoh-transport/tests/multicast_compression.rs create mode 100644 io/zenoh-transport/tests/unicast_compression.rs diff --git a/Cargo.lock b/Cargo.lock index 1cc18137ad..01378d3015 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4671,6 +4671,7 @@ dependencies = [ "async-std", "async-trait", "flume", + "lz4_flex", "serde", "typenum", "zenoh-buffers", diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index dae3ebc9aa..4a0179fb71 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -133,9 +133,20 @@ /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to /// enable 'lowlatency' you need to explicitly disable 'qos'. lowlatency: false, + qos: { + enabled: true, + }, + compression: { + enabled: false, + }, }, - qos: { - enabled: true, + multicast: { + qos: { + enabled: true, + }, + compression: { + enabled: false, + }, }, link: { /// An optional whitelist of protocols to be used for accepting and opening sessions. @@ -183,6 +194,9 @@ /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. /// Higher values lead to a more aggressive batching but it will introduce additional latency. 
backoff: 100, + // Number of threads dedicated to transmission + // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // threads: 4, }, }, /// Configure the zenoh RX parameters of a link @@ -220,15 +234,6 @@ // ca to verify that the server at baz.com is actually baz.com, let this be true (default). server_name_verification: null, }, - - /// **Experimental** compression feature. - /// Will compress the batches hop to hop (as opposed to end to end). - /// The features "transport_compression" and "unstable" need to be enabled to handle - /// compression on the integrality of the network. - compression: { - /// When 'enabled' is true, batches will be sent compressed. - enabled: false, - }, }, /// Shared memory configuration shared_memory: { diff --git a/commons/zenoh-buffers/src/bbuf.rs b/commons/zenoh-buffers/src/bbuf.rs index bdb9e9a056..2f5c24d6a0 100644 --- a/commons/zenoh-buffers/src/bbuf.rs +++ b/commons/zenoh-buffers/src/bbuf.rs @@ -12,14 +12,16 @@ // ZettaScale Zenoh Team, // use crate::{ + buffer::{Buffer, SplitBuffer}, reader::HasReader, vec, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, + ZSlice, }; -use alloc::boxed::Box; -use core::num::NonZeroUsize; +use alloc::{boxed::Box, sync::Arc}; +use core::{fmt, num::NonZeroUsize, option}; -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq)] pub struct BBuf { buffer: Box<[u8]>, len: usize, @@ -39,16 +41,6 @@ impl BBuf { self.buffer.len() } - #[must_use] - pub const fn len(&self) -> usize { - self.len - } - - #[must_use] - pub const fn is_empty(&self) -> bool { - self.len == 0 - } - #[must_use] pub fn as_slice(&self) -> &[u8] { // SAFETY: self.len is ensured by the writer to be smaller than buffer length. 
@@ -70,6 +62,40 @@ impl BBuf { } } +impl fmt::Debug for BBuf { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:02x?}", self.as_slice()) + } +} + +// Buffer +impl Buffer for BBuf { + fn len(&self) -> usize { + self.len + } +} + +impl Buffer for &BBuf { + fn len(&self) -> usize { + self.len + } +} + +impl Buffer for &mut BBuf { + fn len(&self) -> usize { + self.len + } +} + +// SplitBuffer +impl SplitBuffer for BBuf { + type Slices<'a> = option::IntoIter<&'a [u8]>; + + fn slices(&self) -> Self::Slices<'_> { + Some(self.as_slice()).into_iter() + } +} + // Writer impl HasWriter for &mut BBuf { type Writer = Self; @@ -152,6 +178,19 @@ impl<'a> HasReader for &'a BBuf { } } +// From impls +impl From for ZSlice { + fn from(value: BBuf) -> Self { + ZSlice { + buf: Arc::new(value.buffer), + start: 0, + end: value.len, + #[cfg(feature = "shared-memory")] + kind: crate::ZSliceKind::Raw, + } + } +} + #[cfg(feature = "test")] impl BBuf { pub fn rand(len: usize) -> Self { diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index 718f486def..4dee599ea7 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -28,7 +28,6 @@ pub mod vec; mod zbuf; mod zslice; -use alloc::{borrow::Cow, vec::Vec}; pub use bbuf::*; pub use zbuf::*; pub use zslice::*; @@ -73,6 +72,45 @@ macro_rules! unsafe_slice_mut { }; } +pub mod buffer { + use alloc::{borrow::Cow, vec::Vec}; + + pub trait Buffer { + /// Returns the number of bytes in the buffer. + fn len(&self) -> usize; + + /// Returns `true` if the buffer has a length of 0. + fn is_empty(&self) -> bool { + self.len() == 0 + } + } + + /// A trait for buffers that can be composed of multiple non contiguous slices. + pub trait SplitBuffer: Buffer { + type Slices<'a>: Iterator + ExactSizeIterator + where + Self: 'a; + + /// Gets all the slices of this buffer. + fn slices(&self) -> Self::Slices<'_>; + + /// Returns all the bytes of this buffer in a conitguous slice. 
+ /// This may require allocation and copy if the original buffer + /// is not contiguous. + fn contiguous(&self) -> Cow<'_, [u8]> { + let mut slices = self.slices(); + match slices.len() { + 0 => Cow::Borrowed(b""), + 1 => Cow::Borrowed(slices.next().unwrap()), + _ => Cow::Owned(slices.fold(Vec::new(), |mut acc, it| { + acc.extend(it); + acc + })), + } + } + } +} + pub mod writer { use crate::ZSlice; use core::num::NonZeroUsize; @@ -100,6 +138,7 @@ pub mod writer { where F: FnOnce(&mut [u8]) -> usize; } + pub trait BacktrackableWriter: Writer { type Mark; @@ -175,36 +214,3 @@ pub mod reader { fn reader(self) -> Self::Reader; } } - -/// A trait for buffers that can be composed of multiple non contiguous slices. -pub trait SplitBuffer<'a> { - type Slices: Iterator + ExactSizeIterator; - - /// Gets all the slices of this buffer. - fn slices(&'a self) -> Self::Slices; - - /// Returns `true` if the buffer has a length of 0. - fn is_empty(&'a self) -> bool { - self.slices().all(<[u8]>::is_empty) - } - - /// Returns the number of bytes in the buffer. - fn len(&'a self) -> usize { - self.slices().fold(0, |acc, it| acc + it.len()) - } - - /// Returns all the bytes of this buffer in a conitguous slice. - /// This may require allocation and copy if the original buffer - /// is not contiguous. 
- fn contiguous(&'a self) -> Cow<'a, [u8]> { - let mut slices = self.slices(); - match slices.len() { - 0 => Cow::Borrowed(b""), - 1 => Cow::Borrowed(slices.next().unwrap()), - _ => Cow::Owned(slices.fold(Vec::new(), |mut acc, it| { - acc.extend(it); - acc - })), - } - } -} diff --git a/commons/zenoh-buffers/src/slice.rs b/commons/zenoh-buffers/src/slice.rs index 6056bb9606..a652c6930e 100644 --- a/commons/zenoh-buffers/src/slice.rs +++ b/commons/zenoh-buffers/src/slice.rs @@ -12,11 +12,42 @@ // ZettaScale Zenoh Team, // use crate::{ + buffer::{Buffer, SplitBuffer}, reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, ZSlice, }; -use core::{marker::PhantomData, mem, num::NonZeroUsize, slice}; +use core::{ + marker::PhantomData, + mem, + num::NonZeroUsize, + option, + slice::{self}, +}; + +// Buffer +impl Buffer for &[u8] { + #[inline(always)] + fn len(&self) -> usize { + <[u8]>::len(self) + } +} + +impl Buffer for &mut [u8] { + #[inline(always)] + fn len(&self) -> usize { + <[u8]>::len(self) + } +} + +// SplitBuffer +impl<'b> SplitBuffer for &'b [u8] { + type Slices<'a> = option::IntoIter<&'a [u8]> where 'b: 'a; + + fn slices(&self) -> Self::Slices<'_> { + Some(*self).into_iter() + } +} // Writer impl HasWriter for &mut [u8] { diff --git a/commons/zenoh-buffers/src/vec.rs b/commons/zenoh-buffers/src/vec.rs index cbe1ee5801..cf5a3ad9b4 100644 --- a/commons/zenoh-buffers/src/vec.rs +++ b/commons/zenoh-buffers/src/vec.rs @@ -12,11 +12,12 @@ // ZettaScale Zenoh Team, // use crate::{ + buffer::{Buffer, SplitBuffer}, reader::HasReader, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, }; use alloc::vec::Vec; -use core::{mem, num::NonZeroUsize}; +use core::{mem, num::NonZeroUsize, option}; /// Allocate a vector with a given capacity and sets the length to that capacity. 
#[must_use] @@ -30,6 +31,34 @@ pub fn uninit(capacity: usize) -> Vec { vbuf } +// Buffer +impl Buffer for Vec { + fn len(&self) -> usize { + Vec::len(self) + } +} + +impl Buffer for &Vec { + fn len(&self) -> usize { + Vec::len(self) + } +} + +impl Buffer for &mut Vec { + fn len(&self) -> usize { + Vec::len(self) + } +} + +// SplitBuffer +impl SplitBuffer for Vec { + type Slices<'a> = option::IntoIter<&'a [u8]>; + + fn slices(&self) -> Self::Slices<'_> { + Some(self.as_slice()).into_iter() + } +} + // Writer impl<'a> HasWriter for &'a mut Vec { type Writer = Self; diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 3f941f48e3..db62e26f54 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -14,9 +14,10 @@ #[cfg(feature = "shared-memory")] use crate::ZSliceKind; use crate::{ + buffer::{Buffer, SplitBuffer}, reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - SplitBuffer, ZSlice, + ZSlice, }; use alloc::{sync::Arc, vec::Vec}; use core::{cmp, iter, mem, num::NonZeroUsize, ptr, slice}; @@ -56,18 +57,8 @@ impl ZBuf { } } -impl<'a> SplitBuffer<'a> for ZBuf { - type Slices = iter::Map, fn(&'a ZSlice) -> &'a [u8]>; - - fn slices(&'a self) -> Self::Slices { - self.slices.as_ref().iter().map(ZSlice::as_slice) - } - - #[inline(always)] - fn is_empty(&self) -> bool { - self.len() == 0 - } - +// Buffer +impl Buffer for ZBuf { #[inline(always)] fn len(&self) -> usize { self.slices @@ -77,6 +68,15 @@ impl<'a> SplitBuffer<'a> for ZBuf { } } +// SplitBuffer +impl SplitBuffer for ZBuf { + type Slices<'a> = iter::Map, fn(&'a ZSlice) -> &'a [u8]>; + + fn slices(&self) -> Self::Slices<'_> { + self.slices.as_ref().iter().map(ZSlice::as_slice) + } +} + impl PartialEq for ZBuf { fn eq(&self, other: &Self) -> bool { let mut self_slices = self.slices(); diff --git a/commons/zenoh-buffers/src/zslice.rs 
b/commons/zenoh-buffers/src/zslice.rs index 294092e682..e53e6f3334 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -11,7 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::reader::{BacktrackableReader, DidntRead, HasReader, Reader}; +use crate::{ + buffer::{Buffer, SplitBuffer}, + reader::{BacktrackableReader, DidntRead, HasReader, Reader}, +}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use core::{ any::Any, @@ -19,6 +22,7 @@ use core::{ fmt, num::NonZeroUsize, ops::{Deref, Index, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}, + option, }; /*************************************/ @@ -272,6 +276,34 @@ where } } +// Buffer +impl Buffer for ZSlice { + fn len(&self) -> usize { + ZSlice::len(self) + } +} + +impl Buffer for &ZSlice { + fn len(&self) -> usize { + ZSlice::len(self) + } +} + +impl Buffer for &mut ZSlice { + fn len(&self) -> usize { + ZSlice::len(self) + } +} + +// SplitBuffer +impl SplitBuffer for ZSlice { + type Slices<'a> = option::IntoIter<&'a [u8]>; + + fn slices(&self) -> Self::Slices<'_> { + Some(self.as_slice()).into_iter() + } +} + // Reader impl HasReader for &mut ZSlice { type Reader = Self; diff --git a/commons/zenoh-codec/src/core/zbuf.rs b/commons/zenoh-codec/src/core/zbuf.rs index ccf5d595ce..137030e66c 100644 --- a/commons/zenoh-codec/src/core/zbuf.rs +++ b/commons/zenoh-codec/src/core/zbuf.rs @@ -13,9 +13,10 @@ // use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ + buffer::Buffer, reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, - SplitBuffer, ZBuf, + ZBuf, }; // ZBuf bounded diff --git a/commons/zenoh-codec/src/transport/batch.rs b/commons/zenoh-codec/src/transport/batch.rs new file mode 100644 index 0000000000..525336d6e8 --- /dev/null +++ b/commons/zenoh-codec/src/transport/batch.rs @@ -0,0 +1,255 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are 
made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::{RCodec, WCodec, Zenoh080}; +use core::num::NonZeroUsize; +use zenoh_buffers::reader::{BacktrackableReader, DidntRead, Reader, SiphonableReader}; +use zenoh_buffers::writer::{BacktrackableWriter, DidntWrite, Writer}; +use zenoh_buffers::ZBufReader; +use zenoh_protocol::core::Reliability; +use zenoh_protocol::network::NetworkMessage; +use zenoh_protocol::transport::{ + Fragment, FragmentHeader, Frame, FrameHeader, TransportBody, TransportMessage, TransportSn, +}; + +#[derive(Clone, Copy, Debug)] +#[repr(u8)] +pub enum CurrentFrame { + Reliable, + BestEffort, + None, +} + +#[derive(Clone, Copy, Debug)] +pub struct LatestSn { + pub reliable: Option, + pub best_effort: Option, +} + +impl LatestSn { + const fn new() -> Self { + Self { + reliable: None, + best_effort: None, + } + } +} + +#[derive(Clone, Debug)] +pub struct Zenoh080Batch { + // The current frame being serialized: BestEffort/Reliable + pub current_frame: CurrentFrame, + // The latest SN + pub latest_sn: LatestSn, +} + +impl Zenoh080Batch { + pub const fn new() -> Self { + Self { + current_frame: CurrentFrame::None, + latest_sn: LatestSn::new(), + } + } + + pub fn clear(&mut self) { + self.current_frame = CurrentFrame::None; + self.latest_sn = LatestSn::new(); + } +} + +#[repr(u8)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum BatchError { + NewFrame, + DidntWrite, +} + +impl WCodec<&TransportMessage, &mut W> for &mut Zenoh080Batch +where + W: Writer + BacktrackableWriter, + ::Mark: Copy, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &TransportMessage) -> Self::Output { + // Mark the write operation + let mark 
= writer.mark(); + + let codec = Zenoh080::new(); + codec.write(&mut *writer, x).map_err(|e| { + // Revert the write operation + writer.rewind(mark); + e + })?; + + // Reset the current frame value + self.current_frame = CurrentFrame::None; + + Ok(()) + } +} + +impl WCodec<&NetworkMessage, &mut W> for &mut Zenoh080Batch +where + W: Writer + BacktrackableWriter, + ::Mark: Copy, +{ + type Output = Result<(), BatchError>; + + fn write(self, writer: &mut W, x: &NetworkMessage) -> Self::Output { + // Eventually update the current frame and sn based on the current status + if let (CurrentFrame::Reliable, false) + | (CurrentFrame::BestEffort, true) + | (CurrentFrame::None, _) = (self.current_frame, x.is_reliable()) + { + // We are not serializing on the right frame. + return Err(BatchError::NewFrame); + } + + // Mark the write operation + let mark = writer.mark(); + + let codec = Zenoh080::new(); + codec.write(&mut *writer, x).map_err(|_| { + // Revert the write operation + writer.rewind(mark); + BatchError::DidntWrite + }) + } +} + +impl WCodec<(&NetworkMessage, &FrameHeader), &mut W> for &mut Zenoh080Batch +where + W: Writer + BacktrackableWriter, + ::Mark: Copy, +{ + type Output = Result<(), BatchError>; + + fn write(self, writer: &mut W, x: (&NetworkMessage, &FrameHeader)) -> Self::Output { + let (m, f) = x; + + // @TODO: m.is_reliable() always return true for the time being + // if let (Reliability::Reliable, false) | (Reliability::BestEffort, true) = + // (f.reliability, m.is_reliable()) + // { + // // We are not serializing on the right frame. 
+ // return Err(BatchError::NewFrame); + // } + + // Mark the write operation + let mark = writer.mark(); + + let codec = Zenoh080::new(); + // Write the frame header + codec.write(&mut *writer, f).map_err(|_| { + // Revert the write operation + writer.rewind(mark); + BatchError::DidntWrite + })?; + // Write the zenoh message + codec.write(&mut *writer, m).map_err(|_| { + // Revert the write operation + writer.rewind(mark); + BatchError::DidntWrite + })?; + // Update the frame + self.current_frame = match f.reliability { + Reliability::Reliable => { + self.latest_sn.reliable = Some(f.sn); + CurrentFrame::Reliable + } + Reliability::BestEffort => { + self.latest_sn.best_effort = Some(f.sn); + CurrentFrame::BestEffort + } + }; + Ok(()) + } +} + +impl WCodec<(&mut ZBufReader<'_>, &mut FragmentHeader), &mut W> for &mut Zenoh080Batch +where + W: Writer + BacktrackableWriter, + ::Mark: Copy, +{ + type Output = Result; + + fn write(self, writer: &mut W, x: (&mut ZBufReader<'_>, &mut FragmentHeader)) -> Self::Output { + let (r, f) = x; + + // Mark the buffer for the writing operation + let mark = writer.mark(); + + let codec = Zenoh080::new(); + // Write the fragment header + codec.write(&mut *writer, &*f).map_err(|e| { + // Revert the write operation + writer.rewind(mark); + e + })?; + + // Check if it is really the final fragment + if r.remaining() <= writer.remaining() { + // Revert the buffer + writer.rewind(mark); + // It is really the finally fragment, reserialize the header + f.more = false; + // Write the fragment header + codec.write(&mut *writer, &*f).map_err(|e| { + // Revert the write operation + writer.rewind(mark); + e + })?; + } + + // Write the fragment + r.siphon(&mut *writer).map_err(|_| { + // Revert the write operation + writer.rewind(mark); + DidntWrite + }) + } +} + +impl RCodec for &mut Zenoh080Batch +where + R: Reader + BacktrackableReader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let codec = Zenoh080::new(); + 
let x: TransportMessage = codec.read(reader)?; + + match &x.body { + TransportBody::Frame(Frame { + reliability, sn, .. + }) + | TransportBody::Fragment(Fragment { + reliability, sn, .. + }) => match reliability { + Reliability::Reliable => { + self.current_frame = CurrentFrame::Reliable; + self.latest_sn.reliable = Some(*sn); + } + Reliability::BestEffort => { + self.current_frame = CurrentFrame::BestEffort; + self.latest_sn.best_effort = Some(*sn); + } + }, + _ => self.current_frame = CurrentFrame::None, + } + + Ok(x) + } +} diff --git a/commons/zenoh-codec/src/transport/init.rs b/commons/zenoh-codec/src/transport/init.rs index db37c8fc03..5f98c77e5b 100644 --- a/commons/zenoh-codec/src/transport/init.rs +++ b/commons/zenoh-codec/src/transport/init.rs @@ -37,61 +37,80 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &InitSyn) -> Self::Output { + let InitSyn { + version, + whatami, + zid, + resolution, + batch_size, + ext_qos, + ext_shm, + ext_auth, + ext_mlink, + ext_lowlatency, + ext_compression, + } = x; + // Header let mut header = id::INIT; - if x.resolution != Resolution::default() || x.batch_size != batch_size::UNICAST { + if *resolution != Resolution::default() || *batch_size != batch_size::UNICAST { header |= flag::S; } - let mut n_exts = (x.ext_qos.is_some() as u8) - + (x.ext_shm.is_some() as u8) - + (x.ext_auth.is_some() as u8) - + (x.ext_mlink.is_some() as u8) - + (x.ext_lowlatency.is_some() as u8); + let mut n_exts = (ext_qos.is_some() as u8) + + (ext_shm.is_some() as u8) + + (ext_auth.is_some() as u8) + + (ext_mlink.is_some() as u8) + + (ext_lowlatency.is_some() as u8) + + (ext_compression.is_some() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.version)?; + self.write(&mut *writer, version)?; - let whatami: u8 = match x.whatami { + let whatami: u8 = match whatami { WhatAmI::Router => 0b00, WhatAmI::Peer => 0b01, WhatAmI::Client => 0b10, }; - let 
flags: u8 = ((x.zid.size() as u8 - 1) << 4) | whatami; + let flags: u8 = ((zid.size() as u8 - 1) << 4) | whatami; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(x.zid.size()); - lodec.write(&mut *writer, &x.zid)?; + let lodec = Zenoh080Length::new(zid.size()); + lodec.write(&mut *writer, zid)?; if imsg::has_flag(header, flag::S) { - self.write(&mut *writer, x.resolution.as_u8())?; - self.write(&mut *writer, x.batch_size.to_le_bytes())?; + self.write(&mut *writer, resolution.as_u8())?; + self.write(&mut *writer, batch_size.to_le_bytes())?; } // Extensions - if let Some(qos) = x.ext_qos.as_ref() { + if let Some(qos) = ext_qos.as_ref() { n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } - if let Some(shm) = x.ext_shm.as_ref() { + if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; } - if let Some(auth) = x.ext_auth.as_ref() { + if let Some(auth) = ext_auth.as_ref() { n_exts -= 1; self.write(&mut *writer, (auth, n_exts != 0))?; } - if let Some(mlink) = x.ext_mlink.as_ref() { + if let Some(mlink) = ext_mlink.as_ref() { n_exts -= 1; self.write(&mut *writer, (mlink, n_exts != 0))?; } - if let Some(lowlatency) = x.ext_lowlatency.as_ref() { + if let Some(lowlatency) = ext_lowlatency.as_ref() { n_exts -= 1; self.write(&mut *writer, (lowlatency, n_exts != 0))?; } + if let Some(compression) = ext_compression.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (compression, n_exts != 0))?; + } Ok(()) } @@ -150,6 +169,7 @@ where let mut ext_auth = None; let mut ext_mlink = None; let mut ext_lowlatency = None; + let mut ext_compression = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { @@ -181,6 +201,11 @@ where ext_lowlatency = Some(q); has_ext = ext; } + ext::Compression::ID => { + let (q, ext): (ext::Compression, bool) = eodec.read(&mut *reader)?; + ext_compression = Some(q); + has_ext = ext; + } _ => { has_ext = extension::skip(reader, "InitSyn", ext)?; } @@ -198,6 
+223,7 @@ where ext_auth, ext_mlink, ext_lowlatency, + ext_compression, }) } } @@ -210,64 +236,84 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &InitAck) -> Self::Output { + let InitAck { + version, + whatami, + zid, + resolution, + batch_size, + cookie, + ext_qos, + ext_shm, + ext_auth, + ext_mlink, + ext_lowlatency, + ext_compression, + } = x; + // Header let mut header = id::INIT | flag::A; - if x.resolution != Resolution::default() || x.batch_size != batch_size::UNICAST { + if *resolution != Resolution::default() || *batch_size != batch_size::UNICAST { header |= flag::S; } - let mut n_exts = (x.ext_qos.is_some() as u8) - + (x.ext_shm.is_some() as u8) - + (x.ext_auth.is_some() as u8) - + (x.ext_mlink.is_some() as u8) - + (x.ext_lowlatency.is_some() as u8); + let mut n_exts = (ext_qos.is_some() as u8) + + (ext_shm.is_some() as u8) + + (ext_auth.is_some() as u8) + + (ext_mlink.is_some() as u8) + + (ext_lowlatency.is_some() as u8) + + (ext_compression.is_some() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.version)?; + self.write(&mut *writer, version)?; - let whatami: u8 = match x.whatami { + let whatami: u8 = match whatami { WhatAmI::Router => 0b00, WhatAmI::Peer => 0b01, WhatAmI::Client => 0b10, }; - let flags: u8 = ((x.zid.size() as u8 - 1) << 4) | whatami; + let flags: u8 = ((zid.size() as u8 - 1) << 4) | whatami; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(x.zid.size()); - lodec.write(&mut *writer, &x.zid)?; + let lodec = Zenoh080Length::new(zid.size()); + lodec.write(&mut *writer, zid)?; if imsg::has_flag(header, flag::S) { - self.write(&mut *writer, x.resolution.as_u8())?; - self.write(&mut *writer, x.batch_size.to_le_bytes())?; + self.write(&mut *writer, resolution.as_u8())?; + self.write(&mut *writer, batch_size.to_le_bytes())?; } let zodec = Zenoh080Bounded::::new(); - zodec.write(&mut *writer, &x.cookie)?; + zodec.write(&mut 
*writer, cookie)?; // Extensions - if let Some(qos) = x.ext_qos.as_ref() { + if let Some(qos) = ext_qos.as_ref() { n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } - if let Some(shm) = x.ext_shm.as_ref() { + if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; } - if let Some(auth) = x.ext_auth.as_ref() { + if let Some(auth) = ext_auth.as_ref() { n_exts -= 1; self.write(&mut *writer, (auth, n_exts != 0))?; } - if let Some(mlink) = x.ext_mlink.as_ref() { + if let Some(mlink) = ext_mlink.as_ref() { n_exts -= 1; self.write(&mut *writer, (mlink, n_exts != 0))?; } - if let Some(lowlatency) = x.ext_lowlatency.as_ref() { + if let Some(lowlatency) = ext_lowlatency.as_ref() { n_exts -= 1; self.write(&mut *writer, (lowlatency, n_exts != 0))?; } + if let Some(compression) = ext_compression.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (compression, n_exts != 0))?; + } Ok(()) } @@ -329,6 +375,7 @@ where let mut ext_auth = None; let mut ext_mlink = None; let mut ext_lowlatency = None; + let mut ext_compression = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { @@ -360,6 +407,11 @@ where ext_lowlatency = Some(q); has_ext = ext; } + ext::Compression::ID => { + let (q, ext): (ext::Compression, bool) = eodec.read(&mut *reader)?; + ext_compression = Some(q); + has_ext = ext; + } _ => { has_ext = extension::skip(reader, "InitAck", ext)?; } @@ -378,6 +430,7 @@ where ext_auth, ext_mlink, ext_lowlatency, + ext_compression, }) } } diff --git a/commons/zenoh-codec/src/transport/mod.rs b/commons/zenoh-codec/src/transport/mod.rs index 3aa6423eb6..4ddf872551 100644 --- a/commons/zenoh-codec/src/transport/mod.rs +++ b/commons/zenoh-codec/src/transport/mod.rs @@ -11,6 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // +pub mod batch; mod close; mod fragment; mod frame; diff --git a/commons/zenoh-codec/src/transport/open.rs b/commons/zenoh-codec/src/transport/open.rs index bbcb43de98..17482b1610 
100644 --- a/commons/zenoh-codec/src/transport/open.rs +++ b/commons/zenoh-codec/src/transport/open.rs @@ -35,16 +35,29 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &OpenSyn) -> Self::Output { + let OpenSyn { + initial_sn, + lease, + cookie, + ext_qos, + ext_shm, + ext_auth, + ext_mlink, + ext_lowlatency, + ext_compression, + } = x; + // Header let mut header = id::OPEN; - if x.lease.as_millis() % 1_000 == 0 { + if lease.as_millis() % 1_000 == 0 { header |= flag::T; } - let mut n_exts = (x.ext_qos.is_some() as u8) - + (x.ext_shm.is_some() as u8) - + (x.ext_auth.is_some() as u8) - + (x.ext_mlink.is_some() as u8) - + (x.ext_lowlatency.is_some() as u8); + let mut n_exts = (ext_qos.is_some() as u8) + + (ext_shm.is_some() as u8) + + (ext_auth.is_some() as u8) + + (ext_mlink.is_some() as u8) + + (ext_lowlatency.is_some() as u8) + + (ext_compression.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -52,34 +65,38 @@ where // Body if imsg::has_flag(header, flag::T) { - self.write(&mut *writer, x.lease.as_secs())?; + self.write(&mut *writer, lease.as_secs())?; } else { - self.write(&mut *writer, x.lease.as_millis() as u64)?; + self.write(&mut *writer, lease.as_millis() as u64)?; } - self.write(&mut *writer, x.initial_sn)?; - self.write(&mut *writer, &x.cookie)?; + self.write(&mut *writer, initial_sn)?; + self.write(&mut *writer, cookie)?; // Extensions - if let Some(qos) = x.ext_qos.as_ref() { + if let Some(qos) = ext_qos.as_ref() { n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } - if let Some(shm) = x.ext_shm.as_ref() { + if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; } - if let Some(auth) = x.ext_auth.as_ref() { + if let Some(auth) = ext_auth.as_ref() { n_exts -= 1; self.write(&mut *writer, (auth, n_exts != 0))?; } - if let Some(mlink) = x.ext_mlink.as_ref() { + if let Some(mlink) = ext_mlink.as_ref() { n_exts -= 1; self.write(&mut *writer, (mlink, n_exts != 0))?; 
} - if let Some(lowlatency) = x.ext_lowlatency.as_ref() { + if let Some(lowlatency) = ext_lowlatency.as_ref() { n_exts -= 1; self.write(&mut *writer, (lowlatency, n_exts != 0))?; } + if let Some(compression) = ext_compression.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (compression, n_exts != 0))?; + } Ok(()) } @@ -125,6 +142,7 @@ where let mut ext_auth = None; let mut ext_mlink = None; let mut ext_lowlatency = None; + let mut ext_compression = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { @@ -156,6 +174,11 @@ where ext_lowlatency = Some(q); has_ext = ext; } + ext::Compression::ID => { + let (q, ext): (ext::Compression, bool) = eodec.read(&mut *reader)?; + ext_compression = Some(q); + has_ext = ext; + } _ => { has_ext = extension::skip(reader, "OpenSyn", ext)?; } @@ -171,6 +194,7 @@ where ext_auth, ext_mlink, ext_lowlatency, + ext_compression, }) } } @@ -183,18 +207,30 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &OpenAck) -> Self::Output { + let OpenAck { + initial_sn, + lease, + ext_qos, + ext_shm, + ext_auth, + ext_mlink, + ext_lowlatency, + ext_compression, + } = x; + // Header let mut header = id::OPEN; header |= flag::A; // Verify that the timeout is expressed in seconds, i.e. subsec part is 0. 
- if x.lease.subsec_nanos() == 0 { + if lease.subsec_nanos() == 0 { header |= flag::T; } - let mut n_exts = (x.ext_qos.is_some() as u8) - + (x.ext_shm.is_some() as u8) - + (x.ext_auth.is_some() as u8) - + (x.ext_mlink.is_some() as u8) - + (x.ext_lowlatency.is_some() as u8); + let mut n_exts = (ext_qos.is_some() as u8) + + (ext_shm.is_some() as u8) + + (ext_auth.is_some() as u8) + + (ext_mlink.is_some() as u8) + + (ext_lowlatency.is_some() as u8) + + (ext_compression.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -202,33 +238,37 @@ where // Body if imsg::has_flag(header, flag::T) { - self.write(&mut *writer, x.lease.as_secs())?; + self.write(&mut *writer, lease.as_secs())?; } else { - self.write(&mut *writer, x.lease.as_millis() as u64)?; + self.write(&mut *writer, lease.as_millis() as u64)?; } - self.write(&mut *writer, x.initial_sn)?; + self.write(&mut *writer, initial_sn)?; // Extensions - if let Some(qos) = x.ext_qos.as_ref() { + if let Some(qos) = ext_qos.as_ref() { n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } - if let Some(shm) = x.ext_shm.as_ref() { + if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; } - if let Some(auth) = x.ext_auth.as_ref() { + if let Some(auth) = ext_auth.as_ref() { n_exts -= 1; self.write(&mut *writer, (auth, n_exts != 0))?; } - if let Some(mlink) = x.ext_mlink.as_ref() { + if let Some(mlink) = ext_mlink.as_ref() { n_exts -= 1; self.write(&mut *writer, (mlink, n_exts != 0))?; } - if let Some(lowlatency) = x.ext_lowlatency.as_ref() { + if let Some(lowlatency) = ext_lowlatency.as_ref() { n_exts -= 1; self.write(&mut *writer, (lowlatency, n_exts != 0))?; } + if let Some(compression) = ext_compression.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (compression, n_exts != 0))?; + } Ok(()) } @@ -273,6 +313,7 @@ where let mut ext_auth = None; let mut ext_mlink = None; let mut ext_lowlatency = None; + let mut ext_compression = None; let mut has_ext = 
imsg::has_flag(self.header, flag::Z); while has_ext { @@ -304,6 +345,11 @@ where ext_lowlatency = Some(q); has_ext = ext; } + ext::Compression::ID => { + let (q, ext): (ext::Compression, bool) = eodec.read(&mut *reader)?; + ext_compression = Some(q); + has_ext = ext; + } _ => { has_ext = extension::skip(reader, "OpenAck", ext)?; } @@ -318,6 +364,7 @@ where ext_auth, ext_mlink, ext_lowlatency, + ext_compression, }) } } diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 5b4d3da835..8d1a5dbc0f 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -107,6 +107,8 @@ impl Default for TransportUnicastConf { max_sessions: 1_000, max_links: 1, lowlatency: false, + qos: QoSUnicastConf::default(), + compression: CompressionUnicastConf::default(), } } } @@ -116,16 +118,39 @@ impl Default for TransportMulticastConf { Self { join_interval: Some(2500), max_sessions: Some(1000), + qos: QoSMulticastConf::default(), + compression: CompressionMulticastConf::default(), } } } -impl Default for QoSConf { +impl Default for QoSUnicastConf { fn default() -> Self { Self { enabled: true } } } +#[allow(clippy::derivable_impls)] +impl Default for QoSMulticastConf { + fn default() -> Self { + Self { enabled: false } + } +} + +#[allow(clippy::derivable_impls)] +impl Default for CompressionUnicastConf { + fn default() -> Self { + Self { enabled: false } + } +} + +#[allow(clippy::derivable_impls)] +impl Default for CompressionMulticastConf { + fn default() -> Self { + Self { enabled: false } + } +} + impl Default for LinkTxConf { #[allow(clippy::unnecessary_cast)] fn default() -> Self { diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 53ac033506..c3a633b0e2 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -247,17 +247,32 @@ validated_struct::validator! 
{ /// This option does not make LowLatency transport mandatory, the actual implementation of transport /// used will depend on Establish procedure and other party's settings lowlatency: bool, + pub qos: QoSUnicastConf { + /// Whether QoS is enabled or not. + /// If set to `false`, the QoS will be disabled. (default `true`). + enabled: bool + }, + pub compression: CompressionUnicastConf { + /// You must compile zenoh with "transport_compression" feature to be able to enable compression. + /// When enabled is true, batches will be sent compressed. (default `false`). + enabled: bool, + }, }, pub multicast: TransportMulticastConf { /// Link join interval duration in milliseconds (default: 2500) join_interval: Option, /// Maximum number of multicast sessions (default: 1000) max_sessions: Option, - }, - pub qos: QoSConf { - /// Whether QoS is enabled or not. - /// If set to `false`, the QoS will be disabled. (default `true`). - enabled: bool + pub qos: QoSMulticastConf { + /// Whether QoS is enabled or not. + /// If set to `false`, the QoS will be disabled. (default `false`). + enabled: bool + }, + pub compression: CompressionMulticastConf { + /// You must compile zenoh with "transport_compression" feature to be able to enable compression. + /// When enabled is true, batches will be sent compressed. (default `false`). + enabled: bool, + }, }, pub link: #[derive(Default)] TransportLinkConf { @@ -329,24 +344,11 @@ validated_struct::validator! { client_private_key_base64 : Option, #[serde(skip_serializing)] client_certificate_base64 : Option, - } - , + }, pub unixpipe: #[derive(Default)] UnixPipeConf { file_access_mask: Option }, - pub compression: #[derive(Default)] - /// **Experimental** compression feature. - /// Will compress the batches hop to hop (as opposed to end to end). May cause errors when - /// the batches's complexity is too high, causing the resulting compression to be bigger in - /// size than the MTU. 
- /// You must use the features "transport_compression" and "unstable" to enable this. - CompressionConf { - /// When enabled is true, batches will be sent compressed. It does not affect the - /// reception, which always expects compressed batches when built with thes features - /// "transport_compression" and "unstable". - enabled: bool, - } }, pub shared_memory: SharedMemoryConf { diff --git a/commons/zenoh-protocol/src/transport/frame.rs b/commons/zenoh-protocol/src/transport/frame.rs index bcd01e7965..184784f9f1 100644 --- a/commons/zenoh-protocol/src/transport/frame.rs +++ b/commons/zenoh-protocol/src/transport/frame.rs @@ -70,8 +70,8 @@ pub mod flag { pub struct Frame { pub reliability: Reliability, pub sn: TransportSn, - pub payload: Vec, pub ext_qos: ext::QoSType, + pub payload: Vec, } // Extensions diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index d553799fd1..0c60dd8a90 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -118,6 +118,7 @@ pub struct InitSyn { pub ext_auth: Option, pub ext_mlink: Option, pub ext_lowlatency: Option, + pub ext_compression: Option, } // Extensions @@ -146,6 +147,10 @@ pub mod ext { /// # LowLatency extension /// Used to negotiate the use of lowlatency transport pub type LowLatency = zextunit!(0x5, false); + + /// # Compression extension + /// Used to negotiate the use of compression on the link + pub type Compression = zextunit!(0x6, false); } impl InitSyn { @@ -166,6 +171,7 @@ impl InitSyn { let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_lowlatency = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + let ext_compression = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); Self { version, @@ -178,6 +184,7 @@ impl InitSyn { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } } } @@ -195,6 +202,7 @@ pub struct InitAck { pub 
ext_auth: Option, pub ext_mlink: Option, pub ext_lowlatency: Option, + pub ext_compression: Option, } impl InitAck { @@ -220,6 +228,7 @@ impl InitAck { let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_lowlatency = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + let ext_compression = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); Self { version, @@ -233,6 +242,7 @@ impl InitAck { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } } } diff --git a/commons/zenoh-protocol/src/transport/open.rs b/commons/zenoh-protocol/src/transport/open.rs index b7ec56da62..d793671b06 100644 --- a/commons/zenoh-protocol/src/transport/open.rs +++ b/commons/zenoh-protocol/src/transport/open.rs @@ -82,6 +82,7 @@ pub struct OpenSyn { pub ext_auth: Option, pub ext_mlink: Option, pub ext_lowlatency: Option, + pub ext_compression: Option, } // Extensions @@ -111,6 +112,10 @@ pub mod ext { /// # LowLatency extension /// Used to negotiate the use of lowlatency transport pub type LowLatency = zextunit!(0x5, false); + + /// # Compression extension + /// Used to negotiate the use of compression on the link + pub type Compression = zextunit!(0x6, false); } impl OpenSyn { @@ -137,6 +142,7 @@ impl OpenSyn { let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_lowlatency = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + let ext_compression = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); Self { lease, @@ -147,6 +153,7 @@ impl OpenSyn { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } } } @@ -160,6 +167,7 @@ pub struct OpenAck { pub ext_auth: Option, pub ext_mlink: Option, pub ext_lowlatency: Option, + pub ext_compression: Option, } impl OpenAck { @@ -182,6 +190,7 @@ impl OpenAck { let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); let ext_lowlatency 
= rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + let ext_compression = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); Self { lease, @@ -191,6 +200,7 @@ impl OpenAck { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } } } diff --git a/io/zenoh-link-commons/Cargo.toml b/io/zenoh-link-commons/Cargo.toml index 51db4d671c..36e39eceed 100644 --- a/io/zenoh-link-commons/Cargo.toml +++ b/io/zenoh-link-commons/Cargo.toml @@ -24,10 +24,14 @@ categories = { workspace = true } description = "Internal crate for zenoh." # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +compression = [] + [dependencies] async-std = { workspace = true } async-trait = { workspace = true } flume = { workspace = true } +lz4_flex = { workspace = true } serde = { workspace = true, features = ["default"] } typenum = { workspace = true } zenoh-buffers = { workspace = true } diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index 7f3eb43518..d44686ff50 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -14,21 +14,12 @@ use alloc::{boxed::Box, sync::Arc, vec::Vec}; use async_trait::async_trait; use core::{ - convert::TryFrom, fmt, hash::{Hash, Hasher}, ops::Deref, }; -use zenoh_buffers::{ - reader::HasReader, - writer::{HasWriter, Writer}, -}; -use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::{ - core::{EndPoint, Locator}, - transport::{BatchSize, TransportMessage}, -}; -use zenoh_result::{zerror, ZResult}; +use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_result::ZResult; pub type LinkManagerUnicast = Arc; #[async_trait] @@ -44,12 +35,6 @@ pub trait ConstructibleLinkManagerUnicast: Sized { fn new(new_link_sender: NewLinkChannelSender, config: T) -> ZResult; } -#[derive(Clone, PartialEq, Eq)] -pub enum LinkUnicastDirection { - Inbound, - Outbound, -} - #[derive(Clone)] pub struct LinkUnicast(pub Arc); @@ -67,70 +52,6 @@ pub trait 
LinkUnicastTrait: Send + Sync { async fn close(&self) -> ZResult<()>; } -impl LinkUnicast { - pub async fn send(&self, msg: &TransportMessage) -> ZResult { - const ERR: &str = "Write error on link: "; - - // Create the buffer for serializing the message - let mut buff = Vec::new(); - let mut writer = buff.writer(); - let codec = Zenoh080::new(); - - // Reserve 16 bits to write the length - if self.is_streamed() { - writer - .write_exact(BatchSize::MIN.to_le_bytes().as_slice()) - .map_err(|_| zerror!("{ERR}{self}"))?; - } - // Serialize the message - codec - .write(&mut writer, msg) - .map_err(|_| zerror!("{ERR}{self}"))?; - - // Write the length - if self.is_streamed() { - let num = BatchSize::MIN.to_le_bytes().len(); - let len = - BatchSize::try_from(writer.len() - num).map_err(|_| zerror!("{ERR}{self}"))?; - buff[..num].copy_from_slice(len.to_le_bytes().as_slice()); - } - - // Send the message on the link - self.0.write_all(buff.as_slice()).await?; - - Ok(buff.len()) - } - - pub async fn recv(&self) -> ZResult { - // Read from the link - let buffer = if self.is_streamed() { - // Read and decode the message length - let mut length_bytes = BatchSize::MIN.to_le_bytes(); - self.read_exact(&mut length_bytes).await?; - let to_read = BatchSize::from_le_bytes(length_bytes) as usize; - // Read the message - let mut buffer = zenoh_buffers::vec::uninit(to_read); - self.read_exact(&mut buffer).await?; - buffer - } else { - // Read the message - let mut buffer = zenoh_buffers::vec::uninit(self.get_mtu() as usize); - let n = self.read(&mut buffer).await?; - buffer.truncate(n); - buffer - }; - - let mut reader = buffer.reader(); - let codec = Zenoh080::new(); - - let msg: TransportMessage = codec - .read(&mut reader) - .map_err(|_| zerror!("Read error on link: {}", self))?; - - Ok(msg) - } -} - impl Deref for LinkUnicast { type Target = Arc; diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 6fc2051242..cd029a9435 100644 --- 
a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -11,67 +11,112 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::num::NonZeroUsize; +use std::num::{NonZeroU8, NonZeroUsize}; use zenoh_buffers::{ - reader::{Reader, SiphonableReader}, - writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - BBuf, ZBufReader, + buffer::Buffer, + reader::{DidntRead, HasReader}, + writer::{DidntWrite, HasWriter, Writer}, + BBuf, ZBufReader, ZSlice, ZSliceBuffer, +}; +use zenoh_codec::{ + transport::batch::{BatchError, Zenoh080Batch}, + RCodec, WCodec, }; -use zenoh_codec::{WCodec, Zenoh080}; use zenoh_protocol::{ - core::Reliability, network::NetworkMessage, - transport::{ - fragment::FragmentHeader, frame::FrameHeader, BatchSize, TransportMessage, TransportSn, - }, + transport::{fragment::FragmentHeader, frame::FrameHeader, BatchSize, TransportMessage}, }; - -const LENGTH_BYTES: [u8; 2] = u16::MIN.to_be_bytes(); - -pub(crate) trait Encode { - type Output; - fn encode(self, message: Message) -> Self::Output; +use zenoh_result::ZResult; +#[cfg(feature = "transport_compression")] +use {std::sync::Arc, zenoh_protocol::common::imsg, zenoh_result::zerror}; + +// Split the inner buffer into (length, header, payload) inmutable slices +#[cfg(feature = "transport_compression")] +macro_rules! 
zsplit { + ($slice:expr, $header:expr) => {{ + match $header.get() { + Some(_) => $slice.split_at(BatchHeader::INDEX + 1), + None => (&[], $slice), + } + }}; } -pub(crate) trait Decode { - type Error; - fn decode(self) -> Result; +// Batch config +#[derive(Copy, Clone, Debug)] +pub struct BatchConfig { + pub mtu: BatchSize, + #[cfg(feature = "transport_compression")] + pub is_compression: bool, } -#[derive(Clone, Copy, Debug)] -#[repr(u8)] -pub(crate) enum CurrentFrame { - Reliable, - BestEffort, - None, +impl BatchConfig { + fn header(&self) -> BatchHeader { + #[allow(unused_mut)] // No need for mut when "transport_compression" is disabled + let mut h = 0; + #[cfg(feature = "transport_compression")] + if self.is_compression { + h |= BatchHeader::COMPRESSION; + } + BatchHeader::new(h) + } } -#[derive(Clone, Copy, Debug)] -pub(crate) struct LatestSn { - pub(crate) reliable: Option, - pub(crate) best_effort: Option, -} +// Batch header +#[repr(transparent)] +#[derive(Copy, Clone, Debug)] +pub struct BatchHeader(Option); -impl LatestSn { - fn clear(&mut self) { - self.reliable = None; - self.best_effort = None; +impl BatchHeader { + #[cfg(feature = "transport_compression")] + const INDEX: usize = 0; + #[cfg(feature = "transport_compression")] + const COMPRESSION: u8 = 1; + + fn new(h: u8) -> Self { + Self(NonZeroU8::new(h)) + } + + #[cfg(feature = "transport_compression")] + const fn is_empty(&self) -> bool { + self.0.is_none() + } + + const fn get(&self) -> Option { + self.0 + } + + /// Verify that the [`WBatch`][WBatch] is for a stream-based protocol, i.e., the first + /// 2 bytes are reserved to encode the total amount of serialized bytes as 16-bits little endian. 
+ #[cfg(feature = "transport_compression")] + #[inline(always)] + pub fn is_compression(&self) -> bool { + self.0 + .is_some_and(|h| imsg::has_flag(h.get(), Self::COMPRESSION)) } } +// WRITE BATCH #[cfg(feature = "stats")] #[derive(Clone, Copy, Debug, Default)] -pub(crate) struct SerializationBatchStats { - pub(crate) t_msgs: usize, +pub struct WBatchStats { + pub t_msgs: usize, } #[cfg(feature = "stats")] -impl SerializationBatchStats { +impl WBatchStats { fn clear(&mut self) { self.t_msgs = 0; } } +#[repr(u8)] +#[derive(Debug)] +pub enum Finalize { + Batch, + #[cfg(feature = "transport_compression")] + Buffer, +} + /// Write Batch /// /// A [`WBatch`][WBatch] is a non-expandable and contiguous region of memory @@ -80,44 +125,38 @@ impl SerializationBatchStats { /// [`TransportMessage`][TransportMessage] are always serialized on the batch as they are, while /// [`ZenohMessage`][ZenohMessage] are always serializaed on the batch as part of a [`TransportMessage`] /// [TransportMessage] Frame. Reliable and Best Effort Frames can be interleaved on the same -/// [`SerializationBatch`][SerializationBatch] as long as they fit in the remaining buffer capacity. +/// [`WBatch`][WBatch] as long as they fit in the remaining buffer capacity. /// -/// In the serialized form, the [`SerializationBatch`][SerializationBatch] always contains one or more +/// In the serialized form, the [`WBatch`][WBatch] always contains one or more /// [`TransportMessage`][TransportMessage]. In the particular case of [`TransportMessage`][TransportMessage] Frame, /// its payload is either (i) one or more complete [`ZenohMessage`][ZenohMessage] or (ii) a fragment of a /// a [`ZenohMessage`][ZenohMessage]. 
/// -/// As an example, the content of the [`SerializationBatch`][SerializationBatch] in memory could be: +/// As an example, the content of the [`WBatch`][WBatch] in memory could be: /// /// | Keep Alive | Frame Reliable | Frame Best Effort | /// -#[derive(Debug)] -pub(crate) struct WBatch { +#[derive(Clone, Debug)] +pub struct WBatch { // The buffer to perform the batching on - buffer: BBuf, - // It is a streamed batch - is_streamed: bool, - // The current frame being serialized: BestEffort/Reliable - current_frame: CurrentFrame, - // The latest SN - pub(crate) latest_sn: LatestSn, + pub buffer: BBuf, + // The batch codec + pub codec: Zenoh080Batch, + // It contains 1 byte as additional header, e.g. to signal the batch is compressed + pub header: BatchHeader, // Statistics related to this batch #[cfg(feature = "stats")] - pub(crate) stats: SerializationBatchStats, + pub stats: WBatchStats, } impl WBatch { - pub(crate) fn new(size: BatchSize, is_streamed: bool) -> Self { + pub fn new(config: BatchConfig) -> Self { let mut batch = Self { - buffer: BBuf::with_capacity(size as usize), - is_streamed, - current_frame: CurrentFrame::None, - latest_sn: LatestSn { - reliable: None, - best_effort: None, - }, + buffer: BBuf::with_capacity(config.mtu as usize), + codec: Zenoh080Batch::new(), + header: config.header(), #[cfg(feature = "stats")] - stats: SerializationBatchStats::default(), + stats: WBatchStats::default(), }; // Bring the batch in a clear state @@ -126,237 +165,302 @@ impl WBatch { batch } - /// Verify that the [`SerializationBatch`][SerializationBatch] has no serialized bytes. + /// Verify that the [`WBatch`][WBatch] has no serialized bytes. #[inline(always)] - pub(crate) fn is_empty(&self) -> bool { + pub fn is_empty(&self) -> bool { self.len() == 0 } - /// Get the total number of bytes that have been serialized on the [`SerializationBatch`][SerializationBatch]. + /// Get the total number of bytes that have been serialized on the [`WBatch`][WBatch]. 
#[inline(always)] - pub(crate) fn len(&self) -> BatchSize { - let len = self.buffer.len() as BatchSize; - if self.is_streamed() { - len - (LENGTH_BYTES.len() as BatchSize) - } else { - len - } + pub fn len(&self) -> BatchSize { + self.buffer.len() as BatchSize } - /// Verify that the [`SerializationBatch`][SerializationBatch] is for a stream-based protocol, i.e., the first - /// 2 bytes are reserved to encode the total amount of serialized bytes as 16-bits little endian. + /// Clear the [`WBatch`][WBatch] memory buffer and related internal state. #[inline(always)] - pub(crate) fn is_streamed(&self) -> bool { - self.is_streamed - } - - /// Clear the [`SerializationBatch`][SerializationBatch] memory buffer and related internal state. - #[inline(always)] - pub(crate) fn clear(&mut self) { + pub fn clear(&mut self) { self.buffer.clear(); - self.current_frame = CurrentFrame::None; - self.latest_sn.clear(); - #[cfg(feature = "stats")] - { - self.stats.clear(); - } - if self.is_streamed() { + self.codec.clear(); + if let Some(h) = self.header.get() { let mut writer = self.buffer.writer(); - let _ = writer.write_exact(&LENGTH_BYTES[..]); + let _ = writer.write_u8(h.get()); } } - /// In case the [`SerializationBatch`][SerializationBatch] is for a stream-based protocol, use the first 2 bytes - /// to encode the total amount of serialized bytes as 16-bits little endian. + /// Get a `&[u8]` to access the internal memory buffer, usually for transmitting it on the network. #[inline(always)] - pub(crate) fn write_len(&mut self) { - if self.is_streamed() { - let length = self.len(); - self.buffer.as_mut_slice()[..LENGTH_BYTES.len()].copy_from_slice(&length.to_le_bytes()); - } + pub fn as_slice(&self) -> &[u8] { + self.buffer.as_slice() } - /// Get a `&[u8]` to access the internal memory buffer, usually for transmitting it on the network. 
+ // Split (length, header, payload) internal buffer slice #[inline(always)] - pub(crate) fn as_bytes(&self) -> &[u8] { - self.buffer.as_slice() + #[cfg(feature = "transport_compression")] + fn split(&self) -> (&[u8], &[u8]) { + zsplit!(self.buffer.as_slice(), self.header) + } + + pub fn finalize( + &mut self, + #[cfg(feature = "transport_compression")] buffer: Option<&mut BBuf>, + ) -> ZResult { + #[cfg(feature = "transport_compression")] + if self.header.is_compression() { + let buffer = buffer.ok_or_else(|| zerror!("Support buffer not provided"))?; + buffer.clear(); + return self.compress(buffer); + } + + Ok(Finalize::Batch) + } + + #[cfg(feature = "transport_compression")] + fn compress(&mut self, support: &mut BBuf) -> ZResult { + // Write the initial bytes for the batch + let mut writer = support.writer(); + if let Some(h) = self.header.get() { + let _ = writer.write_u8(h.get()); + } + + // Compress the actual content + let (_header, payload) = self.split(); + writer + .with_slot(writer.remaining(), |b| { + lz4_flex::block::compress_into(payload, b).unwrap_or(0) + }) + .map_err(|_| zerror!("Compression error"))?; + + // Verify wether the resulting compressed data is smaller than the initial input + if support.len() < self.buffer.len() { + Ok(Finalize::Buffer) + } else { + // Keep the original uncompressed buffer and unset the compression flag from the header + let h = self + .buffer + .as_mut_slice() + .get_mut(BatchHeader::INDEX) + .ok_or_else(|| zerror!("Header not present"))?; + *h &= !BatchHeader::COMPRESSION; + Ok(Finalize::Batch) + } } } +pub trait Encode { + type Output; + + fn encode(self, x: Message) -> Self::Output; +} + impl Encode<&TransportMessage> for &mut WBatch { type Output = Result<(), DidntWrite>; - /// Try to serialize a [`TransportMessage`][TransportMessage] on the [`SerializationBatch`][SerializationBatch]. - /// - /// # Arguments - /// * `message` - The [`TransportMessage`][TransportMessage] to serialize. 
- /// - fn encode(self, message: &TransportMessage) -> Self::Output { - // Mark the write operation + fn encode(self, x: &TransportMessage) -> Self::Output { let mut writer = self.buffer.writer(); - let mark = writer.mark(); - - let codec = Zenoh080::new(); - codec.write(&mut writer, message).map_err(|e| { - // Revert the write operation - writer.rewind(mark); - e - })?; - - // Reset the current frame value - self.current_frame = CurrentFrame::None; - #[cfg(feature = "stats")] - { - self.stats.t_msgs += 1; - } - Ok(()) + self.codec.write(&mut writer, x) } } -#[repr(u8)] -pub(crate) enum WError { - NewFrame, - DidntWrite, -} - impl Encode<&NetworkMessage> for &mut WBatch { - type Output = Result<(), WError>; - - /// Try to serialize a [`NetworkMessage`][NetworkMessage] on the [`SerializationBatch`][SerializationBatch]. - /// - /// # Arguments - /// * `message` - The [`NetworkMessage`][NetworkMessage] to serialize. - /// - fn encode(self, message: &NetworkMessage) -> Self::Output { - // Eventually update the current frame and sn based on the current status - if let (CurrentFrame::Reliable, false) - | (CurrentFrame::BestEffort, true) - | (CurrentFrame::None, _) = (self.current_frame, message.is_reliable()) - { - // We are not serializing on the right frame. 
- return Err(WError::NewFrame); - }; + type Output = Result<(), BatchError>; - // Mark the write operation + fn encode(self, x: &NetworkMessage) -> Self::Output { let mut writer = self.buffer.writer(); - let mark = writer.mark(); - - let codec = Zenoh080::new(); - codec.write(&mut writer, message).map_err(|_| { - // Revert the write operation - writer.rewind(mark); - WError::DidntWrite - }) + self.codec.write(&mut writer, x) } } -impl Encode<(&NetworkMessage, FrameHeader)> for &mut WBatch { - type Output = Result<(), DidntWrite>; +impl Encode<(&NetworkMessage, &FrameHeader)> for &mut WBatch { + type Output = Result<(), BatchError>; + + fn encode(self, x: (&NetworkMessage, &FrameHeader)) -> Self::Output { + let mut writer = self.buffer.writer(); + self.codec.write(&mut writer, x) + } +} - /// Try to serialize a [`NetworkMessage`][NetworkMessage] on the [`SerializationBatch`][SerializationBatch]. - /// - /// # Arguments - /// * `message` - The [`NetworkMessage`][NetworkMessage] to serialize. 
- /// - fn encode(self, message: (&NetworkMessage, FrameHeader)) -> Self::Output { - let (message, frame) = message; +impl Encode<(&mut ZBufReader<'_>, &mut FragmentHeader)> for &mut WBatch { + type Output = Result; - // Mark the write operation + fn encode(self, x: (&mut ZBufReader<'_>, &mut FragmentHeader)) -> Self::Output { let mut writer = self.buffer.writer(); - let mark = writer.mark(); - - let codec = Zenoh080::new(); - // Write the frame header - codec.write(&mut writer, &frame).map_err(|e| { - // Revert the write operation - writer.rewind(mark); - e - })?; - // Write the zenoh message - codec.write(&mut writer, message).map_err(|e| { - // Revert the write operation - writer.rewind(mark); - e - })?; - // Update the frame - self.current_frame = match frame.reliability { - Reliability::Reliable => { - self.latest_sn.reliable = Some(frame.sn); - CurrentFrame::Reliable - } - Reliability::BestEffort => { - self.latest_sn.best_effort = Some(frame.sn); - CurrentFrame::BestEffort + self.codec.write(&mut writer, x) + } +} + +// Read batch +#[derive(Debug)] +pub struct RBatch { + // The buffer to perform deserializationn from + buffer: ZSlice, + // The batch codec + codec: Zenoh080Batch, + // It contains 1 byte as additional header, e.g. 
to signal the batch is compressed + #[cfg(feature = "transport_compression")] + header: BatchHeader, +} + +impl RBatch { + pub fn new(#[allow(unused_variables)] config: BatchConfig, buffer: T) -> Self + where + T: Into, + { + Self { + buffer: buffer.into(), + codec: Zenoh080Batch::new(), + #[cfg(feature = "transport_compression")] + header: config.header(), + } + } + + #[inline(always)] + pub const fn is_empty(&self) -> bool { + self.buffer.is_empty() + } + + // Split (length, header, payload) internal buffer slice + #[inline(always)] + #[cfg(feature = "transport_compression")] + fn split(&self) -> (&[u8], &[u8]) { + zsplit!(self.buffer.as_slice(), self.header) + } + + pub fn initialize(&mut self, #[allow(unused_variables)] buff: C) -> ZResult<()> + where + C: Fn() -> T + Copy, + T: ZSliceBuffer + 'static, + { + #[cfg(feature = "transport_compression")] + if !self.header.is_empty() { + let h = *self + .buffer + .get(BatchHeader::INDEX) + .ok_or_else(|| zerror!("Batch header not present"))?; + let header = BatchHeader::new(h); + + if header.is_compression() { + self.decompress(buff)?; + } else { + self.buffer = self + .buffer + .subslice(BatchHeader::INDEX + 1, self.buffer.len()) + .ok_or_else(|| zerror!("Invalid batch length"))?; } - }; + } + + Ok(()) + } + + #[cfg(feature = "transport_compression")] + fn decompress(&mut self, mut buff: impl FnMut() -> T) -> ZResult<()> + where + T: ZSliceBuffer + 'static, + { + let (_h, p) = self.split(); + + let mut into = (buff)(); + let n = lz4_flex::block::decompress_into(p, into.as_mut_slice()) + .map_err(|_| zerror!("Decompression error"))?; + self.buffer = ZSlice::make(Arc::new(into), 0, n) + .map_err(|_| zerror!("Invalid decompression buffer length"))?; + Ok(()) } } -impl Encode<(&mut ZBufReader<'_>, FragmentHeader)> for &mut WBatch { - type Output = Result; +pub trait Decode { + type Error; - /// Try to serialize a [`ZenohMessage`][ZenohMessage] on the [`SerializationBatch`][SerializationBatch]. 
- /// - /// # Arguments - /// * `message` - The [`ZenohMessage`][ZenohMessage] to serialize. - /// - fn encode(self, message: (&mut ZBufReader<'_>, FragmentHeader)) -> Self::Output { - let (reader, mut fragment) = message; + fn decode(self) -> Result; +} - let mut writer = self.buffer.writer(); - let codec = Zenoh080::new(); - - // Mark the buffer for the writing operation - let mark = writer.mark(); - - // Write the frame header - codec.write(&mut writer, &fragment).map_err(|e| { - // Revert the write operation - writer.rewind(mark); - e - })?; - - // Check if it is really the final fragment - if reader.remaining() <= writer.remaining() { - // Revert the buffer - writer.rewind(mark); - // It is really the finally fragment, reserialize the header - fragment.more = false; - // Write the frame header - codec.write(&mut writer, &fragment).map_err(|e| { - // Revert the write operation - writer.rewind(mark); - e - })?; - } +impl Decode for &mut RBatch { + type Error = DidntRead; - // Write the fragment - reader.siphon(&mut writer).map_err(|_| { - // Revert the write operation - writer.rewind(mark); - DidntWrite - }) + fn decode(self) -> Result { + let mut reader = self.buffer.reader(); + self.codec.read(&mut reader) } } #[cfg(test)] mod tests { + use std::vec; + use super::*; + use rand::Rng; use zenoh_buffers::ZBuf; use zenoh_protocol::{ core::{CongestionControl, Encoding, Priority, Reliability, WireExpr}, network::{ext, Push}, transport::{ frame::{self, FrameHeader}, - KeepAlive, TransportMessage, + Fragment, KeepAlive, TransportMessage, }, zenoh::{PushBody, Put}, }; + #[test] + fn rw_batch() { + let mut rng = rand::thread_rng(); + + for _ in 0..1_000 { + let msg_ins: [TransportMessage; 2] = [TransportMessage::rand(), { + let mut msg_in = Fragment::rand(); + msg_in.payload = vec![0u8; rng.gen_range(8..1_024)].into(); + msg_in.into() + }]; + for msg_in in msg_ins { + let config = BatchConfig { + mtu: BatchSize::MAX, + #[cfg(feature = "transport_compression")] + 
is_compression: rng.gen_bool(0.5), + }; + let mut wbatch = WBatch::new(config); + wbatch.encode(&msg_in).unwrap(); + println!("Encoded WBatch: {:?}", wbatch); + + #[cfg(feature = "transport_compression")] + let mut buffer = config.is_compression.then_some(BBuf::with_capacity( + lz4_flex::block::get_maximum_output_size(wbatch.as_slice().len()), + )); + + let res = wbatch + .finalize( + #[cfg(feature = "transport_compression")] + buffer.as_mut(), + ) + .unwrap(); + let bytes = match res { + Finalize::Batch => wbatch.as_slice(), + #[cfg(feature = "transport_compression")] + Finalize::Buffer => buffer.as_mut().unwrap().as_slice(), + }; + println!("Finalized WBatch: {:02x?}", bytes); + + let mut rbatch = RBatch::new(config, bytes.to_vec().into_boxed_slice()); + println!("Decoded RBatch: {:?}", rbatch); + rbatch + .initialize(|| { + zenoh_buffers::vec::uninit(config.mtu as usize).into_boxed_slice() + }) + .unwrap(); + println!("Initialized RBatch: {:?}", rbatch); + let msg_out: TransportMessage = rbatch.decode().unwrap(); + assert_eq!(msg_in, msg_out); + } + } + } + #[test] fn serialization_batch() { - let mut batch = WBatch::new(u16::MAX, true); + let config = BatchConfig { + mtu: BatchSize::MAX, + #[cfg(feature = "transport_compression")] + is_compression: false, + }; + let mut batch = WBatch::new(config); let tmsg: TransportMessage = KeepAlive.into(); let nmsg: NetworkMessage = Push { @@ -391,12 +495,12 @@ mod tests { }; // Serialize with a frame - batch.encode((&nmsg, frame)).unwrap(); + batch.encode((&nmsg, &frame)).unwrap(); assert_ne!(batch.len(), 0); nmsgs_in.push(nmsg.clone()); frame.reliability = Reliability::BestEffort; - batch.encode((&nmsg, frame)).unwrap(); + batch.encode((&nmsg, &frame)).unwrap(); assert_ne!(batch.len(), 0); nmsgs_in.push(nmsg.clone()); @@ -410,7 +514,7 @@ mod tests { // Serialize with a frame frame.sn = 1; - batch.encode((&nmsg, frame)).unwrap(); + batch.encode((&nmsg, &frame)).unwrap(); assert_ne!(batch.len(), 0); 
nmsgs_in.push(nmsg.clone()); } diff --git a/io/zenoh-transport/src/common/defragmentation.rs b/io/zenoh-transport/src/common/defragmentation.rs index be734cad45..8fab075fe4 100644 --- a/io/zenoh-transport/src/common/defragmentation.rs +++ b/io/zenoh-transport/src/common/defragmentation.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use super::seq_num::SeqNum; -use zenoh_buffers::{reader::HasReader, SplitBuffer, ZBuf, ZSlice}; +use zenoh_buffers::{buffer::Buffer, reader::HasReader, ZBuf, ZSlice}; use zenoh_codec::{RCodec, Zenoh080Reliability}; use zenoh_protocol::{ core::{Bits, Reliability}, diff --git a/io/zenoh-transport/src/common/mod.rs b/io/zenoh-transport/src/common/mod.rs index 0837ced4f7..c7de8a64ce 100644 --- a/io/zenoh-transport/src/common/mod.rs +++ b/io/zenoh-transport/src/common/mod.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -pub(crate) mod batch; +pub mod batch; pub(crate) mod defragmentation; pub(crate) mod pipeline; pub(crate) mod priority; diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 47c5ef4a4d..19e7a47289 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -1,3 +1,5 @@ +use crate::common::batch::BatchConfig; + // // Copyright (c) 2023 ZettaScale Technology // @@ -11,8 +13,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::batch::{Encode, WBatch, WError}; -use super::priority::{TransportChannelTx, TransportPriorityTx}; +use super::{ + batch::{Encode, WBatch}, + priority::{TransportChannelTx, TransportPriorityTx}, +}; use async_std::prelude::FutureExt; use flume::{bounded, Receiver, Sender}; use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; @@ -25,7 +29,7 @@ use zenoh_buffers::{ writer::HasWriter, ZBuf, }; -use zenoh_codec::{WCodec, Zenoh080}; +use zenoh_codec::{transport::batch::BatchError, WCodec, Zenoh080}; use zenoh_config::QueueSizeConf; use zenoh_core::zlock; use 
zenoh_protocol::core::Reliability; @@ -187,11 +191,11 @@ impl StageIn { ext_qos: frame::ext::QoSType::new(priority), }; - if let WError::NewFrame = e { + if let BatchError::NewFrame = e { // Attempt a serialization with a new frame - if batch.encode((&*msg, frame)).is_ok() { + if batch.encode((&*msg, &frame)).is_ok() { zretok!(batch); - }; + } } if !batch.is_empty() { @@ -201,9 +205,9 @@ impl StageIn { } // Attempt a second serialization on fully empty batch - if batch.encode((&*msg, frame)).is_ok() { + if batch.encode((&*msg, &frame)).is_ok() { zretok!(batch); - }; + } // The second serialization attempt has failed. This means that the message is // too large for the current batch size: we need to fragment. @@ -231,7 +235,7 @@ impl StageIn { batch = zgetbatch_rets!(true); // Serialize the message fragmnet - match batch.encode((&mut reader, fragment)) { + match batch.encode((&mut reader, &mut fragment)) { Ok(_) => { // Update the SN fragment.sn = tch.sn.get(); @@ -378,8 +382,7 @@ struct StageOutIn { impl StageOutIn { #[inline] fn try_pull(&mut self) -> Pull { - if let Some(mut batch) = self.s_out_r.pull() { - batch.write_len(); + if let Some(batch) = self.s_out_r.pull() { self.backoff.stop(); return Pull::Some(batch); } @@ -397,16 +400,14 @@ impl StageOutIn { // No new bytes have been written on the batch, try to pull if let Ok(mut g) = self.current.try_lock() { // First try to pull from stage OUT - if let Some(mut batch) = self.s_out_r.pull() { - batch.write_len(); + if let Some(batch) = self.s_out_r.pull() { self.backoff.stop(); return Pull::Some(batch); } // An incomplete (non-empty) batch is available in the state IN pipeline. 
match g.take() { - Some(mut batch) => { - batch.write_len(); + Some(batch) => { self.backoff.stop(); return Pull::Some(batch); } @@ -420,8 +421,7 @@ impl StageOutIn { } std::cmp::Ordering::Less => { // There should be a new batch in Stage OUT - if let Some(mut batch) = self.s_out_r.pull() { - batch.write_len(); + if let Some(batch) = self.s_out_r.pull() { self.backoff.stop(); return Pull::Some(batch); } @@ -469,8 +469,7 @@ impl StageOut { fn drain(&mut self, guard: &mut MutexGuard<'_, Option>) -> Vec { let mut batches = vec![]; // Empty the ring buffer - while let Some(mut batch) = self.s_in.s_out_r.pull() { - batch.write_len(); + while let Some(batch) = self.s_in.s_out_r.pull() { batches.push(batch); } // Take the current batch @@ -484,6 +483,8 @@ impl StageOut { #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct TransmissionPipelineConf { pub(crate) is_streamed: bool, + #[cfg(feature = "transport_compression")] + pub(crate) is_compression: bool, pub(crate) batch_size: BatchSize, pub(crate) queue_size: [usize; Priority::NUM], pub(crate) backoff: Duration, @@ -493,6 +494,8 @@ impl Default for TransmissionPipelineConf { fn default() -> Self { Self { is_streamed: false, + #[cfg(feature = "transport_compression")] + is_compression: false, batch_size: BatchSize::MAX, queue_size: [1; Priority::NUM], backoff: Duration::from_micros(1), @@ -530,9 +533,13 @@ impl TransmissionPipeline { let (mut s_ref_w, s_ref_r) = RingBuffer::::init(); // Fill the refill ring buffer with batches for _ in 0..*num { - assert!(s_ref_w - .push(WBatch::new(config.batch_size, config.is_streamed)) - .is_none()); + let bc = BatchConfig { + mtu: config.batch_size, + #[cfg(feature = "transport_compression")] + is_compression: config.is_compression, + }; + let batch = WBatch::new(bc); + assert!(s_ref_w.push(batch).is_none()); } // Create the channel for notifying that new batches are in the refill ring buffer // This is a SPSC channel @@ -730,6 +737,8 @@ mod tests { const CONFIG: 
TransmissionPipelineConf = TransmissionPipelineConf { is_streamed: true, + #[cfg(feature = "transport_compression")] + is_compression: true, batch_size: BatchSize::MAX, queue_size: [1; Priority::NUM], backoff: Duration::from_micros(1), @@ -782,7 +791,7 @@ mod tests { batches += 1; bytes += batch.len() as usize; // Create a ZBuf for deserialization starting from the batch - let bytes = batch.as_bytes(); + let bytes = batch.as_slice(); // Deserialize the messages let mut reader = bytes.reader(); let codec = Zenoh080::new(); diff --git a/io/zenoh-transport/src/lib.rs b/io/zenoh-transport/src/lib.rs index 05240710f6..5432394756 100644 --- a/io/zenoh-transport/src/lib.rs +++ b/io/zenoh-transport/src/lib.rs @@ -17,10 +17,10 @@ //! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -mod common; -mod manager; -mod multicast; -mod primitives; +pub mod common; +pub mod manager; +pub mod multicast; +pub mod primitives; pub mod unicast; #[cfg(feature = "stats")] @@ -29,13 +29,11 @@ pub use common::stats; #[cfg(feature = "shared-memory")] mod shm; +use crate::{multicast::TransportMulticast, unicast::TransportUnicast}; pub use manager::*; -pub use multicast::*; -pub use primitives::*; use serde::Serialize; use std::any::Any; use std::sync::Arc; -pub use unicast::*; use zenoh_link::Link; use zenoh_protocol::core::{WhatAmI, ZenohId}; use zenoh_protocol::network::NetworkMessage; diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 6847b12dd8..3c225274aa 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -49,14 +49,14 @@ use zenoh_result::{bail, ZResult}; /// impl TransportEventHandler for MySH { /// fn new_unicast(&self, /// _peer: TransportPeer, -/// _transport: TransportUnicast +/// _transport: unicast::TransportUnicast /// ) -> ZResult> { /// Ok(Arc::new(DummyTransportPeerEventHandler)) /// } /// /// fn new_multicast( /// &self, -/// 
_transport: TransportMulticast, +/// _transport: multicast::TransportMulticast, /// ) -> ZResult> { /// Ok(Arc::new(DummyTransportMulticastEventHandler)) /// } diff --git a/io/zenoh-transport/src/multicast/establishment.rs b/io/zenoh-transport/src/multicast/establishment.rs index fc4ad21da3..e31ab05d30 100644 --- a/io/zenoh-transport/src/multicast/establishment.rs +++ b/io/zenoh-transport/src/multicast/establishment.rs @@ -13,8 +13,12 @@ // use crate::{ common::seq_num, - multicast::{transport::TransportMulticastInner, TransportMulticast}, - TransportConfigMulticast, TransportManager, + multicast::{ + link::{TransportLinkMulticast, TransportLinkMulticastConfig}, + transport::TransportMulticastInner, + TransportConfigMulticast, TransportMulticast, + }, + TransportManager, }; use rand::Rng; use std::sync::Arc; @@ -57,6 +61,13 @@ pub(crate) async fn open_link( // Create the transport let locator = link.get_dst().to_owned(); + let config = TransportLinkMulticastConfig { + mtu: link.get_mtu(), + #[cfg(feature = "transport_compression")] + is_compression: manager.config.multicast.is_compression, + }; + let link = TransportLinkMulticast::new(link, config); + let config = TransportConfigMulticast { link, sn_resolution, diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index b430e7efb1..937216dd08 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -11,31 +11,256 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::common::{pipeline::TransmissionPipeline, priority::TransportPriorityTx}; -use super::transport::TransportMulticastInner; -use crate::common::batch::WBatch; -use crate::common::pipeline::{ - TransmissionPipelineConf, TransmissionPipelineConsumer, TransmissionPipelineProducer, -}; #[cfg(feature = "stats")] use crate::stats::TransportStats; -use async_std::prelude::FutureExt; -use async_std::task; -use async_std::task::JoinHandle; -use std::convert::TryInto; 
-use std::sync::Arc; -use std::time::{Duration, Instant}; -use zenoh_buffers::ZSlice; +use crate::{ + common::{ + batch::{BatchConfig, Encode, Finalize, RBatch, WBatch}, + pipeline::{ + TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, + TransmissionPipelineProducer, + }, + priority::TransportPriorityTx, + }, + multicast::transport::TransportMulticastInner, +}; +use async_std::{ + prelude::FutureExt, + task::{self, JoinHandle}, +}; +use std::{ + convert::TryInto, + fmt, + sync::Arc, + time::{Duration, Instant}, +}; +#[cfg(feature = "transport_compression")] +use zenoh_buffers::BBuf; +use zenoh_buffers::{ZSlice, ZSliceBuffer}; use zenoh_core::zlock; -use zenoh_link::{LinkMulticast, Locator}; +use zenoh_link::{Link, LinkMulticast, Locator}; use zenoh_protocol::{ core::{Bits, Priority, Resolution, WhatAmI, ZenohId}, - transport::{BatchSize, Join, PrioritySn, TransportMessage, TransportSn}, + transport::{BatchSize, Close, Join, PrioritySn, TransportMessage, TransportSn}, }; -use zenoh_result::{bail, zerror, ZResult}; -use zenoh_sync::{RecyclingObjectPool, Signal}; +use zenoh_result::{zerror, ZResult}; +use zenoh_sync::{RecyclingObject, RecyclingObjectPool, Signal}; + +/****************************/ +/* TRANSPORT MULTICAST LINK */ +/****************************/ +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub(crate) struct TransportLinkMulticastConfig { + // MTU + pub(crate) mtu: BatchSize, + // Compression is active on the link + #[cfg(feature = "transport_compression")] + pub(crate) is_compression: bool, +} + +#[derive(Clone, PartialEq, Eq)] +pub(crate) struct TransportLinkMulticast { + pub(crate) link: LinkMulticast, + pub(crate) config: TransportLinkMulticastConfig, +} + +impl TransportLinkMulticast { + pub(crate) fn new(link: LinkMulticast, mut config: TransportLinkMulticastConfig) -> Self { + config.mtu = link.get_mtu().min(config.mtu); + Self { link, config } + } + + const fn batch_config(&self) -> BatchConfig { + BatchConfig { + 
mtu: self.config.mtu, + #[cfg(feature = "transport_compression")] + is_compression: self.config.is_compression, + } + } + + pub(crate) fn tx(&self) -> TransportLinkMulticastTx { + TransportLinkMulticastTx { + inner: self.clone(), + #[cfg(feature = "transport_compression")] + buffer: self.config.is_compression.then_some(BBuf::with_capacity( + lz4_flex::block::get_maximum_output_size(self.config.mtu as usize), + )), + } + } + + pub(crate) fn rx(&self) -> TransportLinkMulticastRx { + TransportLinkMulticastRx { + inner: self.clone(), + } + } + + pub(crate) async fn send(&self, msg: &TransportMessage) -> ZResult { + let mut link = self.tx(); + link.send(msg).await + } + + // pub(crate) async fn recv(&self) -> ZResult<(TransportMessage, Locator)> { + // let mut link = self.rx(); + // link.recv().await + // } + + pub(crate) async fn close(&self, reason: Option) -> ZResult<()> { + if let Some(reason) = reason { + // Build the close message + let message: TransportMessage = Close { + reason, + session: false, + } + .into(); + // Send the close message on the link + let _ = self.send(&message).await; + } + self.link.close().await + } +} + +impl fmt::Display for TransportLinkMulticast { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.link) + } +} + +impl fmt::Debug for TransportLinkMulticast { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TransportLinkMulticast") + .field("link", &self.link) + .field("config", &self.config) + .finish() + } +} + +impl From<&TransportLinkMulticast> for Link { + fn from(link: &TransportLinkMulticast) -> Self { + Link::from(&link.link) + } +} + +impl From for Link { + fn from(link: TransportLinkMulticast) -> Self { + Link::from(link.link) + } +} + +pub(crate) struct TransportLinkMulticastTx { + pub(crate) inner: TransportLinkMulticast, + #[cfg(feature = "transport_compression")] + pub(crate) buffer: Option, +} + +impl TransportLinkMulticastTx { + pub(crate) async fn 
send_batch(&mut self, batch: &mut WBatch) -> ZResult<()> { + const ERR: &str = "Write error on link: "; + + let res = batch + .finalize( + #[cfg(feature = "transport_compression")] + self.buffer.as_mut(), + ) + .map_err(|_| zerror!("{ERR}{self}"))?; + + let bytes = match res { + Finalize::Batch => batch.as_slice(), + #[cfg(feature = "transport_compression")] + Finalize::Buffer => self + .buffer + .as_ref() + .ok_or_else(|| zerror!("Invalid buffer finalization"))? + .as_slice(), + }; + + // Send the message on the link + self.inner.link.write_all(bytes).await?; + + Ok(()) + } + + pub(crate) async fn send(&mut self, msg: &TransportMessage) -> ZResult { + const ERR: &str = "Write error on link: "; + + // Create the batch for serializing the message + let mut batch = WBatch::new(self.inner.batch_config()); + batch.encode(msg).map_err(|_| zerror!("{ERR}{self}"))?; + let len = batch.len() as usize; + self.send_batch(&mut batch).await?; + Ok(len) + } +} + +impl fmt::Display for TransportLinkMulticastTx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} + +impl fmt::Debug for TransportLinkMulticastTx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut s = f.debug_struct("TransportLinkMulticastRx"); + s.field("link", &self.inner.link) + .field("config", &self.inner.config); + #[cfg(feature = "transport_compression")] + { + s.field("buffer", &self.buffer.as_ref().map(|b| b.capacity())); + } + s.finish() + } +} + +pub(crate) struct TransportLinkMulticastRx { + pub(crate) inner: TransportLinkMulticast, +} + +impl TransportLinkMulticastRx { + pub async fn recv_batch(&self, buff: C) -> ZResult<(RBatch, Locator)> + where + C: Fn() -> T + Copy, + T: ZSliceBuffer + 'static, + { + const ERR: &str = "Read error from link: "; + + let mut into = (buff)(); + let (n, locator) = self.inner.link.read(into.as_mut_slice()).await?; + let buffer = ZSlice::make(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; + let mut 
batch = RBatch::new(self.inner.batch_config(), buffer); + batch.initialize(buff).map_err(|_| zerror!("{ERR}{self}"))?; + Ok((batch, locator.into_owned())) + } + + // pub async fn recv(&mut self) -> ZResult<(TransportMessage, Locator)> { + // let mtu = self.inner.config.mtu as usize; + // let (mut batch, locator) = self + // .recv_batch(|| zenoh_buffers::vec::uninit(mtu).into_boxed_slice()) + // .await?; + // let msg = batch + // .decode() + // .map_err(|_| zerror!("Decode error on link: {}", self))?; + // Ok((msg, locator)) + // } +} + +impl fmt::Display for TransportLinkMulticastRx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} -pub(super) struct TransportLinkMulticastConfig { +impl fmt::Debug for TransportLinkMulticastRx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TransportLinkMulticastRx") + .field("link", &self.inner.link) + .field("config", &self.inner.config) + .finish() + } +} + +/**************************************/ +/* TRANSPORT MULTICAST LINK UNIVERSAL */ +/**************************************/ +pub(super) struct TransportLinkMulticastConfigUniversal { pub(super) version: u8, pub(super) zid: ZenohId, pub(super) whatami: WhatAmI, @@ -46,9 +271,9 @@ pub(super) struct TransportLinkMulticastConfig { } #[derive(Clone)] -pub(super) struct TransportLinkMulticast { +pub(super) struct TransportLinkMulticastUniversal { // The underlying link - pub(super) link: LinkMulticast, + pub(super) link: TransportLinkMulticast, // The transmission pipeline pub(super) pipeline: Option, // The transport this link is associated to @@ -59,12 +284,12 @@ pub(super) struct TransportLinkMulticast { handle_rx: Option>>, } -impl TransportLinkMulticast { +impl TransportLinkMulticastUniversal { pub(super) fn new( transport: TransportMulticastInner, - link: LinkMulticast, - ) -> TransportLinkMulticast { - TransportLinkMulticast { + link: TransportLinkMulticast, + ) -> 
TransportLinkMulticastUniversal { + TransportLinkMulticastUniversal { transport, link, pipeline: None, @@ -75,10 +300,10 @@ impl TransportLinkMulticast { } } -impl TransportLinkMulticast { +impl TransportLinkMulticastUniversal { pub(super) fn start_tx( &mut self, - config: TransportLinkMulticastConfig, + config: TransportLinkMulticastConfigUniversal, priority_tx: Arc<[TransportPriorityTx]>, ) { let initial_sns: Vec = priority_tx @@ -106,6 +331,8 @@ impl TransportLinkMulticast { if self.handle_tx.is_none() { let tpc = TransmissionPipelineConf { is_streamed: false, + #[cfg(feature = "transport_compression")] + is_compression: self.link.config.is_compression, batch_size: config.batch_size, queue_size: self.transport.manager.config.queue_size, backoff: self.transport.manager.config.queue_backoff, @@ -120,7 +347,7 @@ impl TransportLinkMulticast { let handle = task::spawn(async move { let res = tx_task( consumer, - c_link.clone(), + c_link.tx(), config, initial_sns, #[cfg(feature = "stats")] @@ -155,7 +382,7 @@ impl TransportLinkMulticast { let handle = task::spawn(async move { // Start the consume task let res = rx_task( - c_link.clone(), + c_link.rx(), ctransport.clone(), c_signal.clone(), c_rx_buffer_size, @@ -194,7 +421,7 @@ impl TransportLinkMulticast { handle_tx.await; } - self.link.close().await + self.link.close(None).await } } @@ -203,8 +430,8 @@ impl TransportLinkMulticast { /*************************************/ async fn tx_task( mut pipeline: TransmissionPipelineConsumer, - link: LinkMulticast, - config: TransportLinkMulticastConfig, + mut link: TransportLinkMulticastTx, + config: TransportLinkMulticastConfigUniversal, mut last_sns: Vec, #[cfg(feature = "stats")] stats: Arc, ) -> ZResult<()> { @@ -237,15 +464,14 @@ async fn tx_task( .race(join(last_join, config.join_interval)) .await { - Action::Pull((batch, priority)) => { + Action::Pull((mut batch, priority)) => { // Send the buffer on the link - let bytes = batch.as_bytes(); - 
link.write_all(bytes).await?; + link.send_batch(&mut batch).await?; // Keep track of next SNs - if let Some(sn) = batch.latest_sn.reliable { + if let Some(sn) = batch.codec.latest_sn.reliable { last_sns[priority].reliable = sn; } - if let Some(sn) = batch.latest_sn.best_effort { + if let Some(sn) = batch.codec.latest_sn.best_effort { last_sns[priority].best_effort = sn; } #[cfg(feature = "stats")] @@ -297,8 +523,8 @@ async fn tx_task( Action::Stop => { // Drain the transmission pipeline and write remaining bytes on the wire let mut batches = pipeline.drain(); - for (b, _) in batches.drain(..) { - link.write_all(b.as_bytes()) + for (mut b, _) in batches.drain(..) { + link.send_batch(&mut b) .timeout(config.join_interval) .await .map_err(|_| { @@ -324,20 +550,30 @@ async fn tx_task( } async fn rx_task( - link: LinkMulticast, + mut link: TransportLinkMulticastRx, transport: TransportMulticastInner, signal: Signal, rx_buffer_size: usize, batch_size: BatchSize, ) -> ZResult<()> { enum Action { - Read((usize, Locator)), + Read((RBatch, Locator)), Stop, } - async fn read(link: &LinkMulticast, buffer: &mut [u8]) -> ZResult { - let (n, loc) = link.read(buffer).await?; - Ok(Action::Read((n, loc.into_owned()))) + async fn read( + link: &mut TransportLinkMulticastRx, + pool: &RecyclingObjectPool, + ) -> ZResult + where + T: ZSliceBuffer + 'static, + F: Fn() -> T, + RecyclingObject: ZSliceBuffer, + { + let (rbatch, locator) = link + .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) + .await?; + Ok(Action::Read((rbatch, locator))) } async fn stop(signal: Signal) -> ZResult { @@ -346,35 +582,26 @@ async fn rx_task( } // The pool of buffers - let mtu = link.get_mtu() as usize; + let mtu = link.inner.config.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; } + let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); while !signal.is_triggered() { - // Retrieve one buffer - let mut buffer = 
pool.try_take().unwrap_or_else(|| pool.alloc()); // Async read from the underlying link - let action = read(&link, &mut buffer).race(stop(signal.clone())).await?; + let action = read(&mut link, &pool).race(stop(signal.clone())).await?; match action { - Action::Read((n, loc)) => { - if n == 0 { - // Reading 0 bytes means error - bail!("{}: zero bytes reading", link); - } - + Action::Read((batch, locator)) => { #[cfg(feature = "stats")] - transport.stats.inc_rx_bytes(n); + transport.stats.inc_rx_bytes(zslice.len()); // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), 0, n) - .map_err(|_| zerror!("Read {} bytes but buffer is {} bytes", n, mtu))?; transport.read_messages( - zslice, - &link, + batch, + locator, batch_size, - &loc, #[cfg(feature = "stats")] &transport, )?; diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index 5d996d25ad..7cda3d8eb3 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -19,6 +19,8 @@ use async_std::sync::Mutex; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; +#[cfg(feature = "transport_compression")] +use zenoh_config::CompressionMulticastConf; #[cfg(feature = "shared-memory")] use zenoh_config::SharedMemoryConf; use zenoh_config::{Config, LinkTxConf}; @@ -36,6 +38,8 @@ pub struct TransportManagerConfigMulticast { pub is_qos: bool, #[cfg(feature = "shared-memory")] pub is_shm: bool, + #[cfg(feature = "transport_compression")] + pub is_compression: bool, } pub struct TransportManagerBuilderMulticast { @@ -46,6 +50,8 @@ pub struct TransportManagerBuilderMulticast { is_qos: bool, #[cfg(feature = "shared-memory")] is_shm: bool, + #[cfg(feature = "transport_compression")] + is_compression: bool, } pub struct TransportManagerStateMulticast { @@ -95,6 +101,12 @@ impl TransportManagerBuilderMulticast { self } + #[cfg(feature = "transport_compression")] + pub fn 
compression(mut self, is_compression: bool) -> Self { + self.is_compression = is_compression; + self + } + pub async fn from_config( mut self, config: &Config, @@ -107,9 +119,7 @@ impl TransportManagerBuilderMulticast { config.transport().multicast().join_interval().unwrap(), )); self = self.max_sessions(config.transport().multicast().max_sessions().unwrap()); - // @TODO: Force QoS deactivation in multicast since it is not supported - // self = self.qos(*config.transport().qos().enabled()); - self = self.qos(false); + self = self.qos(*config.transport().multicast().qos().enabled()); #[cfg(feature = "shared-memory")] { self = self.shm(*config.transport().shared_memory().enabled()); @@ -127,6 +137,8 @@ impl TransportManagerBuilderMulticast { is_qos: self.is_qos, #[cfg(feature = "shared-memory")] is_shm: self.is_shm, + #[cfg(feature = "transport_compression")] + is_compression: self.is_compression, }; let state = TransportManagerStateMulticast { @@ -147,6 +159,8 @@ impl Default for TransportManagerBuilderMulticast { let link_tx = LinkTxConf::default(); #[cfg(feature = "shared-memory")] let shm = SharedMemoryConf::default(); + #[cfg(feature = "transport_compression")] + let compression = CompressionMulticastConf::default(); let tmb = TransportManagerBuilderMulticast { lease: Duration::from_millis(*link_tx.lease()), @@ -156,6 +170,8 @@ impl Default for TransportManagerBuilderMulticast { is_qos: false, #[cfg(feature = "shared-memory")] is_shm: *shm.enabled(), + #[cfg(feature = "transport_compression")] + is_compression: *compression.enabled(), }; async_std::task::block_on(tmb.from_config(&Config::default())).unwrap() } diff --git a/io/zenoh-transport/src/multicast/mod.rs b/io/zenoh-transport/src/multicast/mod.rs index 9c1d8646f3..3ce0856df3 100644 --- a/io/zenoh-transport/src/multicast/mod.rs +++ b/io/zenoh-transport/src/multicast/mod.rs @@ -20,7 +20,9 @@ pub(crate) mod transport; pub(crate) mod tx; use super::common; -use crate::{TransportMulticastEventHandler, 
TransportPeer}; +use crate::{ + multicast::link::TransportLinkMulticast, TransportMulticastEventHandler, TransportPeer, +}; pub use manager::{ TransportManagerBuilderMulticast, TransportManagerConfigMulticast, TransportManagerParamsMulticast, @@ -31,7 +33,7 @@ use std::{ }; use transport::TransportMulticastInner; use zenoh_core::{zcondfeat, zread}; -use zenoh_link::{Link, LinkMulticast}; +use zenoh_link::Link; use zenoh_protocol::{ core::Bits, network::NetworkMessage, @@ -46,7 +48,7 @@ use zenoh_result::{zerror, ZResult}; pub(crate) struct TransportConfigMulticast { pub(crate) sn_resolution: Bits, pub(crate) initial_sns: Box<[PrioritySn]>, - pub(crate) link: LinkMulticast, + pub(crate) link: TransportLinkMulticast, #[cfg(feature = "shared-memory")] pub(crate) is_shm: bool, } diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 8dd4882ded..14f2fd619c 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -12,18 +12,20 @@ // ZettaScale Zenoh Team, // use super::transport::{TransportMulticastInner, TransportMulticastPeer}; -use crate::common::priority::TransportChannelRx; +use crate::common::{ + batch::{Decode, RBatch}, + priority::TransportChannelRx, +}; use std::sync::MutexGuard; -use zenoh_buffers::reader::{HasReader, Reader}; -use zenoh_buffers::ZSlice; -use zenoh_codec::{RCodec, Zenoh080}; use zenoh_core::{zlock, zread}; -use zenoh_link::LinkMulticast; -use zenoh_protocol::core::{Priority, Reliability}; -use zenoh_protocol::transport::{ - BatchSize, Close, Fragment, Frame, Join, KeepAlive, TransportBody, TransportSn, +use zenoh_protocol::{ + core::{Locator, Priority, Reliability}, + network::NetworkMessage, + transport::{ + BatchSize, Close, Fragment, Frame, Join, KeepAlive, TransportBody, TransportMessage, + TransportSn, + }, }; -use zenoh_protocol::{core::Locator, network::NetworkMessage, transport::TransportMessage}; use zenoh_result::{bail, zerror, ZResult}; 
/*************************************/ @@ -115,7 +117,7 @@ impl TransportMulticastInner { locator, join.zid, join.batch_size, - batch_size, + batch_size ); return Ok(()); } @@ -247,18 +249,15 @@ impl TransportMulticastInner { pub(super) fn read_messages( &self, - mut zslice: ZSlice, - link: &LinkMulticast, + mut batch: RBatch, + locator: Locator, batch_size: BatchSize, - locator: &Locator, #[cfg(feature = "stats")] transport: &TransportMulticastInner, ) -> ZResult<()> { - let codec = Zenoh080::new(); - let mut reader = zslice.reader(); - while reader.can_read() { - let msg: TransportMessage = codec - .read(&mut reader) - .map_err(|_| zerror!("{}: decoding error", link))?; + while !batch.is_empty() { + let msg: TransportMessage = batch + .decode() + .map_err(|_| zerror!("{}: decoding error", locator))?; log::trace!("Received: {:?}", msg); @@ -268,7 +267,7 @@ impl TransportMulticastInner { } let r_guard = zread!(self.peers); - match r_guard.get(locator) { + match r_guard.get(&locator) { Some(peer) => { peer.active(); match msg.body { @@ -280,7 +279,7 @@ impl TransportMulticastInner { TransportBody::KeepAlive(KeepAlive { .. }) => {} TransportBody::Close(Close { reason, .. 
}) => { drop(r_guard); - self.del_peer(locator, reason)?; + self.del_peer(&locator, reason)?; } _ => { log::debug!( @@ -294,7 +293,7 @@ impl TransportMulticastInner { None => { drop(r_guard); if let TransportBody::Join(join) = msg.body { - self.handle_join_from_unknown(join, locator, batch_size)?; + self.handle_join_from_unknown(join, &locator, batch_size)?; } } } diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index c4412447cf..ca6cddaf2b 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -12,12 +12,14 @@ // ZettaScale Zenoh Team, // use super::common::priority::{TransportPriorityRx, TransportPriorityTx}; -use super::link::{TransportLinkMulticast, TransportLinkMulticastConfig}; +use super::link::{TransportLinkMulticastConfigUniversal, TransportLinkMulticastUniversal}; #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ - TransportConfigMulticast, TransportManager, TransportMulticastEventHandler, TransportPeer, - TransportPeerEventHandler, + multicast::{ + link::TransportLinkMulticast, TransportConfigMulticast, TransportMulticastEventHandler, + }, + TransportManager, TransportPeer, TransportPeerEventHandler, }; use async_trait::async_trait; use std::{ @@ -29,7 +31,7 @@ use std::{ time::Duration, }; use zenoh_core::{zcondfeat, zread, zwrite}; -use zenoh_link::{Link, LinkMulticast, Locator}; +use zenoh_link::{Link, Locator}; use zenoh_protocol::core::Resolution; use zenoh_protocol::transport::{batch_size, Close, TransportMessage}; use zenoh_protocol::{ @@ -96,7 +98,7 @@ pub(crate) struct TransportMulticastInner { // The multicast locator - Convenience for logging pub(super) locator: Locator, // The multicast link - pub(super) link: Arc>>, + pub(super) link: Arc>>, // The callback pub(super) callback: Arc>>>, // The timer for peer leases @@ -129,7 +131,7 @@ impl TransportMulticastInner { manager, priority_tx: 
priority_tx.into_boxed_slice().into(), peers: Arc::new(RwLock::new(HashMap::new())), - locator: config.link.get_dst().to_owned(), + locator: config.link.link.get_dst().to_owned(), link: Arc::new(RwLock::new(None)), callback: Arc::new(RwLock::new(None)), timer: Arc::new(Timer::new(false)), @@ -137,7 +139,7 @@ impl TransportMulticastInner { stats, }; - let link = TransportLinkMulticast::new(ti.clone(), config.link); + let link = TransportLinkMulticastUniversal::new(ti.clone(), config.link); let mut guard = zwrite!(ti.link); *guard = Some(link); drop(guard); @@ -170,7 +172,7 @@ impl TransportMulticastInner { zread!(self.callback).clone() } - pub(crate) fn get_link(&self) -> LinkMulticast { + pub(crate) fn get_link(&self) -> TransportLinkMulticast { zread!(self.link).as_ref().unwrap().link.clone() } @@ -244,9 +246,9 @@ impl TransportMulticastInner { .manager .config .batch_size - .min(l.link.get_mtu()) + .min(l.link.link.get_mtu()) .min(batch_size::MULTICAST); - let config = TransportLinkMulticastConfig { + let config = TransportLinkMulticastConfigUniversal { version: self.manager.config.version, zid: self.manager.config.zid, whatami: self.manager.config.whatami, @@ -295,7 +297,7 @@ impl TransportMulticastInner { .manager .config .batch_size - .min(l.link.get_mtu()) + .min(l.link.link.get_mtu()) .min(batch_size::MULTICAST); l.start_rx(batch_size); Ok(()) diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 412affd4ea..112b471b9e 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -14,11 +14,14 @@ #[cfg(feature = "shared-memory")] use crate::unicast::shared_memory_unicast::Challenge; use crate::{ - unicast::establishment::{ - close_link, compute_sn, ext, finalize_transport, AcceptFsm, Cookie, InputFinalize, - Zenoh080Cookie, + unicast::{ + establishment::{ + compute_sn, ext, finalize_transport, AcceptFsm, Cookie, 
InputFinalize, Zenoh080Cookie, + }, + link::{TransportLinkUnicast, TransportLinkUnicastConfig, TransportLinkUnicastDirection}, + TransportConfigUnicast, }, - TransportConfigUnicast, TransportManager, + TransportManager, }; use async_std::sync::Mutex; use async_trait::async_trait; @@ -28,7 +31,7 @@ use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZSlice}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_core::{zasynclock, zcondfeat, zerror}; use zenoh_crypto::{BlockCipher, PseudoRng}; -use zenoh_link::{LinkUnicast, LinkUnicastDirection}; +use zenoh_link::LinkUnicast; use zenoh_protocol::{ core::{Field, Resolution, WhatAmI, ZenohId}, transport::{ @@ -41,21 +44,29 @@ use zenoh_result::ZResult; pub(super) type AcceptError = (zenoh_result::Error, Option); -struct StateZenoh { +struct StateTransport { batch_size: BatchSize, resolution: Resolution, -} - -struct State { - zenoh: StateZenoh, ext_qos: ext::qos::StateAccept, #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::StateAccept, #[cfg(feature = "shared-memory")] ext_shm: ext::shm::StateAccept, + ext_lowlatency: ext::lowlatency::StateAccept, +} + +#[cfg(any(feature = "transport_auth", feature = "transport_compression"))] +struct StateLink { #[cfg(feature = "transport_auth")] ext_auth: ext::auth::StateAccept, - ext_lowlatency: ext::lowlatency::StateAccept, + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::StateAccept, +} + +struct State { + transport: StateTransport, + #[cfg(any(feature = "transport_auth", feature = "transport_compression"))] + link: StateLink, } // InitSyn @@ -106,7 +117,7 @@ struct SendOpenAckOut { // Fsm struct AcceptLink<'a> { - link: &'a LinkUnicast, + link: &'a mut TransportLinkUnicast, prng: &'a Mutex, cipher: &'a BlockCipher, ext_qos: ext::qos::QoSFsm<'a>, @@ -117,16 +128,18 @@ struct AcceptLink<'a> { #[cfg(feature = "transport_auth")] ext_auth: ext::auth::AuthFsm<'a>, ext_lowlatency: ext::lowlatency::LowLatencyFsm<'a>, + 
#[cfg(feature = "transport_compression")] + ext_compression: ext::compression::CompressionFsm<'a>, } #[async_trait] -impl<'a> AcceptFsm for AcceptLink<'a> { +impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { type Error = AcceptError; type RecvInitSynIn = (&'a mut State, RecvInitSynIn); type RecvInitSynOut = RecvInitSynOut; async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { let (state, input) = input; @@ -160,38 +173,32 @@ impl<'a> AcceptFsm for AcceptLink<'a> { } // Compute the minimum SN resolution - state.zenoh.resolution = { + state.transport.resolution = { let mut res = Resolution::default(); // Frame SN let i_fsn_res = init_syn.resolution.get(Field::FrameSN); - let m_fsn_res = state.zenoh.resolution.get(Field::FrameSN); + let m_fsn_res = state.transport.resolution.get(Field::FrameSN); res.set(Field::FrameSN, i_fsn_res.min(m_fsn_res)); // Request ID let i_rid_res = init_syn.resolution.get(Field::RequestID); - let m_rid_res = state.zenoh.resolution.get(Field::RequestID); + let m_rid_res = state.transport.resolution.get(Field::RequestID); res.set(Field::RequestID, i_rid_res.min(m_rid_res)); res }; // Compute the minimum batch size - state.zenoh.batch_size = state - .zenoh + state.transport.batch_size = state + .transport .batch_size .min(init_syn.batch_size) .min(batch_size::UNICAST); // Extension QoS self.ext_qos - .recv_init_syn((&mut state.ext_qos, init_syn.ext_qos)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - self.ext_lowlatency - .recv_init_syn((&mut state.ext_lowlatency, init_syn.ext_lowlatency)) + .recv_init_syn((&mut state.transport.ext_qos, init_syn.ext_qos)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -199,21 +206,34 @@ impl<'a> AcceptFsm for AcceptLink<'a> { #[cfg(feature = "shared-memory")] let ext_shm = self .ext_shm - .recv_init_syn((&mut state.ext_shm, init_syn.ext_shm)) + .recv_init_syn((&mut state.transport.ext_shm, init_syn.ext_shm)) .await 
.map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Auth #[cfg(feature = "transport_auth")] self.ext_auth - .recv_init_syn((&mut state.ext_auth, init_syn.ext_auth)) + .recv_init_syn((&mut state.link.ext_auth, init_syn.ext_auth)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension MultiLink #[cfg(feature = "transport_multilink")] self.ext_mlink - .recv_init_syn((&mut state.ext_mlink, init_syn.ext_mlink)) + .recv_init_syn((&mut state.transport.ext_mlink, init_syn.ext_mlink)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension LowLatency + self.ext_lowlatency + .recv_init_syn((&mut state.transport.ext_lowlatency, init_syn.ext_lowlatency)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + #[cfg(feature = "transport_compression")] + self.ext_compression + .recv_init_syn((&mut state.link.ext_compression, init_syn.ext_compression)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -229,7 +249,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { type SendInitAckIn = (State, SendInitAckIn); type SendInitAckOut = SendInitAckOut; async fn send_init_ack( - &self, + self, input: Self::SendInitAckIn, ) -> Result { #[allow(unused_mut)] // Required for "shared-memory" feature @@ -238,14 +258,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { // Extension QoS let ext_qos = self .ext_qos - .send_init_ack(&state.ext_qos) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - let ext_lowlatency = self - .ext_lowlatency - .send_init_ack(&state.ext_lowlatency) + .send_init_ack(&state.transport.ext_qos) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -253,7 +266,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_shm = zcondfeat!( "shared-memory", self.ext_shm - .send_init_ack((&mut state.ext_shm, input.ext_shm)) + .send_init_ack((&mut state.transport.ext_shm, input.ext_shm)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -263,7 
+276,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_auth = zcondfeat!( "transport_auth", self.ext_auth - .send_init_ack(&state.ext_auth) + .send_init_ack(&state.link.ext_auth) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -273,7 +286,24 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_mlink = zcondfeat!( "transport_multilink", self.ext_mlink - .send_init_ack(&state.ext_mlink) + .send_init_ack(&state.transport.ext_mlink) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None + ); + + // Extension LowLatency + let ext_lowlatency = self + .ext_lowlatency + .send_init_ack(&state.transport.ext_lowlatency) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + let ext_compression = zcondfeat!( + "transport_compression", + self.ext_compression + .send_init_ack(&state.link.ext_compression) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None ); @@ -284,17 +314,19 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let cookie = Cookie { zid: input.other_zid, whatami: input.other_whatami, - resolution: state.zenoh.resolution, - batch_size: state.zenoh.batch_size, + resolution: state.transport.resolution, + batch_size: state.transport.batch_size, nonce: cookie_nonce, - ext_qos: state.ext_qos, + ext_qos: state.transport.ext_qos, #[cfg(feature = "transport_multilink")] - ext_mlink: state.ext_mlink, + ext_mlink: state.transport.ext_mlink, #[cfg(feature = "shared-memory")] - ext_shm: state.ext_shm, + ext_shm: state.transport.ext_shm, #[cfg(feature = "transport_auth")] - ext_auth: state.ext_auth, - ext_lowlatency: state.ext_lowlatency, + ext_auth: state.link.ext_auth, + ext_lowlatency: state.transport.ext_lowlatency, + #[cfg(feature = "transport_compression")] + ext_compression: state.link.ext_compression, }; let mut encrypted = vec![]; @@ -317,14 +349,15 @@ impl<'a> AcceptFsm for AcceptLink<'a> { version: input.mine_version, whatami: input.mine_whatami, zid: input.mine_zid, - resolution:
state.zenoh.resolution, - batch_size: state.zenoh.batch_size, + resolution: state.transport.resolution, + batch_size: state.transport.batch_size, cookie, ext_qos, ext_shm, ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } .into(); @@ -341,7 +374,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { type RecvOpenSynIn = RecvOpenSynIn; type RecvOpenSynOut = (State, RecvOpenSynOut); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { let msg = self @@ -400,50 +433,62 @@ impl<'a> AcceptFsm for AcceptLink<'a> { // Rebuild the state from the cookie let mut state = State { - zenoh: StateZenoh { + transport: StateTransport { batch_size: cookie.batch_size, resolution: cookie.resolution, + ext_qos: cookie.ext_qos, + #[cfg(feature = "transport_multilink")] + ext_mlink: cookie.ext_mlink, + #[cfg(feature = "shared-memory")] + ext_shm: cookie.ext_shm, + ext_lowlatency: cookie.ext_lowlatency, + }, + #[cfg(any(feature = "transport_auth", feature = "transport_compression"))] + link: StateLink { + #[cfg(feature = "transport_auth")] + ext_auth: cookie.ext_auth, + #[cfg(feature = "transport_compression")] + ext_compression: cookie.ext_compression, }, - ext_qos: cookie.ext_qos, - #[cfg(feature = "transport_multilink")] - ext_mlink: cookie.ext_mlink, - #[cfg(feature = "shared-memory")] - ext_shm: cookie.ext_shm, - #[cfg(feature = "transport_auth")] - ext_auth: cookie.ext_auth, - ext_lowlatency: cookie.ext_lowlatency, }; // Extension QoS self.ext_qos - .recv_open_syn((&mut state.ext_qos, open_syn.ext_qos)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - self.ext_lowlatency - .recv_open_syn((&mut state.ext_lowlatency, open_syn.ext_lowlatency)) + .recv_open_syn((&mut state.transport.ext_qos, open_syn.ext_qos)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm #[cfg(feature = "shared-memory")] self.ext_shm - .recv_open_syn((&mut state.ext_shm, open_syn.ext_shm)) + .recv_open_syn((&mut 
state.transport.ext_shm, open_syn.ext_shm)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Auth #[cfg(feature = "transport_auth")] self.ext_auth - .recv_open_syn((&mut state.ext_auth, open_syn.ext_auth)) + .recv_open_syn((&mut state.link.ext_auth, open_syn.ext_auth)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension MultiLink #[cfg(feature = "transport_multilink")] self.ext_mlink - .recv_open_syn((&mut state.ext_mlink, open_syn.ext_mlink)) + .recv_open_syn((&mut state.transport.ext_mlink, open_syn.ext_mlink)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension LowLatency + self.ext_lowlatency + .recv_open_syn((&mut state.transport.ext_lowlatency, open_syn.ext_lowlatency)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + #[cfg(feature = "transport_compression")] + self.ext_compression + .recv_open_syn((&mut state.link.ext_compression, open_syn.ext_compression)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -459,7 +504,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { type SendOpenAckIn = (&'a mut State, SendOpenAckIn); type SendOpenAckOut = SendOpenAckOut; async fn send_open_ack( - &self, + self, input: Self::SendOpenAckIn, ) -> Result { let (state, input) = input; @@ -467,14 +512,14 @@ impl<'a> AcceptFsm for AcceptLink<'a> { // Extension QoS let ext_qos = self .ext_qos - .send_open_ack(&state.ext_qos) + .send_open_ack(&state.transport.ext_qos) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension LowLatency let ext_lowlatency = self .ext_lowlatency - .send_open_ack(&state.ext_lowlatency) + .send_open_ack(&state.transport.ext_lowlatency) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -482,7 +527,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_shm = zcondfeat!( "shared-memory", self.ext_shm - .send_open_ack(&mut state.ext_shm) + .send_open_ack(&mut state.transport.ext_shm) .await .map_err(|e| (e, 
Some(close::reason::GENERIC)))?, None @@ -492,7 +537,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_auth = zcondfeat!( "transport_auth", self.ext_auth - .send_open_ack(&state.ext_auth) + .send_open_ack(&state.link.ext_auth) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -502,14 +547,25 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_mlink = zcondfeat!( "transport_multilink", self.ext_mlink - .send_open_ack(&state.ext_mlink) + .send_open_ack(&state.transport.ext_mlink) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None + ); + + // Extension Compression + let ext_compression = zcondfeat!( + "transport_compression", + self.ext_compression + .send_open_ack(&state.link.ext_compression) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None ); // Build OpenAck message - let mine_initial_sn = compute_sn(input.mine_zid, input.other_zid, state.zenoh.resolution); + let mine_initial_sn = + compute_sn(input.mine_zid, input.other_zid, state.transport.resolution); let open_ack = OpenAck { lease: input.mine_lease, initial_sn: mine_initial_sn, @@ -518,6 +574,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, }; // Do not send the OpenAck right now since we might still incur in MAX_LINKS error @@ -528,8 +585,16 @@ impl<'a> AcceptFsm for AcceptLink<'a> { } pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) -> ZResult<()> { - let fsm = AcceptLink { - link, + let mtu = link.get_mtu(); + let config = TransportLinkUnicastConfig { + mtu, + direction: TransportLinkUnicastDirection::Inbound, + #[cfg(feature = "transport_compression")] + is_compression: false, + }; + let mut link = TransportLinkUnicast::new(link.clone(), config); + let mut fsm = AcceptLink { + link: &mut link, prng: &manager.prng, cipher: &manager.cipher, ext_qos: ext::qos::QoSFsm::new(), @@ -540,6 +605,8 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) 
#[cfg(feature = "transport_auth")] ext_auth: manager.state.unicast.authenticator.fsm(&manager.prng), ext_lowlatency: ext::lowlatency::LowLatencyFsm::new(), + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::CompressionFsm::new(), }; // Init handshake @@ -549,7 +616,7 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) Ok(output) => output, Err((e, reason)) => { log::debug!("{}", e); - close_link(link, reason).await; + let _ = link.close(reason).await; return Err(e); } } @@ -558,26 +625,35 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) let iack_out = { let mut state = State { - zenoh: StateZenoh { - batch_size: manager.config.batch_size, + transport: StateTransport { + batch_size: manager.config.batch_size.min(batch_size::UNICAST).min(mtu), resolution: manager.config.resolution, + ext_qos: ext::qos::StateAccept::new(manager.config.unicast.is_qos), + #[cfg(feature = "transport_multilink")] + ext_mlink: manager + .state + .unicast + .multilink + .accept(manager.config.unicast.max_links > 1), + #[cfg(feature = "shared-memory")] + ext_shm: ext::shm::StateAccept::new(manager.config.unicast.is_shm), + ext_lowlatency: ext::lowlatency::StateAccept::new( + manager.config.unicast.is_lowlatency, + ), + }, + #[cfg(any(feature = "transport_auth", feature = "transport_compression"))] + link: StateLink { + #[cfg(feature = "transport_auth")] + ext_auth: manager + .state + .unicast + .authenticator + .accept(&mut *zasynclock!(manager.prng)), + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::StateAccept::new( + manager.config.unicast.is_compression, + ), }, - ext_qos: ext::qos::StateAccept::new(manager.config.unicast.is_qos), - ext_lowlatency: ext::lowlatency::StateAccept::new(manager.config.unicast.is_lowlatency), - #[cfg(feature = "transport_multilink")] - ext_mlink: manager - .state - .unicast - .multilink - .accept(manager.config.unicast.max_links > 1), 
- #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::StateAccept::new(manager.config.unicast.is_shm), - #[cfg(feature = "transport_auth")] - ext_auth: manager - .state - .unicast - .authenticator - .accept(&mut *zasynclock!(manager.prng)), }; // Let's scope the Init phase in such a way memory is freed by Rust @@ -618,21 +694,25 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) let config = TransportConfigUnicast { zid: osyn_out.other_zid, whatami: osyn_out.other_whatami, - sn_resolution: state.zenoh.resolution.get(Field::FrameSN), + sn_resolution: state.transport.resolution.get(Field::FrameSN), tx_initial_sn: oack_out.open_ack.initial_sn, - is_qos: state.ext_qos.is_qos(), + is_qos: state.transport.ext_qos.is_qos(), #[cfg(feature = "transport_multilink")] - multilink: state.ext_mlink.multilink(), + multilink: state.transport.ext_mlink.multilink(), #[cfg(feature = "shared-memory")] - is_shm: state.ext_shm.is_shm(), - is_lowlatency: state.ext_lowlatency.is_lowlatency(), + is_shm: state.transport.ext_shm.is_shm(), + is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), }; - let transport = step!( - manager - .init_transport_unicast(config, link.clone(), LinkUnicastDirection::Inbound) - .await - ); + let a_config = TransportLinkUnicastConfig { + mtu: state.transport.batch_size, + direction: TransportLinkUnicastDirection::Inbound, + #[cfg(feature = "transport_compression")] + is_compression: state.link.ext_compression.is_compression(), + }; + let a_link = TransportLinkUnicast::new(link.link.clone(), a_config); + let s_link = format!("{:?}", a_link); + let transport = step!(manager.init_transport_unicast(config, a_link).await); // Send the open_ack on the link step!(link @@ -651,18 +731,16 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) let input = InputFinalize { transport: transport.clone(), other_lease: osyn_out.other_lease, - agreed_batch_size: state.zenoh.batch_size, }; - 
step!(finalize_transport(link, manager, input) + step!(finalize_transport(&link, manager, input) .await .map_err(|e| (e, Some(close::reason::INVALID)))); log::debug!( - "New transport link accepted from {} to {}: {}. Batch size: {}.", + "New transport link accepted from {} to {}: {}.", osyn_out.other_zid, manager.config.zid, - link, - state.zenoh.batch_size, + s_link, ); Ok(()) diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index 0c6b5519e8..e9916be7e6 100644 --- a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -38,6 +38,8 @@ pub(crate) struct Cookie { #[cfg(feature = "transport_auth")] pub(crate) ext_auth: ext::auth::StateAccept, pub(crate) ext_lowlatency: ext::lowlatency::StateAccept, + #[cfg(feature = "transport_compression")] + pub(crate) ext_compression: ext::compression::StateAccept, } impl WCodec<&Cookie, &mut W> for Zenoh080 @@ -62,6 +64,8 @@ where #[cfg(feature = "transport_auth")] self.write(&mut *writer, &x.ext_auth)?; self.write(&mut *writer, &x.ext_lowlatency)?; + #[cfg(feature = "transport_compression")] + self.write(&mut *writer, &x.ext_compression)?; Ok(()) } @@ -90,6 +94,8 @@ where #[cfg(feature = "transport_auth")] let ext_auth: ext::auth::StateAccept = self.read(&mut *reader)?; let ext_lowlatency: ext::lowlatency::StateAccept = self.read(&mut *reader)?; + #[cfg(feature = "transport_compression")] + let ext_compression: ext::compression::StateAccept = self.read(&mut *reader)?; let cookie = Cookie { zid, @@ -105,6 +111,8 @@ where #[cfg(feature = "transport_auth")] ext_auth, ext_lowlatency, + #[cfg(feature = "transport_compression")] + ext_compression, }; Ok(cookie) @@ -174,6 +182,8 @@ impl Cookie { #[cfg(feature = "transport_auth")] ext_auth: ext::auth::StateAccept::rand(), ext_lowlatency: ext::lowlatency::StateAccept::rand(), + #[cfg(feature = "transport_compression")] + ext_compression: 
ext::compression::StateAccept::rand(), } } } diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs index 0e9c385e46..99a11ee3a9 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs @@ -288,13 +288,13 @@ macro_rules! ztake { /* OPEN */ /*************************************/ #[async_trait] -impl<'a> OpenFsm for AuthFsm<'a> { +impl<'a> OpenFsm for &'a AuthFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, state: Self::SendInitSynIn, ) -> Result { const S: &str = "Auth extension - Send InitSyn."; @@ -341,7 +341,7 @@ impl<'a> OpenFsm for AuthFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { const S: &str = "Auth extension - Recv InitAck."; @@ -385,7 +385,7 @@ impl<'a> OpenFsm for AuthFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, state: Self::SendOpenSynIn, ) -> Result { const S: &str = "Auth extension - Send OpenSyn."; @@ -432,7 +432,7 @@ impl<'a> OpenFsm for AuthFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result { const S: &str = "Auth extension - Recv OpenAck."; @@ -478,13 +478,13 @@ impl<'a> OpenFsm for AuthFsm<'a> { /* ACCEPT */ /*************************************/ #[async_trait] -impl<'a> AcceptFsm for AuthFsm<'a> { +impl<'a> AcceptFsm for &'a AuthFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "Auth extension - Recv InitSyn."; @@ -528,7 +528,7 @@ 
impl<'a> AcceptFsm for AuthFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, state: Self::SendInitAckIn, ) -> Result { const S: &str = "Auth extension - Send InitAck."; @@ -575,7 +575,7 @@ impl<'a> AcceptFsm for AuthFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { const S: &str = "Auth extension - Recv OpenSyn."; @@ -619,7 +619,7 @@ impl<'a> AcceptFsm for AuthFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, state: Self::SendOpenAckIn, ) -> Result { const S: &str = "Auth extension - Send OpenAck."; @@ -663,133 +663,3 @@ impl<'a> AcceptFsm for AuthFsm<'a> { Ok(output) } } - -// #[derive(Clone)] -// pub struct TransportAuthenticator(Arc); - -// impl TransportAuthenticator { -// pub async fn from_config(_config: &Config) -> ZResult> { -// #[allow(unused_mut)] -// let mut pas = HashSet::new(); - -// #[cfg(feature = "auth_pubkey")] -// { -// let mut res = PubKeyAuthenticator::from_config(_config).await?; -// if let Some(pa) = res.take() { -// pas.insert(pa.into()); -// } -// } - -// #[cfg(feature = "auth_usrpwd")] -// { -// let mut res = UserPasswordAuthenticator::from_config(_config).await?; -// if let Some(pa) = res.take() { -// pas.insert(pa.into()); -// } -// } - -// Ok(pas) -// } -// } - -/*************************************/ -/* ACCEPT */ -/*************************************/ - -// Return the attachment to be included in the InitSyn message. -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] the initial InitSyn message will be sent on -// -// * `node_id` - The [`ZenohId`][ZenohId] of the sender of the InitSyn, i.e., the peer -// initiating a new transport. 
-// -// async fn get_init_syn_properties( -// &self, -// link: &AuthenticatedLink, -// node_id: &ZenohId, -// ) -> ZResult>>; - -// Return the attachment to be included in the InitAck message to be sent -// in response of the authenticated InitSyn. -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] the InitSyn message was received on -// -// * `cookie` - The Cookie containing the internal state -// -// * `property` - The optional `Property` included in the InitSyn message -// -// async fn handle_init_syn( -// &self, -// link: &AuthenticatedLink, -// cookie: &Cookie, -// property: Option>, -// ) -> ZResult<(Option>, Option>)>; // (Attachment, Cookie) - -// Return the attachment to be included in the OpenSyn message to be sent -// in response of the authenticated InitAck. -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] the InitSyn message was received on -// -// * `node_id` - The [`ZenohId`][ZenohId] of the sender of the InitAck message -// -// * `sn_resolution` - The sn_resolution negotiated by the sender of the InitAck message -// -// * `properties` - The optional `Property` included in the InitAck message -// -// async fn handle_init_ack( -// &self, -// link: &AuthenticatedLink, -// node_id: &ZenohId, -// sn_resolution: u64, -// property: Option>, -// ) -> ZResult>>; - -// Return the attachment to be included in the OpenAck message to be sent -// in response of the authenticated OpenSyn. -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] the OpenSyn message was received on -// -// * `properties` - The optional `Property` included in the OpenSyn message -// -// * `cookie` - The optional `Property` included in the OpenSyn message -// -// async fn handle_open_syn( -// &self, -// link: &AuthenticatedLink, -// cookie: &Cookie, -// property: (Option>, Option>), // (Attachment, Cookie) -// ) -> ZResult>>; - -// Auhtenticate the OpenAck. 
No message is sent back in response to an OpenAck -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] the OpenAck message was received on -// -// * `properties` - The optional `Property` included in the OpenAck message -// -// async fn handle_open_ack( -// &self, -// link: &AuthenticatedLink, -// property: Option>, -// ) -> ZResult>>; - -// Handle any error on a link. This callback is mainly used to clean-up any internal state -// of the authenticator in such a way no unnecessary data is left around -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] generating the error -// -// async fn handle_link_err(&self, link: &AuthenticatedLink); - -// Handle any error on a link. This callback is mainly used to clean-up any internal state -// of the authenticator in such a way no unnecessary data is left around -// -// # Arguments -// * `peerd_id` - The [`ZenohId`][ZenohId] of the transport being closed. -// -// async fn handle_close(&self, node_id: &ZenohId); -// } diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs index d34480fded..25ecc0e24e 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs @@ -363,13 +363,13 @@ impl StateOpen { } #[async_trait] -impl<'a> OpenFsm for AuthPubKeyFsm<'a> { +impl<'a> OpenFsm for &'a AuthPubKeyFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, _input: Self::SendInitSynIn, ) -> Result { const S: &str = "PubKey extension - Send InitSyn."; @@ -392,7 +392,7 @@ impl<'a> OpenFsm for AuthPubKeyFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { const S: &str = "PubKey extension - Recv 
InitAck."; @@ -438,7 +438,7 @@ impl<'a> OpenFsm for AuthPubKeyFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, state: Self::SendOpenSynIn, ) -> Result { const S: &str = "PubKey extension - Send OpenSyn."; @@ -461,7 +461,7 @@ impl<'a> OpenFsm for AuthPubKeyFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result { const S: &str = "PubKey extension - Recv OpenAck."; @@ -539,13 +539,13 @@ impl PartialEq for StateAccept { } #[async_trait] -impl<'a> AcceptFsm for AuthPubKeyFsm<'a> { +impl<'a> AcceptFsm for &'a AuthPubKeyFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "PubKey extension - Recv InitSyn."; @@ -583,7 +583,7 @@ impl<'a> AcceptFsm for AuthPubKeyFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, state: Self::SendInitAckIn, ) -> Result { const S: &str = "PubKey extension - Send InitAck."; @@ -607,7 +607,7 @@ impl<'a> AcceptFsm for AuthPubKeyFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { const S: &str = "PubKey extension - Recv OpenSyn."; @@ -646,7 +646,7 @@ impl<'a> AcceptFsm for AuthPubKeyFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, _input: Self::SendOpenAckIn, ) -> Result { const S: &str = "PubKey extension - Send OpenAck."; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index 521986ae00..d66a4a02c7 100644 --- 
a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -276,13 +276,13 @@ where /// ZExtUnit #[async_trait] -impl<'a> OpenFsm for AuthUsrPwdFsm<'a> { +impl<'a> OpenFsm for &'a AuthUsrPwdFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, _input: Self::SendInitSynIn, ) -> Result { let output = zasyncread!(self.inner) @@ -295,7 +295,7 @@ impl<'a> OpenFsm for AuthUsrPwdFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { const S: &str = "UsrPwd extension - Recv InitSyn."; @@ -316,7 +316,7 @@ impl<'a> OpenFsm for AuthUsrPwdFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, state: Self::SendOpenSynIn, ) -> Result { const S: &str = "UsrPwd extension - Send OpenSyn."; @@ -352,7 +352,7 @@ impl<'a> OpenFsm for AuthUsrPwdFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result { const S: &str = "UsrPwd extension - Recv OpenAck."; @@ -370,13 +370,13 @@ impl<'a> OpenFsm for AuthUsrPwdFsm<'a> { /* ACCEPT */ /*************************************/ #[async_trait] -impl<'a> AcceptFsm for AuthUsrPwdFsm<'a> { +impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "UsrPwd extension - Recv InitSyn."; @@ -392,7 +392,7 @@ impl<'a> AcceptFsm for AuthUsrPwdFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, state: Self::SendInitAckIn, ) -> Result { 
Ok(Some(ZExtZ64::new(state.nonce))) @@ -401,7 +401,7 @@ impl<'a> AcceptFsm for AuthUsrPwdFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { const S: &str = "UsrPwd extension - Recv OpenSyn."; @@ -436,7 +436,7 @@ impl<'a> AcceptFsm for AuthUsrPwdFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, _input: Self::SendOpenAckIn, ) -> Result { Ok(Some(ZExtUnit::new())) diff --git a/io/zenoh-transport/src/unicast/establishment/ext/compression.rs b/io/zenoh-transport/src/unicast/establishment/ext/compression.rs new file mode 100644 index 0000000000..2b57eb85db --- /dev/null +++ b/io/zenoh-transport/src/unicast/establishment/ext/compression.rs @@ -0,0 +1,196 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; +use async_trait::async_trait; +use core::marker::PhantomData; +use zenoh_buffers::{ + reader::{DidntRead, Reader}, + writer::{DidntWrite, Writer}, +}; +use zenoh_codec::{RCodec, WCodec, Zenoh080}; +use zenoh_protocol::transport::{init, open}; +use zenoh_result::Error as ZError; + +// Extension Fsm +pub(crate) struct CompressionFsm<'a> { + _a: PhantomData<&'a ()>, +} + +impl<'a> CompressionFsm<'a> { + pub(crate) const fn new() -> Self { + Self { _a: PhantomData } + } +} + +/*************************************/ +/* OPEN */ +/*************************************/ +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) struct StateOpen { + is_compression: bool, +} + +impl StateOpen { + pub(crate) const fn new(is_compression: bool) -> Self { + Self { is_compression } + } + + pub(crate) const fn is_compression(&self) -> bool { + self.is_compression + } +} + +#[async_trait] +impl<'a> OpenFsm for &'a CompressionFsm<'a> { + type Error = ZError; + + type SendInitSynIn = &'a StateOpen; + type SendInitSynOut = Option; + async fn send_init_syn( + self, + state: Self::SendInitSynIn, + ) -> Result { + let output = state + .is_compression + .then_some(init::ext::Compression::new()); + Ok(output) + } + + type RecvInitAckIn = (&'a mut StateOpen, Option); + type RecvInitAckOut = (); + async fn recv_init_ack( + self, + input: Self::RecvInitAckIn, + ) -> Result { + let (state, other_ext) = input; + state.is_compression &= other_ext.is_some(); + Ok(()) + } + + type SendOpenSynIn = &'a StateOpen; + type SendOpenSynOut = Option; + async fn send_open_syn( + self, + _state: Self::SendOpenSynIn, + ) -> Result { + Ok(None) + } + + type RecvOpenAckIn = (&'a mut StateOpen, Option); + type RecvOpenAckOut = (); + async fn recv_open_ack( + self, + _state: Self::RecvOpenAckIn, + ) -> Result { + Ok(()) + } +} + 
+/*************************************/ +/* ACCEPT */ +/*************************************/ +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) struct StateAccept { + is_compression: bool, +} + +impl StateAccept { + pub(crate) const fn new(is_compression: bool) -> Self { + Self { is_compression } + } + + pub(crate) const fn is_compression(&self) -> bool { + self.is_compression + } + + #[cfg(test)] + pub(crate) fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + Self::new(rng.gen_bool(0.5)) + } +} + +// Codec +impl WCodec<&StateAccept, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &StateAccept) -> Self::Output { + let is_compression = u8::from(x.is_compression); + self.write(&mut *writer, is_compression)?; + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let is_compression: u8 = self.read(&mut *reader)?; + let is_compression = is_compression == 1; + Ok(StateAccept { is_compression }) + } +} + +#[async_trait] +impl<'a> AcceptFsm for &'a CompressionFsm<'a> { + type Error = ZError; + + type RecvInitSynIn = (&'a mut StateAccept, Option); + type RecvInitSynOut = (); + async fn recv_init_syn( + self, + input: Self::RecvInitSynIn, + ) -> Result { + let (state, other_ext) = input; + state.is_compression &= other_ext.is_some(); + Ok(()) + } + + type SendInitAckIn = &'a StateAccept; + type SendInitAckOut = Option; + async fn send_init_ack( + self, + state: Self::SendInitAckIn, + ) -> Result { + let output = state + .is_compression + .then_some(init::ext::Compression::new()); + Ok(output) + } + + type RecvOpenSynIn = (&'a mut StateAccept, Option); + type RecvOpenSynOut = (); + async fn recv_open_syn( + self, + _state: Self::RecvOpenSynIn, + ) -> Result { + Ok(()) + } + + type SendOpenAckIn = &'a StateAccept; + type SendOpenAckOut = Option; + async fn send_open_ack( + self, + 
_state: Self::SendOpenAckIn, + ) -> Result { + Ok(None) + } +} diff --git a/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs b/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs index 25edbde2e1..9dda9175b1 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs @@ -52,13 +52,13 @@ impl StateOpen { } #[async_trait] -impl<'a> OpenFsm for LowLatencyFsm<'a> { +impl<'a> OpenFsm for &'a LowLatencyFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, state: Self::SendInitSynIn, ) -> Result { let output = state.is_lowlatency.then_some(init::ext::LowLatency::new()); @@ -68,7 +68,7 @@ impl<'a> OpenFsm for LowLatencyFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { let (state, other_ext) = input; @@ -79,7 +79,7 @@ impl<'a> OpenFsm for LowLatencyFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, _state: Self::SendOpenSynIn, ) -> Result { Ok(None) @@ -88,7 +88,7 @@ impl<'a> OpenFsm for LowLatencyFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, _state: Self::RecvOpenAckIn, ) -> Result { Ok(()) @@ -148,13 +148,13 @@ where } #[async_trait] -impl<'a> AcceptFsm for LowLatencyFsm<'a> { +impl<'a> AcceptFsm for &'a LowLatencyFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { let (state, other_ext) = input; @@ -165,7 +165,7 @@ impl<'a> AcceptFsm for LowLatencyFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, state: 
Self::SendInitAckIn, ) -> Result { let output = state.is_lowlatency.then_some(init::ext::LowLatency::new()); @@ -175,7 +175,7 @@ impl<'a> AcceptFsm for LowLatencyFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, _state: Self::RecvOpenSynIn, ) -> Result { Ok(()) @@ -184,7 +184,7 @@ impl<'a> AcceptFsm for LowLatencyFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, _state: Self::SendOpenAckIn, ) -> Result { Ok(None) diff --git a/io/zenoh-transport/src/unicast/establishment/ext/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/mod.rs index 956a8c5112..f4aafa832c 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/mod.rs @@ -13,6 +13,8 @@ // #[cfg(feature = "transport_auth")] pub mod auth; +#[cfg(feature = "transport_compression")] +pub(crate) mod compression; pub(crate) mod lowlatency; #[cfg(feature = "transport_multilink")] pub(crate) mod multilink; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs index 7a3f0d9f30..9c3c584c70 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs @@ -92,13 +92,13 @@ impl StateOpen { } #[async_trait] -impl<'a> OpenFsm for MultiLinkFsm<'a> { +impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, input: Self::SendInitSynIn, ) -> Result { let pubkey = match input.pubkey.as_ref() { @@ -117,7 +117,7 @@ impl<'a> OpenFsm for MultiLinkFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { const S: &str = "MultiLink 
extension - Recv InitAck."; @@ -152,7 +152,7 @@ impl<'a> OpenFsm for MultiLinkFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, input: Self::SendOpenSynIn, ) -> Result { let pubkey = match input.pubkey.as_ref() { @@ -171,7 +171,7 @@ impl<'a> OpenFsm for MultiLinkFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result { let (state, mut ext) = input; @@ -267,13 +267,13 @@ where } #[async_trait] -impl<'a> AcceptFsm for MultiLinkFsm<'a> { +impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "MultiLink extension - Recv InitSyn."; @@ -309,7 +309,7 @@ impl<'a> AcceptFsm for MultiLinkFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, input: Self::SendInitAckIn, ) -> Result { let pubkey = match input.pubkey.as_ref() { @@ -328,7 +328,7 @@ impl<'a> AcceptFsm for MultiLinkFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { let (state, ext) = input; @@ -345,7 +345,7 @@ impl<'a> AcceptFsm for MultiLinkFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, input: Self::SendOpenAckIn, ) -> Result { let pubkey = match input.pubkey.as_ref() { diff --git a/io/zenoh-transport/src/unicast/establishment/ext/qos.rs b/io/zenoh-transport/src/unicast/establishment/ext/qos.rs index b72e34c636..4626ec5998 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/qos.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/qos.rs @@ -52,13 +52,13 @@ impl StateOpen { } 
#[async_trait] -impl<'a> OpenFsm for QoSFsm<'a> { +impl<'a> OpenFsm for &'a QoSFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, state: Self::SendInitSynIn, ) -> Result { let output = state.is_qos.then_some(init::ext::QoS::new()); @@ -68,7 +68,7 @@ impl<'a> OpenFsm for QoSFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { let (state, other_ext) = input; @@ -79,7 +79,7 @@ impl<'a> OpenFsm for QoSFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, _state: Self::SendOpenSynIn, ) -> Result { Ok(None) @@ -88,7 +88,7 @@ impl<'a> OpenFsm for QoSFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, _state: Self::RecvOpenAckIn, ) -> Result { Ok(()) @@ -148,13 +148,13 @@ where } #[async_trait] -impl<'a> AcceptFsm for QoSFsm<'a> { +impl<'a> AcceptFsm for &'a QoSFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { let (state, other_ext) = input; @@ -165,7 +165,7 @@ impl<'a> AcceptFsm for QoSFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, state: Self::SendInitAckIn, ) -> Result { let output = state.is_qos.then_some(init::ext::QoS::new()); @@ -175,7 +175,7 @@ impl<'a> AcceptFsm for QoSFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, _state: Self::RecvOpenSynIn, ) -> Result { Ok(()) @@ -184,7 +184,7 @@ impl<'a> AcceptFsm for QoSFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, _state: 
Self::SendOpenAckIn, ) -> Result { Ok(None) diff --git a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index 131c0b5186..f2d6fe4dd0 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -152,13 +152,13 @@ impl StateOpen { } #[async_trait] -impl<'a> OpenFsm for ShmFsm<'a> { +impl<'a> OpenFsm for &'a ShmFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, state: Self::SendInitSynIn, ) -> Result { const S: &str = "Shm extension - Send InitSyn."; @@ -184,7 +184,7 @@ impl<'a> OpenFsm for ShmFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = Challenge; async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { const S: &str = "Shm extension - Recv InitAck."; @@ -256,7 +256,7 @@ impl<'a> OpenFsm for ShmFsm<'a> { type SendOpenSynIn = (&'a StateOpen, Self::RecvInitAckOut); type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, input: Self::SendOpenSynIn, ) -> Result { // const S: &str = "Shm extension - Send OpenSyn."; @@ -272,7 +272,7 @@ impl<'a> OpenFsm for ShmFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result { const S: &str = "Shm extension - Recv OpenAck."; @@ -352,13 +352,13 @@ where } #[async_trait] -impl<'a> AcceptFsm for ShmFsm<'a> { +impl<'a> AcceptFsm for &'a ShmFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = Challenge; async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "Shm extension - Recv InitSyn."; @@ -409,7 +409,7 @@ impl<'a> AcceptFsm for ShmFsm<'a> { type SendInitAckIn = (&'a StateAccept, Self::RecvInitSynOut); type SendInitAckOut = Option; 
async fn send_init_ack( - &self, + self, input: Self::SendInitAckIn, ) -> Result { const S: &str = "Shm extension - Send InitAck."; @@ -437,7 +437,7 @@ impl<'a> AcceptFsm for ShmFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { const S: &str = "Shm extension - Recv OpenSyn."; @@ -480,7 +480,7 @@ impl<'a> AcceptFsm for ShmFsm<'a> { type SendOpenAckIn = &'a mut StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, state: Self::SendOpenAckIn, ) -> Result { // const S: &str = "Shm extension - Send OpenAck."; diff --git a/io/zenoh-transport/src/unicast/establishment/mod.rs b/io/zenoh-transport/src/unicast/establishment/mod.rs index 6bc8c898e8..523e6e9d22 100644 --- a/io/zenoh-transport/src/unicast/establishment/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/mod.rs @@ -17,7 +17,7 @@ pub mod ext; pub(crate) mod open; use super::{TransportPeer, TransportUnicast}; -use crate::{common::seq_num, TransportManager}; +use crate::{common::seq_num, unicast::link::TransportLinkUnicast, TransportManager}; use async_trait::async_trait; use cookie::*; use sha3::{ @@ -25,10 +25,10 @@ use sha3::{ Shake128, }; use std::time::Duration; -use zenoh_link::{Link, LinkUnicast}; +use zenoh_link::Link; use zenoh_protocol::{ core::{Field, Resolution, ZenohId}, - transport::{BatchSize, Close, TransportMessage, TransportSn}, + transport::TransportSn, }; use zenoh_result::ZResult; @@ -42,28 +42,28 @@ pub trait OpenFsm { type SendInitSynIn; type SendInitSynOut; async fn send_init_syn( - &self, + self, input: Self::SendInitSynIn, ) -> Result; type RecvInitAckIn; type RecvInitAckOut; async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result; type SendOpenSynIn; type SendOpenSynOut; async fn send_open_syn( - &self, + self, input: Self::SendOpenSynIn, ) -> Result; type RecvOpenAckIn; type RecvOpenAckOut; async fn 
recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result; } @@ -75,28 +75,28 @@ pub trait AcceptFsm { type RecvInitSynIn; type RecvInitSynOut; async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result; type SendInitAckIn; type SendInitAckOut; async fn send_init_ack( - &self, + self, input: Self::SendInitAckIn, ) -> Result; type RecvOpenSynIn; type RecvOpenSynOut; async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result; type SendOpenAckIn; type SendOpenAckOut; async fn send_open_ack( - &self, + self, input: Self::SendOpenAckIn, ) -> Result; } @@ -116,30 +116,13 @@ pub(super) fn compute_sn(zid1: ZenohId, zid2: ZenohId, resolution: Resolution) - TransportSn::from_le_bytes(array) & seq_num::get_mask(resolution.get(Field::FrameSN)) } -pub(super) async fn close_link(link: &LinkUnicast, reason: Option) { - if let Some(reason) = reason { - // Build the close message - let message: TransportMessage = Close { - reason, - session: false, - } - .into(); - // Send the close message on the link - let _ = link.send(&message).await; - } - - // Close the link - let _ = link.close().await; -} - pub(super) struct InputFinalize { pub(super) transport: TransportUnicast, pub(super) other_lease: Duration, - pub(super) agreed_batch_size: BatchSize, } // Finalize the transport, notify the callback and start the link tasks pub(super) async fn finalize_transport( - link: &LinkUnicast, + link: &TransportLinkUnicast, manager: &TransportManager, input: self::InputFinalize, ) -> ZResult<()> { @@ -148,12 +131,7 @@ pub(super) async fn finalize_transport( // Start the TX loop let keep_alive = manager.config.unicast.lease / manager.config.unicast.keep_alive as u32; - transport.start_tx( - link, - &manager.tx_executor, - keep_alive, - input.agreed_batch_size, - )?; + transport.start_tx(link, &manager.tx_executor, keep_alive)?; // Assign a callback if the transport is new // Keep the lock to avoid concurrent new_transport and 
closing/closed notifications @@ -185,7 +163,7 @@ pub(super) async fn finalize_transport( drop(a_guard); // Start the RX loop - transport.start_rx(link, input.other_lease, input.agreed_batch_size)?; + transport.start_rx(link, input.other_lease)?; Ok(()) } diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index dbd4872c3e..4c1314dd29 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -14,10 +14,12 @@ #[cfg(feature = "shared-memory")] use crate::unicast::shared_memory_unicast::Challenge; use crate::{ - unicast::establishment::{ - close_link, compute_sn, ext, finalize_transport, InputFinalize, OpenFsm, + unicast::{ + establishment::{compute_sn, ext, finalize_transport, InputFinalize, OpenFsm}, + link::{TransportLinkUnicast, TransportLinkUnicastConfig, TransportLinkUnicastDirection}, + TransportConfigUnicast, TransportUnicast, }, - TransportConfigUnicast, TransportManager, TransportUnicast, + TransportManager, }; use async_trait::async_trait; use std::time::Duration; @@ -25,7 +27,7 @@ use zenoh_buffers::ZSlice; #[cfg(feature = "transport_auth")] use zenoh_core::zasynclock; use zenoh_core::{zcondfeat, zerror}; -use zenoh_link::{LinkUnicast, LinkUnicastDirection}; +use zenoh_link::LinkUnicast; use zenoh_protocol::{ core::{Field, Resolution, WhatAmI, ZenohId}, transport::{ @@ -37,21 +39,29 @@ use zenoh_result::ZResult; type OpenError = (zenoh_result::Error, Option); -struct StateZenoh { +struct StateTransport { batch_size: BatchSize, resolution: Resolution, -} - -struct State { - zenoh: StateZenoh, ext_qos: ext::qos::StateOpen, #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::StateOpen, #[cfg(feature = "shared-memory")] ext_shm: ext::shm::StateOpen, + ext_lowlatency: ext::lowlatency::StateOpen, +} + +#[cfg(any(feature = "transport_auth", feature = "transport_compression"))] +struct StateLink { #[cfg(feature = 
"transport_auth")] ext_auth: ext::auth::StateOpen, - ext_lowlatency: ext::lowlatency::StateOpen, + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::StateOpen, +} + +struct State { + transport: StateTransport, + #[cfg(any(feature = "transport_auth", feature = "transport_compression"))] + link: StateLink, } // InitSyn @@ -92,7 +102,6 @@ struct RecvOpenAckOut { // FSM struct OpenLink<'a> { - link: &'a LinkUnicast, ext_qos: ext::qos::QoSFsm<'a>, #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::MultiLinkFsm<'a>, @@ -101,31 +110,26 @@ struct OpenLink<'a> { #[cfg(feature = "transport_auth")] ext_auth: ext::auth::AuthFsm<'a>, ext_lowlatency: ext::lowlatency::LowLatencyFsm<'a>, + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::CompressionFsm<'a>, } #[async_trait] -impl<'a> OpenFsm for OpenLink<'a> { +impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { type Error = OpenError; - type SendInitSynIn = (&'a mut State, SendInitSynIn); + type SendInitSynIn = (&'a mut TransportLinkUnicast, &'a mut State, SendInitSynIn); type SendInitSynOut = (); async fn send_init_syn( - &self, + self, input: Self::SendInitSynIn, ) -> Result { - let (state, input) = input; + let (link, state, input) = input; // Extension QoS let ext_qos = self .ext_qos - .send_init_syn(&state.ext_qos) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - let ext_lowlatency = self - .ext_lowlatency - .send_init_syn(&state.ext_lowlatency) + .send_init_syn(&state.transport.ext_qos) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -133,7 +137,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_shm = zcondfeat!( "shared-memory", self.ext_shm - .send_init_syn(&state.ext_shm) + .send_init_syn(&state.transport.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -143,7 +147,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_auth = zcondfeat!( "transport_auth", self.ext_auth - 
.send_init_syn(&state.ext_auth) + .send_init_syn(&state.link.ext_auth) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -153,7 +157,24 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_mlink = zcondfeat!( "transport_multilink", self.ext_mlink - .send_init_syn(&state.ext_mlink) + .send_init_syn(&state.transport.ext_mlink) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None + ); + + // Extension LowLatency + let ext_lowlatency = self + .ext_lowlatency + .send_init_syn(&state.transport.ext_lowlatency) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + let ext_compression = zcondfeat!( + "transport_compression", + self.ext_compression + .send_init_syn(&state.link.ext_compression) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -163,18 +184,18 @@ impl<'a> OpenFsm for OpenLink<'a> { version: input.mine_version, whatami: input.mine_whatami, zid: input.mine_zid, - batch_size: state.zenoh.batch_size, - resolution: state.zenoh.resolution, + batch_size: state.transport.batch_size, + resolution: state.transport.resolution, ext_qos, ext_shm, ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } .into(); - let _ = self - .link + let _ = link .send(&msg) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -182,14 +203,15 @@ impl<'a> OpenFsm for OpenLink<'a> { Ok(()) } - type RecvInitAckIn = &'a mut State; + type RecvInitAckIn = (&'a mut TransportLinkUnicast, &'a mut State); type RecvInitAckOut = RecvInitAckOut; async fn recv_init_ack( - &self, - state: Self::RecvInitAckIn, + self, + input: Self::RecvInitAckIn, ) -> Result { - let msg = self - .link + let (link, state) = input; + + let msg = link .recv() .await .map_err(|e| (e, Some(close::reason::INVALID)))?; @@ -200,7 +222,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let e = zerror!( "Received a close message (reason {}) in response to an InitSyn on: {}", close::reason_to_str(reason), - self.link, + link, ); match reason { 
close::reason::MAX_LINKS => log::debug!("{}", e), @@ -211,7 +233,7 @@ impl<'a> OpenFsm for OpenLink<'a> { _ => { let e = zerror!( "Received an invalid message in response to an InitSyn on {}: {:?}", - self.link, + link, msg.body ); log::error!("{}", e); @@ -220,17 +242,17 @@ impl<'a> OpenFsm for OpenLink<'a> { }; // Compute the minimum SN resolution - state.zenoh.resolution = { + state.transport.resolution = { let mut res = Resolution::default(); // Frame SN let i_fsn_res = init_ack.resolution.get(Field::FrameSN); - let m_fsn_res = state.zenoh.resolution.get(Field::FrameSN); + let m_fsn_res = state.transport.resolution.get(Field::FrameSN); if i_fsn_res > m_fsn_res { let e = zerror!( "Invalid FrameSN resolution on {}: {:?} > {:?}", - self.link, + link, i_fsn_res, m_fsn_res ); @@ -241,12 +263,12 @@ impl<'a> OpenFsm for OpenLink<'a> { // Request ID let i_rid_res = init_ack.resolution.get(Field::RequestID); - let m_rid_res = state.zenoh.resolution.get(Field::RequestID); + let m_rid_res = state.transport.resolution.get(Field::RequestID); if i_rid_res > m_rid_res { let e = zerror!( "Invalid RequestID resolution on {}: {:?} > {:?}", - self.link, + link, i_rid_res, m_rid_res ); @@ -259,17 +281,11 @@ impl<'a> OpenFsm for OpenLink<'a> { }; // Compute the minimum batch size - state.zenoh.batch_size = state.zenoh.batch_size.min(init_ack.batch_size); + state.transport.batch_size = state.transport.batch_size.min(init_ack.batch_size); // Extension QoS self.ext_qos - .recv_init_ack((&mut state.ext_qos, init_ack.ext_qos)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - self.ext_lowlatency - .recv_init_ack((&mut state.ext_lowlatency, init_ack.ext_lowlatency)) + .recv_init_ack((&mut state.transport.ext_qos, init_ack.ext_qos)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -277,21 +293,34 @@ impl<'a> OpenFsm for OpenLink<'a> { #[cfg(feature = "shared-memory")] let shm_challenge = self .ext_shm - .recv_init_ack((&mut 
state.ext_shm, init_ack.ext_shm)) + .recv_init_ack((&mut state.transport.ext_shm, init_ack.ext_shm)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Auth #[cfg(feature = "transport_auth")] self.ext_auth - .recv_init_ack((&mut state.ext_auth, init_ack.ext_auth)) + .recv_init_ack((&mut state.link.ext_auth, init_ack.ext_auth)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension MultiLink #[cfg(feature = "transport_multilink")] self.ext_mlink - .recv_init_ack((&mut state.ext_mlink, init_ack.ext_mlink)) + .recv_init_ack((&mut state.transport.ext_mlink, init_ack.ext_mlink)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension LowLatency + self.ext_lowlatency + .recv_init_ack((&mut state.transport.ext_lowlatency, init_ack.ext_lowlatency)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + #[cfg(feature = "transport_compression")] + self.ext_compression + .recv_init_ack((&mut state.link.ext_compression, init_ack.ext_compression)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -305,25 +334,18 @@ impl<'a> OpenFsm for OpenLink<'a> { Ok(output) } - type SendOpenSynIn = (&'a mut State, SendOpenSynIn); + type SendOpenSynIn = (&'a mut TransportLinkUnicast, &'a mut State, SendOpenSynIn); type SendOpenSynOut = SendOpenSynOut; async fn send_open_syn( - &self, + self, input: Self::SendOpenSynIn, ) -> Result { - let (state, input) = input; + let (link, state, input) = input; // Extension QoS let ext_qos = self .ext_qos - .send_open_syn(&state.ext_qos) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - let ext_lowlatency = self - .ext_lowlatency - .send_open_syn(&state.ext_lowlatency) + .send_open_syn(&state.transport.ext_qos) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -331,7 +353,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_shm = zcondfeat!( "shared-memory", self.ext_shm - .send_open_syn((&state.ext_shm, 
input.ext_shm)) + .send_open_syn((&state.transport.ext_shm, input.ext_shm)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -341,7 +363,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_auth = zcondfeat!( "transport_auth", self.ext_auth - .send_open_syn(&state.ext_auth) + .send_open_syn(&state.link.ext_auth) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -351,14 +373,32 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_mlink = zcondfeat!( "transport_multilink", self.ext_mlink - .send_open_syn(&state.ext_mlink) + .send_open_syn(&state.transport.ext_mlink) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None + ); + + // Extension LowLatency + let ext_lowlatency = self + .ext_lowlatency + .send_open_syn(&state.transport.ext_lowlatency) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + let ext_compression = zcondfeat!( + "transport_compression", + self.ext_compression + .send_open_syn(&state.link.ext_compression) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None ); // Build and send an OpenSyn message - let mine_initial_sn = compute_sn(input.mine_zid, input.other_zid, state.zenoh.resolution); + let mine_initial_sn = + compute_sn(input.mine_zid, input.other_zid, state.transport.resolution); let message: TransportMessage = OpenSyn { lease: input.mine_lease, initial_sn: mine_initial_sn, @@ -368,11 +408,11 @@ impl<'a> OpenFsm for OpenLink<'a> { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } .into(); - let _ = self - .link + let _ = link .send(&message) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -381,14 +421,15 @@ impl<'a> OpenFsm for OpenLink<'a> { Ok(output) } - type RecvOpenAckIn = &'a mut State; + type RecvOpenAckIn = (&'a mut TransportLinkUnicast, &'a mut State); type RecvOpenAckOut = RecvOpenAckOut; async fn recv_open_ack( - &self, - state: Self::RecvOpenAckIn, + self, + input: Self::RecvOpenAckIn, ) -> Result { - let msg = self - .link + let 
(link, state) = input; + + let msg = link .recv() .await .map_err(|e| (e, Some(close::reason::INVALID)))?; @@ -399,7 +440,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let e = zerror!( "Received a close message (reason {}) in response to an OpenSyn on: {:?}", close::reason_to_str(reason), - self.link, + link, ); match reason { close::reason::MAX_LINKS => log::debug!("{}", e), @@ -410,7 +451,7 @@ impl<'a> OpenFsm for OpenLink<'a> { _ => { let e = zerror!( "Received an invalid message in response to an OpenSyn on {}: {:?}", - self.link, + link, msg.body ); log::error!("{}", e); @@ -420,34 +461,41 @@ impl<'a> OpenFsm for OpenLink<'a> { // Extension QoS self.ext_qos - .recv_open_ack((&mut state.ext_qos, open_ack.ext_qos)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - self.ext_lowlatency - .recv_open_ack((&mut state.ext_lowlatency, open_ack.ext_lowlatency)) + .recv_open_ack((&mut state.transport.ext_qos, open_ack.ext_qos)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm #[cfg(feature = "shared-memory")] self.ext_shm - .recv_open_ack((&mut state.ext_shm, open_ack.ext_shm)) + .recv_open_ack((&mut state.transport.ext_shm, open_ack.ext_shm)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Auth #[cfg(feature = "transport_auth")] self.ext_auth - .recv_open_ack((&mut state.ext_auth, open_ack.ext_auth)) + .recv_open_ack((&mut state.link.ext_auth, open_ack.ext_auth)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension MultiLink #[cfg(feature = "transport_multilink")] self.ext_mlink - .recv_open_ack((&mut state.ext_mlink, open_ack.ext_mlink)) + .recv_open_ack((&mut state.transport.ext_mlink, open_ack.ext_mlink)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension LowLatency + self.ext_lowlatency + .recv_open_ack((&mut state.transport.ext_lowlatency, open_ack.ext_lowlatency)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // 
Extension Compression + #[cfg(feature = "transport_compression")] + self.ext_compression + .recv_open_ack((&mut state.link.ext_compression, open_ack.ext_compression)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -460,11 +508,17 @@ impl<'a> OpenFsm for OpenLink<'a> { } pub(crate) async fn open_link( - link: &LinkUnicast, + link: LinkUnicast, manager: &TransportManager, ) -> ZResult { - let fsm = OpenLink { - link, + let config = TransportLinkUnicastConfig { + direction: TransportLinkUnicastDirection::Outbound, + mtu: link.get_mtu(), + #[cfg(feature = "transport_compression")] + is_compression: false, // Perform the exchange Init/Open exchange with no compression + }; + let mut link = TransportLinkUnicast::new(link, config); + let mut fsm = OpenLink { ext_qos: ext::qos::QoSFsm::new(), #[cfg(feature = "transport_multilink")] ext_mlink: manager.state.unicast.multilink.fsm(&manager.prng), @@ -473,29 +527,43 @@ pub(crate) async fn open_link( #[cfg(feature = "transport_auth")] ext_auth: manager.state.unicast.authenticator.fsm(&manager.prng), ext_lowlatency: ext::lowlatency::LowLatencyFsm::new(), + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::CompressionFsm::new(), }; let mut state = State { - zenoh: StateZenoh { - batch_size: manager.config.batch_size.min(batch_size::UNICAST), + transport: StateTransport { + batch_size: manager + .config + .batch_size + .min(batch_size::UNICAST) + .min(link.config.mtu), resolution: manager.config.resolution, + ext_qos: ext::qos::StateOpen::new(manager.config.unicast.is_qos), + #[cfg(feature = "transport_multilink")] + ext_mlink: manager + .state + .unicast + .multilink + .open(manager.config.unicast.max_links > 1), + #[cfg(feature = "shared-memory")] + ext_shm: ext::shm::StateOpen::new(manager.config.unicast.is_shm), + + ext_lowlatency: ext::lowlatency::StateOpen::new(manager.config.unicast.is_lowlatency), + }, + #[cfg(any(feature = "transport_auth", feature = "transport_compression"))] + 
link: StateLink { + #[cfg(feature = "transport_auth")] + ext_auth: manager + .state + .unicast + .authenticator + .open(&mut *zasynclock!(manager.prng)), + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::StateOpen::new( + manager.config.unicast.is_compression, + ), }, - ext_qos: ext::qos::StateOpen::new(manager.config.unicast.is_qos), - #[cfg(feature = "transport_multilink")] - ext_mlink: manager - .state - .unicast - .multilink - .open(manager.config.unicast.max_links > 1), - #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::StateOpen::new(manager.config.unicast.is_shm), - #[cfg(feature = "transport_auth")] - ext_auth: manager - .state - .unicast - .authenticator - .open(&mut *zasynclock!(manager.prng)), - ext_lowlatency: ext::lowlatency::StateOpen::new(manager.config.unicast.is_lowlatency), }; // Init handshake @@ -504,7 +572,7 @@ pub(crate) async fn open_link( match $s { Ok(output) => output, Err((e, reason)) => { - close_link(link, reason).await; + let _ = link.close(reason).await; return Err(e); } } @@ -516,9 +584,9 @@ pub(crate) async fn open_link( mine_zid: manager.config.zid, mine_whatami: manager.config.whatami, }; - step!(fsm.send_init_syn((&mut state, isyn_in)).await); + step!(fsm.send_init_syn((&mut link, &mut state, isyn_in)).await); - let iack_out = step!(fsm.recv_init_ack(&mut state).await); + let iack_out = step!(fsm.recv_init_ack((&mut link, &mut state)).await); // Open handshake let osyn_in = SendOpenSynIn { @@ -529,29 +597,33 @@ pub(crate) async fn open_link( #[cfg(feature = "shared-memory")] ext_shm: iack_out.ext_shm, }; - let osyn_out = step!(fsm.send_open_syn((&mut state, osyn_in)).await); + let osyn_out = step!(fsm.send_open_syn((&mut link, &mut state, osyn_in)).await); - let oack_out = step!(fsm.recv_open_ack(&mut state).await); + let oack_out = step!(fsm.recv_open_ack((&mut link, &mut state)).await); // Initialize the transport let config = TransportConfigUnicast { zid: iack_out.other_zid, whatami: 
iack_out.other_whatami, - sn_resolution: state.zenoh.resolution.get(Field::FrameSN), + sn_resolution: state.transport.resolution.get(Field::FrameSN), tx_initial_sn: osyn_out.mine_initial_sn, - is_qos: state.ext_qos.is_qos(), + is_qos: state.transport.ext_qos.is_qos(), #[cfg(feature = "transport_multilink")] - multilink: state.ext_mlink.multilink(), + multilink: state.transport.ext_mlink.multilink(), #[cfg(feature = "shared-memory")] - is_shm: state.ext_shm.is_shm(), - is_lowlatency: state.ext_lowlatency.is_lowlatency(), + is_shm: state.transport.ext_shm.is_shm(), + is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), }; - let transport = step!( - manager - .init_transport_unicast(config, link.clone(), LinkUnicastDirection::Outbound) - .await - ); + let o_config = TransportLinkUnicastConfig { + mtu: state.transport.batch_size, + direction: TransportLinkUnicastDirection::Outbound, + #[cfg(feature = "transport_compression")] + is_compression: state.link.ext_compression.is_compression(), + }; + let o_link = TransportLinkUnicast::new(link.link.clone(), o_config); + let s_link = format!("{:?}", o_link); + let transport = step!(manager.init_transport_unicast(config, o_link).await); // Sync the RX sequence number let _ = step!(transport @@ -563,21 +635,19 @@ pub(crate) async fn open_link( let output = InputFinalize { transport, other_lease: oack_out.other_lease, - agreed_batch_size: state.zenoh.batch_size, }; let transport = output.transport.clone(); - let res = finalize_transport(link, manager, output).await; + let res = finalize_transport(&link, manager, output).await; if let Err(e) = res { let _ = transport.close().await; return Err(e); } log::debug!( - "New transport link opened from {} to {}: {}. 
Batch size: {}.", + "New transport link opened from {} to {}: {}.", manager.config.zid, iack_out.other_zid, - link, - state.zenoh.batch_size, + s_link, ); Ok(transport) diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs new file mode 100644 index 0000000000..afc12bc87d --- /dev/null +++ b/io/zenoh-transport/src/unicast/link.rs @@ -0,0 +1,275 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::common::batch::{BatchConfig, Decode, Encode, Finalize, RBatch, WBatch}; +use std::fmt; +use std::sync::Arc; +#[cfg(feature = "transport_compression")] +use zenoh_buffers::BBuf; +use zenoh_buffers::{ZSlice, ZSliceBuffer}; +use zenoh_link::{Link, LinkUnicast}; +use zenoh_protocol::transport::{BatchSize, Close, TransportMessage}; +use zenoh_result::{zerror, ZResult}; + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub(crate) enum TransportLinkUnicastDirection { + Inbound, + Outbound, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub(crate) struct TransportLinkUnicastConfig { + // Inbound / outbound + pub(crate) direction: TransportLinkUnicastDirection, + // MTU + pub(crate) mtu: BatchSize, + // Compression is active on the link + #[cfg(feature = "transport_compression")] + pub(crate) is_compression: bool, +} + +#[derive(Clone, PartialEq, Eq)] +pub(crate) struct TransportLinkUnicast { + pub(crate) link: LinkUnicast, + pub(crate) config: TransportLinkUnicastConfig, +} + +impl TransportLinkUnicast { + pub(crate) fn new(link: LinkUnicast, mut config: TransportLinkUnicastConfig) -> Self { + config.mtu = 
link.get_mtu().min(config.mtu); + Self { link, config } + } + + const fn batch_config(&self) -> BatchConfig { + BatchConfig { + mtu: self.config.mtu, + #[cfg(feature = "transport_compression")] + is_compression: self.config.is_compression, + } + } + + pub(crate) fn tx(&self) -> TransportLinkUnicastTx { + TransportLinkUnicastTx { + inner: self.clone(), + #[cfg(feature = "transport_compression")] + buffer: self.config.is_compression.then_some(BBuf::with_capacity( + lz4_flex::block::get_maximum_output_size(self.config.mtu as usize), + )), + } + } + + pub(crate) fn rx(&self) -> TransportLinkUnicastRx { + TransportLinkUnicastRx { + inner: self.clone(), + } + } + + pub(crate) async fn send(&self, msg: &TransportMessage) -> ZResult { + let mut link = self.tx(); + link.send(msg).await + } + + pub(crate) async fn recv(&self) -> ZResult { + let mut link = self.rx(); + link.recv().await + } + + pub(crate) async fn close(&self, reason: Option) -> ZResult<()> { + if let Some(reason) = reason { + // Build the close message + let message: TransportMessage = Close { + reason, + session: false, + } + .into(); + // Send the close message on the link + let _ = self.send(&message).await; + } + self.link.close().await + } +} + +impl fmt::Display for TransportLinkUnicast { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.link) + } +} + +impl fmt::Debug for TransportLinkUnicast { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TransportLinkUnicast") + .field("link", &self.link) + .field("config", &self.config) + .finish() + } +} + +impl From<&TransportLinkUnicast> for Link { + fn from(link: &TransportLinkUnicast) -> Self { + Link::from(&link.link) + } +} + +impl From for Link { + fn from(link: TransportLinkUnicast) -> Self { + Link::from(link.link) + } +} + +pub(crate) struct TransportLinkUnicastTx { + pub(crate) inner: TransportLinkUnicast, + #[cfg(feature = "transport_compression")] + pub(crate) buffer: Option, +} + 
+impl TransportLinkUnicastTx { + pub(crate) async fn send_batch(&mut self, batch: &mut WBatch) -> ZResult<()> { + const ERR: &str = "Write error on link: "; + + // log::trace!("WBatch: {:?}", batch); + + let res = batch + .finalize( + #[cfg(feature = "transport_compression")] + self.buffer.as_mut(), + ) + .map_err(|_| zerror!("{ERR}{self}"))?; + + let bytes = match res { + Finalize::Batch => batch.as_slice(), + #[cfg(feature = "transport_compression")] + Finalize::Buffer => self + .buffer + .as_ref() + .ok_or_else(|| zerror!("Invalid buffer finalization"))? + .as_slice(), + }; + + // log::trace!("WBytes: {:02x?}", bytes); + + // Send the message on the link + if self.inner.link.is_streamed() { + let len: BatchSize = bytes + .len() + .try_into() + .map_err(|_| zerror!("Invalid batch length"))?; + let len = len.to_le_bytes(); + self.inner.link.write_all(&len).await?; + } + self.inner.link.write_all(bytes).await?; + + Ok(()) + } + + pub(crate) async fn send(&mut self, msg: &TransportMessage) -> ZResult { + const ERR: &str = "Write error on link: "; + + // Create the batch for serializing the message + let mut batch = WBatch::new(self.inner.batch_config()); + batch.encode(msg).map_err(|_| zerror!("{ERR}{self}"))?; + let len = batch.len() as usize; + self.send_batch(&mut batch).await?; + Ok(len) + } +} + +impl fmt::Display for TransportLinkUnicastTx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} + +impl fmt::Debug for TransportLinkUnicastTx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut s = f.debug_struct("TransportLinkUnicastRx"); + s.field("link", &self.inner.link) + .field("config", &self.inner.config); + #[cfg(feature = "transport_compression")] + { + s.field("buffer", &self.buffer.as_ref().map(|b| b.capacity())); + } + s.finish() + } +} + +pub(crate) struct TransportLinkUnicastRx { + pub(crate) inner: TransportLinkUnicast, +} + +impl TransportLinkUnicastRx { + pub async fn 
recv_batch(&mut self, buff: C) -> ZResult + where + C: Fn() -> T + Copy, + T: ZSliceBuffer + 'static, + { + const ERR: &str = "Read error from link: "; + + let mut into = (buff)(); + let end = if self.inner.link.is_streamed() { + // Read and decode the message length + let mut len = BatchSize::MIN.to_le_bytes(); + self.inner.link.read_exact(&mut len).await?; + let len = BatchSize::from_le_bytes(len) as usize; + + // Read the bytes + let slice = into + .as_mut_slice() + .get_mut(..len) + .ok_or_else(|| zerror!("{ERR}{self}. Invalid batch length or buffer size."))?; + self.inner.link.read_exact(slice).await?; + len + } else { + // Read the bytes + self.inner.link.read(into.as_mut_slice()).await? + }; + + // log::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); + + let buffer = ZSlice::make(Arc::new(into), 0, end) + .map_err(|_| zerror!("{ERR}{self}. ZSlice index(es) out of bounds"))?; + let mut batch = RBatch::new(self.inner.batch_config(), buffer); + batch + .initialize(buff) + .map_err(|e| zerror!("{ERR}{self}. 
{e}."))?; + + // log::trace!("RBatch: {:?}", batch); + + Ok(batch) + } + + pub async fn recv(&mut self) -> ZResult { + let mtu = self.inner.config.mtu as usize; + let mut batch = self + .recv_batch(|| zenoh_buffers::vec::uninit(mtu).into_boxed_slice()) + .await?; + let msg = batch + .decode() + .map_err(|_| zerror!("Decode error on link: {}", self))?; + Ok(msg) + } +} + +impl fmt::Display for TransportLinkUnicastRx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} + +impl fmt::Debug for TransportLinkUnicastRx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TransportLinkUnicastRx") + .field("link", &self.inner.link) + .field("config", &self.inner.config) + .finish() + } +} diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 111936cb95..437e9c4fa4 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -14,29 +14,25 @@ use super::transport::TransportUnicastLowlatency; #[cfg(feature = "stats")] use crate::stats::TransportStats; -use crate::TransportExecutor; +use crate::{unicast::link::TransportLinkUnicast, TransportExecutor}; use async_std::task; use async_std::{prelude::FutureExt, sync::RwLock}; -use zenoh_codec::*; -use zenoh_core::{zasyncread, zasyncwrite}; - use std::sync::Arc; use std::time::Duration; use zenoh_buffers::{writer::HasWriter, ZSlice}; -use zenoh_link::LinkUnicast; -use zenoh_protocol::transport::{ - BatchSize, KeepAlive, TransportBodyLowLatency, TransportMessageLowLatency, -}; +use zenoh_codec::*; +use zenoh_core::{zasyncread, zasyncwrite}; +use zenoh_protocol::transport::{KeepAlive, TransportBodyLowLatency, TransportMessageLowLatency}; use zenoh_result::{zerror, ZResult}; use zenoh_sync::RecyclingObjectPool; pub(crate) async fn send_with_link( - link: &LinkUnicast, + link: &TransportLinkUnicast, msg: TransportMessageLowLatency, 
#[cfg(feature = "stats")] stats: &Arc, ) -> ZResult<()> { let len; - if link.is_streamed() { + if link.link.is_streamed() { let mut buffer = vec![0, 0, 0, 0]; let codec = Zenoh080::new(); let mut writer = buffer.writer(); @@ -49,7 +45,7 @@ pub(crate) async fn send_with_link( buffer[0..4].copy_from_slice(&le); - link.write_all(&buffer).await?; + link.link.write_all(&buffer).await?; } else { let mut buffer = vec![]; let codec = Zenoh080::new(); @@ -62,7 +58,7 @@ pub(crate) async fn send_with_link( { len = buffer.len() as u32; } - link.write_all(&buffer).await?; + link.link.write_all(&buffer).await?; } log::trace!("Sent: {:?}", msg); @@ -131,7 +127,7 @@ impl TransportUnicastLowlatency { } } - pub(super) fn internal_start_rx(&self, lease: Duration, batch_size: u16) { + pub(super) fn internal_start_rx(&self, lease: Duration) { let mut guard = async_std::task::block_on(async { zasyncwrite!(self.handle_rx) }); let c_transport = self.clone(); let handle = task::spawn(async move { @@ -141,7 +137,7 @@ impl TransportUnicastLowlatency { let rx_buffer_size = c_transport.manager.config.link_rx_buffer_size; // Start the rx task - let res = rx_task(link, c_transport.clone(), lease, batch_size, rx_buffer_size).await; + let res = rx_task(link, c_transport.clone(), lease, rx_buffer_size).await; log::debug!( "[{}] Rx task finished with result {:?}", c_transport.manager.config.zid, @@ -177,7 +173,7 @@ impl TransportUnicastLowlatency { /* TASKS */ /*************************************/ async fn keepalive_task( - link: Arc>, + link: Arc>, keep_alive: Duration, #[cfg(feature = "stats")] stats: Arc, ) -> ZResult<()> { @@ -201,27 +197,26 @@ async fn keepalive_task( } async fn rx_task_stream( - link: LinkUnicast, + link: TransportLinkUnicast, transport: TransportUnicastLowlatency, lease: Duration, - rx_batch_size: BatchSize, rx_buffer_size: usize, ) -> ZResult<()> { - async fn read(link: &LinkUnicast, buffer: &mut [u8]) -> ZResult { + async fn read(link: &TransportLinkUnicast, buffer: &mut 
[u8]) -> ZResult { // 16 bits for reading the batch length let mut length = [0_u8, 0_u8, 0_u8, 0_u8]; - link.read_exact(&mut length).await?; + link.link.read_exact(&mut length).await?; let n = u32::from_le_bytes(length) as usize; let len = buffer.len(); let b = buffer.get_mut(0..n).ok_or_else(|| { zerror!("Batch len is invalid. Received {n} but negotiated max len is {len}.") })?; - link.read_exact(b).await?; + link.link.read_exact(b).await?; Ok(n) } // The pool of buffers - let mtu = link.get_mtu().min(rx_batch_size) as usize; + let mtu = link.config.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; @@ -242,19 +237,18 @@ async fn rx_task_stream( // Deserialize all the messages from the current ZBuf let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); - transport.read_messages(zslice, &link).await?; + transport.read_messages(zslice, &link.link).await?; } } async fn rx_task_dgram( - link: LinkUnicast, + link: TransportLinkUnicast, transport: TransportUnicastLowlatency, lease: Duration, - rx_batch_size: BatchSize, rx_buffer_size: usize, ) -> ZResult<()> { // The pool of buffers - let mtu = link.get_mtu().min(rx_batch_size) as usize; + let mtu = link.config.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; @@ -266,30 +260,31 @@ async fn rx_task_dgram( let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); // Async read from the underlying link - let bytes = - link.read(&mut buffer).timeout(lease).await.map_err(|_| { - zerror!("{}: expired after {} milliseconds", link, lease.as_millis()) - })??; + let bytes = link + .link + .read(&mut buffer) + .timeout(lease) + .await + .map_err(|_| zerror!("{}: expired after {} milliseconds", link, lease.as_millis()))??; #[cfg(feature = "stats")] transport.stats.inc_rx_bytes(bytes); // Deserialize all the messages from the current ZBuf let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); - transport.read_messages(zslice, 
&link).await?; + transport.read_messages(zslice, &link.link).await?; } } async fn rx_task( - link: LinkUnicast, + link: TransportLinkUnicast, transport: TransportUnicastLowlatency, lease: Duration, - rx_batch_size: u16, rx_buffer_size: usize, ) -> ZResult<()> { - if link.is_streamed() { - rx_task_stream(link, transport, lease, rx_batch_size, rx_buffer_size).await + if link.link.is_streamed() { + rx_task_stream(link, transport, lease, rx_buffer_size).await } else { - rx_task_dgram(link, transport, lease, rx_batch_size, rx_buffer_size).await + rx_task_dgram(link, transport, lease, rx_buffer_size).await } } diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index ea97aa143b..d2d64a0310 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -15,10 +15,13 @@ use super::link::send_with_link; #[cfg(feature = "stats")] use crate::stats::TransportStats; -use crate::transport_unicast_inner::TransportUnicastTrait; -use crate::TransportConfigUnicast; -use crate::TransportManager; -use crate::{TransportExecutor, TransportPeerEventHandler}; +use crate::{ + unicast::{ + link::TransportLinkUnicast, transport_unicast_inner::TransportUnicastTrait, + TransportConfigUnicast, + }, + TransportExecutor, TransportManager, TransportPeerEventHandler, +}; use async_executor::Task; #[cfg(feature = "transport_unixpipe")] use async_std::sync::RwLockUpgradableReadGuard; @@ -29,17 +32,19 @@ use std::sync::{Arc, RwLock as SyncRwLock}; use std::time::Duration; #[cfg(feature = "transport_unixpipe")] use zenoh_core::zasyncread_upgradable; -use zenoh_core::{zasynclock, zasyncread, zread, zwrite}; +use zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; #[cfg(feature = "transport_unixpipe")] use zenoh_link::unixpipe::UNIXPIPE_LOCATOR_PREFIX; #[cfg(feature = "transport_unixpipe")] use zenoh_link::Link; -use zenoh_link::{LinkUnicast, 
LinkUnicastDirection}; -use zenoh_protocol::core::{WhatAmI, ZenohId}; use zenoh_protocol::network::NetworkMessage; use zenoh_protocol::transport::TransportBodyLowLatency; use zenoh_protocol::transport::TransportMessageLowLatency; use zenoh_protocol::transport::{Close, TransportSn}; +use zenoh_protocol::{ + core::{WhatAmI, ZenohId}, + transport::close, +}; #[cfg(not(feature = "transport_unixpipe"))] use zenoh_result::bail; use zenoh_result::{zerror, ZResult}; @@ -54,7 +59,7 @@ pub(crate) struct TransportUnicastLowlatency { // Transport config pub(super) config: TransportConfigUnicast, // The link associated to the transport - pub(super) link: Arc>, + pub(super) link: Arc>, // The callback pub(super) callback: Arc>>>, // Mutex for notification @@ -72,7 +77,7 @@ impl TransportUnicastLowlatency { pub fn make( manager: TransportManager, config: TransportConfigUnicast, - link: LinkUnicast, + link: TransportLinkUnicast, ) -> ZResult { #[cfg(feature = "stats")] let stats = Arc::new(TransportStats::new(Some(manager.get_stats().clone()))); @@ -137,7 +142,9 @@ impl TransportUnicastLowlatency { // Close and drop the link self.stop_keepalive().await; self.stop_rx().await; - let _ = zasyncread!(self.link).close().await; + let _ = zasyncwrite!(self.link) + .close(Some(close::reason::GENERIC)) + .await; // Notify the callback that we have closed the transport if let Some(cb) = callback.as_ref() { @@ -162,7 +169,7 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { zasynclock!(self.alive) } - fn get_links(&self) -> Vec { + fn get_links(&self) -> Vec { let guard = async_std::task::block_on(async { zasyncread!(self.link) }); [guard.clone()].to_vec() } @@ -206,24 +213,23 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { fn start_tx( &self, - _link: &LinkUnicast, + _link: &TransportLinkUnicast, executor: &TransportExecutor, keep_alive: Duration, - _batch_size: u16, ) -> ZResult<()> { self.start_keepalive(executor, keep_alive); Ok(()) } - fn start_rx(&self, 
_link: &LinkUnicast, lease: Duration, batch_size: u16) -> ZResult<()> { - self.internal_start_rx(lease, batch_size); + fn start_rx(&self, _link: &TransportLinkUnicast, lease: Duration) -> ZResult<()> { + self.internal_start_rx(lease); Ok(()) } /*************************************/ /* LINK */ /*************************************/ - async fn add_link(&self, link: LinkUnicast, _direction: LinkUnicastDirection) -> ZResult<()> { + async fn add_link(&self, link: TransportLinkUnicast) -> ZResult<()> { log::trace!("Adding link: {}", link); #[cfg(not(feature = "transport_unixpipe"))] @@ -237,8 +243,9 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { { let guard = zasyncread_upgradable!(self.link); - let existing_unixpipe = guard.get_dst().protocol().as_str() == UNIXPIPE_LOCATOR_PREFIX; - let new_unixpipe = link.get_dst().protocol().as_str() == UNIXPIPE_LOCATOR_PREFIX; + let existing_unixpipe = + guard.link.get_dst().protocol().as_str() == UNIXPIPE_LOCATOR_PREFIX; + let new_unixpipe = link.link.get_dst().protocol().as_str() == UNIXPIPE_LOCATOR_PREFIX; match (existing_unixpipe, new_unixpipe) { (false, true) => { // LowLatency transport suports only a single link, but code here also handles upgrade from non-unixpipe link to unixpipe link! 
@@ -308,7 +315,7 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, link: &LinkUnicast, reason: u8) -> ZResult<()> { + async fn close_link(&self, link: &TransportLinkUnicast, reason: u8) -> ZResult<()> { log::trace!("Closing link {} with peer: {}", link, self.config.zid); self.finalize(reason).await } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index d7d79d5387..da064e8f5b 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -18,17 +18,20 @@ use crate::unicast::establishment::ext::auth::Auth; #[cfg(feature = "transport_multilink")] use crate::unicast::establishment::ext::multilink::MultiLink; use crate::{ - lowlatency::transport::TransportUnicastLowlatency, - transport_unicast_inner::TransportUnicastTrait, - unicast::{TransportConfigUnicast, TransportUnicast}, - universal::transport::TransportUnicastUniversal, + unicast::{ + link::TransportLinkUnicast, lowlatency::transport::TransportUnicastLowlatency, + transport_unicast_inner::TransportUnicastTrait, + universal::transport::TransportUnicastUniversal, TransportConfigUnicast, TransportUnicast, + }, TransportManager, }; use async_std::{prelude::FutureExt, sync::Mutex, task}; use std::{collections::HashMap, sync::Arc, time::Duration}; +#[cfg(feature = "transport_compression")] +use zenoh_config::CompressionUnicastConf; #[cfg(feature = "shared-memory")] use zenoh_config::SharedMemoryConf; -use zenoh_config::{Config, LinkTxConf, QoSConf, TransportUnicastConf}; +use zenoh_config::{Config, LinkTxConf, QoSUnicastConf, TransportUnicastConf}; use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; use zenoh_link::*; @@ -53,8 +56,8 @@ pub struct TransportManagerConfigUnicast { pub max_links: usize, #[cfg(feature = "shared-memory")] pub is_shm: bool, - #[cfg(all(feature 
= "unstable", feature = "transport_compression"))] - pub is_compressed: bool, + #[cfg(feature = "transport_compression")] + pub is_compression: bool, } pub struct TransportManagerStateUnicast { @@ -96,11 +99,11 @@ pub struct TransportManagerBuilderUnicast { pub(super) max_links: usize, #[cfg(feature = "shared-memory")] pub(super) is_shm: bool, - #[cfg(feature = "transport_compression")] - pub(super) is_compressed: bool, #[cfg(feature = "transport_auth")] pub(super) authenticator: Auth, pub(super) is_lowlatency: bool, + #[cfg(feature = "transport_compression")] + pub(super) is_compression: bool, } impl TransportManagerBuilderUnicast { @@ -157,9 +160,9 @@ impl TransportManagerBuilderUnicast { self } - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - pub fn compression(mut self, is_compressed: bool) -> Self { - self.is_compressed = is_compressed; + #[cfg(feature = "transport_compression")] + pub fn compression(mut self, is_compression: bool) -> Self { + self.is_compression = is_compression; self } @@ -173,7 +176,7 @@ impl TransportManagerBuilderUnicast { )); self = self.accept_pending(*config.transport().unicast().accept_pending()); self = self.max_sessions(*config.transport().unicast().max_sessions()); - self = self.qos(*config.transport().qos().enabled()); + self = self.qos(*config.transport().unicast().qos().enabled()); self = self.lowlatency(*config.transport().unicast().lowlatency()); #[cfg(feature = "transport_multilink")] @@ -188,6 +191,10 @@ impl TransportManagerBuilderUnicast { { self = self.authenticator(Auth::from_config(config).await?); } + #[cfg(feature = "transport_compression")] + { + self = self.compression(*config.transport().unicast().compression().enabled()); + } Ok(self) } @@ -211,9 +218,9 @@ impl TransportManagerBuilderUnicast { max_links: self.max_links, #[cfg(feature = "shared-memory")] is_shm: self.is_shm, - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - is_compressed: self.is_compressed, 
is_lowlatency: self.is_lowlatency, + #[cfg(feature = "transport_compression")] + is_compression: self.is_compression, }; let state = TransportManagerStateUnicast { @@ -238,9 +245,11 @@ impl Default for TransportManagerBuilderUnicast { fn default() -> Self { let transport = TransportUnicastConf::default(); let link_tx = LinkTxConf::default(); - let qos = QoSConf::default(); + let qos = QoSUnicastConf::default(); #[cfg(feature = "shared-memory")] let shm = SharedMemoryConf::default(); + #[cfg(feature = "transport_compression")] + let compression = CompressionUnicastConf::default(); Self { lease: Duration::from_millis(*link_tx.lease()), @@ -253,11 +262,11 @@ impl Default for TransportManagerBuilderUnicast { max_links: *transport.max_links(), #[cfg(feature = "shared-memory")] is_shm: *shm.enabled(), - #[cfg(feature = "transport_compression")] - is_compressed: false, #[cfg(feature = "transport_auth")] authenticator: Auth::default(), is_lowlatency: *transport.lowlatency(), + #[cfg(feature = "transport_compression")] + is_compression: *compression.enabled(), } } } @@ -402,8 +411,7 @@ impl TransportManager { pub(super) async fn init_transport_unicast( &self, config: TransportConfigUnicast, - link: LinkUnicast, - direction: LinkUnicastDirection, + link: TransportLinkUnicast, ) -> Result)> { let mut guard = zasynclock!(self.state.unicast.transports); @@ -426,7 +434,7 @@ impl TransportManager { // Add the link to the transport transport - .add_link(link, direction) + .add_link(link) .await .map_err(|e| (e, Some(close::reason::MAX_LINKS)))?; @@ -462,7 +470,7 @@ impl TransportManager { .map_err(|e| (e, Some(close::reason::INVALID))) .map(|v| Arc::new(v) as Arc)?; // Add the link to the transport - t.add_link(link, direction) + t.add_link(link) .await .map_err(|e| (e, Some(close::reason::MAX_LINKS)))?; t @@ -538,7 +546,7 @@ impl TransportManager { // Create a new link associated by calling the Link Manager let link = manager.new_link(endpoint).await?; // Open the link - 
super::establishment::open::open_link(&link, self).await + super::establishment::open::open_link(link, self).await } pub async fn get_transport_unicast(&self, peer: &ZenohId) -> Option { @@ -587,7 +595,7 @@ impl TransportManager { } // A new link is available - log::trace!("New link waiting... {}", link); + log::trace!("Accepting link... {}", link); *guard += 1; drop(guard); diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index d2a14a0276..3385cbed6a 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // pub mod establishment; +pub(crate) mod link; pub(crate) mod lowlatency; pub(crate) mod manager; pub(crate) mod transport_unicast_inner; @@ -134,7 +135,7 @@ impl TransportUnicast { let link = transport .get_links() .into_iter() - .find(|l| l.get_src() == &link.src && l.get_dst() == &link.dst) + .find(|l| l.link.get_src() == &link.src && l.link.get_dst() == &link.dst) .ok_or_else(|| zerror!("Invalid link"))?; transport.close_link(&link, close::reason::GENERIC).await?; Ok(()) diff --git a/io/zenoh-transport/src/unicast/test_helpers.rs b/io/zenoh-transport/src/unicast/test_helpers.rs index 403384c851..42ed6db927 100644 --- a/io/zenoh-transport/src/unicast/test_helpers.rs +++ b/io/zenoh-transport/src/unicast/test_helpers.rs @@ -11,11 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // - +use crate::{unicast::TransportManagerBuilderUnicast, TransportManager}; use zenoh_core::zcondfeat; -use crate::{TransportManager, TransportManagerBuilderUnicast}; - pub fn make_transport_manager_builder( #[cfg(feature = "transport_multilink")] max_links: usize, #[cfg(feature = "shared-memory")] with_shm: bool, diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index acb6503c30..265607705b 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ 
b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -12,11 +12,13 @@ // ZettaScale Zenoh Team, // -use std::{fmt::DebugStruct, sync::Arc, time::Duration}; - +use crate::{ + unicast::{link::TransportLinkUnicast, TransportConfigUnicast}, + TransportExecutor, TransportPeerEventHandler, +}; use async_std::sync::MutexGuard as AsyncMutexGuard; use async_trait::async_trait; -use zenoh_link::{LinkUnicast, LinkUnicastDirection}; +use std::{fmt::DebugStruct, sync::Arc, time::Duration}; use zenoh_protocol::{ core::{WhatAmI, ZenohId}, network::NetworkMessage, @@ -24,8 +26,6 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{TransportConfigUnicast, TransportExecutor, TransportPeerEventHandler}; - /*************************************/ /* UNICAST TRANSPORT TRAIT */ /*************************************/ @@ -39,7 +39,7 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { fn get_zid(&self) -> ZenohId; fn get_whatami(&self) -> WhatAmI; fn get_callback(&self) -> Option>; - fn get_links(&self) -> Vec; + fn get_links(&self) -> Vec; #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool; fn is_qos(&self) -> bool; @@ -50,7 +50,7 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { /*************************************/ /* LINK */ /*************************************/ - async fn add_link(&self, link: LinkUnicast, direction: LinkUnicastDirection) -> ZResult<()>; + async fn add_link(&self, link: TransportLinkUnicast) -> ZResult<()>; /*************************************/ /* TX */ @@ -58,16 +58,15 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { fn schedule(&self, msg: NetworkMessage) -> ZResult<()>; fn start_tx( &self, - link: &LinkUnicast, + link: &TransportLinkUnicast, executor: &TransportExecutor, keep_alive: Duration, - batch_size: u16, ) -> ZResult<()>; /*************************************/ /* RX */ /*************************************/ - fn start_rx(&self, link: &LinkUnicast, lease: Duration, batch_size: u16) -> ZResult<()>; + 
fn start_rx(&self, link: &TransportLinkUnicast, lease: Duration) -> ZResult<()>; /*************************************/ /* INITIATION */ @@ -77,7 +76,7 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, link: &LinkUnicast, reason: u8) -> ZResult<()>; + async fn close_link(&self, link: &TransportLinkUnicast, reason: u8) -> ZResult<()>; async fn close(&self, reason: u8) -> ZResult<()>; fn add_debug_fields<'a, 'b: 'a, 'c>( diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index c4d19d2b66..3a7eafbc52 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -12,55 +12,33 @@ // ZettaScale Zenoh Team, // use super::transport::TransportUnicastUniversal; -use crate::common::pipeline::{ - TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, - TransmissionPipelineProducer, -}; -use crate::common::priority::TransportPriorityTx; #[cfg(feature = "stats")] use crate::common::stats::TransportStats; -use crate::TransportExecutor; +use crate::{ + common::{ + batch::RBatch, + pipeline::{ + TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, + TransmissionPipelineProducer, + }, + priority::TransportPriorityTx, + }, + unicast::link::{TransportLinkUnicast, TransportLinkUnicastRx, TransportLinkUnicastTx}, + TransportExecutor, +}; use async_std::prelude::FutureExt; use async_std::task; use async_std::task::JoinHandle; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -use std::convert::TryInto; -use std::sync::Arc; -use std::time::Duration; -use zenoh_buffers::ZSlice; -use zenoh_link::{LinkUnicast, LinkUnicastDirection}; -use zenoh_protocol::transport::{BatchSize, KeepAlive, TransportMessage}; -use zenoh_result::{bail, zerror, ZResult}; -use 
zenoh_sync::{RecyclingObjectPool, Signal}; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const HEADER_BYTES_SIZE: usize = 2; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const COMPRESSION_BYTE_INDEX_STREAMED: usize = 2; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const COMPRESSION_BYTE_INDEX: usize = 0; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const COMPRESSION_ENABLED: u8 = 1_u8; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const COMPRESSION_DISABLED: u8 = 0_u8; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const BATCH_PAYLOAD_START_INDEX: usize = 1; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const MAX_BATCH_SIZE: usize = u16::MAX as usize; +use std::{sync::Arc, time::Duration}; +use zenoh_buffers::ZSliceBuffer; +use zenoh_protocol::transport::{KeepAlive, TransportMessage}; +use zenoh_result::{zerror, ZResult}; +use zenoh_sync::{RecyclingObject, RecyclingObjectPool, Signal}; #[derive(Clone)] -pub(super) struct TransportLinkUnicast { - // Inbound / outbound - pub(super) direction: LinkUnicastDirection, +pub(super) struct TransportLinkUnicastUniversal { // The underlying link - pub(super) link: LinkUnicast, + pub(super) link: TransportLinkUnicast, // The transmission pipeline pub(super) pipeline: Option, // The transport this link is associated to @@ -71,17 +49,12 @@ pub(super) struct TransportLinkUnicast { handle_rx: Option>>, } -impl TransportLinkUnicast { - pub(super) fn new( - transport: TransportUnicastUniversal, - link: LinkUnicast, - direction: LinkUnicastDirection, - ) -> TransportLinkUnicast { - TransportLinkUnicast { - direction, - transport, +impl TransportLinkUnicastUniversal { + pub(super) fn new(transport: TransportUnicastUniversal, link: TransportLinkUnicast) -> Self { + Self { link, pipeline: None, + transport, handle_tx: None, signal_rx: Signal::new(), 
handle_rx: None, @@ -89,25 +62,23 @@ impl TransportLinkUnicast { } } -impl TransportLinkUnicast { +impl TransportLinkUnicastUniversal { pub(super) fn start_tx( &mut self, executor: &TransportExecutor, keep_alive: Duration, - batch_size: u16, priority_tx: &[TransportPriorityTx], ) { if self.handle_tx.is_none() { let config = TransmissionPipelineConf { - is_streamed: self.link.is_streamed(), - batch_size: batch_size.min(self.link.get_mtu()), + is_streamed: self.link.link.is_streamed(), + #[cfg(feature = "transport_compression")] + is_compression: self.link.config.is_compression, + batch_size: self.link.config.mtu, queue_size: self.transport.manager.config.queue_size, backoff: self.transport.manager.config.queue_backoff, }; - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - let is_compressed = self.transport.manager.config.unicast.is_compressed; - // The pipeline let (producer, consumer) = TransmissionPipeline::make(config, priority_tx); self.pipeline = Some(producer); @@ -118,12 +89,10 @@ impl TransportLinkUnicast { let handle = executor.spawn(async move { let res = tx_task( consumer, - c_link.clone(), + c_link.tx(), keep_alive, #[cfg(feature = "stats")] c_transport.stats.clone(), - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - is_compressed, ) .await; if let Err(e) = res { @@ -143,7 +112,7 @@ impl TransportLinkUnicast { } } - pub(super) fn start_rx(&mut self, lease: Duration, batch_size: u16) { + pub(super) fn start_rx(&mut self, lease: Duration) { if self.handle_rx.is_none() { // Spawn the RX task let c_link = self.link.clone(); @@ -154,11 +123,10 @@ impl TransportLinkUnicast { let handle = task::spawn(async move { // Start the consume task let res = rx_task( - c_link.clone(), + c_link.rx(), c_transport.clone(), lease, c_signal.clone(), - batch_size, c_rx_buffer_size, ) .await; @@ -194,7 +162,7 @@ impl TransportLinkUnicast { handle_tx.await; } - self.link.close().await + self.link.close(None).await } } @@ -203,35 
+171,15 @@ impl TransportLinkUnicast { /*************************************/ async fn tx_task( mut pipeline: TransmissionPipelineConsumer, - link: LinkUnicast, + mut link: TransportLinkUnicastTx, keep_alive: Duration, #[cfg(feature = "stats")] stats: Arc, - #[cfg(all(feature = "unstable", feature = "transport_compression"))] is_compressed: bool, ) -> ZResult<()> { - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - let mut compression_aux_buff: Box<[u8]> = - vec![0; lz4_flex::block::get_maximum_output_size(MAX_BATCH_SIZE)].into_boxed_slice(); - loop { match pipeline.pull().timeout(keep_alive).await { Ok(res) => match res { - Some((batch, priority)) => { - // Send the buffer on the link - #[allow(unused_mut)] - let mut bytes = batch.as_bytes(); - - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - { - let (batch_size, _) = tx_compressed( - is_compressed, - link.is_streamed(), - bytes, - &mut compression_aux_buff, - )?; - bytes = &compression_aux_buff[..batch_size]; - } - - link.write_all(bytes).await?; + Some((mut batch, priority)) => { + link.send_batch(&mut batch).await?; #[cfg(feature = "stats")] { @@ -260,8 +208,8 @@ async fn tx_task( // Drain the transmission pipeline and write remaining bytes on the wire let mut batches = pipeline.drain(); - for (b, _) in batches.drain(..) { - link.write_all(b.as_bytes()) + for (mut b, _) in batches.drain(..) 
{ + link.send_batch(&mut b) .timeout(keep_alive) .await .map_err(|_| zerror!("{}: flush failed after {} ms", link, keep_alive.as_millis()))??; @@ -276,30 +224,31 @@ async fn tx_task( Ok(()) } -async fn rx_task_stream( - link: LinkUnicast, +async fn rx_task( + mut link: TransportLinkUnicastRx, transport: TransportUnicastUniversal, lease: Duration, signal: Signal, - rx_batch_size: BatchSize, rx_buffer_size: usize, ) -> ZResult<()> { enum Action { - Read(usize), + Read(RBatch), Stop, } - async fn read(link: &LinkUnicast, buffer: &mut [u8]) -> ZResult { - // 16 bits for reading the batch length - let mut length = [0_u8, 0_u8]; - link.read_exact(&mut length).await?; - let n = BatchSize::from_le_bytes(length) as usize; - let len = buffer.len(); - let b = buffer.get_mut(0..n).ok_or_else(|| { - zerror!("Batch len is invalid. Received {n} but negotiated max len is {len}.") - })?; - link.read_exact(b).await?; - Ok(Action::Read(n)) + async fn read( + link: &mut TransportLinkUnicastRx, + pool: &RecyclingObjectPool, + ) -> ZResult + where + T: ZSliceBuffer + 'static, + F: Fn() -> T, + RecyclingObject: ZSliceBuffer, + { + let batch = link + .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) + .await?; + Ok(Action::Read(batch)) } async fn stop(signal: Signal) -> ZResult { @@ -308,7 +257,7 @@ async fn rx_task_stream( } // The pool of buffers - let mtu = link.get_mtu().min(rx_batch_size) as usize; + let mtu = link.inner.config.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; @@ -316,393 +265,23 @@ async fn rx_task_stream( let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); while !signal.is_triggered() { - // Retrieve one buffer - let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); // Async read from the underlying link - let action = read(&link, &mut buffer) + let action = read(&mut link, &pool) .race(stop(signal.clone())) .timeout(lease) .await .map_err(|_| zerror!("{}: expired after {} 
milliseconds", link, lease.as_millis()))??; match action { - Action::Read(n) => { + Action::Read(batch) => { #[cfg(feature = "stats")] { transport.stats.inc_rx_bytes(2 + n); // Account for the batch len encoding (16 bits) } - - #[allow(unused_mut)] - let mut end_pos = n; - - #[allow(unused_mut)] - let mut start_pos = 0; - - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - rx_decompress(&mut buffer, &pool, n, &mut start_pos, &mut end_pos)?; - - // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), start_pos, end_pos) - .map_err(|_| zerror!("Read {} bytes but buffer is {} bytes", n, mtu))?; - transport.read_messages(zslice, &link)?; + transport.read_messages(batch, &link.inner)?; } Action::Stop => break, } } - Ok(()) -} -async fn rx_task_dgram( - link: LinkUnicast, - transport: TransportUnicastUniversal, - lease: Duration, - signal: Signal, - rx_batch_size: BatchSize, - rx_buffer_size: usize, -) -> ZResult<()> { - enum Action { - Read(usize), - Stop, - } - - async fn read(link: &LinkUnicast, buffer: &mut [u8]) -> ZResult { - let n = link.read(buffer).await?; - Ok(Action::Read(n)) - } - - async fn stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) - } - - // The pool of buffers - let mtu = link.get_mtu().min(rx_batch_size) as usize; - let mut n = rx_buffer_size / mtu; - if rx_buffer_size % mtu != 0 { - n += 1; - } - let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); - - while !signal.is_triggered() { - // Retrieve one buffer - let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); - // Async read from the underlying link - let action = read(&link, &mut buffer) - .race(stop(signal.clone())) - .timeout(lease) - .await - .map_err(|_| zerror!("{}: expired after {} milliseconds", link, lease.as_millis()))??; - match action { - Action::Read(n) => { - if n == 0 { - // Reading 0 bytes means error - bail!("{}: zero bytes reading", link) - } - - 
#[cfg(feature = "stats")] - { - transport.stats.inc_rx_bytes(n); - } - - #[allow(unused_mut)] - let mut end_pos = n; - - #[allow(unused_mut)] - let mut start_pos = 0; - - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - rx_decompress(&mut buffer, &pool, n, &mut start_pos, &mut end_pos)?; - - // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), start_pos, end_pos) - .map_err(|_| zerror!("Read {} bytes but buffer is {} bytes", n, mtu))?; - transport.read_messages(zslice, &link)?; - } - Action::Stop => break, - } - } Ok(()) } - -async fn rx_task( - link: LinkUnicast, - transport: TransportUnicastUniversal, - lease: Duration, - signal: Signal, - rx_batch_size: u16, - rx_buffer_size: usize, -) -> ZResult<()> { - if link.is_streamed() { - rx_task_stream( - link, - transport, - lease, - signal, - rx_batch_size, - rx_buffer_size, - ) - .await - } else { - rx_task_dgram( - link, - transport, - lease, - signal, - rx_batch_size, - rx_buffer_size, - ) - .await - } -} - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -/// Decompresses the received contents contained in the buffer. -fn rx_decompress( - buffer: &mut zenoh_sync::RecyclingObject>, - pool: &RecyclingObjectPool, impl Fn() -> Box<[u8]>>, - read_bytes: usize, - start_pos: &mut usize, - end_pos: &mut usize, -) -> ZResult<()> { - let is_compressed: bool = buffer[COMPRESSION_BYTE_INDEX] == COMPRESSION_ENABLED; - if is_compressed { - let mut aux_buff = pool.try_take().unwrap_or_else(|| pool.alloc()); - *end_pos = lz4_flex::block::decompress_into( - &buffer[BATCH_PAYLOAD_START_INDEX..read_bytes], - &mut aux_buff, - ) - .map_err(|e| zerror!("Decompression error: {:}", e))?; - *buffer = aux_buff; - } else { - *start_pos = BATCH_PAYLOAD_START_INDEX; - *end_pos = read_bytes; - } - Ok(()) -} - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -/// Compresses the batch into the output buffer. 
-/// -/// If the batch is streamed, the output contains a header of two bytes representing the size of -/// the resulting batch, otherwise it is not included. In any case, an extra byte is added (before -/// the payload and considered in the header value) representing if the batch is compressed or not. -/// If the resulting size of the compression no smaller than the original batch size, then -/// we send the original one. -/// -/// Returns a tuple containing the size of the resulting batch, along with a boolean representing -/// if the batch was indeed compressed or not. -fn tx_compressed( - is_compressed: bool, - is_streamed: bool, - batch: &[u8], - output: &mut [u8], -) -> ZResult<(/*batch_size=*/ usize, /*was_compressed=*/ bool)> { - if is_compressed { - let s_pos = if is_streamed { 3 } else { 1 }; - let payload = &batch[s_pos - 1..]; - let payload_size = payload.len(); - let compression_size = lz4_flex::block::compress_into(payload, &mut output[s_pos..]) - .map_err(|e| zerror!("Compression error: {:}", e))?; - if compression_size >= payload_size { - log::debug!( - "Compression discarded due to the original batch size being smaller than the compressed batch." - ); - return Ok(( - set_uncompressed_batch_header(batch, output, is_streamed)?, - false, - )); - } - Ok(( - set_compressed_batch_header(output, compression_size, is_streamed)?, - true, - )) - } else { - Ok(( - set_uncompressed_batch_header(batch, output, is_streamed)?, - false, - )) - } -} - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -/// Inserts the compresion byte for batches WITH compression. -/// The buffer is expected to contain the compression starting from byte 3 (if streamed) or 1 -/// (if not streamed). -/// -/// Arguments: -/// - buff: the buffer with the compression, with 3 or 1 bytes reserved at the beginning in case of -/// being streamed or not respectively. 
-/// - compression_size: the size of the compression -/// - is_streamed: if the batch is intended to be streamed or not -/// -/// Returns: the size of the compressed batch considering the header. -fn set_compressed_batch_header( - buff: &mut [u8], - compression_size: usize, - is_streamed: bool, -) -> ZResult { - let final_batch_size: usize; - let payload_size = 1 + compression_size; - if is_streamed { - let payload_size_u16: u16 = payload_size.try_into().map_err(|e| { - zerror!( - "Compression error: unable to convert batch size into u16: {}", - e - ) - })?; - buff[0..HEADER_BYTES_SIZE].copy_from_slice(&payload_size_u16.to_le_bytes()); - buff[COMPRESSION_BYTE_INDEX_STREAMED] = COMPRESSION_ENABLED; - final_batch_size = payload_size + HEADER_BYTES_SIZE; - } else { - buff[COMPRESSION_BYTE_INDEX] = COMPRESSION_ENABLED; - final_batch_size = payload_size; - } - if final_batch_size > MAX_BATCH_SIZE { - // May happen when the payload size is itself the MTU and adding the header exceeds it. - Err(zerror!("Failed to send uncompressed batch, batch size ({}) exceeds the maximum batch size of {}.", final_batch_size, MAX_BATCH_SIZE))? - } - Ok(final_batch_size) -} - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -/// Inserts the compression byte for batches without compression, that is inserting a 0 byte on the -/// third position of the buffer and increasing the batch size from the header by 1. -/// -/// Arguments: -/// - bytes: the source slice -/// - buff: the output slice -/// - is_streamed: if the batch is meant to be streamed or not, thus considering or not the 2 bytes -/// header specifying the size of the batch. -/// -/// Returns: the size of the batch considering the header. 
-fn set_uncompressed_batch_header( - bytes: &[u8], - buff: &mut [u8], - is_streamed: bool, -) -> ZResult { - let final_batch_size: usize; - if is_streamed { - let mut header = [0_u8, 0_u8]; - header[..HEADER_BYTES_SIZE].copy_from_slice(&bytes[..HEADER_BYTES_SIZE]); - let batch_size = if let Some(size) = u16::from_le_bytes(header).checked_add(1) { - size - } else { - bail!("Compression error: unable to convert compression size into u16",) - }; - buff[0..HEADER_BYTES_SIZE].copy_from_slice(&batch_size.to_le_bytes()); - buff[COMPRESSION_BYTE_INDEX_STREAMED] = COMPRESSION_DISABLED; - let batch_size: usize = batch_size.into(); - buff[3..batch_size + 2].copy_from_slice(&bytes[2..batch_size + 1]); - final_batch_size = batch_size + 2; - } else { - buff[COMPRESSION_BYTE_INDEX] = COMPRESSION_DISABLED; - let len = 1 + bytes.len(); - buff[1..1 + bytes.len()].copy_from_slice(bytes); - final_batch_size = len; - } - if final_batch_size > MAX_BATCH_SIZE { - // May happen when the payload size is itself the MTU and adding the header exceeds it. - Err(zerror!("Failed to send uncompressed batch, batch size ({}) exceeds the maximum batch size of {}.", final_batch_size, MAX_BATCH_SIZE))?; - } - Ok(final_batch_size) -} - -#[cfg(all(feature = "transport_compression", feature = "unstable"))] -#[test] -fn tx_compression_test() { - const COMPRESSION_BYTE: usize = 1; - let payload = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]; - let mut buff: Box<[u8]> = - vec![0; lz4_flex::block::get_maximum_output_size(MAX_BATCH_SIZE) + 3].into_boxed_slice(); - - // Compression done for the sake of comparing the result. 
- let payload_compression_size = lz4_flex::block::compress_into(&payload, &mut buff).unwrap(); - - fn get_header_value(buff: &[u8]) -> u16 { - let mut header = [0_u8, 0_u8]; - header[..HEADER_BYTES_SIZE].copy_from_slice(&buff[..HEADER_BYTES_SIZE]); - u16::from_le_bytes(header) - } - - // Streamed with compression enabled - let batch = [16, 0, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]; - let (batch_size, was_compressed) = tx_compressed(true, true, &batch, &mut buff).unwrap(); - let header = get_header_value(&buff); - assert!(was_compressed); - assert_eq!(header as usize, payload_compression_size + COMPRESSION_BYTE); - assert!(batch_size < batch.len() + COMPRESSION_BYTE); - assert_eq!(batch_size, payload_compression_size + 3); - - // Not streamed with compression enabled - let batch = payload; - let (batch_size, was_compressed) = tx_compressed(true, false, &batch, &mut buff).unwrap(); - assert!(was_compressed); - assert!(batch_size < batch.len() + COMPRESSION_BYTE); - assert_eq!(batch_size, payload_compression_size + COMPRESSION_BYTE); - - // Streamed with compression disabled - let batch = [16, 0, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]; - let (batch_size, was_compressed) = tx_compressed(false, true, &batch, &mut buff).unwrap(); - let header = get_header_value(&buff); - assert!(!was_compressed); - assert_eq!(header as usize, payload.len() + COMPRESSION_BYTE); - assert_eq!(batch_size, batch.len() + COMPRESSION_BYTE); - - // Not streamed and compression disabled - let batch = payload; - let (batch_size, was_compressed) = tx_compressed(false, false, &batch, &mut buff).unwrap(); - assert!(!was_compressed); - assert_eq!(batch_size, payload.len() + COMPRESSION_BYTE); - - // Verify that if the compression result is bigger than the original payload size, then the non compressed payload is returned. 
- let batch = [16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; // a non compressable payload with no repetitions - let (batch_size, was_compressed) = tx_compressed(true, true, &batch, &mut buff).unwrap(); - assert!(!was_compressed); - assert_eq!(batch_size, batch.len() + COMPRESSION_BYTE); -} - -#[cfg(all(feature = "transport_compression", feature = "unstable"))] -#[test] -fn rx_compression_test() { - let pool = RecyclingObjectPool::new(2, || vec![0_u8; MAX_BATCH_SIZE].into_boxed_slice()); - let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); - - // Compressed batch - let payload: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8]; - let compression_size = lz4_flex::block::compress_into(&payload, &mut buffer[1..]).unwrap(); - buffer[0] = 1; // is compressed byte - - let mut start_pos: usize = 0; - let mut end_pos: usize = 0; - - rx_decompress( - &mut buffer, - &pool, - compression_size + 1, - &mut start_pos, - &mut end_pos, - ) - .unwrap(); - - assert_eq!(start_pos, 0); - assert_eq!(end_pos, payload.len()); - assert_eq!(buffer[start_pos..end_pos], payload); - - // Non compressed batch - let mut start_pos: usize = 0; - let mut end_pos: usize = 0; - - buffer[0] = 0; - buffer[1..payload.len() + 1].copy_from_slice(&payload[..]); - rx_decompress( - &mut buffer, - &pool, - payload.len() + 1, - &mut start_pos, - &mut end_pos, - ) - .unwrap(); - - assert_eq!(start_pos, 1); - assert_eq!(end_pos, payload.len() + 1); - assert_eq!(buffer[start_pos..end_pos], payload); -} diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 5822b09931..459998ddcf 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -1,5 +1,3 @@ -use crate::transport_unicast_inner::TransportUnicastTrait; - // // Copyright (c) 2023 ZettaScale Technology // @@ -14,16 +12,16 @@ use crate::transport_unicast_inner::TransportUnicastTrait; // ZettaScale Zenoh Team, // use 
super::transport::TransportUnicastUniversal; -use crate::common::priority::TransportChannelRx; +use crate::{ + common::{ + batch::{Decode, RBatch}, + priority::TransportChannelRx, + }, + unicast::{link::TransportLinkUnicast, transport_unicast_inner::TransportUnicastTrait}, +}; use async_std::task; use std::sync::MutexGuard; -use zenoh_buffers::{ - reader::{HasReader, Reader}, - ZSlice, -}; -use zenoh_codec::{RCodec, Zenoh080}; use zenoh_core::{zlock, zread}; -use zenoh_link::LinkUnicast; use zenoh_protocol::{ core::{Priority, Reliability}, network::NetworkMessage, @@ -62,7 +60,7 @@ impl TransportUnicastUniversal { } } - fn handle_close(&self, link: &LinkUnicast, _reason: u8, session: bool) -> ZResult<()> { + fn handle_close(&self, link: &TransportLinkUnicast, _reason: u8, session: bool) -> ZResult<()> { // Stop now rx and tx tasks before doing the proper cleanup let _ = self.stop_rx(link); let _ = self.stop_tx(link); @@ -189,12 +187,14 @@ impl TransportUnicastUniversal { Ok(()) } - pub(super) fn read_messages(&self, mut zslice: ZSlice, link: &LinkUnicast) -> ZResult<()> { - let codec = Zenoh080::new(); - let mut reader = zslice.reader(); - while reader.can_read() { - let msg: TransportMessage = codec - .read(&mut reader) + pub(super) fn read_messages( + &self, + mut batch: RBatch, + link: &TransportLinkUnicast, + ) -> ZResult<()> { + while !batch.is_empty() { + let msg: TransportMessage = batch + .decode() .map_err(|_| zerror!("{}: decoding error", link))?; log::trace!("Received: {:?}", msg); diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 5c17b36827..a920ac90b9 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -11,42 +11,50 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::priority::{TransportPriorityRx, TransportPriorityTx}; #[cfg(feature = "stats")] use crate::stats::TransportStats; 
-use crate::transport_unicast_inner::TransportUnicastTrait; -use crate::unicast::universal::link::TransportLinkUnicast; -use crate::TransportConfigUnicast; -use crate::{TransportExecutor, TransportManager, TransportPeerEventHandler}; +use crate::{ + common::priority::{TransportPriorityRx, TransportPriorityTx}, + unicast::{ + link::{TransportLinkUnicast, TransportLinkUnicastDirection}, + transport_unicast_inner::TransportUnicastTrait, + universal::link::TransportLinkUnicastUniversal, + TransportConfigUnicast, + }, + TransportExecutor, TransportManager, TransportPeerEventHandler, +}; use async_std::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use async_trait::async_trait; use std::fmt::DebugStruct; use std::sync::{Arc, RwLock}; use std::time::Duration; use zenoh_core::{zasynclock, zcondfeat, zread, zwrite}; -use zenoh_link::{Link, LinkUnicast, LinkUnicastDirection}; -use zenoh_protocol::network::NetworkMessage; +use zenoh_link::Link; use zenoh_protocol::{ core::{Priority, WhatAmI, ZenohId}, + network::NetworkMessage, transport::{Close, PrioritySn, TransportMessage, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; macro_rules! zlinkget { ($guard:expr, $link:expr) => { - $guard.iter().find(|tl| &tl.link == $link) + // Compare LinkUnicast link to not compare TransportLinkUnicast direction + $guard.iter().find(|tl| &tl.link.link == &$link.link) }; } macro_rules! zlinkgetmut { ($guard:expr, $link:expr) => { - $guard.iter_mut().find(|tl| &tl.link == $link) + // Compare LinkUnicast link to not compare TransportLinkUnicast direction + $guard.iter_mut().find(|tl| &tl.link.link == &$link.link) }; } macro_rules! 
zlinkindex { ($guard:expr, $link:expr) => { - $guard.iter().position(|tl| &tl.link == $link) + // Compare LinkUnicast link to not compare TransportLinkUnicast direction + $guard.iter().position(|tl| &tl.link.link == &$link.link) }; } @@ -64,7 +72,7 @@ pub(crate) struct TransportUnicastUniversal { // Rx priorities pub(super) priority_rx: Arc<[TransportPriorityRx]>, // The links associated to the channel - pub(super) links: Arc>>, + pub(super) links: Arc>>, // The callback pub(super) callback: Arc>>>, // Mutex for notification @@ -162,10 +170,10 @@ impl TransportUnicastUniversal { Ok(()) } - pub(crate) async fn del_link(&self, link: &LinkUnicast) -> ZResult<()> { + pub(crate) async fn del_link(&self, link: &TransportLinkUnicast) -> ZResult<()> { enum Target { Transport, - Link(Box), + Link(Box), } // Try to remove the link @@ -206,7 +214,7 @@ impl TransportUnicastUniversal { } } - pub(crate) fn stop_tx(&self, link: &LinkUnicast) -> ZResult<()> { + pub(crate) fn stop_tx(&self, link: &TransportLinkUnicast) -> ZResult<()> { let mut guard = zwrite!(self.links); match zlinkgetmut!(guard, link) { Some(l) => { @@ -223,7 +231,7 @@ impl TransportUnicastUniversal { } } - pub(crate) fn stop_rx(&self, link: &LinkUnicast) -> ZResult<()> { + pub(crate) fn stop_rx(&self, link: &TransportLinkUnicast) -> ZResult<()> { let mut guard = zwrite!(self.links); match zlinkgetmut!(guard, link) { Some(l) => { @@ -246,13 +254,16 @@ impl TransportUnicastTrait for TransportUnicastUniversal { /*************************************/ /* LINK */ /*************************************/ - async fn add_link(&self, link: LinkUnicast, direction: LinkUnicastDirection) -> ZResult<()> { + async fn add_link(&self, link: TransportLinkUnicast) -> ZResult<()> { // Add the link to the channel let mut guard = zwrite!(self.links); // Check if we can add more inbound links - if let LinkUnicastDirection::Inbound = direction { - let count = guard.iter().filter(|l| l.direction == direction).count(); + if let 
TransportLinkUnicastDirection::Inbound = link.config.direction { + let count = guard + .iter() + .filter(|l| l.link.config.direction == link.config.direction) + .count(); let limit = zcondfeat!( "transport_multilink", @@ -275,8 +286,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { } } - // Create a channel link from a link - let link = TransportLinkUnicast::new(self.clone(), link, direction); + let link = TransportLinkUnicastUniversal::new(self.clone(), link); let mut links = Vec::with_capacity(guard.len() + 1); links.extend_from_slice(&guard); @@ -357,7 +367,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, link: &LinkUnicast, reason: u8) -> ZResult<()> { + async fn close_link(&self, link: &TransportLinkUnicast, reason: u8) -> ZResult<()> { log::trace!("Closing link {} with peer: {}", link, self.config.zid); let mut pipeline = zlinkget!(zread!(self.links), link) @@ -403,7 +413,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { self.delete().await } - fn get_links(&self) -> Vec { + fn get_links(&self) -> Vec { zread!(self.links).iter().map(|l| l.link.clone()).collect() } @@ -419,33 +429,32 @@ impl TransportUnicastTrait for TransportUnicastUniversal { fn start_tx( &self, - link: &LinkUnicast, + link: &TransportLinkUnicast, executor: &TransportExecutor, keep_alive: Duration, - batch_size: u16, ) -> ZResult<()> { let mut guard = zwrite!(self.links); match zlinkgetmut!(guard, link) { Some(l) => { assert!(!self.priority_tx.is_empty()); - l.start_tx(executor, keep_alive, batch_size, &self.priority_tx); + l.start_tx(executor, keep_alive, &self.priority_tx); Ok(()) } None => { bail!( - "Can not start Link TX {} with peer: {}", + "Can not start Link TX {} with ZID: {}", link, - self.config.zid + self.config.zid, ) } } } - fn start_rx(&self, link: &LinkUnicast, lease: Duration, batch_size: u16) -> ZResult<()> 
{ + fn start_rx(&self, link: &TransportLinkUnicast, lease: Duration) -> ZResult<()> { let mut guard = zwrite!(self.links); match zlinkgetmut!(guard, link) { Some(l) => { - l.start_rx(lease, batch_size); + l.start_rx(lease); Ok(()) } None => { diff --git a/io/zenoh-transport/src/unicast/universal/tx.rs b/io/zenoh-transport/src/unicast/universal/tx.rs index 7dbc5329e6..bf5be7e702 100644 --- a/io/zenoh-transport/src/unicast/universal/tx.rs +++ b/io/zenoh-transport/src/unicast/universal/tx.rs @@ -34,7 +34,7 @@ impl TransportUnicastUniversal { if let Some(pl) = guard .iter() .filter_map(|tl| { - if msg.is_reliable() == tl.link.is_reliable() { + if msg.is_reliable() == tl.link.link.is_reliable() { tl.pipeline.as_ref() } else { None diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index e372e9e013..2ac2084552 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -21,8 +21,8 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs new file mode 100644 index 0000000000..fafb28e642 --- /dev/null +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -0,0 +1,376 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at 
https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +// Restricting to macos by default because of no IPv6 support +// on GitHub CI actions on Linux and Windows. +#[cfg(all(target_family = "unix", feature = "transport_compression"))] +mod tests { + use async_std::{prelude::FutureExt, task}; + use std::{ + any::Any, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }; + use zenoh_core::zasync_executor_init; + use zenoh_link::Link; + use zenoh_protocol::{ + core::{ + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + }, + network::{ + push::{ + ext::{NodeIdType, QoSType}, + Push, + }, + NetworkMessage, + }, + zenoh::Put, + }; + use zenoh_result::ZResult; + use zenoh_transport::{ + multicast::{TransportManagerBuilderMulticast, TransportMulticast}, + unicast::TransportUnicast, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, + }; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const SLEEP_COUNT: Duration = Duration::from_millis(10); + + const MSG_COUNT: usize = 1_000; + const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; + + macro_rules! 
ztimeout { + ($f:expr) => { + $f.timeout(TIMEOUT).await.unwrap() + }; + } + + // Transport Handler for the peer02 + struct SHPeer { + count: Arc, + } + + impl Default for SHPeer { + fn default() -> Self { + Self { + count: Arc::new(AtomicUsize::new(0)), + } + } + } + + impl SHPeer { + fn get_count(&self) -> usize { + self.count.load(Ordering::Relaxed) + } + } + + impl TransportEventHandler for SHPeer { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + panic!(); + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + let arc = Arc::new(SCPeer::new(self.count.clone())); + Ok(arc) + } + } + + // Transport Callback for the peer02 + pub struct SCPeer { + count: Arc, + } + + impl SCPeer { + pub fn new(count: Arc) -> Self { + Self { count } + } + } + + impl TransportMulticastEventHandler for SCPeer { + fn new_peer(&self, peer: TransportPeer) -> ZResult> { + println!("\tNew peer: {:?}", peer); + Ok(Arc::new(SCPeer { + count: self.count.clone(), + })) + } + fn closing(&self) {} + fn closed(&self) {} + + fn as_any(&self) -> &dyn Any { + self + } + } + + impl TransportPeerEventHandler for SCPeer { + fn handle_message(&self, _msg: NetworkMessage) -> ZResult<()> { + self.count.fetch_add(1, Ordering::Relaxed); + Ok(()) + } + + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} + + fn as_any(&self) -> &dyn Any { + self + } + } + + struct TransportMulticastPeer { + manager: TransportManager, + handler: Arc, + transport: TransportMulticast, + } + + async fn open_transport( + endpoint: &EndPoint, + ) -> (TransportMulticastPeer, TransportMulticastPeer) { + // Define peer01 and peer02 IDs + let peer01_id = ZenohId::try_from([1]).unwrap(); + let peer02_id = ZenohId::try_from([2]).unwrap(); + + // Create the peer01 transport manager + let peer01_handler = Arc::new(SHPeer::default()); + let peer01_manager = TransportManager::builder() + 
.zid(peer01_id) + .whatami(WhatAmI::Peer) + .multicast(TransportManagerBuilderMulticast::default().compression(true)) + .build(peer01_handler.clone()) + .unwrap(); + + // Create the peer02 transport manager + let peer02_handler = Arc::new(SHPeer::default()); + let peer02_manager = TransportManager::builder() + .zid(peer02_id) + .whatami(WhatAmI::Peer) + .multicast(TransportManagerBuilderMulticast::default().compression(true)) + .build(peer02_handler.clone()) + .unwrap(); + + // Create an empty transport with the peer01 + // Open transport -> This should be accepted + println!("Opening transport with {endpoint}"); + let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap(); + assert!(!peer01_manager.get_transports_multicast().await.is_empty()); + println!("\t{:?}", peer01_manager.get_transports_multicast().await); + + println!("Opening transport with {endpoint}"); + let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap(); + assert!(!peer02_manager.get_transports_multicast().await.is_empty()); + println!("\t{:?}", peer02_manager.get_transports_multicast().await); + + // Wait to for peer 01 and 02 to join each other + ztimeout!(async { + while peer01_manager + .get_transport_multicast(&peer02_id) + .await + .is_none() + { + task::sleep(SLEEP_COUNT).await; + } + }); + let peer01_transport = peer01_manager + .get_transport_multicast(&peer02_id) + .await + .unwrap(); + println!( + "\tPeer01 peers: {:?}", + peer01_transport.get_peers().unwrap() + ); + + ztimeout!(async { + while peer02_manager + .get_transport_multicast(&peer01_id) + .await + .is_none() + { + task::sleep(SLEEP_COUNT).await; + } + }); + let peer02_transport = peer02_manager + .get_transport_multicast(&peer01_id) + .await + .unwrap(); + println!( + "\tPeer02 peers: {:?}", + peer02_transport.get_peers().unwrap() + ); + + ( + TransportMulticastPeer { + manager: peer01_manager, + handler: peer01_handler, + transport: peer01_transport, + }, + 
TransportMulticastPeer { + manager: peer02_manager, + handler: peer02_handler, + transport: peer02_transport, + }, + ) + } + + async fn close_transport( + peer01: TransportMulticastPeer, + peer02: TransportMulticastPeer, + endpoint: &EndPoint, + ) { + // Close the peer01 transport + println!("Closing transport with {endpoint}"); + ztimeout!(peer01.transport.close()).unwrap(); + assert!(peer01.manager.get_transports_multicast().await.is_empty()); + ztimeout!(async { + while !peer02.transport.get_peers().unwrap().is_empty() { + task::sleep(SLEEP_COUNT).await; + } + }); + + // Close the peer02 transport + println!("Closing transport with {endpoint}"); + ztimeout!(peer02.transport.close()).unwrap(); + assert!(peer02.manager.get_transports_multicast().await.is_empty()); + + // Wait a little bit + task::sleep(SLEEP).await; + } + + async fn test_transport( + peer01: &TransportMulticastPeer, + peer02: &TransportMulticastPeer, + channel: Channel, + msg_size: usize, + ) { + // Create the message to send + let message: NetworkMessage = Push { + wire_expr: "test".into(), + ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), + ext_tstamp: None, + ext_nodeid: NodeIdType::default(), + payload: Put { + payload: vec![0u8; msg_size].into(), + timestamp: None, + encoding: Encoding::default(), + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_unknown: vec![], + } + .into(), + } + .into(); + + println!("Sending {MSG_COUNT} messages... 
{channel:?} {msg_size}"); + for _ in 0..MSG_COUNT { + peer01.transport.schedule(message.clone()).unwrap(); + } + + match channel.reliability { + Reliability::Reliable => { + ztimeout!(async { + while peer02.handler.get_count() != MSG_COUNT { + task::sleep(SLEEP_COUNT).await; + } + }); + } + Reliability::BestEffort => { + ztimeout!(async { + while peer02.handler.get_count() == 0 { + task::sleep(SLEEP_COUNT).await; + } + }); + } + }; + + // Wait a little bit + task::sleep(SLEEP).await; + } + + async fn run_single(endpoint: &EndPoint, channel: Channel, msg_size: usize) { + let (peer01, peer02) = open_transport(endpoint).await; + test_transport(&peer01, &peer02, channel, msg_size).await; + + #[cfg(feature = "stats")] + { + let stats = peer01.transport.get_stats().unwrap().report(); + println!("\tPeer 01: {:?}", stats); + let stats = peer02.transport.get_stats().unwrap().report(); + println!("\tPeer 02: {:?}", stats); + } + + close_transport(peer01, peer02, endpoint).await; + } + + async fn run(endpoints: &[EndPoint], channel: &[Channel], msg_size: &[usize]) { + for e in endpoints.iter() { + for ch in channel.iter() { + for ms in msg_size.iter() { + run_single(e, *ch, *ms).await; + } + } + } + } + + #[cfg(feature = "transport_udp")] + #[test] + fn transport_multicast_compression_udp_only() { + env_logger::init(); + + task::block_on(async { + zasync_executor_init!(); + }); + + // Define the locator + let endpoints: Vec = vec![ + format!( + "udp/224.{}.{}.{}:21000", + rand::random::(), + rand::random::(), + rand::random::() + ) + .parse() + .unwrap(), + // Disabling by default because of no IPv6 support + // on GitHub CI actions. 
+ // format!("udp/{}", ZN_MULTICAST_IPV6_ADDRESS_DEFAULT) + // .parse() + // .unwrap(), + ]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + task::block_on(run(&endpoints, &channel, &MSG_SIZE_NOFRAG)); + } +} diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index 28f69ef3b7..0822d08f58 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -42,8 +42,8 @@ mod tests { }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); @@ -330,7 +330,7 @@ mod tests { } } - #[cfg(feature = "transport_udp")] + #[cfg(all(feature = "transport_compression", feature = "transport_udp"))] #[test] fn transport_multicast_udp_only() { env_logger::init(); @@ -342,7 +342,7 @@ mod tests { // Define the locator let endpoints: Vec = vec![ format!( - "udp/224.{}.{}.{}:7447", + "udp/224.{}.{}.{}:20000", rand::random::(), rand::random::(), rand::random::() diff --git a/io/zenoh-transport/tests/transport_whitelist.rs b/io/zenoh-transport/tests/transport_whitelist.rs index 5279dcff21..5a929ed18c 100644 --- a/io/zenoh-transport/tests/transport_whitelist.rs +++ b/io/zenoh-transport/tests/transport_whitelist.rs @@ -21,8 +21,8 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, 
TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index b22d7875fd..51e78d4ee8 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -21,11 +21,12 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - unicast::establishment::ext::auth::Auth, TransportMulticast, TransportMulticastEventHandler, + multicast::TransportMulticast, unicast::establishment::ext::auth::Auth, + TransportMulticastEventHandler, }; use zenoh_transport::{ - DummyTransportPeerEventHandler, TransportEventHandler, TransportPeer, - TransportPeerEventHandler, TransportUnicast, + unicast::TransportUnicast, DummyTransportPeerEventHandler, TransportEventHandler, + TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); @@ -109,9 +110,13 @@ impl TransportEventHandler for SHClientAuthenticator { #[cfg(feature = "auth_pubkey")] async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { use rsa::{BigUint, RsaPrivateKey, RsaPublicKey}; - use zenoh_transport::test_helpers::make_basic_transport_manager_builder; - use zenoh_transport::unicast::establishment::ext::auth::AuthPubKey; - use zenoh_transport::TransportManager; + use zenoh_transport::{ + unicast::{ + establishment::ext::auth::AuthPubKey, + test_helpers::make_basic_transport_manager_builder, + }, + TransportManager, + }; // Create the transport transport manager for the client 01 let client01_id = ZenohId::try_from([2]).unwrap(); @@ -411,9 +416,13 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { 
#[cfg(feature = "auth_usrpwd")] async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { - use zenoh_transport::test_helpers::make_basic_transport_manager_builder; - use zenoh_transport::unicast::establishment::ext::auth::AuthUsrPwd; - use zenoh_transport::TransportManager; + use zenoh_transport::{ + unicast::{ + establishment::ext::auth::AuthUsrPwd, + test_helpers::make_basic_transport_manager_builder, + }, + TransportManager, + }; /* [CLIENT] */ let client01_id = ZenohId::try_from([2]).unwrap(); diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs new file mode 100644 index 0000000000..be979fef23 --- /dev/null +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -0,0 +1,553 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +#[cfg(feature = "transport_compression")] +mod tests { + use async_std::{prelude::FutureExt, task}; + use std::fmt::Write as _; + use std::{ + any::Any, + convert::TryFrom, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }; + use zenoh_core::zasync_executor_init; + use zenoh_link::Link; + use zenoh_protocol::{ + core::{ + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + }, + network::{ + push::ext::{NodeIdType, QoSType}, + NetworkMessage, Push, + }, + zenoh::Put, + }; + use zenoh_result::ZResult; + use zenoh_transport::{ + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, + }; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const SLEEP_COUNT: Duration = Duration::from_millis(10); + + const MSG_COUNT: usize = 1_000; + const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; + const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; + const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; + + macro_rules! 
ztimeout { + ($f:expr) => { + $f.timeout(TIMEOUT).await.unwrap() + }; + } + + // Transport Handler for the router + struct SHRouter { + count: Arc, + } + + impl Default for SHRouter { + fn default() -> Self { + Self { + count: Arc::new(AtomicUsize::new(0)), + } + } + } + + impl SHRouter { + fn get_count(&self) -> usize { + self.count.load(Ordering::SeqCst) + } + } + + impl TransportEventHandler for SHRouter { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + let arc = Arc::new(SCRouter::new(self.count.clone())); + Ok(arc) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); + } + } + + // Transport Callback for the router + pub struct SCRouter { + count: Arc, + } + + impl SCRouter { + pub fn new(count: Arc) -> Self { + Self { count } + } + } + + impl TransportPeerEventHandler for SCRouter { + fn handle_message(&self, _message: NetworkMessage) -> ZResult<()> { + self.count.fetch_add(1, Ordering::SeqCst); + Ok(()) + } + + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} + + fn as_any(&self) -> &dyn Any { + self + } + } + + // Transport Handler for the client + #[derive(Default)] + struct SHClient; + + impl TransportEventHandler for SHClient { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + Ok(Arc::new(SCClient)) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); + } + } + + // Transport Callback for the client + #[derive(Default)] + pub struct SCClient; + + impl TransportPeerEventHandler for SCClient { + fn handle_message(&self, _message: NetworkMessage) -> ZResult<()> { + Ok(()) + } + + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} + + fn as_any(&self) -> &dyn Any { + self + } + } + + async fn open_transport_unicast( + 
client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + lowlatency_transport: bool, + ) -> ( + TransportManager, + Arc, + TransportManager, + TransportUnicast, + ) { + // Define client and router IDs + let client_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohId::try_from([2]).unwrap(); + + // Create the router transport manager + let router_handler = Arc::new(SHRouter::default()); + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + server_endpoints.len(), + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ); + let router_manager = TransportManager::builder() + .zid(router_id) + .whatami(WhatAmI::Router) + .unicast(unicast) + .build(router_handler.clone()) + .unwrap(); + + // Create the listener on the router + for e in server_endpoints.iter() { + println!("Add endpoint: {}", e); + let _ = ztimeout!(router_manager.add_listener(e.clone())).unwrap(); + } + + // Create the client transport manager + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + client_endpoints.len(), + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ) + .compression(true); + let client_manager = TransportManager::builder() + .whatami(WhatAmI::Client) + .zid(client_id) + .unicast(unicast) + .build(Arc::new(SHClient)) + .unwrap(); + + // Create an empty transport with the client + // Open transport -> This should be accepted + for e in client_endpoints.iter() { + println!("Opening transport with {}", e); + let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); + } + + let client_transport = client_manager + .get_transport_unicast(&router_id) + .await + .unwrap(); + + // Return the handlers + ( + router_manager, + router_handler, + client_manager, + client_transport, + ) + } + + async fn close_transport( + router_manager: TransportManager, + client_manager: TransportManager, + client_transport: TransportUnicast, + endpoints: 
&[EndPoint], + ) { + // Close the client transport + let mut ee = String::new(); + for e in endpoints.iter() { + let _ = write!(ee, "{e} "); + } + println!("Closing transport with {}", ee); + ztimeout!(client_transport.close()).unwrap(); + + ztimeout!(async { + while !router_manager.get_transports_unicast().await.is_empty() { + task::sleep(SLEEP).await; + } + }); + + // Stop the locators on the manager + for e in endpoints.iter() { + println!("Del locator: {}", e); + ztimeout!(router_manager.del_listener(e)).unwrap(); + } + + ztimeout!(async { + while !router_manager.get_listeners().is_empty() { + task::sleep(SLEEP).await; + } + }); + + // Wait a little bit + task::sleep(SLEEP).await; + + ztimeout!(router_manager.close()); + ztimeout!(client_manager.close()); + + // Wait a little bit + task::sleep(SLEEP).await; + } + + async fn test_transport( + router_handler: Arc, + client_transport: TransportUnicast, + channel: Channel, + msg_size: usize, + ) { + println!( + "Sending {} messages... {:?} {}", + MSG_COUNT, channel, msg_size + ); + let cctrl = match channel.reliability { + Reliability::Reliable => CongestionControl::Block, + Reliability::BestEffort => CongestionControl::Drop, + }; + // Create the message to send + let message: NetworkMessage = Push { + wire_expr: "test".into(), + ext_qos: QoSType::new(channel.priority, cctrl, false), + ext_tstamp: None, + ext_nodeid: NodeIdType::default(), + payload: Put { + payload: vec![0u8; msg_size].into(), + timestamp: None, + encoding: Encoding::default(), + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_unknown: vec![], + } + .into(), + } + .into(); + for _ in 0..MSG_COUNT { + let _ = client_transport.schedule(message.clone()); + } + + match channel.reliability { + Reliability::Reliable => { + ztimeout!(async { + while router_handler.get_count() != MSG_COUNT { + task::sleep(SLEEP_COUNT).await; + } + }); + } + Reliability::BestEffort => { + ztimeout!(async { + while router_handler.get_count() == 0 
{ + task::sleep(SLEEP_COUNT).await; + } + }); + } + }; + + // Wait a little bit + task::sleep(SLEEP).await; + } + + async fn run_single( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: Channel, + msg_size: usize, + lowlatency_transport: bool, + ) { + println!( + "\n>>> Running test for: {:?}, {:?}, {:?}, {}", + client_endpoints, server_endpoints, channel, msg_size + ); + + #[allow(unused_variables)] // Used when stats feature is enabled + let (router_manager, router_handler, client_manager, client_transport) = + open_transport_unicast(client_endpoints, server_endpoints, lowlatency_transport).await; + + test_transport( + router_handler.clone(), + client_transport.clone(), + channel, + msg_size, + ) + .await; + + #[cfg(feature = "stats")] + { + let c_stats = client_transport.get_stats().unwrap().report(); + println!("\tClient: {:?}", c_stats); + let r_stats = router_manager + .get_transport_unicast(&client_manager.config.zid) + .await + .unwrap() + .get_stats() + .map(|s| s.report()) + .unwrap(); + println!("\tRouter: {:?}", r_stats); + } + + close_transport( + router_manager, + client_manager, + client_transport, + client_endpoints, + ) + .await; + } + + async fn run_internal( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: &[Channel], + msg_size: &[usize], + lowlatency_transport: bool, + ) { + for ch in channel.iter() { + for ms in msg_size.iter() { + run_single( + client_endpoints, + server_endpoints, + *ch, + *ms, + lowlatency_transport, + ) + .await; + } + } + } + + async fn run_with_universal_transport( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: &[Channel], + msg_size: &[usize], + ) { + run_internal(client_endpoints, server_endpoints, channel, msg_size, false).await; + } + + async fn run_with_lowlatency_transport( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: &[Channel], + msg_size: &[usize], + ) { + if client_endpoints.len() > 1 || 
server_endpoints.len() > 1 { + println!("LowLatency transport doesn't support more than one link, so this test would produce MAX_LINKS error!"); + panic!(); + } + run_internal(client_endpoints, server_endpoints, channel, msg_size, true).await; + } + + #[cfg(feature = "transport_tcp")] + #[test] + fn transport_unicast_compression_tcp_only() { + let _ = env_logger::try_init(); + task::block_on(async { + zasync_executor_init!(); + }); + + // Define the locators + let endpoints: Vec = vec![ + format!("tcp/127.0.0.1:{}", 19000).parse().unwrap(), + format!("tcp/[::1]:{}", 19001).parse().unwrap(), + ]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::Reliable, + }, + ]; + // Run + task::block_on(run_with_universal_transport( + &endpoints, + &endpoints, + &channel, + &MSG_SIZE_ALL, + )); + } + + #[cfg(feature = "transport_tcp")] + #[test] + fn transport_unicast_compression_tcp_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + task::block_on(async { + zasync_executor_init!(); + }); + + // Define the locators + let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 19100).parse().unwrap()]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::Reliable, + }, + ]; + // Run + task::block_on(run_with_lowlatency_transport( + &endpoints, + &endpoints, + &channel, + &MSG_SIZE_LOWLATENCY, + )); + } + + #[cfg(feature = "transport_udp")] + #[test] + fn transport_unicast_compression_udp_only() { + let _ = env_logger::try_init(); + task::block_on(async { + zasync_executor_init!(); + }); + + // Define the locator + let endpoints: Vec = vec![ + format!("udp/127.0.0.1:{}", 19010).parse().unwrap(), + 
format!("udp/[::1]:{}", 19011).parse().unwrap(), + ]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + task::block_on(run_with_universal_transport( + &endpoints, + &endpoints, + &channel, + &MSG_SIZE_NOFRAG, + )); + } + + #[cfg(feature = "transport_udp")] + #[test] + fn transport_unicast_compression_udp_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + task::block_on(async { + zasync_executor_init!(); + }); + + // Define the locator + let endpoints: Vec = vec![format!("udp/127.0.0.1:{}", 19110).parse().unwrap()]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + task::block_on(run_with_lowlatency_transport( + &endpoints, + &endpoints, + &channel, + &MSG_SIZE_NOFRAG, + )); + } +} diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index 11f5e46ca7..64516f6f26 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -33,8 +33,8 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const MSG_COUNT: usize = 1_000; diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index 01ee0e3751..4c7934309b 100644 
--- a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -33,10 +33,11 @@ use zenoh_protocol::{ zenoh::Put, }; use zenoh_result::ZResult; -use zenoh_transport::test_helpers::make_transport_manager_builder; use zenoh_transport::{ - DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, + TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const MSG_SIZE: usize = 8; diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 182408f75b..cd8a48565a 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -20,9 +20,9 @@ mod tests { use zenoh_protocol::core::{WhatAmI, ZenohId}; use zenoh_result::ZResult; use zenoh_transport::{ - DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, - TransportMulticast, TransportMulticastEventHandler, TransportPeer, - TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, DummyTransportPeerEventHandler, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index f361f6f684..76a63cc6e0 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -18,9 +18,10 @@ use zenoh_link::EndPoint; use zenoh_protocol::core::{WhatAmI, ZenohId}; use zenoh_result::ZResult; use zenoh_transport::{ - 
test_helpers::make_transport_manager_builder, DummyTransportPeerEventHandler, - TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, + TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index 7d8b70b4d3..07f8e43bcb 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -35,8 +35,8 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); @@ -102,8 +102,8 @@ impl TransportEventHandler for SHRouter { fn new_multicast( &self, - _transport: zenoh_transport::TransportMulticast, - ) -> ZResult> { + _transport: TransportMulticast, + ) -> ZResult> { panic!(); } } diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 59fc1467cf..500a174daf 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -23,7 +23,7 @@ mod tests { }, time::Duration, }; - use zenoh_buffers::SplitBuffer; + use zenoh_buffers::buffer::SplitBuffer; use zenoh_core::zasync_executor_init; use zenoh_link::Link; use zenoh_protocol::{ @@ -37,8 +37,8 @@ mod tests { use zenoh_result::ZResult; use 
zenoh_shm::{SharedMemoryBuf, SharedMemoryManager}; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 3de47aba03..dad4b6f775 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -32,8 +32,8 @@ mod tests { }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index e01d9d0130..ac35090cdb 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -35,10 +35,11 @@ use zenoh_protocol::{ zenoh::Put, }; use zenoh_result::ZResult; -use zenoh_transport::test_helpers::make_transport_manager_builder; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, 
}; // These keys and certificates below are purposedly generated to run TLS and mTLS tests. @@ -482,9 +483,6 @@ async fn test_transport( .into(); for _ in 0..MSG_COUNT { let _ = client_transport.schedule(message.clone()); - // print!("S-{i} "); - use std::io::Write; - std::io::stdout().flush().unwrap(); } match channel.reliability { @@ -1215,6 +1213,7 @@ fn transport_unicast_quic_only_server() { fn transport_unicast_tls_only_mutual_success() { use zenoh_link::tls::config::*; + let _ = env_logger::try_init(); task::block_on(async { zasync_executor_init!(); }); @@ -1282,24 +1281,13 @@ fn transport_unicast_tls_only_mutual_success() { )); } -// Constants replicating the alert descriptions thrown by the Rustls library. -// These alert descriptions are internal of the library and cannot be reached from these tests -// as to do a proper comparison. For the sake of simplicity we verify these constants are contained -// in the expected error messages from the tests below. -// -// See: https://docs.rs/rustls/latest/src/rustls/msgs/enums.rs.html#128 -#[cfg(all(feature = "transport_tls", target_family = "unix"))] -const RUSTLS_UNKNOWN_CA_ALERT_DESCRIPTION: &str = "UnknownCA"; -#[cfg(all(feature = "transport_tls", target_family = "unix"))] -const RUSTLS_CERTIFICATE_REQUIRED_ALERT_DESCRIPTION: &str = "CertificateRequired"; - #[cfg(all(feature = "transport_tls", target_family = "unix"))] #[test] fn transport_unicast_tls_only_mutual_no_client_certs_failure() { use std::vec; - use zenoh_link::tls::config::*; + let _ = env_logger::try_init(); task::block_on(async { zasync_executor_init!(); }); @@ -1361,9 +1349,6 @@ fn transport_unicast_tls_only_mutual_no_client_certs_failure() { )) }); assert!(result.is_err()); - let err = result.unwrap_err(); - let error_msg = panic_message::panic_message(&err); - assert!(error_msg.contains(RUSTLS_CERTIFICATE_REQUIRED_ALERT_DESCRIPTION)); } #[cfg(all(feature = "transport_tls", target_family = "unix"))] @@ -1371,6 +1356,7 @@ fn 
transport_unicast_tls_only_mutual_no_client_certs_failure() { fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { use zenoh_link::tls::config::*; + let _ = env_logger::try_init(); task::block_on(async { zasync_executor_init!(); }); @@ -1443,9 +1429,6 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { )) }); assert!(result.is_err()); - let err = result.unwrap_err(); - let error_msg = panic_message::panic_message(&err); - assert!(error_msg.contains(RUSTLS_UNKNOWN_CA_ALERT_DESCRIPTION)); } #[test] diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 053bb7e285..0177c2d454 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -39,6 +39,7 @@ shared-memory = [ ] stats = ["zenoh-transport/stats", "zenoh-protocol/stats"] transport_multilink = ["zenoh-transport/transport_multilink"] +transport_compression = ["zenoh-transport/transport_compression"] transport_quic = ["zenoh-transport/transport_quic"] transport_serial = ["zenoh-transport/transport_serial"] transport_unixpipe = ["zenoh-transport/transport_unixpipe"] @@ -52,6 +53,7 @@ default = [ "auth_pubkey", "auth_usrpwd", "transport_multilink", + "transport_compression", "transport_quic", "transport_tcp", "transport_tls", diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 56772797ce..a8aad9c809 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -122,14 +122,14 @@ impl TransportEventHandler for Handler { fn new_unicast( &self, peer: zenoh_transport::TransportPeer, - _transport: zenoh_transport::TransportUnicast, + _transport: zenoh_transport::unicast::TransportUnicast, ) -> ZResult> { self.new_peer(peer) } fn new_multicast( &self, - _transport: zenoh_transport::TransportMulticast, + _transport: zenoh_transport::multicast::TransportMulticast, ) -> ZResult> { Ok(Arc::new(self.clone())) } diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index b3e0c4b87c..9f6418974e 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -26,7 +26,7 @@ use 
zenoh_protocol::{ network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; -use zenoh_transport::Primitives; +use zenoh_transport::primitives::Primitives; use crate::{prelude::Selector, Session, Undeclarable}; diff --git a/zenoh/src/net/routing/face.rs b/zenoh/src/net/routing/face.rs index cb01f3ea6e..0d2ee926d1 100644 --- a/zenoh/src/net/routing/face.rs +++ b/zenoh/src/net/routing/face.rs @@ -25,7 +25,7 @@ use zenoh_protocol::{ }; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; -use zenoh_transport::{Primitives, TransportMulticast}; +use zenoh_transport::{multicast::TransportMulticast, primitives::Primitives}; pub struct FaceState { pub(super) id: usize, diff --git a/zenoh/src/net/routing/network.rs b/zenoh/src/net/routing/network.rs index 3af1e0a87c..0fb9f36120 100644 --- a/zenoh/src/net/routing/network.rs +++ b/zenoh/src/net/routing/network.rs @@ -27,7 +27,7 @@ use zenoh_protocol::common::ZExtBody; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::network::oam::id::OAM_LINKSTATE; use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; -use zenoh_transport::TransportUnicast; +use zenoh_transport::unicast::TransportUnicast; #[derive(Clone)] struct Details { diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 444730e24d..1ad5d93609 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -37,8 +37,10 @@ use zenoh_protocol::network::{Mapping, NetworkBody, NetworkMessage}; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; use zenoh_transport::{ - DeMux, DummyPrimitives, McastMux, Mux, Primitives, TransportMulticast, TransportPeer, - TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, + primitives::{DeMux, DummyPrimitives, McastMux, Mux, Primitives}, + unicast::TransportUnicast, + TransportPeer, TransportPeerEventHandler, }; // use zenoh_collections::Timer; use 
zenoh_core::zconfigurable; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 0eb099a098..08b00c5047 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -26,7 +26,7 @@ use std::convert::TryFrom; use std::convert::TryInto; use std::sync::Arc; use std::sync::Mutex; -use zenoh_buffers::SplitBuffer; +use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::ValidatedMap; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, ExprId, KnownEncoding, WireExpr, ZenohId, EMPTY_EXPR_ID}, @@ -38,7 +38,7 @@ use zenoh_protocol::{ zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; -use zenoh_transport::{Primitives, TransportUnicast}; +use zenoh_transport::{primitives::Primitives, unicast::TransportUnicast}; pub struct AdminContext { runtime: Runtime, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 92d369e998..f9486ea59c 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -42,8 +42,9 @@ use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_result::{bail, ZResult}; use zenoh_sync::get_mut_unchecked; use zenoh_transport::{ - DeMux, TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, primitives::DeMux, unicast::TransportUnicast, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, }; pub struct RuntimeState { diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 5dadf8d8a9..933a2e46a4 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -27,7 +27,7 @@ use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; use zenoh_protocol::network::declare::Mode; use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; use 
zenoh_protocol::zenoh::{PushBody, Put}; -use zenoh_transport::{DummyPrimitives, Primitives}; +use zenoh_transport::primitives::{DummyPrimitives, Primitives}; #[test] fn base_test() { diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 2f2e7650a0..36a841d1ef 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -25,7 +25,11 @@ pub use common::*; pub(crate) mod common { pub use crate::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; - pub use zenoh_buffers::{reader::HasReader, writer::HasWriter, SplitBuffer}; + pub use zenoh_buffers::{ + buffer::{Buffer, SplitBuffer}, + reader::HasReader, + writer::HasWriter, + }; pub use zenoh_core::Resolve; pub(crate) type Id = usize; diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index ac1d6bf55a..be439b6f2d 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -13,12 +13,11 @@ // //! Publishing primitives. - #[zenoh_macros::unstable] use crate::handlers::Callback; #[zenoh_macros::unstable] use crate::handlers::DefaultHandler; -use crate::net::transport::Primitives; +use crate::net::transport::primitives::Primitives; use crate::prelude::*; use crate::sample::DataInfo; use crate::Encoding; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 6156b0aa78..4881de6ec1 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -32,7 +32,7 @@ use zenoh_protocol::zenoh::ext::ValueType; use zenoh_protocol::zenoh::reply::ext::ConsolidationType; use zenoh_protocol::zenoh::{self, ResponseBody}; use zenoh_result::ZResult; -use zenoh_transport::Primitives; +use zenoh_transport::primitives::Primitives; pub(crate) struct QueryInner { /// The key expression of this Query. 
diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index e8314c9cc6..8c566a6640 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -22,7 +22,7 @@ use crate::key_expr::KeyExprInner; use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::routing::face::Face; use crate::net::runtime::Runtime; -use crate::net::transport::Primitives; +use crate::net::transport::primitives::Primitives; use crate::prelude::Locality; use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index f719fce3eb..96cca533df 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -32,9 +32,18 @@ fn zenoh_liveliness() { task::block_on(async { zasync_executor_init!(); - let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); - - let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let mut c1 = config::peer(); + c1.listen + .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) + .unwrap(); + c1.scouting.multicast.set_enabled(Some(false)).unwrap(); + let session1 = ztimeout!(zenoh::open(c1).res_async()).unwrap(); + let mut c2 = config::peer(); + c2.connect + .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) + .unwrap(); + c2.scouting.multicast.set_enabled(Some(false)).unwrap(); + let session2 = ztimeout!(zenoh::open(c2).res_async()).unwrap(); let replies = ztimeout!(session2 .liveliness() diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index ad8ea1d05a..7219bf5ff2 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -319,7 +319,7 @@ fn gossip() -> Result<()> { async_std::task::block_on(async { zasync_executor_init!(); - let locator = String::from("tcp/127.0.0.1:17448"); + let locator = String::from("tcp/127.0.0.1:17449"); let ke = String::from("testKeyExprGossip"); let msg_size = 8; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index c94cb36510..c2cec7c627 
100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -126,6 +126,9 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re } }); + // Wait for the messages to arrive + task::sleep(SLEEP).await; + println!("[PS][03b] Unsubscribing on peer01 session"); ztimeout!(sub.undeclare().res_async()).unwrap(); From e22a312daec911e7df1881dc66b352b36328a579 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 30 Nov 2023 15:36:16 +0100 Subject: [PATCH 10/29] Add comments to DEFAULT_CONFIG.json5 --- DEFAULT_CONFIG.json5 | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 4a0179fb71..bde1b8fd03 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -133,17 +133,25 @@ /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to /// enable 'lowlatency' you need to explicitly disable 'qos'. lowlatency: false, + /// Enables QoS on unicast communications. qos: { enabled: true, }, + /// Enables compression on unicast communications. + /// Compression capabilities are negotiated during session establishment. + /// If both Zenoh nodes support compression, then compression is activated. compression: { enabled: false, }, - }, + }, multicast: { + /// Enables QoS on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. qos: { - enabled: true, + enabled: false, }, + /// Enables compression on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. 
compression: { enabled: false, }, From ecd11ad77eb0c974e76aa944fc61328f3c3ce27f Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Thu, 30 Nov 2023 17:03:24 +0100 Subject: [PATCH 11/29] Add homebrew lib search path for Apple Silicon systems (#606) --- commons/zenoh-util/src/std_only/lib_loader.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-util/src/std_only/lib_loader.rs b/commons/zenoh-util/src/std_only/lib_loader.rs index 2fadfad7f2..b1aa77b06b 100644 --- a/commons/zenoh-util/src/std_only/lib_loader.rs +++ b/commons/zenoh-util/src/std_only/lib_loader.rs @@ -26,7 +26,7 @@ zconfigurable! { /// The libraries suffix for the current platform (`".dll"` or `".so"` or `".dylib"`...) pub static ref LIB_SUFFIX: String = DLL_SUFFIX.to_string(); /// The default list of paths where to search for libraries to load - pub static ref LIB_DEFAULT_SEARCH_PATHS: String = "/usr/local/lib:/usr/lib:~/.zenoh/lib:.".to_string(); + pub static ref LIB_DEFAULT_SEARCH_PATHS: String = "/usr/local/lib:/usr/lib:/opt/homebrew/lib:~/.zenoh/lib:.".to_string(); } /// LibLoader allows search for librairies and to load them. From 7276d0c839c13f8eff5e3e5309db878e32009896 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 1 Dec 2023 00:54:12 +0800 Subject: [PATCH 12/29] Set example and plugin-example crate to non-publish to reduce release build time (#602) --- examples/Cargo.toml | 1 + examples/README.md | 2 +- plugins/example-plugin/Cargo.toml | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 116e3dff8d..b0f6507d20 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -24,6 +24,7 @@ categories = { workspace = true } description = "Internal crate for zenoh." 
readme = "README.md" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +publish = false [features] shared-memory = ["zenoh/shared-memory"] diff --git a/examples/README.md b/examples/README.md index fd25e4322d..8e5b3085ba 100644 --- a/examples/README.md +++ b/examples/README.md @@ -253,4 +253,4 @@ or ```bash z_sub_liveliness -k group1/** - ``` \ No newline at end of file + ``` diff --git a/plugins/example-plugin/Cargo.toml b/plugins/example-plugin/Cargo.toml index 82cdeba6e5..0a315b7a35 100644 --- a/plugins/example-plugin/Cargo.toml +++ b/plugins/example-plugin/Cargo.toml @@ -17,6 +17,7 @@ name = "zenoh-plugin-example" version = { workspace = true } authors = { workspace = true } edition = { workspace = true } +publish = false [lib] # When auto-detecting the "example" plugin, `zenohd` will look for a dynamic library named "zenoh_plugin_example" From c0ebfff664dd925e9e16d148239eba1ade0ade06 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 1 Dec 2023 10:28:53 +0100 Subject: [PATCH 13/29] Remove additional executor spawning (#605) --- zenoh/src/net/runtime/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index f9486ea59c..5599d1ed1e 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -84,8 +84,6 @@ impl Runtime { pub(crate) async fn init(config: Config) -> ZResult { log::debug!("Zenoh Rust API {}", GIT_VERSION); - // Make sure to have have enough threads spawned in the async futures executor - zasync_executor_init!(); let zid = *config.id(); From 5ee57d9503d0867b4511b9ce57f4a3d8aa3a1ce8 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 5 Dec 2023 12:07:13 +0100 Subject: [PATCH 14/29] Fix bug regarding matching status and publisher allowed destination (#608) --- zenoh/src/session.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 
8c566a6640..23369e5790 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -1502,10 +1502,23 @@ impl Session { &mut RoutingExpr::new(&tables.root_res, key_expr.as_str()), 0, ); + drop(tables); let matching = match destination { Locality::Any => !route.is_empty(), - Locality::Remote => route.values().any(|dir| !dir.0.is_local()), - Locality::SessionLocal => route.values().any(|dir| dir.0.is_local()), + Locality::Remote => { + if let Some(face) = zread!(self.state).primitives.as_ref() { + route.values().any(|dir| !Arc::ptr_eq(&dir.0, &face.state)) + } else { + !route.is_empty() + } + } + Locality::SessionLocal => { + if let Some(face) = zread!(self.state).primitives.as_ref() { + route.values().any(|dir| Arc::ptr_eq(&dir.0, &face.state)) + } else { + false + } + } }; Ok(MatchingStatus { matching }) } From f84138ca0c006083894cf786dde74be096da137c Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Wed, 6 Dec 2023 09:45:11 +0100 Subject: [PATCH 15/29] Fix type of `__path__` in `Config` (#609) --- plugins/zenoh-plugin-rest/config_schema.json5 | 8 +- plugins/zenoh-plugin-rest/src/config.rs | 142 +++++++++++++++++- 2 files changed, 147 insertions(+), 3 deletions(-) diff --git a/plugins/zenoh-plugin-rest/config_schema.json5 b/plugins/zenoh-plugin-rest/config_schema.json5 index 743fb0bdac..bd13828c10 100644 --- a/plugins/zenoh-plugin-rest/config_schema.json5 +++ b/plugins/zenoh-plugin-rest/config_schema.json5 @@ -13,10 +13,14 @@ ] }, "__path__": { + "default": null, "type": [ - "string", + "array", "null" - ] + ], + "items": { + "type": "string" + } }, "__required__": { "type": [ diff --git a/plugins/zenoh-plugin-rest/src/config.rs b/plugins/zenoh-plugin-rest/src/config.rs index e8badbc8c9..56b9960467 100644 --- a/plugins/zenoh-plugin-rest/src/config.rs +++ b/plugins/zenoh-plugin-rest/src/config.rs @@ -23,7 +23,8 @@ const DEFAULT_HTTP_INTERFACE: &str = "[::]"; pub struct Config { #[serde(deserialize_with = "deserialize_http_port")] pub http_port: String, - 
__path__: Option, + #[serde(default, deserialize_with = "deserialize_path")] + __path__: Option>, __required__: Option, __config__: Option, } @@ -76,3 +77,142 @@ impl<'de> Visitor<'de> for HttpPortVisitor { Ok(format!("{interface}:{port}")) } } + +fn deserialize_path<'de, D>(deserializer: D) -> Result>, D::Error> +where + D: Deserializer<'de>, +{ + deserializer.deserialize_option(OptPathVisitor) +} + +struct OptPathVisitor; + +impl<'de> serde::de::Visitor<'de> for OptPathVisitor { + type Value = Option>; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "none or a string or an array of strings") + } + + fn visit_none(self) -> Result + where + E: de::Error, + { + Ok(None) + } + + fn visit_some(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_any(PathVisitor).map(Some) + } +} + +struct PathVisitor; + +impl<'de> serde::de::Visitor<'de> for PathVisitor { + type Value = Vec; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "a string or an array of strings") + } + + fn visit_str(self, v: &str) -> Result + where + E: de::Error, + { + Ok(vec![v.into()]) + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: de::SeqAccess<'de>, + { + let mut v = seq.size_hint().map_or_else(Vec::new, Vec::with_capacity); + + while let Some(s) = seq.next_element()? { + v.push(s); + } + Ok(v) + } +} + +#[cfg(test)] +mod tests { + use super::{Config, DEFAULT_HTTP_INTERFACE}; + + #[test] + fn test_path_field() { + // See: https://github.com/eclipse-zenoh/zenoh-plugin-webserver/issues/19 + let config = + serde_json::from_str::(r#"{"__path__": "/example/path", "http_port": 8080}"#); + + assert!(config.is_ok()); + let Config { + http_port, + __required__, + __path__, + .. 
+ } = config.unwrap(); + + assert_eq!(http_port, format!("{DEFAULT_HTTP_INTERFACE}:8080")); + assert_eq!(__path__, Some(vec![String::from("/example/path")])); + assert_eq!(__required__, None); + } + + #[test] + fn test_required_field() { + // See: https://github.com/eclipse-zenoh/zenoh-plugin-webserver/issues/19 + let config = serde_json::from_str::(r#"{"__required__": true, "http_port": 8080}"#); + assert!(config.is_ok()); + let Config { + http_port, + __required__, + __path__, + .. + } = config.unwrap(); + + assert_eq!(http_port, format!("{DEFAULT_HTTP_INTERFACE}:8080")); + assert_eq!(__path__, None); + assert_eq!(__required__, Some(true)); + } + + #[test] + fn test_path_field_and_required_field() { + // See: https://github.com/eclipse-zenoh/zenoh-plugin-webserver/issues/19 + let config = serde_json::from_str::( + r#"{"__path__": "/example/path", "__required__": true, "http_port": 8080}"#, + ); + + assert!(config.is_ok()); + let Config { + http_port, + __required__, + __path__, + .. + } = config.unwrap(); + + assert_eq!(http_port, format!("{DEFAULT_HTTP_INTERFACE}:8080")); + assert_eq!(__path__, Some(vec![String::from("/example/path")])); + assert_eq!(__required__, Some(true)); + } + + #[test] + fn test_no_path_field_and_no_required_field() { + // See: https://github.com/eclipse-zenoh/zenoh-plugin-webserver/issues/19 + let config = serde_json::from_str::(r#"{"http_port": 8080}"#); + + assert!(config.is_ok()); + let Config { + http_port, + __required__, + __path__, + .. 
+ } = config.unwrap(); + + assert_eq!(http_port, format!("{DEFAULT_HTTP_INTERFACE}:8080")); + assert_eq!(__path__, None); + assert_eq!(__required__, None); + } +} From 9f7a37eefda7cc96156001fa83d7b740db43811a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 6 Dec 2023 13:08:49 +0100 Subject: [PATCH 16/29] Fix stats feature (#614) --- .github/workflows/ci.yml | 4 ++-- io/zenoh-transport/src/common/batch.rs | 8 ++++++++ io/zenoh-transport/src/multicast/link.rs | 4 ++-- io/zenoh-transport/src/unicast/universal/link.rs | 2 +- zenoh/src/net/routing/pubsub.rs | 2 +- zenoh/src/net/routing/queries.rs | 4 ++-- 6 files changed, 16 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 60fb69a576..42f32fbef0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,11 +54,11 @@ jobs: env: CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse - - name: Clippy unstable + - name: Clippy unstable and stats uses: actions-rs/cargo@v1 with: command: clippy - args: --all-targets --features unstable -- -D warnings + args: --all-targets --features unstable --features stats -- -D warnings env: CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index cd029a9435..5305d0a50c 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -182,6 +182,10 @@ impl WBatch { pub fn clear(&mut self) { self.buffer.clear(); self.codec.clear(); + #[cfg(feature = "stats")] + { + self.stats.clear(); + } if let Some(h) = self.header.get() { let mut writer = self.buffer.writer(); let _ = writer.write_u8(h.get()); @@ -314,6 +318,10 @@ impl RBatch { } } + pub fn len(&self) -> usize { + self.buffer.len() + } + #[inline(always)] pub const fn is_empty(&self) -> bool { self.buffer.is_empty() diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 937216dd08..fbb917c281 100644 --- 
a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -477,7 +477,7 @@ async fn tx_task( #[cfg(feature = "stats")] { stats.inc_tx_t_msgs(batch.stats.t_msgs); - stats.inc_tx_bytes(bytes.len()); + stats.inc_tx_bytes(batch.len() as usize); } // Reinsert the batch into the queue pipeline.refill(batch, priority); @@ -595,7 +595,7 @@ async fn rx_task( match action { Action::Read((batch, locator)) => { #[cfg(feature = "stats")] - transport.stats.inc_rx_bytes(zslice.len()); + transport.stats.inc_rx_bytes(batch.len()); // Deserialize all the messages from the current ZBuf transport.read_messages( diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 3a7eafbc52..74db7f751e 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -184,7 +184,7 @@ async fn tx_task( #[cfg(feature = "stats")] { stats.inc_tx_t_msgs(batch.stats.t_msgs); - stats.inc_tx_bytes(bytes.len()); + stats.inc_tx_bytes(batch.len() as usize); } // Reinsert the batch into the queue diff --git a/zenoh/src/net/routing/pubsub.rs b/zenoh/src/net/routing/pubsub.rs index 75a49f1137..e9dc80b024 100644 --- a/zenoh/src/net/routing/pubsub.rs +++ b/zenoh/src/net/routing/pubsub.rs @@ -1734,7 +1734,7 @@ macro_rules! inc_stats { ) => { paste::paste! { if let Some(stats) = $face.stats.as_ref() { - use zenoh_buffers::SplitBuffer; + use zenoh_buffers::buffer::Buffer; match &$body { PushBody::Put(p) => { stats.[<$txrx _z_put_msgs>].[](1); diff --git a/zenoh/src/net/routing/queries.rs b/zenoh/src/net/routing/queries.rs index a9d5ffd698..06b81a998b 100644 --- a/zenoh/src/net/routing/queries.rs +++ b/zenoh/src/net/routing/queries.rs @@ -2004,7 +2004,7 @@ macro_rules! inc_req_stats { ) => { paste::paste! 
{ if let Some(stats) = $face.stats.as_ref() { - use zenoh_buffers::SplitBuffer; + use zenoh_buffers::buffer::Buffer; match &$body { RequestBody::Put(p) => { stats.[<$txrx _z_put_msgs>].[](1); @@ -2036,7 +2036,7 @@ macro_rules! inc_res_stats { ) => { paste::paste! { if let Some(stats) = $face.stats.as_ref() { - use zenoh_buffers::SplitBuffer; + use zenoh_buffers::buffer::Buffer; match &$body { ResponseBody::Put(p) => { stats.[<$txrx _z_put_msgs>].[](1); From db235af1b45d7668ddbde4d1700e29321f5ddf9b Mon Sep 17 00:00:00 2001 From: Pierre Avital Date: Thu, 7 Dec 2023 15:19:48 +0100 Subject: [PATCH 17/29] chore: upgrade clap to v4, refactor example arguments (#616) Co-authored-by: Pierre Avital --- Cargo.lock | 220 +++++++++--------- Cargo.toml | 6 +- examples/Cargo.toml | 2 +- examples/examples/z_delete.rs | 60 ++--- examples/examples/z_forward.rs | 67 ++---- examples/examples/z_get.rs | 112 ++++----- examples/examples/z_get_liveliness.rs | 68 ++---- examples/examples/z_info.rs | 49 +--- examples/examples/z_liveliness.rs | 61 ++--- examples/examples/z_ping.rs | 89 ++----- examples/examples/z_pong.rs | 61 +---- examples/examples/z_pub.rs | 66 ++---- examples/examples/z_pub_shm.rs | 66 ++---- examples/examples/z_pub_shm_thr.rs | 75 ++---- examples/examples/z_pub_thr.rs | 103 +++----- examples/examples/z_pull.rs | 58 ++--- examples/examples/z_put.rs | 66 ++---- examples/examples/z_put_float.rs | 71 ++---- examples/examples/z_queryable.rs | 76 ++---- examples/examples/z_storage.rs | 65 ++---- examples/examples/z_sub.rs | 61 ++--- examples/examples/z_sub_liveliness.rs | 61 ++--- examples/examples/z_sub_thr.rs | 80 ++----- examples/src/lib.rs | 75 ++++++ .../zenoh-plugin-rest/examples/z_serve_sse.rs | 41 ++-- zenoh-ext/Cargo.toml | 2 +- zenoh-ext/examples/z_pub_cache.rs | 55 ++--- zenoh-ext/examples/z_query_sub.rs | 43 ++-- zenoh-ext/examples/z_view_size.rs | 52 ++--- zenoh/src/selector.rs | 7 + zenohd/Cargo.toml | 2 +- zenohd/src/main.rs | 215 +++++++++-------- 32 files 
changed, 748 insertions(+), 1387 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 01378d3015..be46441b2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -164,9 +164,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.5.0" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" dependencies = [ "anstyle", "anstyle-parse", @@ -197,17 +197,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "anstyle-wincon" -version = "2.1.0" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -355,7 +355,7 @@ dependencies = [ "futures-lite", "rustix 0.37.25", "signal-hook", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -454,17 +454,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.1.0" @@ -669,7 +658,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -720,24 +709,9 
@@ dependencies = [ [[package]] name = "clap" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" -dependencies = [ - "atty", - "bitflags 1.3.2", - "clap_lex 0.2.4", - "indexmap 1.9.3", - "strsim", - "termcolor", - "textwrap", -] - -[[package]] -name = "clap" -version = "4.4.3" +version = "4.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ed82781cea27b43c9b106a979fe450a13a31aab0500595fb3fc06616de08e6" +checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" dependencies = [ "clap_builder", "clap_derive", @@ -745,21 +719,21 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.2" +version = "4.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" +checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" dependencies = [ "anstream", "anstyle", - "clap_lex 0.5.1", + "clap_lex", "strsim", ] [[package]] name = "clap_derive" -version = "4.4.2" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", "proc-macro2", @@ -769,18 +743,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - -[[package]] -name = "clap_lex" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = 
"702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "cobs" @@ -919,7 +884,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.3", + "clap", "criterion-plot", "is-terminal", "itertools", @@ -1114,7 +1079,7 @@ dependencies = [ "libc", "option-ext", "redox_users", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1191,7 +1156,7 @@ checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" dependencies = [ "errno-dragonfly", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1548,15 +1513,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.3.2" @@ -1614,7 +1570,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1798,9 +1754,9 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1824,9 +1780,9 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi", "rustix 0.38.13", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1883,7 +1839,7 @@ dependencies = [ "anyhow", "base64 0.21.4", "bytecount", - "clap 4.4.3", + "clap", 
"fancy-regex", "fraction", "getrandom 0.2.10", @@ -1952,7 +1908,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d580318f95776505201b28cf98eb1fa5e4be3b689633ba6a3e6cd880ff22d8cb" dependencies = [ "cfg-if 1.0.0", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2083,7 +2039,7 @@ dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2267,7 +2223,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi", "libc", ] @@ -2319,12 +2275,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "os_str_bytes" -version = "6.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac" - [[package]] name = "panic-message" version = "0.3.0" @@ -2357,7 +2307,7 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -2637,7 +2587,7 @@ dependencies = [ "libc", "log", "pin-project-lite 0.2.13", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2717,7 +2667,7 @@ dependencies = [ "libc", "socket2 0.5.4", "tracing", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3020,7 +2970,7 @@ dependencies = [ "io-lifetimes", "libc", "linux-raw-sys 0.3.8", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3033,7 +2983,7 @@ dependencies = [ "errno 0.3.3", "libc", "linux-raw-sys 0.4.7", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3100,7 +3050,7 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3453,7 +3403,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3668,12 +3618,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "textwrap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" - [[package]] name = "thiserror" version = "1.0.48" @@ -3830,7 +3774,7 @@ dependencies = [ "pin-project-lite 0.2.13", "socket2 0.5.4", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4344,7 +4288,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -4353,7 +4297,16 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", ] [[package]] @@ -4362,21 +4315,42 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm", + "windows_aarch64_gnullvm 0.48.5", "windows_aarch64_msvc 0.48.5", "windows_i686_gnu 0.48.5", "windows_i686_msvc 0.48.5", "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm", + "windows_x86_64_gnullvm 0.48.5", "windows_x86_64_msvc 0.48.5", ] +[[package]] +name = "windows-targets" +version = "0.52.0" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.34.0" @@ -4389,6 +4363,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.34.0" @@ -4401,6 +4381,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.34.0" @@ -4413,6 +4399,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" 
+version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.34.0" @@ -4425,12 +4417,24 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.34.0" @@ -4443,6 +4447,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winreg" version = "0.50.0" @@ -4450,7 +4460,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if 1.0.0", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4595,7 +4605,7 @@ name = "zenoh-examples" version = "0.11.0-dev" dependencies = [ "async-std", - "clap 3.2.25", + "clap", "env_logger", "flume", "futures", @@ -4614,7 +4624,7 @@ version = "0.11.0-dev" 
dependencies = [ "async-std", "bincode", - "clap 3.2.25", + "clap", "env_logger", "flume", "futures", @@ -4857,7 +4867,7 @@ name = "zenoh-plugin-example" version = "0.11.0-dev" dependencies = [ "async-std", - "clap 3.2.25", + "clap", "env_logger", "futures", "log", @@ -4876,7 +4886,7 @@ dependencies = [ "anyhow", "async-std", "base64 0.21.4", - "clap 3.2.25", + "clap", "env_logger", "flume", "futures", @@ -4903,7 +4913,7 @@ dependencies = [ "async-global-executor", "async-std", "async-trait", - "clap 3.2.25", + "clap", "crc", "derive-new", "env_logger", @@ -5029,7 +5039,7 @@ version = "0.11.0-dev" dependencies = [ "async-std", "async-trait", - "clap 3.2.25", + "clap", "const_format", "flume", "futures", @@ -5068,7 +5078,7 @@ name = "zenohd" version = "0.11.0-dev" dependencies = [ "async-std", - "clap 3.2.25", + "clap", "env_logger", "futures", "git-version", diff --git a/Cargo.toml b/Cargo.toml index 55797ab0d3..4cba5d4dd2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,7 +79,7 @@ async-std = { version = "=1.12.0", default-features = false } # Default features async-trait = "0.1.60" base64 = "0.21.4" bincode = "1.3.3" -clap = "3.2.23" +clap = { version = "4.4.11", features = ["derive"] } const_format = "0.2.30" crc = "3.0.1" criterion = "0.5" @@ -128,7 +128,7 @@ rustls = { version = "0.21.5", features = ["dangerous_configuration"] } rustls-native-certs = "0.6.2" rustls-pemfile = "1.0.2" schemars = "0.8.12" -secrecy = {version = "0.8.0", features = ["serde", "alloc"]} +secrecy = { version = "0.8.0", features = ["serde", "alloc"] } serde = { version = "1.0.154", default-features = false, features = [ "derive", ] } # Default features are disabled due to usage in no_std crates @@ -137,7 +137,7 @@ serde_yaml = "0.9.19" sha3 = "0.10.6" shared_memory = "0.12.4" shellexpand = "3.0.0" -socket2 = { version ="0.5.1", features = [ "all" ] } +socket2 = { version = "0.5.1", features = ["all"] } stop-token = "0.7.0" syn = "2.0" tide = "0.16.0" diff --git a/examples/Cargo.toml 
b/examples/Cargo.toml index b0f6507d20..0330972caa 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -42,7 +42,7 @@ transport_unixpipe = ["zenoh/transport_unixpipe"] [dependencies] async-std = { workspace = true, features = ["attributes"] } -clap = { workspace = true } +clap = { workspace = true, features = ["derive"] } env_logger = { workspace = true } flume = { workspace = true } futures = { workspace = true } diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index 183fcfbafb..f80e199d6d 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -31,51 +32,16 @@ async fn main() { session.close().res().await.unwrap(); } -fn parse_args() -> (Config, String) { - let args = App::new("zenoh delete example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg( - Arg::from_usage( - "-k, --key=[KEYEXPR] 'The key expression matching resources to delete.'", - ) - .default_value("demo/example/zenoh-rs-put"), - ) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = args.value_of("key").unwrap().to_string(); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/zenoh-rs-put")] + /// The key expression to write to. 
+ key: KeyExpr<'static>, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr) +fn parse_args() -> (Config, KeyExpr<'static>) { + let args = Args::parse(); + (args.common.into(), args.key) } diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index c5280f3c5b..5dd786843e 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; #[async_std::main] @@ -34,55 +35,19 @@ async fn main() { subscriber.forward(publisher).await.unwrap(); } -fn parse_args() -> (Config, String, String) { - let args = App::new("zenoh sub example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The key expression to subscribe to.'") - .default_value("demo/example/**"), - ) - .arg( - Arg::from_usage("-f, --forward=[KEYEXPR] 'The key expression to forward to.'") - .default_value("demo/forward"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = args.value_of("key").unwrap().to_string(); - - let forward = args.value_of("forward").unwrap().to_string(); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/**")] + /// The key expression to subscribe to. + key: KeyExpr<'static>, + #[arg(short, long, default_value = "demo/forward")] + /// The key expression to forward to. 
+ forward: KeyExpr<'static>, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr, forward) +fn parse_args() -> (Config, KeyExpr<'static>, KeyExpr<'static>) { + let args = Args::parse(); + (args.common.into(), args.key, args.forward) } diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 70bec5adbe..57c36c2e62 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::Parser; use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -49,72 +50,49 @@ async fn main() { } } -fn parse_args() -> (Config, String, Option, QueryTarget, Duration) { - let args = App::new("zenoh query example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-s, --selector=[SELECTOR] 'The selection of resources to query'") - .default_value("demo/example/**"), - ) - .arg(Arg::from_usage( - "-v, --value=[VALUE] 'An optional value to put in the query.'", - )) - .arg( - Arg::from_usage("-t, --target=[TARGET] 'The target queryables of the query'") - .possible_values(["BEST_MATCHING", "ALL", "ALL_COMPLETE"]) - .default_value("BEST_MATCHING"), - ) - .arg( - Arg::from_usage("-o, --timeout=[TIME] 'The query timeout in milliseconds'") - .default_value("10000"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let selector = args.value_of("selector").unwrap().to_string(); - - let value = args.value_of("value").map(ToOwned::to_owned); - - let target = match args.value_of("target") { - Some("BEST_MATCHING") => QueryTarget::BestMatching, - Some("ALL") => QueryTarget::All, - Some("ALL_COMPLETE") => QueryTarget::AllComplete, - _ => QueryTarget::default(), - }; +#[derive(clap::ValueEnum, Clone, Copy, Debug)] +#[value(rename_all = "SCREAMING_SNAKE_CASE")] +enum Qt { + BestMatching, + All, + AllComplete, +} - let timeout = 
Duration::from_millis(args.value_of("timeout").unwrap().parse::().unwrap()); +#[derive(Parser, Clone, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/**")] + /// The selection of resources to query + selector: Selector<'static>, + #[arg(short, long)] + /// An optional value to put in the query. + value: Option, + #[arg(short, long, default_value = "BEST_MATCHING")] + /// The target queryables of the query. + target: Qt, + #[arg(short = 'o', long, default_value = "10000")] + /// The query timeout in milliseconds. + timeout: u64, + #[command(flatten)] + common: CommonArgs, +} - (config, selector, value, target, timeout) +fn parse_args() -> ( + Config, + Selector<'static>, + Option, + QueryTarget, + Duration, +) { + let args = Args::parse(); + ( + args.common.into(), + args.selector, + args.value, + match args.target { + Qt::BestMatching => QueryTarget::BestMatching, + Qt::All => QueryTarget::All, + Qt::AllComplete => QueryTarget::AllComplete, + }, + Duration::from_millis(args.timeout), + ) } diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 03e3566df1..e0aaf8cd23 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::Parser; use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -43,55 +44,20 @@ async fn main() { } } -fn parse_args() -> (Config, String, Duration) { - let args = App::new("zenoh liveliness query example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-k, --key_expr=[KEYEXPR] 'The key expression matching liveliness tokens to query.'") - .default_value("group1/**"), - ) - .arg( - Arg::from_usage("-o, --timeout=[TIME] 'The query timeout in milliseconds'") - .default_value("10000"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = args.value_of("key_expr").unwrap().to_string(); - - let timeout = Duration::from_millis(args.value_of("timeout").unwrap().parse::().unwrap()); +#[derive(Parser, Clone, Debug)] +struct Args { + #[arg(short, long, default_value = "group1/**")] + /// The key expression matching liveliness tokens to query. + key_expr: KeyExpr<'static>, + #[arg(short = 'o', long, default_value = "10000")] + /// The query timeout in milliseconds. 
+ timeout: u64, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr, timeout) +fn parse_args() -> (Config, KeyExpr<'static>, Duration) { + let args = Args::parse(); + let timeout = Duration::from_millis(args.timeout); + (args.common.into(), args.key_expr, timeout) } diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index ccf51b211a..ce752b2e7e 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -37,43 +38,13 @@ async fn main() { ); } -fn parse_args() -> Config { - let args = App::new("zenoh info example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[command(flatten)] + common: CommonArgs, +} - config +fn parse_args() -> Config { + let args = Args::parse(); + args.common.into() } diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index ae691f286c..41890f7d77 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -12,12 +12,12 @@ // ZettaScale Zenoh Team, // use async_std::task::sleep; -use clap::{App, Arg}; +use clap::Parser; use futures::prelude::*; -use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -58,51 +58,16 @@ async fn main() { } } -fn parse_args() -> (Config, KeyExpr<'static>) { - let args = App::new("zenoh liveliness example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 
'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The key expression of the liveliness token.'") - .default_value("group1/zenoh-rs"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = KeyExpr::try_from(args.value_of("key").unwrap()) - .unwrap() - .into_owned(); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "group1/zenoh-rs")] + /// The key expression of the liveliness token. 
+ key: KeyExpr<'static>, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr) +fn parse_args() -> (Config, KeyExpr<'static>) { + let args = Args::parse(); + (args.common.into(), args.key) } diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 173d270a83..fe5ed4d46b 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -11,13 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; -#[cfg(not(feature = "shared-memory"))] -use std::process::exit; +use clap::Parser; use std::time::{Duration, Instant}; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh_examples::CommonArgs; fn main() { // initiate logging @@ -77,68 +76,26 @@ fn main() { } } -fn parse_args() -> (Config, Duration, usize, usize) { - let args = App::new("zenoh roundtrip ping example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-n, --samples=[N] 'The number of round-trips to measure'") - .default_value("100"), - ) - .arg( - Arg::from_usage("-w, --warmup=[N] 'The number of seconds to warm up'") - .default_value("1"), - ) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .arg(Arg::from_usage("--enable-shm 'Enable SHM transport.'")) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - " 'Sets the size of the payload to publish'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - if args.is_present("enable-shm") { - #[cfg(feature = "shared-memory")] - config.transport.shared_memory.set_enabled(true).unwrap(); - #[cfg(not(feature = "shared-memory"))] - { - println!("enable-shm argument: SHM cannot be enabled, because Zenoh is compiled without shared-memory feature!"); - exit(-1); - } - } - - let n: usize = args.value_of("samples").unwrap().parse().unwrap(); - let w: f64 = args.value_of("warmup").unwrap().parse().unwrap(); - let size: usize = args.value_of("PAYLOAD_SIZE").unwrap().parse().unwrap(); +#[derive(Parser)] +struct Args { + #[arg(short, long, default_value = "1")] + /// The number of seconds to warm up (float) + warmup: f64, + #[arg(short = 'n', long, default_value = "100")] + /// The number of round-trips to 
measure + samples: usize, + /// Sets the size of the payload to publish + payload_size: usize, + #[command(flatten)] + common: CommonArgs, +} - (config, Duration::from_secs_f64(w), size, n) +fn parse_args() -> (Config, Duration, usize, usize) { + let args = Args::parse(); + ( + args.common.into(), + Duration::from_secs_f64(args.warmup), + args.payload_size, + args.samples, + ) } diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index d1c7bbb867..f057075434 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -13,12 +13,11 @@ use std::io::{stdin, Read}; // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; -#[cfg(not(feature = "shared-memory"))] -use std::process::exit; +use clap::Parser; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh_examples::CommonArgs; fn main() { // initiate logging @@ -48,53 +47,13 @@ fn main() { for _ in stdin().bytes().take_while(|b| !matches!(b, Ok(b'q'))) {} } -fn parse_args() -> Config { - let args = App::new("zenoh roundtrip pong example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .arg(Arg::from_usage("--enable-shm 'Enable SHM transport.'")) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - if args.is_present("enable-shm") { - #[cfg(feature = "shared-memory")] - config.transport.shared_memory.set_enabled(true).unwrap(); - #[cfg(not(feature = "shared-memory"))] - { - println!("enable-shm argument: SHM cannot be enabled, because Zenoh is compiled without shared-memory feature!"); - exit(-1); - } - } +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[command(flatten)] + common: CommonArgs, +} - config +fn parse_args() -> Config { + let args = Args::parse(); + args.common.into() } diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 2632eeeda6..54563df1d8 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -12,10 +12,11 @@ // ZettaScale Zenoh Team, // use async_std::task::sleep; -use clap::{App, Arg}; +use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -38,54 +39,19 @@ async fn main() { } } -fn parse_args() -> (Config, String, String) 
{ - let args = App::new("zenoh pub example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The key expression to publish onto.'") - .default_value("demo/example/zenoh-rs-pub"), - ) - .arg( - Arg::from_usage("-v, --value=[VALUE] 'The value to publish.'") - .default_value("Pub from Rust!"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = args.value_of("key").unwrap().to_string(); - let value = args.value_of("value").unwrap().to_string(); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/zenoh-rs-pub")] + /// The key expression to write to. + key: KeyExpr<'static>, + #[arg(short, long, default_value = "Put from Rust")] + /// The value to write. 
+ value: String, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr, value) +fn parse_args() -> (Config, KeyExpr<'static>, String) { + let args = Args::parse(); + (args.common.into(), args.key, args.value) } diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 335fc5dbe0..fc329cadfc 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -12,11 +12,12 @@ // ZettaScale Zenoh Team, // use async_std::task::sleep; -use clap::{App, Arg}; +use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::shm::SharedMemoryManager; +use zenoh_examples::CommonArgs; const N: usize = 10; const K: u32 = 3; @@ -104,54 +105,19 @@ async fn main() -> Result<(), zenoh::Error> { Ok(()) } -fn parse_args() -> (Config, String, String) { - let args = App::new("zenoh shared-memory pub example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-p, --path=[PATH] 'The key expression to publish onto.'") - .default_value("demo/example/zenoh-rs-pub"), - ) - .arg( - Arg::from_usage("-v, --value=[VALUE] 'The value of to publish.'") - .default_value("Pub from SharedMemory Rust!"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let path = args.value_of("path").unwrap(); - let value = args.value_of("value").unwrap(); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/zenoh-rs-pub")] + /// The key expression to publish onto. + path: KeyExpr<'static>, + #[arg(short, long, default_value = "Pub from SharedMemory Rust!")] + /// The value of to publish. 
+ value: String, + #[command(flatten)] + common: CommonArgs, +} - (config, path.to_string(), value.to_string()) +fn parse_args() -> (Config, KeyExpr<'static>, String) { + let args = Args::parse(); + (args.common.into(), args.path, args.value) } diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index f72a7b46b7..9921c869e5 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::shm::SharedMemoryManager; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -46,62 +47,20 @@ async fn main() { } } -fn parse_args() -> (Config, usize, usize) { - let args = App::new("zenoh shared-memory throughput pub example") - .arg( - Arg::from_usage("-s, --shared-memory=[MB] 'shared memory size in MBytes'") - .default_value("32"), - ) - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - " 'Sets the size of the payload to publish'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - let sm_size = args - .value_of("shared-memory") - .unwrap() - .parse::() - .unwrap() - * 1024 - * 1024; +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "32")] + /// shared memory size in MBytes. + shared_memory: usize, + /// Sets the size of the payload to publish. 
+ payload_size: usize, + #[command(flatten)] + common: CommonArgs, +} - let size = args - .value_of("PAYLOAD_SIZE") - .unwrap() - .parse::() - .unwrap(); - (config, sm_size, size) +fn parse_args() -> (Config, usize, usize) { + let args = Args::parse(); + let sm_size = args.shared_memory * 1024 * 1024; + let size = args.payload_size; + (args.common.into(), sm_size, size) } diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index b761128f3f..433444b8de 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -11,13 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::Parser; use std::convert::TryInto; -#[cfg(not(feature = "shared-memory"))] -use std::process::exit; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh_examples::CommonArgs; fn main() { // initiate logging @@ -56,80 +55,36 @@ fn main() { } } -fn parse_args() -> (Config, usize, Priority, bool, usize) { - let args = App::new("zenoh throughput pub example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-p, --priority=[PRIO]... 'Priority for sending data.'", - )) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage("-t, --print 'Print the statistics.'")) - .arg( - Arg::from_usage( - "-n, --number=[number] 'Number of messages in each throughput measurements.'", - ) - .default_value("100000"), - ) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .arg(Arg::from_usage("--enable-shm 'Enable SHM transport.'")) - .arg(Arg::from_usage( - " 'Sets the size of the payload to publish'", - )) - .get_matches(); +#[derive(Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long)] + /// Priority for sending data + priority: Option, + #[arg(short = 't', long)] + /// Print the statistics + print: bool, + #[arg(short, long, default_value = "100000")] + /// Number of messages in each throughput measurements + number: usize, + /// Sets the size of the payload to publish + payload_size: usize, + #[command(flatten)] + common: CommonArgs, +} - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; +fn parse_args() -> (Config, usize, Priority, bool, usize) { + let args = Args::parse(); let mut prio = Priority::default(); - if let Some(p) = args.value_of("priority") { - prio = p.parse::().unwrap().try_into().unwrap(); - } - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); + if let Some(p) = args.priority { + prio = p.try_into().unwrap(); } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - if 
args.is_present("enable-shm") { - #[cfg(feature = "shared-memory")] - config.transport.shared_memory.set_enabled(true).unwrap(); - #[cfg(not(feature = "shared-memory"))] - { - println!("enable-shm argument: SHM cannot be enabled, because Zenoh is compiled without shared-memory feature!"); - exit(-1); - } - } - - let number: usize = args.value_of("number").unwrap().parse().unwrap(); - - let size = args - .value_of("PAYLOAD_SIZE") - .unwrap() - .parse::() - .unwrap(); - (config, size, prio, args.is_present("print"), number) + ( + args.common.into(), + args.payload_size, + prio, + args.print, + args.number, + ) } diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index cd36fc57f4..812c47294e 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,11 +13,12 @@ // use async_std::prelude::FutureExt; use async_std::task::sleep; -use clap::{App, Arg}; +use clap::Parser; use futures::prelude::*; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -70,49 +71,16 @@ async fn main() { subs.race(keyb).await; } -fn parse_args() -> (Config, String) { - let args = App::new("zenoh pull example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The key expression matching resources to pull'") - .default_value("demo/example/**"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = args.value_of("key").unwrap().to_string(); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct SubArgs { + #[arg(short, long, default_value = "demo/example/**")] + /// The Key Expression to subscribe to. 
+ key: KeyExpr<'static>, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr) +fn parse_args() -> (Config, KeyExpr<'static>) { + let args = SubArgs::parse(); + (args.common.into(), args.key) } diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index 2cc3d15bbc..9b625be552 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -29,54 +30,19 @@ async fn main() { session.put(&key_expr, value).res().await.unwrap(); } -fn parse_args() -> (Config, String, String) { - let args = App::new("zenoh put example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The key expression to write.'") - .default_value("demo/example/zenoh-rs-put"), - ) - .arg( - Arg::from_usage("-v, --value=[VALUE] 'The value to write.'") - .default_value("Put from Rust!"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = args.value_of("key").unwrap().to_string(); - let value = args.value_of("value").unwrap().to_string(); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/zenoh-rs-put")] + /// The key expression to write to. + key: KeyExpr<'static>, + #[arg(short, long, default_value = "Put from Rust")] + /// The value to write. 
+ value: String, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr, value) +fn parse_args() -> (Config, KeyExpr<'static>, String) { + let args = Args::parse(); + (args.common.into(), args.key, args.value) } diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 439bb04c3d..cc667df02c 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -31,59 +32,19 @@ async fn main() { session.close().res().await.unwrap(); } -// -// Argument parsing -- look at the main for the zenoh-related code -// -fn parse_args() -> (Config, String, f64) { - let default_value = std::f64::consts::PI.to_string(); - - let args = App::new("zenoh put float example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The key expression to put.'") - .default_value("demo/example/zenoh-rs-put"), - ) - .arg( - Arg::from_usage("-v, --value=[VALUE] 'The float value to put.'") - .default_value(&default_value), - ) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = args.value_of("key").unwrap().to_string(); - let value: f64 = args.value_of("value").unwrap().parse().unwrap(); +#[derive(clap::Parser, Clone, PartialEq, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/zenoh-rs-put")] + /// The key expression to write to. + key: KeyExpr<'static>, + #[arg(short, long, default_value_t = std::f64::consts::PI)] + /// The value to write. 
+ value: f64, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr, value) +fn parse_args() -> (Config, KeyExpr<'static>, f64) { + let args = Args::parse(); + (args.common.into(), args.key, args.value) } diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 751a794a32..5738c67f6c 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -12,14 +12,14 @@ // ZettaScale Zenoh Team, // use async_std::task::sleep; -use clap::{App, Arg}; +use clap::Parser; use futures::prelude::*; use futures::select; -use std::convert::TryFrom; use std::sync::atomic::Ordering::Relaxed; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -29,7 +29,6 @@ async fn main() { let (config, key_expr, value, complete) = parse_args(); let send_errors = std::sync::atomic::AtomicBool::new(false); - let key_expr = KeyExpr::try_from(key_expr).unwrap(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); @@ -85,59 +84,22 @@ async fn main() { } } -fn parse_args() -> (Config, String, String, bool) { - let args = App::new("zenoh queryable example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage( - "-k, --key=[KEYEXPR] 'The key expression matching queries to reply to.'", - ) - .default_value("demo/example/zenoh-rs-queryable"), - ) - .arg( - Arg::from_usage("-v, --value=[VALUE] 'The value to reply to queries.'") - .default_value("Queryable from Rust!"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .arg(Arg::from_usage( - "--complete 'Declare the queryable as complete w.r.t. the key expression.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - let key_expr = args.value_of("key").unwrap().to_string(); - let value = args.value_of("value").unwrap().to_string(); - let complete = args.is_present("complete"); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/zenoh-rs-queryable")] + /// The key expression matching queries to reply to. + key: KeyExpr<'static>, + #[arg(short, long, default_value = "Queryable from Rust")] + /// The value to reply to queries. + value: String, + #[arg(long)] + /// Declare the queryable as complete w.r.t. the key expression. 
+ complete: bool, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr, value, complete) +fn parse_args() -> (Config, KeyExpr<'static>, String, bool) { + let args = Args::parse(); + (args.common.into(), args.key, args.value, args.complete) } diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 1d5287c72d..79164c914a 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -14,13 +14,14 @@ #![recursion_limit = "256"] use async_std::task::sleep; -use clap::{App, Arg}; +use clap::Parser; use futures::prelude::*; use futures::select; use std::collections::HashMap; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -82,53 +83,19 @@ async fn main() { } } -fn parse_args() -> (Config, String, bool) { - let args = App::new("zenoh storage example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The selection of resources to store'") - .default_value("demo/example/**"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .arg(Arg::from_usage( - "--complete 'Declare the storage as complete w.r.t. 
the key expression.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = args.value_of("key").unwrap().to_string(); - let complete = args.is_present("complete"); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/**")] + /// The selection of resources to store. + key: KeyExpr<'static>, + #[arg(long)] + /// Declare the storage as complete w.r.t. the key expression. 
+ complete: bool, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr, complete) +fn parse_args() -> (Config, KeyExpr<'static>, bool) { + let args = Args::parse(); + (args.common.into(), args.key, args.complete) } diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index af23760e9d..0542f85870 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -12,13 +12,13 @@ // ZettaScale Zenoh Team, // use async_std::task::sleep; -use clap::{App, Arg}; +use clap::Parser; use futures::prelude::*; use futures::select; -use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -61,51 +61,16 @@ async fn main() { } } -fn parse_args() -> (Config, KeyExpr<'static>) { - let args = App::new("zenoh sub example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The key expression to subscribe to.'") - .default_value("demo/example/**"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = KeyExpr::try_from(args.value_of("key").unwrap()) - .unwrap() - .into_owned(); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct SubArgs { + #[arg(short, long, default_value = "demo/example/**")] + /// The Key Expression to subscribe to. 
+ key: KeyExpr<'static>, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr) +fn parse_args() -> (Config, KeyExpr<'static>) { + let args = SubArgs::parse(); + (args.common.into(), args.key) } diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index ebb4a87069..52ba53875c 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -12,13 +12,13 @@ // ZettaScale Zenoh Team, // use async_std::task::sleep; -use clap::{App, Arg}; +use clap::Parser; use futures::prelude::*; use futures::select; -use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; #[async_std::main] async fn main() { @@ -67,51 +67,16 @@ async fn main() { } } -fn parse_args() -> (Config, KeyExpr<'static>) { - let args = App::new("zenoh liveliness sub example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The key expression mathing liveliness changes to subscribe to.'") - .default_value("group1/**"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - - let key_expr = KeyExpr::try_from(args.value_of("key").unwrap()) - .unwrap() - .into_owned(); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "group1/**")] + /// The key expression to write to. 
+ key: KeyExpr<'static>, + #[command(flatten)] + common: CommonArgs, +} - (config, key_expr) +fn parse_args() -> (Config, KeyExpr<'static>) { + let args = Args::parse(); + (args.common.into(), args.key) } diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index c6c1846f56..671e50f88b 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -11,13 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::Parser; use std::io::{stdin, Read}; -#[cfg(not(feature = "shared-memory"))] -use std::process::exit; use std::time::Instant; use zenoh::config::Config; use zenoh::prelude::sync::*; +use zenoh_examples::CommonArgs; struct Stats { round_count: usize, @@ -101,66 +100,19 @@ fn main() { } } -fn parse_args() -> (Config, usize, usize) { - let args = App::new("zenoh throughput sub example") - .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), - ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 
'Endpoints to listen on.'", - )) - .arg( - Arg::from_usage("-s, --samples=[number] 'Number of throughput measurements.'") - .default_value("10"), - ) - .arg( - Arg::from_usage( - "-n, --number=[number] 'Number of messages in each throughput measurements.'", - ) - .default_value("100000"), - ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage("--enable-shm 'Enable SHM transport.'")) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) - .get_matches(); - - let mut config = if let Some(conf_file) = args.value_of("config") { - Config::from_file(conf_file).unwrap() - } else { - Config::default() - }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { - config.set_mode(Some(mode)).unwrap(); - } - if let Some(values) = args.values_of("connect") { - config.connect.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if let Some(values) = args.values_of("listen") { - config.listen.endpoints = values.map(|v| v.parse().unwrap()).collect(); - } - if args.is_present("no-multicast-scouting") { - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - } - if args.is_present("enable-shm") { - #[cfg(feature = "shared-memory")] - config.transport.shared_memory.set_enabled(true).unwrap(); - #[cfg(not(feature = "shared-memory"))] - { - println!("enable-shm argument: SHM cannot be enabled, because Zenoh is compiled without shared-memory feature!"); - exit(-1); - } - } - - let samples: usize = args.value_of("samples").unwrap().parse().unwrap(); - let number: usize = args.value_of("number").unwrap().parse().unwrap(); +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "10")] + /// Number of throughput measurements. + samples: usize, + #[arg(short, long, default_value = "100000")] + /// Number of messages in each throughput measurements. 
+ number: usize, + #[command(flatten)] + common: CommonArgs, +} - (config, samples, number) +fn parse_args() -> (Config, usize, usize) { + let args = Args::parse(); + (args.common.into(), args.samples, args.number) } diff --git a/examples/src/lib.rs b/examples/src/lib.rs index 8f5ad114ae..e7c00cb606 100644 --- a/examples/src/lib.rs +++ b/examples/src/lib.rs @@ -1,3 +1,78 @@ //! Examples on using Zenoh. //! See the code in ../examples/ //! Check ../README.md for usage. +//! +use zenoh::config::Config; + +#[derive(clap::ValueEnum, Default, Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum Wai { + #[default] + Peer, + Client, + Router, +} +impl core::fmt::Display for Wai { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + core::fmt::Debug::fmt(&self, f) + } +} +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +pub struct CommonArgs { + #[arg(short, long)] + /// A configuration file. + config: Option, + #[arg(short, long, default_value_t)] + /// The Zenoh session mode. + mode: Wai, + #[arg(short = 'e', long)] + /// Endpoints to connect to. + connect: Vec, + #[arg(short, long)] + /// Endpoints to listen on. + listen: Vec, + #[arg(long)] + /// Disable the multicast-based scouting mechanism. + no_multicast_scouting: bool, + #[arg(long)] + /// Disable the multicast-based scouting mechanism. 
+ enable_shm: bool, +} + +impl From for Config { + fn from(value: CommonArgs) -> Self { + (&value).into() + } +} +impl From<&CommonArgs> for Config { + fn from(value: &CommonArgs) -> Self { + let mut config = match &value.config { + Some(path) => Config::from_file(path).unwrap(), + None => Config::default(), + }; + match value.mode { + Wai::Peer => config.set_mode(Some(zenoh::scouting::WhatAmI::Peer)), + Wai::Client => config.set_mode(Some(zenoh::scouting::WhatAmI::Client)), + Wai::Router => config.set_mode(Some(zenoh::scouting::WhatAmI::Router)), + } + .unwrap(); + if !value.connect.is_empty() { + config.connect.endpoints = value.connect.iter().map(|v| v.parse().unwrap()).collect(); + } + if !value.listen.is_empty() { + config.listen.endpoints = value.listen.iter().map(|v| v.parse().unwrap()).collect(); + } + if value.no_multicast_scouting { + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + } + if value.enable_shm { + #[cfg(feature = "shared-memory")] + config.transport.shared_memory.set_enabled(true).unwrap(); + #[cfg(not(feature = "shared-memory"))] + { + println!("enable-shm argument: SHM cannot be enabled, because Zenoh is compiled without shared-memory feature!"); + std::process::exit(-1); + } + } + config + } +} diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index daf3f80d29..d196b52c59 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::{arg, Command}; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; @@ -85,50 +85,45 @@ async fn main() { } fn parse_args() -> Config { - let args = App::new("zenoh ssl server example") + let args = Command::new("zenoh ssl server example") .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - 
.possible_values(["peer", "client"]), + arg!(-m --mode [MODE] "The zenoh session mode (peer by default).") + .value_parser(["peer", "client"]), + ) + .arg(arg!(-e --connect [ENDPOINT]... "Endpoints to connect to.")) + .arg(arg!(-l --listen [ENDPOINT]... "Endpoints to listen on.")) + .arg(arg!(-c --config [FILE] "A configuration file.")) + .arg( + arg!(--no-multicast-scouting "Disable the multicast-based scouting mechanism." + ), ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 'Endpoints to listen on.'", - )) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) .get_matches(); - let mut config = if let Some(conf_file) = args.value_of("config") { + let mut config = if let Some(conf_file) = args.get_one::<&String>("config") { Config::from_file(conf_file).unwrap() } else { Config::default() }; - match args.value_of("mode").map(|m| m.parse()) { + match args.get_one::<&String>("mode").map(|m| m.parse()) { Some(Ok(mode)) => { config.set_mode(Some(mode)).unwrap(); } Some(Err(e)) => panic!("Invalid mode: {}", e), None => {} }; - if let Some(values) = args.values_of("connect") { + if let Some(values) = args.get_many::<&String>("connect") { config .connect .endpoints - .extend(values.map(|v| v.parse().unwrap())) + .extend(values.into_iter().map(|v| v.parse().unwrap())) } - if let Some(values) = args.values_of("listen") { + if let Some(values) = args.get_many::<&String>("listen") { config .listen .endpoints - .extend(values.map(|v| v.parse().unwrap())) + .extend(values.into_iter().map(|v| v.parse().unwrap())) } - if args.is_present("no-multicast-scouting") { + if args.get_flag("no-multicast-scouting") { config.scouting.multicast.set_enabled(Some(false)).unwrap(); } diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 84c6baf83c..91b0283ddb 
100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -46,7 +46,7 @@ zenoh-sync = { workspace = true } zenoh-util = { workspace = true } [dev-dependencies] -clap = { workspace = true } +clap = { workspace = true, features = ["derive"] } [[example]] name = "z_query_sub" diff --git a/zenoh-ext/examples/z_pub_cache.rs b/zenoh-ext/examples/z_pub_cache.rs index 67879087a1..516cb3bd83 100644 --- a/zenoh-ext/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/z_pub_cache.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use async_std::task::sleep; -use clap::{App, Arg}; +use clap::{arg, Command}; use std::time::Duration; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; @@ -46,61 +46,48 @@ async fn main() { } fn parse_args() -> (Config, String, String, usize, Option) { - let args = App::new("zenoh-ext pub cache example") + let args = Command::new("zenoh-ext pub cache example") .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), + arg!(-m --mode [MODE] "The zenoh session mode (peer by default)") + .value_parser(["peer", "client"]), ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 'Endpoints to listen on.'", - )) + .arg(arg!(-e --connect [ENDPOINT]... "Endpoints to connect to.")) + .arg(arg!(-l --listen [ENDPOINT]... 
"Endpoints to listen on.")) .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The key expression to publish.'") + arg!(-k --key [KEYEXPR] "The key expression to publish.") .default_value("demo/example/zenoh-rs-pub"), ) + .arg(arg!(-v --value [VALUE] "The value to publish.").default_value("Pub from Rust!")) .arg( - Arg::from_usage("-v, --value=[VALUE] 'The value to publish.'") - .default_value("Pub from Rust!"), - ) - .arg( - Arg::from_usage("-h, --history=[SIZE] 'The number of publications to keep in cache'") + arg!(-h --history [SIZE] "The number of publications to keep in cache") .default_value("1"), ) - .arg(Arg::from_usage( - "-x, --prefix=[STRING] 'An optional queryable prefix'", - )) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) + .arg(arg!(-x --prefix [STRING] "An optional queryable prefix")) + .arg(arg!(-c --config [FILE] "A configuration file.")) + .arg(arg!(--no-multicast-scouting "Disable the multicast-based scouting mechanism.")) .get_matches(); - let mut config = if let Some(conf_file) = args.value_of("config") { + let mut config = if let Some(conf_file) = args.get_one::<&String>("config") { Config::from_file(conf_file).unwrap() } else { Config::default() }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { + if let Some(Ok(mode)) = args.get_one::<&String>("mode").map(|mode| mode.parse()) { config.set_mode(Some(mode)).unwrap(); } - if let Some(values) = args.values_of("connect") { + if let Some(values) = args.get_many::<&String>("connect") { config .connect .endpoints .extend(values.map(|v| v.parse().unwrap())) } - if let Some(values) = args.values_of("listen") { + if let Some(values) = args.get_many::<&String>("listen") { config .listen .endpoints .extend(values.map(|v| v.parse().unwrap())) } - if args.is_present("no-multicast-scouting") { + if args.get_flag("no-multicast-scouting") { 
config.scouting.multicast.set_enabled(Some(false)).unwrap(); } @@ -110,10 +97,10 @@ fn parse_args() -> (Config, String, String, usize, Option) { .set_enabled(Some(ModeDependentValue::Unique(true))) .unwrap(); - let key_expr = args.value_of("key").unwrap().to_string(); - let value = args.value_of("value").unwrap().to_string(); - let history: usize = args.value_of("history").unwrap().parse().unwrap(); - let prefix = args.value_of("prefix").map(String::from); + let key_expr = args.get_one::<&String>("key").unwrap().to_string(); + let value = args.get_one::<&String>("value").unwrap().to_string(); + let history: usize = args.get_one::<&String>("history").unwrap().parse().unwrap(); + let prefix = args.get_one::<&String>("prefix").map(|s| (*s).to_owned()); (config, key_expr, value, history, prefix) } diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index 6c53ebd03d..4d308bca51 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -12,7 +12,8 @@ // ZettaScale Zenoh Team, // use async_std::task::sleep; -use clap::{App, Arg}; +use clap::arg; +use clap::Command; use futures::prelude::*; use futures::select; use std::time::Duration; @@ -77,58 +78,50 @@ async fn main() { } fn parse_args() -> (Config, String, Option) { - let args = App::new("zenoh-ext query sub example") + let args = Command::new("zenoh-ext query sub example") .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), + arg!(-m --mode [MODE] "The zenoh session mode (peer by default).") + .value_parser(["peer", "client"]), ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", - )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 'Endpoints to listen on.'", - )) + .arg(arg!(-e --connect [ENDPOINT]... "Endpoints to connect to.")) + .arg(arg!(-l --listen [ENDPOINT]... 
"Endpoints to listen on.")) .arg( - Arg::from_usage("-k, --key=[KEYEXPR] 'The key expression to subscribe onto'") + arg!(-k --key [KEYEXPR] "The key expression to subscribe onto") .default_value("demo/example/**"), ) .arg( - Arg::from_usage("-q, --query=[SELECTOR] 'The selector to use for queries (by default it's same than 'selector' option)'"), + arg!(-q --query [SELECTOR] "The selector to use for queries (by default it's same than 'selector' option)") ) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", - )) - .arg(Arg::from_usage( - "--no-multicast-scouting 'Disable the multicast-based scouting mechanism.'", - )) + .arg(arg!(-c --config [FILE] "A configuration file.")) + .arg(arg!(--no-multicast-scouting "Disable the multicast-based scouting mechanism.")) .get_matches(); - let mut config = if let Some(conf_file) = args.value_of("config") { + let mut config = if let Some(conf_file) = args.get_one::<&String>("config") { Config::from_file(conf_file).unwrap() } else { Config::default() }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { + if let Some(Ok(mode)) = args.get_one::<&String>("mode").map(|mode| mode.parse()) { config.set_mode(Some(mode)).unwrap(); } - if let Some(values) = args.values_of("connect") { + if let Some(values) = args.get_many::<&String>("connect") { config .connect .endpoints .extend(values.map(|v| v.parse().unwrap())) } - if let Some(values) = args.values_of("listen") { + if let Some(values) = args.get_many::<&String>("listen") { config .listen .endpoints .extend(values.map(|v| v.parse().unwrap())) } - if args.is_present("no-multicast-scouting") { + if args.get_flag("no-multicast-scouting") { config.scouting.multicast.set_enabled(Some(false)).unwrap(); } - let key_expr = args.value_of("key").unwrap().to_string(); - let query = args.value_of("query").map(ToString::to_string); + let key_expr = args.get_one::<&String>("key").unwrap().to_string(); + let query = 
args.get_one::<&String>("query").map(ToString::to_string); (config, key_expr, query) } diff --git a/zenoh-ext/examples/z_view_size.rs b/zenoh-ext/examples/z_view_size.rs index 7f9c1d179c..5756048853 100644 --- a/zenoh-ext/examples/z_view_size.rs +++ b/zenoh-ext/examples/z_view_size.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{App, Arg}; +use clap::{arg, Command}; use std::sync::Arc; use std::time::Duration; use zenoh::config::Config; @@ -50,59 +50,59 @@ async fn main() { } fn parse_args() -> (Config, String, Option, usize, u64) { - let args = App::new("zenoh-ext group view size example") + let args = Command::new("zenoh-ext group view size example") .arg( - Arg::from_usage("-m, --mode=[MODE] 'The zenoh session mode (peer by default).") - .possible_values(["peer", "client"]), + arg!(-m --mode [MODE] "The zenoh session mode (peer by default).") + .value_parser(["peer", "client"]), ) - .arg(Arg::from_usage( - "-e, --connect=[ENDPOINT]... 'Endpoints to connect to.'", + .arg(arg!( + -e --connect [ENDPOINT]... "Endpoints to connect to." )) - .arg(Arg::from_usage( - "-l, --listen=[ENDPOINT]... 'Endpoints to listen on.'", + .arg(arg!( + -l --listen [ENDPOINT]... "Endpoints to listen on." )) - .arg(Arg::from_usage( - "-c, --config=[FILE] 'A configuration file.'", + .arg(arg!( + -c --config [FILE] "A configuration file." )) - .arg(Arg::from_usage( - "-g, --group=[STRING] 'The group name'", + .arg(arg!( + -g --group [STRING] "The group name" ).default_value("zgroup")) - .arg(Arg::from_usage( - "-i, --id=[STRING] 'The group member id (default is the zenoh ID)'", + .arg(arg!( + -i --id [STRING] "The group member id (default is the zenoh ID)" )) - .arg(Arg::from_usage( - "-s, --size=[INT] 'The expected group size. The example will wait for the group to reach this size'", + .arg(arg!( + -s --size [INT] "The expected group size. 
The example will wait for the group to reach this size" ).default_value("3")) - .arg(Arg::from_usage( - "-t, --timeout=[SEC] 'The duration (in seconds) this example will wait for the group to reach the expected size.'", + .arg(arg!( + -t --timeout [SEC] "The duration (in seconds) this example will wait for the group to reach the expected size." ).default_value("15")) .get_matches(); - let mut config = if let Some(conf_file) = args.value_of("config") { + let mut config = if let Some(conf_file) = args.get_one::<&String>("config") { Config::from_file(conf_file).unwrap() } else { Config::default() }; - if let Some(Ok(mode)) = args.value_of("mode").map(|mode| mode.parse()) { + if let Some(Ok(mode)) = args.get_one::<&String>("mode").map(|mode| mode.parse()) { config.set_mode(Some(mode)).unwrap(); } - if let Some(values) = args.values_of("connect") { + if let Some(values) = args.get_many::<&String>("connect") { config .connect .endpoints .extend(values.map(|v| v.parse().unwrap())) } - if let Some(values) = args.values_of("listen") { + if let Some(values) = args.get_many::<&String>("listen") { config .listen .endpoints .extend(values.map(|v| v.parse().unwrap())) } - let group = args.value_of("group").unwrap().to_string(); - let id = args.value_of("id").map(String::from); - let size: usize = args.value_of("size").unwrap().parse().unwrap(); - let timeout: u64 = args.value_of("timeout").unwrap().parse().unwrap(); + let group = args.get_one::<&String>("group").unwrap().to_string(); + let id = args.get_one::<&String>("id").map(|v| (*v).to_owned()); + let size: usize = args.get_one::<&String>("size").unwrap().parse().unwrap(); + let timeout: u64 = args.get_one::<&String>("timeout").unwrap().parse().unwrap(); (config, group, id, size, timeout) } diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 025645612a..2a9a38c02c 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -25,6 +25,7 @@ use std::{ collections::HashMap, convert::TryFrom, hash::Hash, + 
str::FromStr, }; /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the @@ -483,6 +484,12 @@ impl<'a> TryFrom<&'a str> for Selector<'a> { } } } +impl FromStr for Selector<'static> { + type Err = zenoh_result::Error; + fn from_str(s: &str) -> Result { + s.to_owned().try_into() + } +} impl<'a> TryFrom<&'a String> for Selector<'a> { type Error = zenoh_result::Error; diff --git a/zenohd/Cargo.toml b/zenohd/Cargo.toml index 754198dc73..e3177a5658 100644 --- a/zenohd/Cargo.toml +++ b/zenohd/Cargo.toml @@ -31,7 +31,7 @@ shared-memory = ["zenoh/shared-memory"] [dependencies] async-std = { workspace = true, features = ["attributes"] } -clap = { workspace = true } +clap = { workspace = true, features = ["derive"] } env_logger = { workspace = true } futures = { workspace = true } git-version = { workspace = true } diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 56c56bc538..c864c303b5 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use async_std::task; -use clap::{ArgMatches, Command}; +use clap::Parser; use futures::future; use git_version::git_version; use std::collections::HashSet; @@ -29,6 +29,56 @@ lazy_static::lazy_static!( const DEFAULT_LISTENER: &str = "tcp/[::]:7447"; +#[derive(Debug, Parser)] +#[command(version=GIT_VERSION, long_version=LONG_VERSION.as_str(), about="The zenoh router")] +struct Args { + /// The configuration file. Currently, this file must be a valid JSON5 or YAML file. + #[arg(short, long, value_name = "PATH")] + config: Option, + /// Locators on which this router will listen for incoming sessions. Repeat this option to open several listeners. + #[arg(short, long, value_name = "ENDPOINT")] + listen: Vec, + /// A peer locator this router will try to connect to. + /// Repeat this option to connect to several peers. 
+ #[arg(short = 'e', long, value_name = "ENDPOINT")] + connect: Vec, + /// The identifier (as an hexadecimal string, with odd number of chars - e.g.: A0B23...) that zenohd must use. If not set, a random unsigned 128bit integer will be used. + /// WARNING: this identifier must be unique in the system and must be 16 bytes maximum (32 chars)! + #[arg(short, long)] + id: Option, + /// A plugin that MUST be loaded. You can give just the name of the plugin, zenohd will search for a library named 'libzenoh_plugin_.so' (exact name depending the OS). Or you can give such a string: ": + /// Repeat this option to load several plugins. If loading failed, zenohd will exit. + #[arg(short = 'P', long)] + plugin: Vec, + /// Directory where to search for plugins libraries to load. + /// Repeat this option to specify several search directories. + #[arg(long, value_name = "PATH")] + plugin_search_dir: Vec, + /// By default zenohd adds a HLC-generated Timestamp to each routed Data if there isn't already one. This option disables this feature. + #[arg(long)] + no_timestamp: bool, + /// By default zenohd replies to multicast scouting messages for being discovered by peers and clients. This option disables this feature. + #[arg(long)] + no_multicast_scouting: bool, + /// Configures HTTP interface for the REST API (enabled by default on port 8000). Accepted values: + /// - a port number + /// - a string with format `:` (to bind the HTTP server to a specific interface) + /// - `none` to disable the REST API + #[arg(long, value_name = "SOCKET")] + rest_http_port: Option, + /// Allows arbitrary configuration changes as column-separated KEY:VALUE pairs, where: + /// - KEY must be a valid config path. + /// - VALUE must be a valid JSON5 string that can be deserialized to the expected type for the KEY field. 
+ /// Examples: + /// --cfg='startup/subscribe:["demo/**"]' + /// --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}' + #[arg(long)] + cfg: Vec, + /// Configure the read and/or write permissions on the admin space. Default is read only. + #[arg(long, default_value = "r", value_name = "[r|w|rw|none]")] + adminspace_permissions: String, +} + fn main() { task::block_on(async { let mut log_builder = @@ -40,39 +90,7 @@ fn main() { log::info!("zenohd {}", *LONG_VERSION); - let app = Command::new("The zenoh router") - .version(GIT_VERSION) - .long_version(LONG_VERSION.as_str()).args( - &[ -clap::arg!(-c --config [FILE] "The configuration file. Currently, this file must be a valid JSON5 or YAML file."), -clap::Arg::new("listen").short('l').long("listen").value_name("ENDPOINT").help(r"A locator on which this router will listen for incoming sessions. -Repeat this option to open several listeners.").takes_value(true).multiple_occurrences(true), -clap::Arg::new("connect").short('e').long("connect").value_name("ENDPOINT").help(r"A peer locator this router will try to connect to. -Repeat this option to connect to several peers.").takes_value(true).multiple_occurrences(true), -clap::Arg::new("id").short('i').long("id").value_name("HEX_STRING").help(r"The identifier (as an hexadecimal string, with odd number of chars - e.g.: A0B23...) that zenohd must use. If not set, a random unsigned 128bit integer will be used. -WARNING: this identifier must be unique in the system and must be 16 bytes maximum (32 chars)!").multiple_values(false).multiple_occurrences(false), -clap::Arg::new("plugin").short('P').long("plugin").value_name("PLUGIN").takes_value(true).multiple_occurrences(true).help(r#"A plugin that MUST be loaded. You can give just the name of the plugin, zenohd will search for a library named 'libzenoh_plugin_.so' (exact name depending the OS). Or you can give such a string: ":". -Repeat this option to load several plugins. 
If loading failed, zenohd will exit."#), -clap::Arg::new("plugin-search-dir").long("plugin-search-dir").takes_value(true).multiple_occurrences(true).value_name("DIRECTORY").help(r"A directory where to search for plugins libraries to load. -Repeat this option to specify several search directories."), -clap::arg!(--"no-timestamp" r"By default zenohd adds a HLC-generated Timestamp to each routed Data if there isn't already one. This option disables this feature."), -clap::arg!(--"no-multicast-scouting" r"By default zenohd replies to multicast scouting messages for being discovered by peers and clients. This option disables this feature."), -clap::arg!(--"rest-http-port" [SOCKET] r"Configures HTTP interface for the REST API (enabled by default). Accepted values: - - a port number - - a string with format `:` (to bind the HTTP server to a specific interface) - - `none` to disable the REST API -").default_value("8000").multiple_values(false).multiple_occurrences(false), -clap::Arg::new("cfg").long("cfg").takes_value(true).multiple_occurrences(true).value_name("KEY:VALUE").help( -r#"Allows arbitrary configuration changes as column-separated KEY:VALUE pairs, where: - - KEY must be a valid config path. - - VALUE must be a valid JSON5 string that can be deserialized to the expected type for the KEY field. -Examples: ---cfg='startup/subscribe:["demo/**"]' ---cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'"#), -clap::Arg::new("adminspace-permissions").long("adminspace-permissions").value_name("[r|w|rw|none]").help(r"Configure the read and/or write permissions on the admin space. 
Default is read only."), - ] - ); - let args = app.get_matches(); + let args = Args::parse(); let config = config_from_args(&args); log::info!("Initial conf: {}", &config); @@ -150,9 +168,10 @@ clap::Arg::new("adminspace-permissions").long("adminspace-permissions").value_na }); } -fn config_from_args(args: &ArgMatches) -> Config { +fn config_from_args(args: &Args) -> Config { let mut config = args - .value_of("config") + .config + .as_ref() .map_or_else(Config::default, |conf_file| { Config::from_file(conf_file).unwrap() }); @@ -160,15 +179,13 @@ fn config_from_args(args: &ArgMatches) -> Config { if config.mode().is_none() { config.set_mode(Some(WhatAmI::Router)).unwrap(); } - if args.occurrences_of("id") > 0 { - config - .set_id(args.value_of("id").unwrap().parse().unwrap()) - .unwrap(); + if let Some(id) = &args.id { + config.set_id(id.parse().unwrap()).unwrap(); } // apply '--rest-http-port' to config only if explicitly set (overwritting config), // or if no config file is set (to apply its default value) - if args.occurrences_of("rest-http-port") > 0 || args.occurrences_of("config") == 0 { - let value = args.value_of("rest-http-port").unwrap(); + if args.rest_http_port.is_some() || args.config.is_none() { + let value = args.rest_http_port.as_deref().unwrap_or("8000"); if !value.eq_ignore_ascii_case("none") { config .insert_json5("plugins/rest/http_port", &format!(r#""{value}""#)) @@ -178,33 +195,32 @@ fn config_from_args(args: &ArgMatches) -> Config { .unwrap(); } } - if let Some(plugins_search_dirs) = args.values_of("plugin-search-dir") { + if !args.plugin_search_dir.is_empty() { config - .set_plugins_search_dirs(plugins_search_dirs.map(|c| c.to_owned()).collect()) + .set_plugins_search_dirs(args.plugin_search_dir.clone()) .unwrap(); } - if let Some(plugins) = args.values_of("plugin") { - for plugin in plugins { - match plugin.split_once(':') { - Some((name, path)) => { - config - .insert_json5(&format!("plugins/{name}/__required__"), "true") - .unwrap(); - 
config - .insert_json5(&format!("plugins/{name}/__path__"), &format!("\"{path}\"")) - .unwrap(); - } - None => config - .insert_json5(&format!("plugins/{plugin}/__required__"), "true") - .unwrap(), + for plugin in &args.plugin { + match plugin.split_once(':') { + Some((name, path)) => { + config + .insert_json5(&format!("plugins/{name}/__required__"), "true") + .unwrap(); + config + .insert_json5(&format!("plugins/{name}/__path__"), &format!("\"{path}\"")) + .unwrap(); } + None => config + .insert_json5(&format!("plugins/{plugin}/__required__"), "true") + .unwrap(), } } - if let Some(peers) = args.values_of("connect") { + if !args.connect.is_empty() { config .connect .set_endpoints( - peers + args.connect + .iter() .map(|v| match v.parse::() { Ok(v) => v, Err(e) => { @@ -215,11 +231,12 @@ fn config_from_args(args: &ArgMatches) -> Config { ) .unwrap(); } - if let Some(listeners) = args.values_of("listen") { + if !args.listen.is_empty() { config .listen .set_endpoints( - listeners + args.listen + .iter() .map(|v| match v.parse::() { Ok(v) => v, Err(e) => { @@ -236,7 +253,7 @@ fn config_from_args(args: &ArgMatches) -> Config { .endpoints .push(DEFAULT_LISTENER.parse().unwrap()) } - if args.is_present("no-timestamp") { + if args.no_timestamp { config .timestamping .set_enabled(Some(ModeDependentValue::Unique(false))) @@ -244,7 +261,7 @@ fn config_from_args(args: &ArgMatches) -> Config { }; match ( config.scouting.multicast.enabled().is_none(), - args.is_present("no-multicast-scouting"), + args.no_multicast_scouting, ) { (_, true) => { config.scouting.multicast.set_enabled(Some(false)).unwrap(); @@ -254,43 +271,41 @@ fn config_from_args(args: &ArgMatches) -> Config { } (false, false) => {} }; - if let Some(permissions) = args.value_of("adminspace-permissions") { - match permissions { - "r" => config - .adminspace - .set_permissions(PermissionsConf { - read: true, - write: false, - }) - .unwrap(), - "w" => config - .adminspace - .set_permissions(PermissionsConf { - read: 
false, - write: true, - }) - .unwrap(), - "rw" => config - .adminspace - .set_permissions(PermissionsConf { - read: true, - write: true, - }) - .unwrap(), - "none" => config - .adminspace - .set_permissions(PermissionsConf { - read: false, - write: false, - }) - .unwrap(), - s => panic!( - r#"Invalid option: --adminspace-permissions={} - Accepted values: "r", "w", "rw" or "none""#, - s - ), - }; + match args.adminspace_permissions.as_str() { + "r" => config + .adminspace + .set_permissions(PermissionsConf { + read: true, + write: false, + }) + .unwrap(), + "w" => config + .adminspace + .set_permissions(PermissionsConf { + read: false, + write: true, + }) + .unwrap(), + "rw" => config + .adminspace + .set_permissions(PermissionsConf { + read: true, + write: true, + }) + .unwrap(), + "none" => config + .adminspace + .set_permissions(PermissionsConf { + read: false, + write: false, + }) + .unwrap(), + s => panic!( + r#"Invalid option: --adminspace-permissions={} - Accepted values: "r", "w", "rw" or "none""#, + s + ), }; - for json in args.values_of("cfg").unwrap_or_default() { + for json in &args.cfg { if let Some((key, value)) = json.split_once(':') { match json5::Deserializer::from_str(value) { Ok(mut deserializer) => { From cb0c0279af913c75319cd80c76f7f2fcbff0807a Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Fri, 8 Dec 2023 16:26:12 +0100 Subject: [PATCH 18/29] fix examples session mode default value --- examples/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/src/lib.rs b/examples/src/lib.rs index e7c00cb606..a766bd0695 100644 --- a/examples/src/lib.rs +++ b/examples/src/lib.rs @@ -21,7 +21,7 @@ pub struct CommonArgs { #[arg(short, long)] /// A configuration file. config: Option, - #[arg(short, long, default_value_t)] + #[arg(short, long, default_value = "peer")] /// The Zenoh session mode. 
mode: Wai, #[arg(short = 'e', long)] From fa612ce1342d5aaf2bcabd6d0b559602fb3e08aa Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 11 Dec 2023 10:07:31 +0100 Subject: [PATCH 19/29] Update outdated dependencies (#607) Co-authored-by: Darius Maitia --- Cargo.lock | 132 +++++++++++---- Cargo.toml | 14 +- commons/zenoh-sync/src/condition.rs | 4 +- io/zenoh-links/zenoh-link-quic/src/unicast.rs | 58 ++++--- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 151 ++++++++++++------ 5 files changed, 245 insertions(+), 114 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index be46441b2e..f2fcc8b187 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -251,7 +251,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.3", "futures-core", ] @@ -337,7 +337,7 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" dependencies = [ - "event-listener", + "event-listener 2.5.3", ] [[package]] @@ -351,7 +351,7 @@ dependencies = [ "autocfg", "blocking", "cfg-if 1.0.0", - "event-listener", + "event-listener 2.5.3", "futures-lite", "rustix 0.37.25", "signal-hook", @@ -1018,13 +1018,13 @@ checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" [[package]] name = "derive-new" -version = "0.5.9" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535" +checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.33", ] [[package]] @@ -1175,6 +1175,17 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite 0.2.13", +] + [[package]] name = "fancy-regex" version = "0.11.0" @@ -2268,9 +2279,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "3.9.1" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a54938017eacd63036332b4ae5c8a49fc8c0c1d6d629893057e4f13609edd06" +checksum = "536900a8093134cf9ccf00a27deb3532421099e958d9dd431135d0c7543ca1e8" dependencies = [ "num-traits", ] @@ -2647,10 +2658,10 @@ checksum = "e13f81c9a9d574310b8351f8666f5a93ac3b0069c45c28ad52c10291389a7cf9" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", "rustls", - "rustls-native-certs", + "rustls-native-certs 0.6.3", "slab", "thiserror", "tinyvec", @@ -2779,7 +2790,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4954fbc00dcd4d8282c987710e50ba513d351400dbdd00e803a05172a90d8976" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time 0.3.28", "yasna", ] @@ -2886,11 +2897,25 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "684d5e6e18f669ccebf64a92236bb7db9a34f07be010e3627368182027180866" +dependencies = [ + "cc", + "getrandom 0.2.10", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys", +] + [[package]] name = "ringbuffer-spsc" version = "0.1.9" @@ -2993,8 +3018,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", - "ring", - "rustls-webpki", + "ring 0.16.20", + "rustls-webpki 0.101.5", "sct", ] @@ -3005,7 +3030,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 1.0.3", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.0.0", + "rustls-pki-types", "schannel", "security-framework", ] @@ -3019,14 +3057,41 @@ dependencies = [ "base64 0.21.4", ] +[[package]] +name = "rustls-pemfile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +dependencies = [ + "base64 0.21.4", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb0a1f9b9efec70d32e6d6aa3e58ebd88c3754ec98dfe9145c63cf54cc829b83" + [[package]] name = "rustls-webpki" version = "0.101.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", +] + +[[package]] +name = "rustls-webpki" +version = "0.102.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de2635c8bc2b88d367767c5de8ea1d8db9af3f6219eba28442242d9ab81d1b89" +dependencies = [ + "ring 0.17.6", + "rustls-pki-types", + "untrusted 0.9.0", ] [[package]] @@ -3089,8 +3154,8 @@ version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -3368,7 +3433,7 @@ version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38aabbeafa6f6dead8cebf246fe9fae1f9215c8d29b3a69f93bd62a9e4a3dcd6" dependencies = [ - "event-listener", + "event-listener 2.5.3", ] [[package]] @@ -3986,6 +4051,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "unzip-n" version = "0.1.2" @@ -4225,9 +4296,12 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "0de2cfda980f21be5a7ed2eadb3e6fe074d56022bea2cdeb1a62eb220fc04188" +dependencies = [ + "rustls-pki-types", +] [[package]] name = "win-sys" @@ -4495,7 +4569,7 @@ dependencies = [ "base64 0.21.4", "const_format", "env_logger", - "event-listener", + "event-listener 4.0.0", "flume", "form_urlencoded", "futures", @@ -4702,9 +4776,9 @@ dependencies = [ "log", "quinn", "rustls", - "rustls-native-certs", - "rustls-pemfile", - "rustls-webpki", + "rustls-native-certs 0.7.0", + "rustls-pemfile 2.0.0", + "rustls-webpki 0.102.0", "secrecy", "zenoh-config", "zenoh-core", @@ -4762,8 +4836,8 @@ dependencies = [ "futures", "log", "rustls", - "rustls-pemfile", - "rustls-webpki", + "rustls-pemfile 2.0.0", + "rustls-webpki 0.102.0", "secrecy", "webpki-roots", "zenoh-config", @@ -4991,7 +5065,7 @@ name = "zenoh-sync" 
version = "0.11.0-dev" dependencies = [ "async-std", - "event-listener", + "event-listener 4.0.0", "flume", "futures", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 4cba5d4dd2..7216d3075f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -84,9 +84,9 @@ const_format = "0.2.30" crc = "3.0.1" criterion = "0.5" derive_more = "0.99.17" -derive-new = "0.5.9" +derive-new = "0.6.0" env_logger = "0.10.0" -event-listener = "2.5.3" +event-listener = "4.0.0" flume = "0.11" form_urlencoded = "1.1.0" futures = "0.3.25" @@ -108,7 +108,7 @@ log = "0.4.17" lz4_flex = "0.11" nix = { version = "0.27", features = ["fs"] } num_cpus = "1.15.0" -ordered-float = "3.4.0" +ordered-float = "4.1.1" panic-message = "0.3.0" paste = "1.0.12" petgraph = "0.6.3" @@ -125,8 +125,9 @@ ringbuffer-spsc = "0.1.9" rsa = "0.9" rustc_version = "0.4.0" rustls = { version = "0.21.5", features = ["dangerous_configuration"] } -rustls-native-certs = "0.6.2" -rustls-pemfile = "1.0.2" +rustls-native-certs = "0.7.0" +rustls-pemfile = "2.0.0" +rustls-webpki = "0.102.0" schemars = "0.8.12" secrecy = { version = "0.8.0", features = ["serde", "alloc"] } serde = { version = "1.0.154", default-features = false, features = [ @@ -154,8 +155,7 @@ uuid = { version = "1.3.0", default-features = false, features = [ ] } # Default features are disabled due to usage in no_std crates validated_struct = "2.1.0" vec_map = "0.8.2" -rustls-webpki = "0.101.4" -webpki-roots = "0.25" +webpki-roots = "0.26.0" winapi = { version = "0.3.9", features = ["iphlpapi"] } z-serial = "0.2.1" zenoh-ext = { version = "0.11.0-dev", path = "zenoh-ext" } diff --git a/commons/zenoh-sync/src/condition.rs b/commons/zenoh-sync/src/condition.rs index 7606936c67..bae030abbb 100644 --- a/commons/zenoh-sync/src/condition.rs +++ b/commons/zenoh-sync/src/condition.rs @@ -13,9 +13,9 @@ // use async_std::sync::MutexGuard as AysncMutexGuard; use event_listener::{Event, EventListener}; -use std::sync::MutexGuard; +use std::{pin::Pin, sync::MutexGuard}; -pub type 
ConditionWaiter = EventListener; +pub type ConditionWaiter = Pin>; /// This is a Condition Variable similar to that provided by POSIX. /// As for POSIX condition variables, this assumes that a mutex is /// properly used to coordinate behaviour. In other terms there should diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 70bd3ee769..2b1c59ad23 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -23,6 +23,8 @@ use async_std::sync::Mutex as AsyncMutex; use async_std::task; use async_std::task::JoinHandle; use async_trait::async_trait; +use rustls::{Certificate, PrivateKey}; +use rustls_pemfile::Item; use std::collections::HashMap; use std::fmt; use std::io::BufReader; @@ -35,7 +37,7 @@ use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_result::{bail, zerror, ZResult}; +use zenoh_result::{bail, zerror, ZError, ZResult}; use zenoh_sync::Signal; pub struct LinkUnicastQuic { @@ -261,14 +263,16 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { rustls_native_certs::load_native_certs() .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? .drain(..) - .map(|x| rustls::Certificate(x.0)) + .map(|x| rustls::Certificate(x.to_vec())) .collect::>() } else { rustls_pemfile::certs(&mut BufReader::new(f.as_slice())) - .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? - .drain(..) - .map(rustls::Certificate) - .collect::>() + .map(|result| { + result + .map_err(|err| zerror!("Invalid QUIC CA certificate file: {}", err)) + .map(|der| Certificate(der.to_vec())) + }) + .collect::, ZError>>()? 
}; for c in certificates.iter() { root_cert_store.add(c).map_err(|e| zerror!("{}", e))?; @@ -347,10 +351,12 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { bail!("No QUIC CA certificate has been provided."); }; let certificates = rustls_pemfile::certs(&mut BufReader::new(f.as_slice())) - .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? - .drain(..) - .map(rustls::Certificate) - .collect(); + .map(|result| { + result + .map_err(|err| zerror!("Invalid QUIC CA certificate file: {}", err)) + .map(|der| Certificate(der.to_vec())) + }) + .collect::, ZError>>()?; // Private keys let f = if let Some(value) = epconf.get(TLS_SERVER_PRIVATE_KEY_RAW) { @@ -364,20 +370,24 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { } else { bail!("No QUIC CA private key has been provided."); }; - let private_key = rustls::PrivateKey( - rustls_pemfile::read_all(&mut BufReader::new(f.as_slice())) - .map_err(|e| zerror!("Invalid QUIC CA private key file: {}", e))? - .iter() - .filter_map(|x| match x { - rustls_pemfile::Item::RSAKey(k) - | rustls_pemfile::Item::PKCS8Key(k) - | rustls_pemfile::Item::ECKey(k) => Some(k.to_vec()), - _ => None, - }) - .take(1) - .next() - .ok_or_else(|| zerror!("No QUIC CA private key has been provided."))?, - ); + let items: Vec = rustls_pemfile::read_all(&mut BufReader::new(f.as_slice())) + .map(|result| { + result.map_err(|err| zerror!("Invalid QUIC CA private key file: {}", err)) + }) + .collect::, ZError>>()?; + + let private_key = items + .into_iter() + .filter_map(|x| match x { + rustls_pemfile::Item::Pkcs1Key(k) => Some(k.secret_pkcs1_der().to_vec()), + rustls_pemfile::Item::Pkcs8Key(k) => Some(k.secret_pkcs8_der().to_vec()), + rustls_pemfile::Item::Sec1Key(k) => Some(k.secret_sec1_der().to_vec()), + _ => None, + }) + .take(1) + .next() + .ok_or_else(|| zerror!("No QUIC CA private key has been provided.")) + .map(PrivateKey)?; // Server config let mut server_crypto = rustls::ServerConfig::builder() diff --git 
a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 63c6d63b1e..7761195e4b 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -32,7 +32,6 @@ use async_std::task::JoinHandle; use async_trait::async_trait; use futures::io::AsyncReadExt; use futures::io::AsyncWriteExt; -use std::cell::UnsafeCell; use std::collections::HashMap; use std::convert::TryInto; use std::fmt; @@ -42,14 +41,18 @@ use std::net::{IpAddr, Shutdown}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, RwLock}; use std::time::Duration; -use webpki::TrustAnchor; +use std::{cell::UnsafeCell, io}; +use webpki::{ + anchor_from_trusted_cert, + types::{CertificateDer, TrustAnchor}, +}; use zenoh_core::{zasynclock, zread, zwrite}; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::endpoint::Config; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_result::{bail, zerror, ZResult}; +use zenoh_result::{bail, zerror, ZError, ZResult}; use zenoh_sync::Signal; pub struct LinkUnicastTls { @@ -525,32 +528,48 @@ impl TlsServerConfig { let tls_server_private_key = TlsServerConfig::load_tls_private_key(config).await?; let tls_server_certificate = TlsServerConfig::load_tls_certificate(config).await?; + let certs: Vec = + rustls_pemfile::certs(&mut Cursor::new(&tls_server_certificate)) + .map(|result| { + result + .map_err(|err| zerror!("Error processing server certificate: {err}.")) + .map(|der| Certificate(der.to_vec())) + }) + .collect::, ZError>>()?; + let mut keys: Vec = rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map_err(|e| zerror!(e)) - .map(|mut keys| keys.drain(..).map(PrivateKey).collect())?; + .map(|result| { + result + .map_err(|err| zerror!("Error processing server key: {err}.")) + .map(|key| PrivateKey(key.secret_pkcs1_der().to_vec())) + }) + .collect::, 
ZError>>()?; if keys.is_empty() { keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map_err(|e| zerror!(e)) - .map(|mut keys| keys.drain(..).map(PrivateKey).collect())?; + .map(|result| { + result + .map_err(|err| zerror!("Error processing server key: {err}.")) + .map(|key| PrivateKey(key.secret_pkcs8_der().to_vec())) + }) + .collect::, ZError>>()?; } if keys.is_empty() { keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map_err(|e| zerror!(e)) - .map(|mut keys| keys.drain(..).map(PrivateKey).collect())?; + .map(|result| { + result + .map_err(|err| zerror!("Error processing server key: {err}.")) + .map(|key| PrivateKey(key.secret_sec1_der().to_vec())) + }) + .collect::, ZError>>()?; } if keys.is_empty() { - bail!("No private key found"); + bail!("No private key found for TLS server."); } - let certs: Vec = - rustls_pemfile::certs(&mut Cursor::new(&tls_server_certificate)) - .map_err(|e| zerror!(e)) - .map(|mut certs| certs.drain(..).map(Certificate).collect())?; - let sc = if tls_server_client_auth { let root_cert_store = load_trust_anchors(config)?.map_or_else( || { @@ -643,23 +662,45 @@ impl TlsClientConfig { let certs: Vec = rustls_pemfile::certs(&mut Cursor::new(&tls_client_certificate)) - .map_err(|e| zerror!(e)) - .map(|mut certs| certs.drain(..).map(Certificate).collect())?; + .map(|result| { + result + .map_err(|err| zerror!("Error processing client certificate: {err}.")) + .map(|der| Certificate(der.to_vec())) + }) + .collect::, ZError>>()?; let mut keys: Vec = rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map_err(|e| zerror!(e)) - .map(|mut keys| keys.drain(..).map(PrivateKey).collect())?; + .map(|result| { + result + .map_err(|err| zerror!("Error processing client key: {err}.")) + .map(|key| PrivateKey(key.secret_pkcs1_der().to_vec())) + }) + .collect::, ZError>>()?; if keys.is_empty() { keys = rustls_pemfile::pkcs8_private_keys(&mut 
Cursor::new(&tls_client_private_key)) - .map_err(|e| zerror!(e)) - .map(|mut keys| keys.drain(..).map(PrivateKey).collect())?; + .map(|result| { + result + .map_err(|err| zerror!("Error processing client key: {err}.")) + .map(|key| PrivateKey(key.secret_pkcs8_der().to_vec())) + }) + .collect::, ZError>>()?; } if keys.is_empty() { - bail!("No private key found"); + keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_client_private_key)) + .map(|result| { + result + .map_err(|err| zerror!("Error processing client key: {err}.")) + .map(|key| PrivateKey(key.secret_sec1_der().to_vec())) + }) + .collect::, ZError>>()?; + } + + if keys.is_empty() { + bail!("No private key found for TLS client."); } let builder = ClientConfig::builder() @@ -765,57 +806,63 @@ fn load_trust_anchors(config: &Config<'_>) -> ZResult> { let mut root_cert_store = RootCertStore::empty(); if let Some(value) = config.get(TLS_ROOT_CA_CERTIFICATE_RAW) { let mut pem = BufReader::new(value.as_bytes()); - let certs = rustls_pemfile::certs(&mut pem)?; - let trust_anchors = certs.iter().map(|cert| { - let ta = TrustAnchor::try_from_cert_der(&cert[..]).unwrap(); - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject, - ta.spki, - ta.name_constraints, - ) - }); + let trust_anchors = process_pem(&mut pem)?; root_cert_store.add_trust_anchors(trust_anchors.into_iter()); return Ok(Some(root_cert_store)); } + if let Some(b64_certificate) = config.get(TLS_ROOT_CA_CERTIFICATE_BASE64) { let certificate_pem = base64_decode(b64_certificate)?; let mut pem = BufReader::new(certificate_pem.as_slice()); - let certs = rustls_pemfile::certs(&mut pem)?; - let trust_anchors = certs.iter().map(|cert| { - let ta = TrustAnchor::try_from_cert_der(&cert[..]).unwrap(); - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject, - ta.spki, - ta.name_constraints, - ) - }); + let trust_anchors = process_pem(&mut pem)?; root_cert_store.add_trust_anchors(trust_anchors.into_iter()); return 
Ok(Some(root_cert_store)); } + if let Some(filename) = config.get(TLS_ROOT_CA_CERTIFICATE_FILE) { let mut pem = BufReader::new(File::open(filename)?); - let certs = rustls_pemfile::certs(&mut pem)?; - let trust_anchors = certs.iter().map(|cert| { - let ta = TrustAnchor::try_from_cert_der(&cert[..]).unwrap(); - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject, - ta.spki, - ta.name_constraints, - ) - }); + let trust_anchors = process_pem(&mut pem)?; root_cert_store.add_trust_anchors(trust_anchors.into_iter()); return Ok(Some(root_cert_store)); } Ok(None) } +fn process_pem(pem: &mut dyn io::BufRead) -> ZResult> { + let certs: Vec = rustls_pemfile::certs(pem) + .map(|result| result.map_err(|err| zerror!("Error processing PEM certificates: {err}."))) + .collect::, ZError>>()?; + + let trust_anchors: Vec = certs + .into_iter() + .map(|cert| { + anchor_from_trusted_cert(&cert) + .map_err(|err| zerror!("Error processing trust anchor: {err}.")) + .map(|trust_anchor| trust_anchor.to_owned()) + }) + .collect::, ZError>>()?; + + let owned_trust_anchors: Vec = trust_anchors + .into_iter() + .map(|ta| { + OwnedTrustAnchor::from_subject_spki_name_constraints( + ta.subject.to_vec(), + ta.subject_public_key_info.to_vec(), + ta.name_constraints.map(|x| x.to_vec()), + ) + }) + .collect(); + + Ok(owned_trust_anchors) +} + fn load_default_webpki_certs() -> RootCertStore { let mut root_cert_store = RootCertStore::empty(); root_cert_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject, - ta.spki, - ta.name_constraints, + ta.subject.to_vec(), + ta.subject_public_key_info.to_vec(), + ta.name_constraints.clone().map(|x| x.to_vec()), ) })); root_cert_store From e3a6c34961afe2fea9d0835a11548cc2500c0dcf Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 11 Dec 2023 17:35:27 +0100 Subject: [PATCH 20/29] Storage manager plugin does not reply with errors when queried key is not found 
(#620) --- .../src/replica/storage.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f486f25f3c..16f5fd4a36 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -550,18 +550,6 @@ impl StorageService { let mut storage = self.storage.lock().await; match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { - // if key is not available, return Error - if stored_data.is_empty() { - log::info!("Requested key `{}` not found", q.key_expr()); - if let Err(e) = q.reply(Err("Key not found".into())).res().await { - log::warn!( - "Storage {} raised an error replying a query: {}", - self.name, - e - ) - } - return; - } for entry in stored_data { let sample = Sample::new(q.key_expr().clone(), entry.value) .with_timestamp(entry.timestamp); From bc679d1341cfee7c31f2182fdab8070d5e5c9099 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 12 Dec 2023 10:28:40 +0100 Subject: [PATCH 21/29] Fix examples values --- examples/examples/z_pub.rs | 2 +- examples/examples/z_put.rs | 2 +- examples/examples/z_queryable.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 54563df1d8..aebca309ad 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -44,7 +44,7 @@ struct Args { #[arg(short, long, default_value = "demo/example/zenoh-rs-pub")] /// The key expression to write to. key: KeyExpr<'static>, - #[arg(short, long, default_value = "Put from Rust")] + #[arg(short, long, default_value = "Pub from Rust!")] /// The value to write. 
value: String, #[command(flatten)] diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index 9b625be552..a38f0c7f01 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -35,7 +35,7 @@ struct Args { #[arg(short, long, default_value = "demo/example/zenoh-rs-put")] /// The key expression to write to. key: KeyExpr<'static>, - #[arg(short, long, default_value = "Put from Rust")] + #[arg(short, long, default_value = "Put from Rust!")] /// The value to write. value: String, #[command(flatten)] diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 5738c67f6c..54b9858cf0 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -89,7 +89,7 @@ struct Args { #[arg(short, long, default_value = "demo/example/zenoh-rs-queryable")] /// The key expression matching queries to reply to. key: KeyExpr<'static>, - #[arg(short, long, default_value = "Queryable from Rust")] + #[arg(short, long, default_value = "Queryable from Rust!")] /// The value to reply to queries. 
value: String, #[arg(long)] From 0fd53fe30c655bbf390fe86179bb2dcf74183538 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 12 Dec 2023 14:40:40 +0100 Subject: [PATCH 22/29] Fix latency regression (#619) --- io/zenoh-transport/src/common/batch.rs | 269 ++++++++++++------ io/zenoh-transport/src/common/pipeline.rs | 34 +-- .../src/multicast/establishment.rs | 11 +- io/zenoh-transport/src/multicast/link.rs | 57 ++-- .../src/unicast/establishment/accept.rs | 20 +- .../src/unicast/establishment/open.rs | 22 +- io/zenoh-transport/src/unicast/link.rs | 75 ++--- .../src/unicast/lowlatency/link.rs | 4 +- .../src/unicast/universal/link.rs | 14 +- 9 files changed, 289 insertions(+), 217 deletions(-) diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 5305d0a50c..d3cd38684f 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::num::{NonZeroU8, NonZeroUsize}; +use std::num::NonZeroUsize; use zenoh_buffers::{ buffer::Buffer, reader::{DidntRead, HasReader}, @@ -26,62 +26,125 @@ use zenoh_protocol::{ network::NetworkMessage, transport::{fragment::FragmentHeader, frame::FrameHeader, BatchSize, TransportMessage}, }; -use zenoh_result::ZResult; +use zenoh_result::{zerror, ZResult}; #[cfg(feature = "transport_compression")] -use {std::sync::Arc, zenoh_protocol::common::imsg, zenoh_result::zerror}; +use {std::sync::Arc, zenoh_protocol::common::imsg}; + +const L_LEN: usize = (BatchSize::BITS / 8) as usize; +const H_LEN: usize = BatchHeader::SIZE; // Split the inner buffer into (length, header, payload) inmutable slices -#[cfg(feature = "transport_compression")] macro_rules! 
zsplit { - ($slice:expr, $header:expr) => {{ - match $header.get() { - Some(_) => $slice.split_at(BatchHeader::INDEX + 1), - None => (&[], $slice), + ($slice:expr, $config:expr) => {{ + match ($config.is_streamed, $config.has_header()) { + (true, true) => { + let (l, s) = $slice.split_at(L_LEN); + let (h, p) = s.split_at(H_LEN); + (l, h, p) + } + (true, false) => { + let (l, p) = $slice.split_at(L_LEN); + (l, &[], p) + } + (false, true) => { + let (h, p) = $slice.split_at(H_LEN); + (&[], h, p) + } + (false, false) => (&[], &[], $slice), + } + }}; +} + +macro_rules! zsplit_mut { + ($slice:expr, $config:expr) => {{ + match ($config.is_streamed, $config.has_header()) { + (true, true) => { + let (l, s) = $slice.split_at_mut(L_LEN); + let (h, p) = s.split_at_mut(H_LEN); + (l, h, p) + } + (true, false) => { + let (l, p) = $slice.split_at_mut(L_LEN); + (l, &mut [], p) + } + (false, true) => { + let (h, p) = $slice.split_at_mut(H_LEN); + (&mut [], h, p) + } + (false, false) => (&mut [], &mut [], $slice), } }}; } // Batch config -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct BatchConfig { pub mtu: BatchSize, + pub is_streamed: bool, #[cfg(feature = "transport_compression")] pub is_compression: bool, } +impl Default for BatchConfig { + fn default() -> Self { + BatchConfig { + mtu: BatchSize::MAX, + is_streamed: false, + #[cfg(feature = "transport_compression")] + is_compression: false, + } + } +} + impl BatchConfig { - fn header(&self) -> BatchHeader { - #[allow(unused_mut)] // No need for mut when "transport_compression" is disabled - let mut h = 0; + const fn has_header(&self) -> bool { + #[cfg(not(feature = "transport_compression"))] + { + false + } + #[cfg(feature = "transport_compression")] + { + self.is_compression + } + } + + fn header(&self) -> Option { + #[cfg(not(feature = "transport_compression"))] + { + None + } #[cfg(feature = "transport_compression")] - if self.is_compression { - h |= BatchHeader::COMPRESSION; + { + 
self.is_compression + .then_some(BatchHeader::new(BatchHeader::COMPRESSION)) + } + } + + pub fn max_buffer_size(&self) -> usize { + let mut len = self.mtu as usize; + if self.is_streamed { + len += BatchSize::BITS as usize / 8; } - BatchHeader::new(h) + len } } // Batch header #[repr(transparent)] #[derive(Copy, Clone, Debug)] -pub struct BatchHeader(Option); +pub struct BatchHeader(u8); impl BatchHeader { + const SIZE: usize = 1; #[cfg(feature = "transport_compression")] - const INDEX: usize = 0; - #[cfg(feature = "transport_compression")] - const COMPRESSION: u8 = 1; - - fn new(h: u8) -> Self { - Self(NonZeroU8::new(h)) - } + const COMPRESSION: u8 = 1; // 1 << 0 #[cfg(feature = "transport_compression")] - const fn is_empty(&self) -> bool { - self.0.is_none() + const fn new(h: u8) -> Self { + Self(h) } - const fn get(&self) -> Option { + const fn as_u8(&self) -> u8 { self.0 } @@ -90,8 +153,7 @@ impl BatchHeader { #[cfg(feature = "transport_compression")] #[inline(always)] pub fn is_compression(&self) -> bool { - self.0 - .is_some_and(|h| imsg::has_flag(h.get(), Self::COMPRESSION)) + imsg::has_flag(self.as_u8(), Self::COMPRESSION) } } @@ -113,7 +175,6 @@ impl WBatchStats { #[derive(Debug)] pub enum Finalize { Batch, - #[cfg(feature = "transport_compression")] Buffer, } @@ -143,7 +204,7 @@ pub struct WBatch { // The batch codec pub codec: Zenoh080Batch, // It contains 1 byte as additional header, e.g. 
to signal the batch is compressed - pub header: BatchHeader, + pub config: BatchConfig, // Statistics related to this batch #[cfg(feature = "stats")] pub stats: WBatchStats, @@ -152,9 +213,9 @@ pub struct WBatch { impl WBatch { pub fn new(config: BatchConfig) -> Self { let mut batch = Self { - buffer: BBuf::with_capacity(config.mtu as usize), + buffer: BBuf::with_capacity(config.max_buffer_size()), codec: Zenoh080Batch::new(), - header: config.header(), + config, #[cfg(feature = "stats")] stats: WBatchStats::default(), }; @@ -174,7 +235,8 @@ impl WBatch { /// Get the total number of bytes that have been serialized on the [`WBatch`][WBatch]. #[inline(always)] pub fn len(&self) -> BatchSize { - self.buffer.len() as BatchSize + let (_l, _h, p) = Self::split(self.buffer.as_slice(), &self.config); + p.len() as BatchSize } /// Clear the [`WBatch`][WBatch] memory buffer and related internal state. @@ -186,10 +248,7 @@ impl WBatch { { self.stats.clear(); } - if let Some(h) = self.header.get() { - let mut writer = self.buffer.writer(); - let _ = writer.write_u8(h.get()); - } + Self::init(&mut self.buffer, &self.config); } /// Get a `&[u8]` to access the internal memory buffer, usually for transmitting it on the network. 
@@ -198,37 +257,70 @@ impl WBatch { self.buffer.as_slice() } + fn init(buffer: &mut BBuf, config: &BatchConfig) { + let mut writer = buffer.writer(); + if config.is_streamed { + let _ = writer.write_exact(&BatchSize::MIN.to_be_bytes()); + } + if let Some(h) = config.header() { + let _ = writer.write_u8(h.as_u8()); + } + } + // Split (length, header, payload) internal buffer slice #[inline(always)] - #[cfg(feature = "transport_compression")] - fn split(&self) -> (&[u8], &[u8]) { - zsplit!(self.buffer.as_slice(), self.header) + fn split<'a>(buffer: &'a [u8], config: &BatchConfig) -> (&'a [u8], &'a [u8], &'a [u8]) { + zsplit!(buffer, config) + } + + // Split (length, header, payload) internal buffer slice + #[inline(always)] + fn split_mut<'a>( + buffer: &'a mut [u8], + config: &BatchConfig, + ) -> (&'a mut [u8], &'a mut [u8], &'a mut [u8]) { + zsplit_mut!(buffer, config) } - pub fn finalize( - &mut self, - #[cfg(feature = "transport_compression")] buffer: Option<&mut BBuf>, - ) -> ZResult { + pub fn finalize(&mut self, mut buffer: Option<&mut BBuf>) -> ZResult { + #[allow(unused_mut)] + let mut res = Finalize::Batch; + #[cfg(feature = "transport_compression")] - if self.header.is_compression() { - let buffer = buffer.ok_or_else(|| zerror!("Support buffer not provided"))?; - buffer.clear(); - return self.compress(buffer); + if let Some(h) = self.config.header() { + if h.is_compression() { + let buffer = buffer + .as_mut() + .ok_or_else(|| zerror!("Support buffer not provided"))?; + res = self.compress(buffer)?; + } + } + + if self.config.is_streamed { + let buff = match res { + Finalize::Batch => self.buffer.as_mut_slice(), + Finalize::Buffer => buffer + .as_mut() + .ok_or_else(|| zerror!("Support buffer not provided"))? 
+ .as_mut_slice(), + }; + let (length, header, payload) = Self::split_mut(buff, &self.config); + let len: BatchSize = (header.len() as BatchSize) + (payload.len() as BatchSize); + length.copy_from_slice(&len.to_le_bytes()); } - Ok(Finalize::Batch) + Ok(res) } #[cfg(feature = "transport_compression")] fn compress(&mut self, support: &mut BBuf) -> ZResult { // Write the initial bytes for the batch - let mut writer = support.writer(); - if let Some(h) = self.header.get() { - let _ = writer.write_u8(h.get()); - } + support.clear(); + Self::init(support, &self.config); // Compress the actual content - let (_header, payload) = self.split(); + let (_length, _header, payload) = Self::split(self.buffer.as_slice(), &self.config); + let mut writer = support.writer(); writer .with_slot(writer.remaining(), |b| { lz4_flex::block::compress_into(payload, b).unwrap_or(0) @@ -240,11 +332,8 @@ impl WBatch { Ok(Finalize::Buffer) } else { // Keep the original uncompressed buffer and unset the compression flag from the header - let h = self - .buffer - .as_mut_slice() - .get_mut(BatchHeader::INDEX) - .ok_or_else(|| zerror!("Header not present"))?; + let (_l, h, _p) = Self::split_mut(self.buffer.as_mut_slice(), &self.config); + let h = h.first_mut().ok_or_else(|| zerror!("Empty BatchHeader"))?; *h &= !BatchHeader::COMPRESSION; Ok(Finalize::Batch) } @@ -300,21 +389,19 @@ pub struct RBatch { buffer: ZSlice, // The batch codec codec: Zenoh080Batch, - // It contains 1 byte as additional header, e.g. 
to signal the batch is compressed - #[cfg(feature = "transport_compression")] - header: BatchHeader, + // The batch config + config: BatchConfig, } impl RBatch { - pub fn new(#[allow(unused_variables)] config: BatchConfig, buffer: T) -> Self + pub fn new(config: BatchConfig, buffer: T) -> Self where T: Into, { Self { buffer: buffer.into(), codec: Zenoh080Batch::new(), - #[cfg(feature = "transport_compression")] - header: config.header(), + config, } } @@ -329,9 +416,8 @@ impl RBatch { // Split (length, header, payload) internal buffer slice #[inline(always)] - #[cfg(feature = "transport_compression")] - fn split(&self) -> (&[u8], &[u8]) { - zsplit!(self.buffer.as_slice(), self.header) + fn split<'a>(buffer: &'a [u8], config: &BatchConfig) -> (&'a [u8], &'a [u8], &'a [u8]) { + zsplit!(buffer, config) } pub fn initialize(&mut self, #[allow(unused_variables)] buff: C) -> ZResult<()> @@ -339,41 +425,44 @@ impl RBatch { C: Fn() -> T + Copy, T: ZSliceBuffer + 'static, { + #[allow(unused_variables)] + let (l, h, p) = Self::split(self.buffer.as_slice(), &self.config); + #[cfg(feature = "transport_compression")] - if !self.header.is_empty() { - let h = *self - .buffer - .get(BatchHeader::INDEX) - .ok_or_else(|| zerror!("Batch header not present"))?; - let header = BatchHeader::new(h); - - if header.is_compression() { - self.decompress(buff)?; - } else { - self.buffer = self - .buffer - .subslice(BatchHeader::INDEX + 1, self.buffer.len()) - .ok_or_else(|| zerror!("Invalid batch length"))?; + { + if self.config.has_header() { + let b = *h + .first() + .ok_or_else(|| zerror!("Batch header not present"))?; + let header = BatchHeader::new(b); + + if header.is_compression() { + let zslice = self.decompress(p, buff)?; + self.buffer = zslice; + return Ok(()); + } } } + self.buffer = self + .buffer + .subslice(l.len() + h.len(), self.buffer.len()) + .ok_or_else(|| zerror!("Invalid batch length"))?; + Ok(()) } #[cfg(feature = "transport_compression")] - fn decompress(&mut self, mut 
buff: impl FnMut() -> T) -> ZResult<()> + fn decompress(&self, payload: &[u8], mut buff: impl FnMut() -> T) -> ZResult where T: ZSliceBuffer + 'static, { - let (_h, p) = self.split(); - let mut into = (buff)(); - let n = lz4_flex::block::decompress_into(p, into.as_mut_slice()) + let n = lz4_flex::block::decompress_into(payload, into.as_mut_slice()) .map_err(|_| zerror!("Decompression error"))?; - self.buffer = ZSlice::make(Arc::new(into), 0, n) + let zslice = ZSlice::make(Arc::new(into), 0, n) .map_err(|_| zerror!("Invalid decompression buffer length"))?; - - Ok(()) + Ok(zslice) } } @@ -422,6 +511,7 @@ mod tests { for msg_in in msg_ins { let config = BatchConfig { mtu: BatchSize::MAX, + is_streamed: rng.gen_bool(0.5), #[cfg(feature = "transport_compression")] is_compression: rng.gen_bool(0.5), }; @@ -465,6 +555,7 @@ mod tests { fn serialization_batch() { let config = BatchConfig { mtu: BatchSize::MAX, + is_streamed: false, #[cfg(feature = "transport_compression")] is_compression: false, }; diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 19e7a47289..256dfbef47 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -482,10 +482,7 @@ impl StageOut { #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct TransmissionPipelineConf { - pub(crate) is_streamed: bool, - #[cfg(feature = "transport_compression")] - pub(crate) is_compression: bool, - pub(crate) batch_size: BatchSize, + pub(crate) batch: BatchConfig, pub(crate) queue_size: [usize; Priority::NUM], pub(crate) backoff: Duration, } @@ -493,10 +490,12 @@ pub(crate) struct TransmissionPipelineConf { impl Default for TransmissionPipelineConf { fn default() -> Self { Self { - is_streamed: false, - #[cfg(feature = "transport_compression")] - is_compression: false, - batch_size: BatchSize::MAX, + batch: BatchConfig { + mtu: BatchSize::MAX, + is_streamed: false, + #[cfg(feature = "transport_compression")] + 
is_compression: false, + }, queue_size: [1; Priority::NUM], backoff: Duration::from_micros(1), } @@ -533,12 +532,7 @@ impl TransmissionPipeline { let (mut s_ref_w, s_ref_r) = RingBuffer::::init(); // Fill the refill ring buffer with batches for _ in 0..*num { - let bc = BatchConfig { - mtu: config.batch_size, - #[cfg(feature = "transport_compression")] - is_compression: config.is_compression, - }; - let batch = WBatch::new(bc); + let batch = WBatch::new(config.batch); assert!(s_ref_w.push(batch).is_none()); } // Create the channel for notifying that new batches are in the refill ring buffer @@ -736,10 +730,12 @@ mod tests { const TIMEOUT: Duration = Duration::from_secs(60); const CONFIG: TransmissionPipelineConf = TransmissionPipelineConf { - is_streamed: true, - #[cfg(feature = "transport_compression")] - is_compression: true, - batch_size: BatchSize::MAX, + batch: BatchConfig { + mtu: BatchSize::MAX, + is_streamed: true, + #[cfg(feature = "transport_compression")] + is_compression: true, + }, queue_size: [1; Priority::NUM], backoff: Duration::from_micros(1), }; @@ -874,7 +870,7 @@ mod tests { // Make sure to put only one message per batch: set the payload size // to half of the batch in such a way the serialized zenoh message // will be larger then half of the batch size (header + payload). 
- let payload_size = (CONFIG.batch_size / 2) as usize; + let payload_size = (CONFIG.batch.mtu / 2) as usize; // Send reliable messages let key = "test".into(); diff --git a/io/zenoh-transport/src/multicast/establishment.rs b/io/zenoh-transport/src/multicast/establishment.rs index e31ab05d30..cec09ebdf2 100644 --- a/io/zenoh-transport/src/multicast/establishment.rs +++ b/io/zenoh-transport/src/multicast/establishment.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use crate::{ - common::seq_num, + common::{batch::BatchConfig, seq_num}, multicast::{ link::{TransportLinkMulticast, TransportLinkMulticastConfig}, transport::TransportMulticastInner, @@ -62,9 +62,12 @@ pub(crate) async fn open_link( // Create the transport let locator = link.get_dst().to_owned(); let config = TransportLinkMulticastConfig { - mtu: link.get_mtu(), - #[cfg(feature = "transport_compression")] - is_compression: manager.config.multicast.is_compression, + batch: BatchConfig { + mtu: link.get_mtu(), + #[cfg(feature = "transport_compression")] + is_compression: manager.config.multicast.is_compression, + ..Default::default() + }, }; let link = TransportLinkMulticast::new(link, config); diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index fbb917c281..8e1d17fefe 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -34,10 +34,8 @@ use std::{ sync::Arc, time::{Duration, Instant}, }; -#[cfg(feature = "transport_compression")] -use zenoh_buffers::BBuf; -use zenoh_buffers::{ZSlice, ZSliceBuffer}; -use zenoh_core::zlock; +use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; +use zenoh_core::{zcondfeat, zlock}; use zenoh_link::{Link, LinkMulticast, Locator}; use zenoh_protocol::{ core::{Bits, Priority, Resolution, WhatAmI, ZenohId}, @@ -51,11 +49,7 @@ use zenoh_sync::{RecyclingObject, RecyclingObjectPool, Signal}; /****************************/ #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub(crate) struct 
TransportLinkMulticastConfig { - // MTU - pub(crate) mtu: BatchSize, - // Compression is active on the link - #[cfg(feature = "transport_compression")] - pub(crate) is_compression: bool, + pub(crate) batch: BatchConfig, } #[derive(Clone, PartialEq, Eq)] @@ -66,25 +60,26 @@ pub(crate) struct TransportLinkMulticast { impl TransportLinkMulticast { pub(crate) fn new(link: LinkMulticast, mut config: TransportLinkMulticastConfig) -> Self { - config.mtu = link.get_mtu().min(config.mtu); + config.batch.mtu = link.get_mtu().min(config.batch.mtu); + config.batch.is_streamed = false; Self { link, config } } - const fn batch_config(&self) -> BatchConfig { - BatchConfig { - mtu: self.config.mtu, - #[cfg(feature = "transport_compression")] - is_compression: self.config.is_compression, - } - } - pub(crate) fn tx(&self) -> TransportLinkMulticastTx { TransportLinkMulticastTx { inner: self.clone(), - #[cfg(feature = "transport_compression")] - buffer: self.config.is_compression.then_some(BBuf::with_capacity( - lz4_flex::block::get_maximum_output_size(self.config.mtu as usize), - )), + buffer: zcondfeat!( + "transport_compression", + self.config + .batch + .is_compression + .then_some(BBuf::with_capacity( + lz4_flex::block::get_maximum_output_size( + self.config.batch.max_buffer_size() + ), + )), + None + ), } } @@ -148,7 +143,6 @@ impl From for Link { pub(crate) struct TransportLinkMulticastTx { pub(crate) inner: TransportLinkMulticast, - #[cfg(feature = "transport_compression")] pub(crate) buffer: Option, } @@ -157,15 +151,11 @@ impl TransportLinkMulticastTx { const ERR: &str = "Write error on link: "; let res = batch - .finalize( - #[cfg(feature = "transport_compression")] - self.buffer.as_mut(), - ) + .finalize(self.buffer.as_mut()) .map_err(|_| zerror!("{ERR}{self}"))?; let bytes = match res { Finalize::Batch => batch.as_slice(), - #[cfg(feature = "transport_compression")] Finalize::Buffer => self .buffer .as_ref() @@ -183,7 +173,7 @@ impl TransportLinkMulticastTx { const ERR: 
&str = "Write error on link: "; // Create the batch for serializing the message - let mut batch = WBatch::new(self.inner.batch_config()); + let mut batch = WBatch::new(self.inner.config.batch); batch.encode(msg).map_err(|_| zerror!("{ERR}{self}"))?; let len = batch.len() as usize; self.send_batch(&mut batch).await?; @@ -225,7 +215,7 @@ impl TransportLinkMulticastRx { let mut into = (buff)(); let (n, locator) = self.inner.link.read(into.as_mut_slice()).await?; let buffer = ZSlice::make(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; - let mut batch = RBatch::new(self.inner.batch_config(), buffer); + let mut batch = RBatch::new(self.inner.config.batch, buffer); batch.initialize(buff).map_err(|_| zerror!("{ERR}{self}"))?; Ok((batch, locator.into_owned())) } @@ -330,10 +320,7 @@ impl TransportLinkMulticastUniversal { if self.handle_tx.is_none() { let tpc = TransmissionPipelineConf { - is_streamed: false, - #[cfg(feature = "transport_compression")] - is_compression: self.link.config.is_compression, - batch_size: config.batch_size, + batch: self.link.config.batch, queue_size: self.transport.manager.config.queue_size, backoff: self.transport.manager.config.queue_backoff, }; @@ -582,7 +569,7 @@ async fn rx_task( } // The pool of buffers - let mtu = link.inner.config.mtu as usize; + let mtu = link.inner.config.batch.max_buffer_size(); let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 112b471b9e..a3e5651bdb 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -14,6 +14,7 @@ #[cfg(feature = "shared-memory")] use crate::unicast::shared_memory_unicast::Challenge; use crate::{ + common::batch::BatchConfig, unicast::{ establishment::{ compute_sn, ext, finalize_transport, AcceptFsm, Cookie, InputFinalize, Zenoh080Cookie, @@ -586,11 +587,15 @@ 
impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) -> ZResult<()> { let mtu = link.get_mtu(); + let is_streamed = link.is_streamed(); let config = TransportLinkUnicastConfig { - mtu, direction: TransportLinkUnicastDirection::Inbound, - #[cfg(feature = "transport_compression")] - is_compression: false, + batch: BatchConfig { + mtu, + is_streamed, + #[cfg(feature = "transport_compression")] + is_compression: false, + }, }; let mut link = TransportLinkUnicast::new(link.clone(), config); let mut fsm = AcceptLink { @@ -705,10 +710,13 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) }; let a_config = TransportLinkUnicastConfig { - mtu: state.transport.batch_size, direction: TransportLinkUnicastDirection::Inbound, - #[cfg(feature = "transport_compression")] - is_compression: state.link.ext_compression.is_compression(), + batch: BatchConfig { + mtu: state.transport.batch_size, + is_streamed, + #[cfg(feature = "transport_compression")] + is_compression: state.link.ext_compression.is_compression(), + }, }; let a_link = TransportLinkUnicast::new(link.link.clone(), a_config); let s_link = format!("{:?}", a_link); diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index 4c1314dd29..6e10509d69 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -14,6 +14,7 @@ #[cfg(feature = "shared-memory")] use crate::unicast::shared_memory_unicast::Challenge; use crate::{ + common::batch::BatchConfig, unicast::{ establishment::{compute_sn, ext, finalize_transport, InputFinalize, OpenFsm}, link::{TransportLinkUnicast, TransportLinkUnicastConfig, TransportLinkUnicastDirection}, @@ -511,11 +512,15 @@ pub(crate) async fn open_link( link: LinkUnicast, manager: &TransportManager, ) -> ZResult { + let is_streamed = link.is_streamed(); let 
config = TransportLinkUnicastConfig { direction: TransportLinkUnicastDirection::Outbound, - mtu: link.get_mtu(), - #[cfg(feature = "transport_compression")] - is_compression: false, // Perform the exchange Init/Open exchange with no compression + batch: BatchConfig { + mtu: link.get_mtu(), + is_streamed, + #[cfg(feature = "transport_compression")] + is_compression: false, // Perform the exchange Init/Open exchange with no compression + }, }; let mut link = TransportLinkUnicast::new(link, config); let mut fsm = OpenLink { @@ -537,7 +542,7 @@ pub(crate) async fn open_link( .config .batch_size .min(batch_size::UNICAST) - .min(link.config.mtu), + .min(link.config.batch.mtu), resolution: manager.config.resolution, ext_qos: ext::qos::StateOpen::new(manager.config.unicast.is_qos), #[cfg(feature = "transport_multilink")] @@ -616,10 +621,13 @@ pub(crate) async fn open_link( }; let o_config = TransportLinkUnicastConfig { - mtu: state.transport.batch_size, direction: TransportLinkUnicastDirection::Outbound, - #[cfg(feature = "transport_compression")] - is_compression: state.link.ext_compression.is_compression(), + batch: BatchConfig { + mtu: state.transport.batch_size, + is_streamed, + #[cfg(feature = "transport_compression")] + is_compression: state.link.ext_compression.is_compression(), + }, }; let o_link = TransportLinkUnicast::new(link.link.clone(), o_config); let s_link = format!("{:?}", o_link); diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index afc12bc87d..5b4da7365b 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -14,9 +14,8 @@ use crate::common::batch::{BatchConfig, Decode, Encode, Finalize, RBatch, WBatch}; use std::fmt; use std::sync::Arc; -#[cfg(feature = "transport_compression")] -use zenoh_buffers::BBuf; -use zenoh_buffers::{ZSlice, ZSliceBuffer}; +use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; +use zenoh_core::zcondfeat; use zenoh_link::{Link, LinkUnicast}; use 
zenoh_protocol::transport::{BatchSize, Close, TransportMessage}; use zenoh_result::{zerror, ZResult}; @@ -31,11 +30,7 @@ pub(crate) enum TransportLinkUnicastDirection { pub(crate) struct TransportLinkUnicastConfig { // Inbound / outbound pub(crate) direction: TransportLinkUnicastDirection, - // MTU - pub(crate) mtu: BatchSize, - // Compression is active on the link - #[cfg(feature = "transport_compression")] - pub(crate) is_compression: bool, + pub(crate) batch: BatchConfig, } #[derive(Clone, PartialEq, Eq)] @@ -46,25 +41,23 @@ pub(crate) struct TransportLinkUnicast { impl TransportLinkUnicast { pub(crate) fn new(link: LinkUnicast, mut config: TransportLinkUnicastConfig) -> Self { - config.mtu = link.get_mtu().min(config.mtu); + config.batch.mtu = link.get_mtu().min(config.batch.mtu); Self { link, config } } - const fn batch_config(&self) -> BatchConfig { - BatchConfig { - mtu: self.config.mtu, - #[cfg(feature = "transport_compression")] - is_compression: self.config.is_compression, - } - } - pub(crate) fn tx(&self) -> TransportLinkUnicastTx { TransportLinkUnicastTx { inner: self.clone(), - #[cfg(feature = "transport_compression")] - buffer: self.config.is_compression.then_some(BBuf::with_capacity( - lz4_flex::block::get_maximum_output_size(self.config.mtu as usize), - )), + buffer: zcondfeat!( + "transport_compression", + self.config + .batch + .is_compression + .then_some(BBuf::with_capacity( + lz4_flex::block::get_maximum_output_size(self.config.batch.mtu as usize), + )), + None + ), } } @@ -128,7 +121,6 @@ impl From for Link { pub(crate) struct TransportLinkUnicastTx { pub(crate) inner: TransportLinkUnicast, - #[cfg(feature = "transport_compression")] pub(crate) buffer: Option, } @@ -139,15 +131,11 @@ impl TransportLinkUnicastTx { // log::trace!("WBatch: {:?}", batch); let res = batch - .finalize( - #[cfg(feature = "transport_compression")] - self.buffer.as_mut(), - ) + .finalize(self.buffer.as_mut()) .map_err(|_| zerror!("{ERR}{self}"))?; let bytes = match res 
{ Finalize::Batch => batch.as_slice(), - #[cfg(feature = "transport_compression")] Finalize::Buffer => self .buffer .as_ref() @@ -158,14 +146,6 @@ impl TransportLinkUnicastTx { // log::trace!("WBytes: {:02x?}", bytes); // Send the message on the link - if self.inner.link.is_streamed() { - let len: BatchSize = bytes - .len() - .try_into() - .map_err(|_| zerror!("Invalid batch length"))?; - let len = len.to_le_bytes(); - self.inner.link.write_all(&len).await?; - } self.inner.link.write_all(bytes).await?; Ok(()) @@ -175,7 +155,7 @@ impl TransportLinkUnicastTx { const ERR: &str = "Write error on link: "; // Create the batch for serializing the message - let mut batch = WBatch::new(self.inner.batch_config()); + let mut batch = WBatch::new(self.inner.config.batch); batch.encode(msg).map_err(|_| zerror!("{ERR}{self}"))?; let len = batch.len() as usize; self.send_batch(&mut batch).await?; @@ -191,14 +171,11 @@ impl fmt::Display for TransportLinkUnicastTx { impl fmt::Debug for TransportLinkUnicastTx { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut s = f.debug_struct("TransportLinkUnicastRx"); - s.field("link", &self.inner.link) - .field("config", &self.inner.config); - #[cfg(feature = "transport_compression")] - { - s.field("buffer", &self.buffer.as_ref().map(|b| b.capacity())); - } - s.finish() + f.debug_struct("TransportLinkUnicastRx") + .field("link", &self.inner.link) + .field("config", &self.inner.config) + .field("buffer", &self.buffer.as_ref().map(|b| b.capacity())) + .finish() } } @@ -219,15 +196,15 @@ impl TransportLinkUnicastRx { // Read and decode the message length let mut len = BatchSize::MIN.to_le_bytes(); self.inner.link.read_exact(&mut len).await?; - let len = BatchSize::from_le_bytes(len) as usize; + let l = BatchSize::from_le_bytes(len) as usize; // Read the bytes let slice = into .as_mut_slice() - .get_mut(..len) + .get_mut(len.len()..len.len() + l) .ok_or_else(|| zerror!("{ERR}{self}. 
Invalid batch length or buffer size."))?; self.inner.link.read_exact(slice).await?; - len + len.len() + l } else { // Read the bytes self.inner.link.read(into.as_mut_slice()).await? @@ -237,7 +214,7 @@ impl TransportLinkUnicastRx { let buffer = ZSlice::make(Arc::new(into), 0, end) .map_err(|_| zerror!("{ERR}{self}. ZSlice index(es) out of bounds"))?; - let mut batch = RBatch::new(self.inner.batch_config(), buffer); + let mut batch = RBatch::new(self.inner.config.batch, buffer); batch .initialize(buff) .map_err(|e| zerror!("{ERR}{self}. {e}."))?; @@ -248,7 +225,7 @@ impl TransportLinkUnicastRx { } pub async fn recv(&mut self) -> ZResult { - let mtu = self.inner.config.mtu as usize; + let mtu = self.inner.config.batch.mtu as usize; let mut batch = self .recv_batch(|| zenoh_buffers::vec::uninit(mtu).into_boxed_slice()) .await?; diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 437e9c4fa4..4cfbbee115 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -216,7 +216,7 @@ async fn rx_task_stream( } // The pool of buffers - let mtu = link.config.mtu as usize; + let mtu = link.config.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; @@ -248,7 +248,7 @@ async fn rx_task_dgram( rx_buffer_size: usize, ) -> ZResult<()> { // The pool of buffers - let mtu = link.config.mtu as usize; + let mtu = link.config.batch.max_buffer_size(); let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 74db7f751e..aba680bc43 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -16,7 +16,7 @@ use super::transport::TransportUnicastUniversal; use crate::common::stats::TransportStats; use crate::{ common::{ - 
batch::RBatch, + batch::{BatchConfig, RBatch}, pipeline::{ TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, TransmissionPipelineProducer, @@ -71,10 +71,12 @@ impl TransportLinkUnicastUniversal { ) { if self.handle_tx.is_none() { let config = TransmissionPipelineConf { - is_streamed: self.link.link.is_streamed(), - #[cfg(feature = "transport_compression")] - is_compression: self.link.config.is_compression, - batch_size: self.link.config.mtu, + batch: BatchConfig { + mtu: self.link.config.batch.mtu, + is_streamed: self.link.link.is_streamed(), + #[cfg(feature = "transport_compression")] + is_compression: self.link.config.batch.is_compression, + }, queue_size: self.transport.manager.config.queue_size, backoff: self.transport.manager.config.queue_backoff, }; @@ -257,7 +259,7 @@ async fn rx_task( } // The pool of buffers - let mtu = link.inner.config.mtu as usize; + let mtu = link.inner.config.batch.max_buffer_size(); let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; From 92b9909efd28b9ac3fd65c41d846c1cd16fd5217 Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Tue, 12 Dec 2023 21:11:17 +0300 Subject: [PATCH 23/29] =?UTF-8?q?This=20test=20didn't=20compile=20when=20'?= =?UTF-8?q?transport=5Fcompression'=20feature=20is=20disa=E2=80=A6=20(#621?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- io/zenoh-transport/src/common/batch.rs | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index d3cd38684f..c6dc748e5b 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -488,6 +488,7 @@ mod tests { use super::*; use rand::Rng; use zenoh_buffers::ZBuf; + use zenoh_core::zcondfeat; use zenoh_protocol::{ core::{CongestionControl, Encoding, Priority, Reliability, 
WireExpr}, network::{ext, Push}, @@ -519,20 +520,17 @@ mod tests { wbatch.encode(&msg_in).unwrap(); println!("Encoded WBatch: {:?}", wbatch); - #[cfg(feature = "transport_compression")] - let mut buffer = config.is_compression.then_some(BBuf::with_capacity( - lz4_flex::block::get_maximum_output_size(wbatch.as_slice().len()), - )); + let mut buffer = zcondfeat!( + "transport_compression", + config.is_compression.then_some(BBuf::with_capacity( + lz4_flex::block::get_maximum_output_size(wbatch.as_slice().len()), + )), + None + ); - let res = wbatch - .finalize( - #[cfg(feature = "transport_compression")] - buffer.as_mut(), - ) - .unwrap(); + let res = wbatch.finalize(buffer.as_mut()).unwrap(); let bytes = match res { Finalize::Batch => wbatch.as_slice(), - #[cfg(feature = "transport_compression")] Finalize::Buffer => buffer.as_mut().unwrap().as_slice(), }; println!("Finalized WBatch: {:02x?}", bytes); From 677b2b4a775d0bf5dfa5ce1b07cfd200fdcdff49 Mon Sep 17 00:00:00 2001 From: YuanYuYuan Date: Wed, 13 Dec 2023 19:04:56 +0800 Subject: [PATCH 24/29] Return the message length while decoding TransportMessage (#623) --- io/zenoh-transport/src/common/batch.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index c6dc748e5b..488e357236 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -481,6 +481,18 @@ impl Decode for &mut RBatch { } } +impl Decode<(TransportMessage, BatchSize)> for &mut RBatch { + type Error = DidntRead; + + fn decode(self) -> Result<(TransportMessage, BatchSize), Self::Error> { + let len = self.buffer.len() as BatchSize; + let mut reader = self.buffer.reader(); + let msg = self.codec.read(&mut reader)?; + let end = self.buffer.len() as BatchSize; + Ok((msg, len - end)) + } +} + #[cfg(test)] mod tests { use std::vec; From 734015338eb3bc588e2b6f3a8dfcda18eb5b2678 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: 
Thu, 14 Dec 2023 12:33:27 +0100 Subject: [PATCH 25/29] zenohd does not override configured adminspace permissions when --adminspace-permissions argument is not present (#624) --- zenohd/src/main.rs | 74 ++++++++++++++++++++++++---------------------- 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index c864c303b5..c7e3f7b3da 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -75,8 +75,8 @@ struct Args { #[arg(long)] cfg: Vec, /// Configure the read and/or write permissions on the admin space. Default is read only. - #[arg(long, default_value = "r", value_name = "[r|w|rw|none]")] - adminspace_permissions: String, + #[arg(long, value_name = "[r|w|rw|none]")] + adminspace_permissions: Option, } fn main() { @@ -271,40 +271,42 @@ fn config_from_args(args: &Args) -> Config { } (false, false) => {} }; - match args.adminspace_permissions.as_str() { - "r" => config - .adminspace - .set_permissions(PermissionsConf { - read: true, - write: false, - }) - .unwrap(), - "w" => config - .adminspace - .set_permissions(PermissionsConf { - read: false, - write: true, - }) - .unwrap(), - "rw" => config - .adminspace - .set_permissions(PermissionsConf { - read: true, - write: true, - }) - .unwrap(), - "none" => config - .adminspace - .set_permissions(PermissionsConf { - read: false, - write: false, - }) - .unwrap(), - s => panic!( - r#"Invalid option: --adminspace-permissions={} - Accepted values: "r", "w", "rw" or "none""#, - s - ), - }; + if let Some(adminspace_permissions) = &args.adminspace_permissions { + match adminspace_permissions.as_str() { + "r" => config + .adminspace + .set_permissions(PermissionsConf { + read: true, + write: false, + }) + .unwrap(), + "w" => config + .adminspace + .set_permissions(PermissionsConf { + read: false, + write: true, + }) + .unwrap(), + "rw" => config + .adminspace + .set_permissions(PermissionsConf { + read: true, + write: true, + }) + .unwrap(), + "none" => config + .adminspace + 
.set_permissions(PermissionsConf { + read: false, + write: false, + }) + .unwrap(), + s => panic!( + r#"Invalid option: --adminspace-permissions={} - Accepted values: "r", "w", "rw" or "none""#, + s + ), + }; + } for json in &args.cfg { if let Some((key, value)) = json.split_once(':') { match json5::Deserializer::from_str(value) { From 68aadaf77302f66364bc5a718e2007c3187059ab Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Thu, 14 Dec 2023 16:59:57 +0300 Subject: [PATCH 26/29] Refactor transport trait (#610) --- Cargo.lock | 2 +- commons/zenoh-shm/src/lib.rs | 9 +- io/zenoh-link-commons/src/lib.rs | 12 + .../zenoh-link-unixpipe/src/unix/unicast.rs | 13 +- io/zenoh-transport/src/common/seq_num.rs | 31 ++ .../src/unicast/establishment/accept.rs | 45 +-- .../src/unicast/establishment/mod.rs | 58 +-- .../src/unicast/establishment/open.rs | 37 +- io/zenoh-transport/src/unicast/link.rs | 107 ++++- .../src/unicast/lowlatency/link.rs | 23 +- .../src/unicast/lowlatency/transport.rs | 185 +++------ io/zenoh-transport/src/unicast/manager.rs | 368 ++++++++++++------ io/zenoh-transport/src/unicast/mod.rs | 24 +- .../src/unicast/transport_unicast_inner.rs | 44 ++- .../src/unicast/universal/link.rs | 151 +++---- .../src/unicast/universal/rx.rs | 72 ++-- .../src/unicast/universal/transport.rs | 261 ++++++------- .../src/unicast/universal/tx.rs | 4 +- io/zenoh-transport/tests/unicast_shm.rs | 24 +- io/zenoh-transport/tests/unicast_transport.rs | 1 + zenoh/tests/routing.rs | 2 +- 21 files changed, 800 insertions(+), 673 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2fcc8b187..7ff6cbd6ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2913,7 +2913,7 @@ dependencies = [ "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index 62e90ba208..61a7ea9be3 100644 --- a/commons/zenoh-shm/src/lib.rs +++ 
b/commons/zenoh-shm/src/lib.rs @@ -308,11 +308,10 @@ impl SharedMemoryManager { { Ok(m) => m, Err(ShmemError::LinkExists) => { - log::trace!("SharedMemory already exists, opening it"); - ShmemConf::new() - .flink(path.clone()) - .open() - .map_err(|e| ShmError(zerror!("Unable to open SharedMemoryManager: {}", e)))? + return Err(ShmError(zerror!( + "Unable to open SharedMemoryManager: SharedMemory already exists" + )) + .into()) } Err(e) => { return Err(ShmError(zerror!("Unable to open SharedMemoryManager: {}", e)).into()) diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index 114990726a..790f4792a4 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -99,3 +99,15 @@ impl From for Link { Link::from(&link) } } + +impl PartialEq for Link { + fn eq(&self, other: &LinkUnicast) -> bool { + self.src == *other.get_src() && self.dst == *other.get_dst() + } +} + +impl PartialEq for Link { + fn eq(&self, other: &LinkMulticast) -> bool { + self.src == *other.get_src() && self.dst == *other.get_dst() + } +} diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 72d7859326..156698d195 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -19,18 +19,20 @@ use async_std::fs::remove_file; use async_std::task::JoinHandle; use async_trait::async_trait; use filepath::FilePath; +use nix::libc; use nix::unistd::unlink; use rand::Rng; use std::cell::UnsafeCell; use std::collections::HashMap; use std::fmt; -use std::fs::File; +use std::fs::{File, OpenOptions}; use std::io::{Read, Write}; +use std::os::unix::fs::OpenOptionsExt; use std::sync::Arc; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_protocol::core::{EndPoint, Locator}; -use unix_named_pipe::{create, open_read, open_write}; +use unix_named_pipe::{create, open_write}; use zenoh_link_commons::{ 
ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, @@ -161,7 +163,12 @@ impl PipeR { } fn open_unique_pipe_for_read(path: &str) -> ZResult { - let read = open_read(path)?; + let read = OpenOptions::new() + .read(true) + .write(true) + .custom_flags(libc::O_NONBLOCK) + .open(path)?; + #[cfg(not(target_os = "macos"))] read.try_lock(FileLockMode::Exclusive)?; Ok(read) diff --git a/io/zenoh-transport/src/common/seq_num.rs b/io/zenoh-transport/src/common/seq_num.rs index 159fd56712..f286d14741 100644 --- a/io/zenoh-transport/src/common/seq_num.rs +++ b/io/zenoh-transport/src/common/seq_num.rs @@ -121,6 +121,37 @@ impl SeqNum { Ok((gap != 0) && ((gap & !(self.mask >> 1)) == 0)) } + /// Checks to see if two sequence number are in a precedence relationship, + /// while taking into account roll backs AND do update the sn value if check succeed. + /// + /// Two case are considered: + /// + /// ## Case 1: sna < snb + /// + /// In this case *sna* precedes *snb* iff (snb - sna) <= semi_int where + /// semi_int is defined as half the sequence number resolution. + /// In other terms, sna precedes snb iff there are less than half + /// the length for the interval that separates them. + /// + /// ## Case 2: sna > snb + /// + /// In this case *sna* precedes *snb* iff (sna - snb) > semi_int. + /// + /// # Arguments + /// + /// * `value` - The sequence number which should be checked for precedence relation. + pub(crate) fn roll(&mut self, value: TransportSn) -> ZResult { + if (value & !self.mask) != 0 { + bail!("The sequence number value must be smaller than the resolution"); + } + let gap = value.wrapping_sub(self.value) & self.mask; + if (gap != 0) && ((gap & !(self.mask >> 1)) == 0) { + self.value = value; + return Ok(true); + } + Ok(false) + } + /// Computes the modulo gap between two sequence numbers. 
#[cfg(test)] // @TODO: remove #[cfg(test)] once reliability is implemented pub(crate) fn gap(&self, value: TransportSn) -> ZResult { diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index a3e5651bdb..72e676f6ec 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -16,10 +16,11 @@ use crate::unicast::shared_memory_unicast::Challenge; use crate::{ common::batch::BatchConfig, unicast::{ - establishment::{ - compute_sn, ext, finalize_transport, AcceptFsm, Cookie, InputFinalize, Zenoh080Cookie, + establishment::{compute_sn, ext, AcceptFsm, Cookie, Zenoh080Cookie}, + link::{ + LinkUnicastWithOpenAck, TransportLinkUnicast, TransportLinkUnicastConfig, + TransportLinkUnicastDirection, }, - link::{TransportLinkUnicast, TransportLinkUnicastConfig, TransportLinkUnicastDirection}, TransportConfigUnicast, }, TransportManager, @@ -585,7 +586,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { } } -pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) -> ZResult<()> { +pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) -> ZResult<()> { let mtu = link.get_mtu(); let is_streamed = link.is_streamed(); let config = TransportLinkUnicastConfig { @@ -597,7 +598,7 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) is_compression: false, }, }; - let mut link = TransportLinkUnicast::new(link.clone(), config); + let mut link = TransportLinkUnicast::new(link, config); let mut fsm = AcceptLink { link: &mut link, prng: &manager.prng, @@ -718,31 +719,17 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) is_compression: state.link.ext_compression.is_compression(), }, }; - let a_link = TransportLinkUnicast::new(link.link.clone(), a_config); + let a_link = link.reconfigure(a_config); let s_link = format!("{:?}", a_link); 
- let transport = step!(manager.init_transport_unicast(config, a_link).await); - - // Send the open_ack on the link - step!(link - .send(&oack_out.open_ack.into()) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))); - - // Sync the RX sequence number - let _ = step!(transport - .get_inner() - .map_err(|e| (e, Some(close::reason::INVALID)))) - .sync(osyn_out.other_initial_sn) - .await; - - // Finalize the transport - let input = InputFinalize { - transport: transport.clone(), - other_lease: osyn_out.other_lease, - }; - step!(finalize_transport(&link, manager, input) - .await - .map_err(|e| (e, Some(close::reason::INVALID)))); + let a_link = LinkUnicastWithOpenAck::new(a_link, Some(oack_out.open_ack)); + let _transport = manager + .init_transport_unicast( + config, + a_link, + osyn_out.other_initial_sn, + osyn_out.other_lease, + ) + .await?; log::debug!( "New transport link accepted from {} to {}: {}.", diff --git a/io/zenoh-transport/src/unicast/establishment/mod.rs b/io/zenoh-transport/src/unicast/establishment/mod.rs index 523e6e9d22..f79aa826d0 100644 --- a/io/zenoh-transport/src/unicast/establishment/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/mod.rs @@ -16,21 +16,17 @@ pub(super) mod cookie; pub mod ext; pub(crate) mod open; -use super::{TransportPeer, TransportUnicast}; -use crate::{common::seq_num, unicast::link::TransportLinkUnicast, TransportManager}; +use crate::common::seq_num; use async_trait::async_trait; use cookie::*; use sha3::{ digest::{ExtendableOutput, Update, XofReader}, Shake128, }; -use std::time::Duration; -use zenoh_link::Link; use zenoh_protocol::{ core::{Field, Resolution, ZenohId}, transport::TransportSn, }; -use zenoh_result::ZResult; /*************************************/ /* TRAITS */ @@ -115,55 +111,3 @@ pub(super) fn compute_sn(zid1: ZenohId, zid2: ZenohId, resolution: Resolution) - hasher.finalize_xof().read(&mut array); TransportSn::from_le_bytes(array) & seq_num::get_mask(resolution.get(Field::FrameSN)) } - 
-pub(super) struct InputFinalize { - pub(super) transport: TransportUnicast, - pub(super) other_lease: Duration, -} -// Finalize the transport, notify the callback and start the link tasks -pub(super) async fn finalize_transport( - link: &TransportLinkUnicast, - manager: &TransportManager, - input: self::InputFinalize, -) -> ZResult<()> { - // Retrive the transport's transport - let transport = input.transport.get_inner()?; - - // Start the TX loop - let keep_alive = manager.config.unicast.lease / manager.config.unicast.keep_alive as u32; - transport.start_tx(link, &manager.tx_executor, keep_alive)?; - - // Assign a callback if the transport is new - // Keep the lock to avoid concurrent new_transport and closing/closed notifications - let a_guard = transport.get_alive().await; - if transport.get_callback().is_none() { - let peer = TransportPeer { - zid: transport.get_zid(), - whatami: transport.get_whatami(), - links: vec![Link::from(link)], - is_qos: transport.is_qos(), - #[cfg(feature = "shared-memory")] - is_shm: transport.is_shm(), - }; - // Notify the transport handler that there is a new transport and get back a callback - // NOTE: the read loop of the link the open message was sent on remains blocked - // until new_unicast() returns. The read_loop in the various links - // waits for any eventual transport to associate to. 
- let callback = manager - .config - .handler - .new_unicast(peer, input.transport.clone())?; - // Set the callback on the transport - transport.set_callback(callback); - } - if let Some(callback) = transport.get_callback() { - // Notify the transport handler there is a new link on this transport - callback.new_link(Link::from(link)); - } - drop(a_guard); - - // Start the RX loop - transport.start_rx(link, input.other_lease)?; - - Ok(()) -} diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index 6e10509d69..c3f1bfbb8a 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -16,8 +16,11 @@ use crate::unicast::shared_memory_unicast::Challenge; use crate::{ common::batch::BatchConfig, unicast::{ - establishment::{compute_sn, ext, finalize_transport, InputFinalize, OpenFsm}, - link::{TransportLinkUnicast, TransportLinkUnicastConfig, TransportLinkUnicastDirection}, + establishment::{compute_sn, ext, OpenFsm}, + link::{ + LinkUnicastWithOpenAck, TransportLinkUnicast, TransportLinkUnicastConfig, + TransportLinkUnicastDirection, + }, TransportConfigUnicast, TransportUnicast, }, TransportManager, @@ -629,27 +632,17 @@ pub(crate) async fn open_link( is_compression: state.link.ext_compression.is_compression(), }, }; - let o_link = TransportLinkUnicast::new(link.link.clone(), o_config); + let o_link = link.reconfigure(o_config); let s_link = format!("{:?}", o_link); - let transport = step!(manager.init_transport_unicast(config, o_link).await); - - // Sync the RX sequence number - let _ = step!(transport - .get_inner() - .map_err(|e| (e, Some(close::reason::INVALID)))) - .sync(oack_out.other_initial_sn) - .await; - - let output = InputFinalize { - transport, - other_lease: oack_out.other_lease, - }; - let transport = output.transport.clone(); - let res = finalize_transport(&link, manager, output).await; - if let Err(e) = res { - let _ = 
transport.close().await; - return Err(e); - } + let o_link = LinkUnicastWithOpenAck::new(o_link, None); + let transport = manager + .init_transport_unicast( + config, + o_link, + oack_out.other_initial_sn, + oack_out.other_lease, + ) + .await?; log::debug!( "New transport link opened from {} to {}: {}.", diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index 5b4da7365b..bd756d6396 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; use zenoh_core::zcondfeat; use zenoh_link::{Link, LinkUnicast}; -use zenoh_protocol::transport::{BatchSize, Close, TransportMessage}; +use zenoh_protocol::transport::{BatchSize, Close, OpenAck, TransportMessage}; use zenoh_result::{zerror, ZResult}; #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -40,11 +40,23 @@ pub(crate) struct TransportLinkUnicast { } impl TransportLinkUnicast { - pub(crate) fn new(link: LinkUnicast, mut config: TransportLinkUnicastConfig) -> Self { + pub(crate) fn new(link: LinkUnicast, config: TransportLinkUnicastConfig) -> Self { + Self::init(link, config) + } + + pub(crate) fn reconfigure(self, new_config: TransportLinkUnicastConfig) -> Self { + Self::init(self.link, new_config) + } + + fn init(link: LinkUnicast, mut config: TransportLinkUnicastConfig) -> Self { config.batch.mtu = link.get_mtu().min(config.batch.mtu); Self { link, config } } + pub(crate) fn link(&self) -> Link { + (&self.link).into() + } + pub(crate) fn tx(&self) -> TransportLinkUnicastTx { TransportLinkUnicastTx { inner: self.clone(), @@ -63,7 +75,8 @@ impl TransportLinkUnicast { pub(crate) fn rx(&self) -> TransportLinkUnicastRx { TransportLinkUnicastRx { - inner: self.clone(), + link: self.link.clone(), + batch: self.config.batch, } } @@ -115,7 +128,13 @@ impl From<&TransportLinkUnicast> for Link { impl From for Link { fn from(link: TransportLinkUnicast) -> Self { - 
Link::from(link.link) + Link::from(&link.link) + } +} + +impl PartialEq for TransportLinkUnicast { + fn eq(&self, other: &Link) -> bool { + &other.src == self.link.get_src() && &other.dst == self.link.get_dst() } } @@ -180,7 +199,8 @@ impl fmt::Debug for TransportLinkUnicastTx { } pub(crate) struct TransportLinkUnicastRx { - pub(crate) inner: TransportLinkUnicast, + pub(crate) link: LinkUnicast, + pub(crate) batch: BatchConfig, } impl TransportLinkUnicastRx { @@ -192,10 +212,10 @@ impl TransportLinkUnicastRx { const ERR: &str = "Read error from link: "; let mut into = (buff)(); - let end = if self.inner.link.is_streamed() { + let end = if self.link.is_streamed() { // Read and decode the message length let mut len = BatchSize::MIN.to_le_bytes(); - self.inner.link.read_exact(&mut len).await?; + self.link.read_exact(&mut len).await?; let l = BatchSize::from_le_bytes(len) as usize; // Read the bytes @@ -203,18 +223,18 @@ impl TransportLinkUnicastRx { .as_mut_slice() .get_mut(len.len()..len.len() + l) .ok_or_else(|| zerror!("{ERR}{self}. Invalid batch length or buffer size."))?; - self.inner.link.read_exact(slice).await?; + self.link.read_exact(slice).await?; len.len() + l } else { // Read the bytes - self.inner.link.read(into.as_mut_slice()).await? + self.link.read(into.as_mut_slice()).await? }; // log::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); let buffer = ZSlice::make(Arc::new(into), 0, end) .map_err(|_| zerror!("{ERR}{self}. ZSlice index(es) out of bounds"))?; - let mut batch = RBatch::new(self.inner.config.batch, buffer); + let mut batch = RBatch::new(self.batch, buffer); batch .initialize(buff) .map_err(|e| zerror!("{ERR}{self}. 
{e}."))?; @@ -225,7 +245,7 @@ impl TransportLinkUnicastRx { } pub async fn recv(&mut self) -> ZResult { - let mtu = self.inner.config.batch.mtu as usize; + let mtu = self.batch.mtu as usize; let mut batch = self .recv_batch(|| zenoh_buffers::vec::uninit(mtu).into_boxed_slice()) .await?; @@ -238,15 +258,74 @@ impl TransportLinkUnicastRx { impl fmt::Display for TransportLinkUnicastRx { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.inner) + write!(f, "{}:{:?}", self.link, self.batch) } } impl fmt::Debug for TransportLinkUnicastRx { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TransportLinkUnicastRx") - .field("link", &self.inner.link) - .field("config", &self.inner.config) + .field("link", &self.link) + .field("config", &self.batch) .finish() } } + +pub(crate) struct MaybeOpenAck { + link: TransportLinkUnicastTx, + open_ack: Option, +} + +impl MaybeOpenAck { + pub(crate) fn new(link: &TransportLinkUnicast, open_ack: Option) -> Self { + Self { + link: link.tx(), + open_ack, + } + } + + pub(crate) async fn send_open_ack(mut self) -> ZResult<()> { + if let Some(msg) = self.open_ack { + return self.link.send(&msg.into()).await.map(|_| {}); + } + Ok(()) + } + + pub(crate) fn link(&self) -> Link { + self.link.inner.link() + } +} + +#[derive(PartialEq, Eq)] +pub(crate) struct LinkUnicastWithOpenAck { + link: TransportLinkUnicast, + ack: Option, +} + +impl LinkUnicastWithOpenAck { + pub(crate) fn new(link: TransportLinkUnicast, ack: Option) -> Self { + Self { link, ack } + } + + pub(crate) fn inner_config(&self) -> &TransportLinkUnicastConfig { + &self.link.config + } + + pub(crate) fn unpack(self) -> (TransportLinkUnicast, MaybeOpenAck) { + let ack = MaybeOpenAck::new(&self.link, self.ack); + (self.link, ack) + } + + pub(crate) fn fail(self) -> TransportLinkUnicast { + self.link + } +} + +impl fmt::Display for LinkUnicastWithOpenAck { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match 
self.ack.as_ref() { + Some(ack) => write!(f, "{}({:?})", self.link, ack), + None => write!(f, "{}", self.link), + } + } +} diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 4cfbbee115..6a382f5960 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -14,6 +14,7 @@ use super::transport::TransportUnicastLowlatency; #[cfg(feature = "stats")] use crate::stats::TransportStats; +use crate::unicast::link::TransportLinkUnicastRx; use crate::{unicast::link::TransportLinkUnicast, TransportExecutor}; use async_std::task; use async_std::{prelude::FutureExt, sync::RwLock}; @@ -77,8 +78,9 @@ impl TransportUnicastLowlatency { pub(super) async fn send_async(&self, msg: TransportMessageLowLatency) -> ZResult<()> { let guard = zasyncwrite!(self.link); + let link = guard.as_ref().ok_or_else(|| zerror!("No link"))?; send_with_link( - &guard, + link, msg, #[cfg(feature = "stats")] &self.stats, @@ -132,7 +134,7 @@ impl TransportUnicastLowlatency { let c_transport = self.clone(); let handle = task::spawn(async move { let guard = zasyncread!(c_transport.link); - let link = guard.clone(); + let link = guard.as_ref().unwrap().rx(); drop(guard); let rx_buffer_size = c_transport.manager.config.link_rx_buffer_size; @@ -173,7 +175,7 @@ impl TransportUnicastLowlatency { /* TASKS */ /*************************************/ async fn keepalive_task( - link: Arc>, + link: Arc>>, keep_alive: Duration, #[cfg(feature = "stats")] stats: Arc, ) -> ZResult<()> { @@ -185,8 +187,9 @@ async fn keepalive_task( }; let guard = zasyncwrite!(link); + let link = guard.as_ref().ok_or_else(|| zerror!("No link"))?; let _ = send_with_link( - &guard, + link, keepailve, #[cfg(feature = "stats")] &stats, @@ -197,12 +200,12 @@ async fn keepalive_task( } async fn rx_task_stream( - link: TransportLinkUnicast, + link: TransportLinkUnicastRx, transport: TransportUnicastLowlatency, lease: 
Duration, rx_buffer_size: usize, ) -> ZResult<()> { - async fn read(link: &TransportLinkUnicast, buffer: &mut [u8]) -> ZResult { + async fn read(link: &TransportLinkUnicastRx, buffer: &mut [u8]) -> ZResult { // 16 bits for reading the batch length let mut length = [0_u8, 0_u8, 0_u8, 0_u8]; link.link.read_exact(&mut length).await?; @@ -216,7 +219,7 @@ async fn rx_task_stream( } // The pool of buffers - let mtu = link.config.batch.mtu as usize; + let mtu = link.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; @@ -242,13 +245,13 @@ async fn rx_task_stream( } async fn rx_task_dgram( - link: TransportLinkUnicast, + link: TransportLinkUnicastRx, transport: TransportUnicastLowlatency, lease: Duration, rx_buffer_size: usize, ) -> ZResult<()> { // The pool of buffers - let mtu = link.config.batch.max_buffer_size(); + let mtu = link.batch.max_buffer_size(); let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; @@ -277,7 +280,7 @@ async fn rx_task_dgram( } async fn rx_task( - link: TransportLinkUnicast, + link: TransportLinkUnicastRx, transport: TransportUnicastLowlatency, lease: Duration, rx_buffer_size: usize, diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index d2d64a0310..afc7d3c849 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -11,31 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "transport_unixpipe")] -use super::link::send_with_link; #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ unicast::{ - link::TransportLinkUnicast, transport_unicast_inner::TransportUnicastTrait, + link::{LinkUnicastWithOpenAck, TransportLinkUnicast}, + transport_unicast_inner::{AddLinkResult, TransportUnicastTrait}, TransportConfigUnicast, }, - TransportExecutor, TransportManager, TransportPeerEventHandler, + 
TransportManager, TransportPeerEventHandler, }; use async_executor::Task; -#[cfg(feature = "transport_unixpipe")] -use async_std::sync::RwLockUpgradableReadGuard; use async_std::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard, RwLock}; use async_std::task::JoinHandle; use async_trait::async_trait; use std::sync::{Arc, RwLock as SyncRwLock}; use std::time::Duration; -#[cfg(feature = "transport_unixpipe")] -use zenoh_core::zasyncread_upgradable; use zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; -#[cfg(feature = "transport_unixpipe")] -use zenoh_link::unixpipe::UNIXPIPE_LOCATOR_PREFIX; -#[cfg(feature = "transport_unixpipe")] use zenoh_link::Link; use zenoh_protocol::network::NetworkMessage; use zenoh_protocol::transport::TransportBodyLowLatency; @@ -45,8 +37,6 @@ use zenoh_protocol::{ core::{WhatAmI, ZenohId}, transport::close, }; -#[cfg(not(feature = "transport_unixpipe"))] -use zenoh_result::bail; use zenoh_result::{zerror, ZResult}; /*************************************/ @@ -59,7 +49,7 @@ pub(crate) struct TransportUnicastLowlatency { // Transport config pub(super) config: TransportConfigUnicast, // The link associated to the transport - pub(super) link: Arc>, + pub(super) link: Arc>>, // The callback pub(super) callback: Arc>>>, // Mutex for notification @@ -68,7 +58,7 @@ pub(crate) struct TransportUnicastLowlatency { #[cfg(feature = "stats")] pub(super) stats: Arc, - // The flags to stop TX/RX tasks + // The handles for TX/RX tasks pub(crate) handle_keepalive: Arc>>>, pub(crate) handle_rx: Arc>>>, } @@ -77,23 +67,20 @@ impl TransportUnicastLowlatency { pub fn make( manager: TransportManager, config: TransportConfigUnicast, - link: TransportLinkUnicast, - ) -> ZResult { + ) -> Arc { #[cfg(feature = "stats")] let stats = Arc::new(TransportStats::new(Some(manager.get_stats().clone()))); - let t = TransportUnicastLowlatency { + Arc::new(TransportUnicastLowlatency { manager, config, - link: Arc::new(RwLock::new(link)), + link: 
Arc::new(RwLock::new(None)), callback: Arc::new(SyncRwLock::new(None)), alive: Arc::new(AsyncMutex::new(false)), #[cfg(feature = "stats")] stats, handle_keepalive: Arc::new(RwLock::new(None)), handle_rx: Arc::new(RwLock::new(None)), - }; - - Ok(t) + }) as Arc } /*************************************/ @@ -142,9 +129,9 @@ impl TransportUnicastLowlatency { // Close and drop the link self.stop_keepalive().await; self.stop_rx().await; - let _ = zasyncwrite!(self.link) - .close(Some(close::reason::GENERIC)) - .await; + if let Some(val) = zasyncwrite!(self.link).as_ref() { + let _ = val.close(Some(close::reason::GENERIC)).await; + } // Notify the callback that we have closed the transport if let Some(cb) = callback.as_ref() { @@ -153,6 +140,20 @@ impl TransportUnicastLowlatency { Ok(()) } + + async fn sync(&self, _initial_sn_rx: TransportSn) -> ZResult<()> { + // Mark the transport as alive + let mut a_guard = zasynclock!(self.alive); + if *a_guard { + let e = zerror!("Transport already synched with peer: {}", self.config.zid); + log::trace!("{}", e); + return Err(e.into()); + } + + *a_guard = true; + + Ok(()) + } } #[async_trait] @@ -161,17 +162,19 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { /* ACCESSORS */ /*************************************/ fn set_callback(&self, callback: Arc) { - let mut guard = zwrite!(self.callback); - *guard = Some(callback); + *zwrite!(self.callback) = Some(callback); } async fn get_alive(&self) -> AsyncMutexGuard<'_, bool> { zasynclock!(self.alive) } - fn get_links(&self) -> Vec { + fn get_links(&self) -> Vec { let guard = async_std::task::block_on(async { zasyncread!(self.link) }); - [guard.clone()].to_vec() + if let Some(val) = guard.as_ref() { + return [val.link()].to_vec(); + } + vec![] } fn get_zid(&self) -> ZenohId { @@ -211,111 +214,49 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { self.internal_schedule(msg) } - fn start_tx( - &self, - _link: &TransportLinkUnicast, - executor: &TransportExecutor, - 
keep_alive: Duration, - ) -> ZResult<()> { - self.start_keepalive(executor, keep_alive); - Ok(()) - } - - fn start_rx(&self, _link: &TransportLinkUnicast, lease: Duration) -> ZResult<()> { - self.internal_start_rx(lease); - Ok(()) - } - /*************************************/ /* LINK */ /*************************************/ - async fn add_link(&self, link: TransportLinkUnicast) -> ZResult<()> { + async fn add_link( + &self, + link: LinkUnicastWithOpenAck, + other_initial_sn: TransportSn, + other_lease: Duration, + ) -> AddLinkResult { log::trace!("Adding link: {}", link); - #[cfg(not(feature = "transport_unixpipe"))] - bail!( - "Can not add Link {} with peer {}: link already exists and only unique link is supported!", - link, - self.config.zid, - ); + let _ = self.sync(other_initial_sn).await; - #[cfg(feature = "transport_unixpipe")] - { - let guard = zasyncread_upgradable!(self.link); - - let existing_unixpipe = - guard.link.get_dst().protocol().as_str() == UNIXPIPE_LOCATOR_PREFIX; - let new_unixpipe = link.link.get_dst().protocol().as_str() == UNIXPIPE_LOCATOR_PREFIX; - match (existing_unixpipe, new_unixpipe) { - (false, true) => { - // LowLatency transport suports only a single link, but code here also handles upgrade from non-unixpipe link to unixpipe link! 
- log::trace!( - "Upgrading {} LowLatency transport's link from {} to {}", - self.config.zid, - guard, - link - ); - - // Prepare and send close message on old link - { - let close = TransportMessageLowLatency { - body: TransportBodyLowLatency::Close(Close { - reason: 0, - session: false, - }), - }; - let _ = send_with_link( - &guard, - close, - #[cfg(feature = "stats")] - &self.stats, - ) - .await; - }; - // Notify the callback - if let Some(callback) = zread!(self.callback).as_ref() { - callback.del_link(Link::from(guard.clone())); - } - - // Set the new link - let mut write_guard = RwLockUpgradableReadGuard::upgrade(guard).await; - *write_guard = link; - - Ok(()) - } - _ => { - let e = zerror!( - "Can not add Link {} with peer {}: link already exists and only unique link is supported!", - link, - self.config.zid, - ); - Err(e.into()) - } - } + let mut guard = zasyncwrite!(self.link); + if guard.is_some() { + return Err(( + zerror!("Lowlatency transport cannot support more than one link!").into(), + link.fail(), + close::reason::GENERIC, + )); } - } - - /*************************************/ - /* INITIATION */ - /*************************************/ - async fn sync(&self, _initial_sn_rx: TransportSn) -> ZResult<()> { - // Mark the transport as alive - let mut a_guard = zasynclock!(self.alive); - if *a_guard { - let e = zerror!("Transport already synched with peer: {}", self.config.zid); - log::trace!("{}", e); - return Err(e.into()); - } - - *a_guard = true; - - Ok(()) + let (link, ack) = link.unpack(); + *guard = Some(link); + drop(guard); + + // create a callback to start the link + let start_link = Box::new(move || { + // start keepalive task + let keep_alive = + self.manager.config.unicast.lease / self.manager.config.unicast.keep_alive as u32; + self.start_keepalive(&self.manager.tx_executor, keep_alive); + + // start RX task + self.internal_start_rx(other_lease); + }); + + return Ok((start_link, ack)); } /*************************************/ /* 
TERMINATION */ /*************************************/ - async fn close_link(&self, link: &TransportLinkUnicast, reason: u8) -> ZResult<()> { + async fn close_link(&self, link: Link, reason: u8) -> ZResult<()> { log::trace!("Closing link {} with peer: {}", link, self.config.zid); self.finalize(reason).await } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index da064e8f5b..2328e78a76 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -13,19 +13,25 @@ // #[cfg(feature = "shared-memory")] use super::shared_memory_unicast::SharedMemoryUnicast; +use super::{link::LinkUnicastWithOpenAck, transport_unicast_inner::InitTransportResult}; #[cfg(feature = "transport_auth")] use crate::unicast::establishment::ext::auth::Auth; #[cfg(feature = "transport_multilink")] use crate::unicast::establishment::ext::multilink::MultiLink; use crate::{ unicast::{ - link::TransportLinkUnicast, lowlatency::transport::TransportUnicastLowlatency, - transport_unicast_inner::TransportUnicastTrait, - universal::transport::TransportUnicastUniversal, TransportConfigUnicast, TransportUnicast, + lowlatency::transport::TransportUnicastLowlatency, + transport_unicast_inner::{InitTransportError, TransportUnicastTrait}, + universal::transport::TransportUnicastUniversal, + TransportConfigUnicast, TransportUnicast, }, - TransportManager, + TransportManager, TransportPeer, +}; +use async_std::{ + prelude::FutureExt, + sync::{Mutex, MutexGuard}, + task, }; -use async_std::{prelude::FutureExt, sync::Mutex, task}; use std::{collections::HashMap, sync::Arc, time::Duration}; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionUnicastConf; @@ -37,9 +43,9 @@ use zenoh_crypto::PseudoRng; use zenoh_link::*; use zenoh_protocol::{ core::{endpoint, ZenohId}, - transport::close, + transport::{close, TransportSn}, }; -use zenoh_result::{bail, zerror, Error, ZResult}; +use zenoh_result::{bail, 
zerror, ZResult}; /*************************************/ /* TRANSPORT CONFIG */ @@ -408,111 +414,254 @@ impl TransportManager { /*************************************/ /* TRANSPORT */ /*************************************/ - pub(super) async fn init_transport_unicast( + async fn init_existing_transport_unicast( &self, config: TransportConfigUnicast, - link: TransportLinkUnicast, - ) -> Result)> { - let mut guard = zasynclock!(self.state.unicast.transports); + link: LinkUnicastWithOpenAck, + other_initial_sn: TransportSn, + other_lease: Duration, + transport: Arc, + ) -> InitTransportResult { + let existing_config = transport.get_config(); + // Verify that fundamental parameters are correct. + // Ignore the non fundamental parameters like initial SN. + if *existing_config != config { + let e = zerror!( + "Transport with peer {} already exist. Invalid config: {:?}. Expected: {:?}.", + config.zid, + config, + existing_config + ); + log::trace!("{}", e); + return Err(InitTransportError::Link(( + e.into(), + link.fail(), + close::reason::INVALID, + ))); + } - // First verify if the transport already exists - match guard.get(&config.zid) { - Some(transport) => { - let existing_config = transport.get_config(); - // If it exists, verify that fundamental parameters like are correct. - // Ignore the non fundamental parameters like initial SN. - if *existing_config != config { - let e = zerror!( - "Transport with peer {} already exist. Invalid config: {:?}. 
Expected: {:?}.", - config.zid, - config, - existing_config - ); - log::trace!("{}", e); - return Err((e.into(), Some(close::reason::INVALID))); + // Add the link to the transport + let (start_tx_rx, ack) = transport + .add_link(link, other_initial_sn, other_lease) + .await + .map_err(InitTransportError::Link)?; + + // complete establish procedure + let c_link = ack.link(); + let c_t = transport.clone(); + ack.send_open_ack() + .await + .map_err(|e| InitTransportError::Transport((e, c_t, close::reason::GENERIC)))?; + + // notify transport's callback interface that there is a new link + Self::notify_new_link_unicast(&transport, c_link); + + start_tx_rx(); + + Ok(transport) + } + + fn notify_new_link_unicast(transport: &Arc, link: Link) { + if let Some(callback) = &transport.get_callback() { + callback.new_link(link); + } + } + + fn notify_new_transport_unicast( + &self, + transport: &Arc, + ) -> ZResult<()> { + // Assign a callback to the new transport + let peer = TransportPeer { + zid: transport.get_zid(), + whatami: transport.get_whatami(), + links: transport.get_links(), + is_qos: transport.get_config().is_qos, + #[cfg(feature = "shared-memory")] + is_shm: transport.is_shm(), + }; + // Notify the transport handler that there is a new transport and get back a callback + // NOTE: the read loop of the link the open message was sent on remains blocked + // until new_unicast() returns. The read_loop in the various links + // waits for any eventual transport to associate to. + let callback = self + .config + .handler + .new_unicast(peer, TransportUnicast(Arc::downgrade(transport)))?; + + // Set the callback on the transport + transport.set_callback(callback); + + Ok(()) + } + + pub(super) async fn init_new_transport_unicast( + &self, + config: TransportConfigUnicast, + link: LinkUnicastWithOpenAck, + other_initial_sn: TransportSn, + other_lease: Duration, + mut guard: MutexGuard<'_, HashMap>>, + ) -> InitTransportResult { + macro_rules! 
link_error { + ($s:expr, $reason:expr) => { + match $s { + Ok(output) => output, + Err(e) => { + return Err(InitTransportError::Link((e, link.fail(), $reason))); + } } + }; + } - // Add the link to the transport - transport - .add_link(link) - .await - .map_err(|e| (e, Some(close::reason::MAX_LINKS)))?; + // Verify that we haven't reached the transport number limit + if guard.len() >= self.config.unicast.max_sessions { + let e = zerror!( + "Max transports reached ({}). Denying new transport with peer: {}", + self.config.unicast.max_sessions, + config.zid + ); + log::trace!("{e}"); + return Err(InitTransportError::Link(( + e.into(), + link.fail(), + close::reason::INVALID, + ))); + } + + // Create the transport + let is_multilink = zcondfeat!("transport_multilink", config.multilink.is_some(), false); + + // Select and create transport implementation depending on the cfg and enabled features + let t = if config.is_lowlatency { + log::debug!("Will use LowLatency transport!"); + TransportUnicastLowlatency::make(self.clone(), config.clone()) + } else { + log::debug!("Will use Universal transport!"); + link_error!( + TransportUnicastUniversal::make(self.clone(), config.clone()), + close::reason::INVALID + ) + }; - Ok(TransportUnicast(Arc::downgrade(transport))) + // Add the link to the transport + let (start_tx_rx, ack) = match t.add_link(link, other_initial_sn, other_lease).await { + Ok(val) => val, + Err(e) => { + let _ = t.close(e.2).await; + return Err(InitTransportError::Link(e)); } - None => { - // Then verify that we haven't reached the transport number limit - if guard.len() >= self.config.unicast.max_sessions { - let e = zerror!( - "Max transports reached ({}). 
Denying new transport with peer: {}", - self.config.unicast.max_sessions, - config.zid - ); - log::trace!("{}", e); - return Err((e.into(), Some(close::reason::INVALID))); - } + }; - // Create the transport - let is_multilink = - zcondfeat!("transport_multilink", config.multilink.is_some(), false); - - // select and create transport implementation depending on the cfg and enabled features - let a_t = { - if config.is_lowlatency { - log::debug!("Will use LowLatency transport!"); - TransportUnicastLowlatency::make(self.clone(), config.clone(), link) - .map_err(|e| (e, Some(close::reason::INVALID))) - .map(|v| Arc::new(v) as Arc)? - } else { - log::debug!("Will use Universal transport!"); - let t: Arc = - TransportUnicastUniversal::make(self.clone(), config.clone()) - .map_err(|e| (e, Some(close::reason::INVALID))) - .map(|v| Arc::new(v) as Arc)?; - // Add the link to the transport - t.add_link(link) - .await - .map_err(|e| (e, Some(close::reason::MAX_LINKS)))?; - t + macro_rules! transport_error { + ($s:expr, $reason:expr) => { + match $s { + Ok(output) => output, + Err(e) => { + return Err(InitTransportError::Transport((e, t.clone(), $reason))); } - }; - - // Add the transport transport to the list of active transports - let transport = TransportUnicast(Arc::downgrade(&a_t)); - guard.insert(config.zid, a_t); - - zcondfeat!( - "shared-memory", - { - log::debug!( - "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, shm: {}, multilink: {}, lowlatency: {}", - self.config.zid, - config.zid, - config.whatami, - config.sn_resolution, - config.tx_initial_sn, - config.is_qos, - config.is_shm, - is_multilink, - config.is_lowlatency - ); - }, - { - log::debug!( - "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, multilink: {}, lowlatency: {}", - self.config.zid, - config.zid, - config.whatami, - config.sn_resolution, - config.tx_initial_sn, - config.is_qos, - is_multilink, 
- config.is_lowlatency - ); - } - ); + } + }; + } + + // Complete establish procedure + let c_link = ack.link(); + transport_error!(ack.send_open_ack().await, close::reason::GENERIC); + + // Add the transport transport to the list of active transports + guard.insert(config.zid, t.clone()); + drop(guard); - Ok(transport) + // Notify manager's interface that there is a new transport + transport_error!( + self.notify_new_transport_unicast(&t), + close::reason::GENERIC + ); + + // Notify transport's callback interface that there is a new link + Self::notify_new_link_unicast(&t, c_link); + + start_tx_rx(); + + zcondfeat!( + "shared-memory", + { + log::debug!( + "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, shm: {}, multilink: {}, lowlatency: {}", + self.config.zid, + config.zid, + config.whatami, + config.sn_resolution, + config.tx_initial_sn, + config.is_qos, + config.is_shm, + is_multilink, + config.is_lowlatency + ); + }, + { + log::debug!( + "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, multilink: {}, lowlatency: {}", + self.config.zid, + config.zid, + config.whatami, + config.sn_resolution, + config.tx_initial_sn, + config.is_qos, + is_multilink, + config.is_lowlatency + ); + } + ); + + Ok(t) + } + + pub(super) async fn init_transport_unicast( + &self, + config: TransportConfigUnicast, + link: LinkUnicastWithOpenAck, + other_initial_sn: TransportSn, + other_lease: Duration, + ) -> ZResult { + // First verify if the transport already exists + let init_result = { + let guard = zasynclock!(self.state.unicast.transports); + match guard.get(&config.zid) { + Some(transport) => { + let transport = transport.clone(); + drop(guard); + self.init_existing_transport_unicast( + config, + link, + other_initial_sn, + other_lease, + transport, + ) + .await + } + None => { + self.init_new_transport_unicast( + config, + link, + other_initial_sn, + other_lease, + guard, + 
) + .await + } + } + }; + + match init_result { + Ok(transport) => Ok(TransportUnicast(Arc::downgrade(&transport))), + Err(InitTransportError::Link((e, link, reason))) => { + let _ = link.close(Some(reason)).await; + Err(e) + } + Err(InitTransportError::Transport((e, transport, reason))) => { + let _ = transport.close(reason).await; + Err(e) } } } @@ -552,21 +701,13 @@ impl TransportManager { pub async fn get_transport_unicast(&self, peer: &ZenohId) -> Option { zasynclock!(self.state.unicast.transports) .get(peer) - .map(|t| { - // todo: I cannot find a way to make transport.into() work for TransportUnicastTrait - let weak = Arc::downgrade(t); - TransportUnicast(weak) - }) + .map(|t| TransportUnicast(Arc::downgrade(t))) } pub async fn get_transports_unicast(&self) -> Vec { zasynclock!(self.state.unicast.transports) .values() - .map(|t| { - // todo: I cannot find a way to make transport.into() work for TransportUnicastTrait - let weak = Arc::downgrade(t); - TransportUnicast(weak) - }) + .map(|t| TransportUnicast(Arc::downgrade(t))) .collect() } @@ -602,12 +743,11 @@ impl TransportManager { // Spawn a task to accept the link let c_manager = self.clone(); task::spawn(async move { - if let Err(e) = super::establishment::accept::accept_link(&link, &c_manager) + if let Err(e) = super::establishment::accept::accept_link(link, &c_manager) .timeout(c_manager.config.unicast.accept_timeout) .await { log::debug!("{}", e); - let _ = link.close().await; } let mut guard = zasynclock!(c_manager.state.unicast.incoming); *guard -= 1; diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index 3385cbed6a..55226f287c 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -101,11 +101,7 @@ impl TransportUnicast { let tp = TransportPeer { zid: transport.get_zid(), whatami: transport.get_whatami(), - links: transport - .get_links() - .into_iter() - .map(|l| l.into()) - .collect(), + links: 
transport.get_links(), is_qos: transport.is_qos(), #[cfg(feature = "shared-memory")] is_shm: transport.is_shm(), @@ -116,11 +112,7 @@ impl TransportUnicast { #[inline(always)] pub fn get_links(&self) -> ZResult> { let transport = self.get_inner()?; - Ok(transport - .get_links() - .into_iter() - .map(|l| l.into()) - .collect()) + Ok(transport.get_links()) } #[inline(always)] @@ -129,18 +121,6 @@ impl TransportUnicast { transport.schedule(message) } - #[inline(always)] - pub async fn close_link(&self, link: &Link) -> ZResult<()> { - let transport = self.get_inner()?; - let link = transport - .get_links() - .into_iter() - .find(|l| l.link.get_src() == &link.src && l.link.get_dst() == &link.dst) - .ok_or_else(|| zerror!("Invalid link"))?; - transport.close_link(&link, close::reason::GENERIC).await?; - Ok(()) - } - #[inline(always)] pub async fn close(&self) -> ZResult<()> { // Return Ok if the transport has already been closed diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index 265607705b..92093959dd 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -14,11 +14,12 @@ use crate::{ unicast::{link::TransportLinkUnicast, TransportConfigUnicast}, - TransportExecutor, TransportPeerEventHandler, + TransportPeerEventHandler, }; use async_std::sync::MutexGuard as AsyncMutexGuard; use async_trait::async_trait; use std::{fmt::DebugStruct, sync::Arc, time::Duration}; +use zenoh_link::Link; use zenoh_protocol::{ core::{WhatAmI, ZenohId}, network::NetworkMessage, @@ -26,6 +27,19 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; +use super::link::{LinkUnicastWithOpenAck, MaybeOpenAck}; + +pub(crate) type LinkError = (zenoh_result::Error, TransportLinkUnicast, u8); +pub(crate) type TransportError = (zenoh_result::Error, Arc, u8); +pub(crate) enum InitTransportError { + Link(LinkError), + 
Transport(TransportError), +} + +pub(crate) type AddLinkResult<'a> = + Result<(Box, MaybeOpenAck), LinkError>; +pub(crate) type InitTransportResult = Result, InitTransportError>; + /*************************************/ /* UNICAST TRANSPORT TRAIT */ /*************************************/ @@ -35,11 +49,12 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { /* ACCESSORS */ /*************************************/ fn set_callback(&self, callback: Arc); + async fn get_alive(&self) -> AsyncMutexGuard<'_, bool>; fn get_zid(&self) -> ZenohId; fn get_whatami(&self) -> WhatAmI; fn get_callback(&self) -> Option>; - fn get_links(&self) -> Vec; + fn get_links(&self) -> Vec; #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool; fn is_qos(&self) -> bool; @@ -50,33 +65,22 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { /*************************************/ /* LINK */ /*************************************/ - async fn add_link(&self, link: TransportLinkUnicast) -> ZResult<()>; + async fn add_link( + &self, + link: LinkUnicastWithOpenAck, + other_initial_sn: TransportSn, + other_lease: Duration, + ) -> AddLinkResult; /*************************************/ /* TX */ /*************************************/ fn schedule(&self, msg: NetworkMessage) -> ZResult<()>; - fn start_tx( - &self, - link: &TransportLinkUnicast, - executor: &TransportExecutor, - keep_alive: Duration, - ) -> ZResult<()>; - - /*************************************/ - /* RX */ - /*************************************/ - fn start_rx(&self, link: &TransportLinkUnicast, lease: Duration) -> ZResult<()>; - - /*************************************/ - /* INITIATION */ - /*************************************/ - async fn sync(&self, _initial_sn_rx: TransportSn) -> ZResult<()>; /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, link: &TransportLinkUnicast, reason: u8) -> ZResult<()>; + async fn close_link(&self, link: Link, 
reason: u8) -> ZResult<()>; async fn close(&self, reason: u8) -> ZResult<()>; fn add_debug_fields<'a, 'b: 'a, 'c>( diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index aba680bc43..513cefc0a6 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -29,107 +29,122 @@ use crate::{ use async_std::prelude::FutureExt; use async_std::task; use async_std::task::JoinHandle; -use std::{sync::Arc, time::Duration}; +use std::{ + sync::{Arc, RwLock}, + time::Duration, +}; use zenoh_buffers::ZSliceBuffer; +use zenoh_core::zwrite; use zenoh_protocol::transport::{KeepAlive, TransportMessage}; use zenoh_result::{zerror, ZResult}; use zenoh_sync::{RecyclingObject, RecyclingObjectPool, Signal}; +pub(super) struct Tasks { + // The handlers to stop TX/RX tasks + handle_tx: RwLock>>, + signal_rx: Signal, + handle_rx: RwLock>>, +} + #[derive(Clone)] pub(super) struct TransportLinkUnicastUniversal { // The underlying link pub(super) link: TransportLinkUnicast, // The transmission pipeline - pub(super) pipeline: Option, - // The transport this link is associated to - transport: TransportUnicastUniversal, - // The signals to stop TX/RX tasks - handle_tx: Option>>, - signal_rx: Signal, - handle_rx: Option>>, + pub(super) pipeline: TransmissionPipelineProducer, + // The task handling substruct + tasks: Arc, } impl TransportLinkUnicastUniversal { - pub(super) fn new(transport: TransportUnicastUniversal, link: TransportLinkUnicast) -> Self { - Self { - link, - pipeline: None, - transport, - handle_tx: None, + pub(super) fn new( + transport: &TransportUnicastUniversal, + link: TransportLinkUnicast, + priority_tx: &[TransportPriorityTx], + ) -> (Self, TransmissionPipelineConsumer) { + assert!(!priority_tx.is_empty()); + + let config = TransmissionPipelineConf { + batch: BatchConfig { + mtu: link.config.batch.mtu, + is_streamed: link.link.is_streamed(), + #[cfg(feature 
= "transport_compression")] + is_compression: link.config.batch.is_compression, + }, + queue_size: transport.manager.config.queue_size, + backoff: transport.manager.config.queue_backoff, + }; + + // The pipeline + let (producer, consumer) = TransmissionPipeline::make(config, priority_tx); + + let tasks = Arc::new(Tasks { + handle_tx: RwLock::new(None), signal_rx: Signal::new(), - handle_rx: None, - } + handle_rx: RwLock::new(None), + }); + + let result = Self { + link, + pipeline: producer, + tasks, + }; + + (result, consumer) } } impl TransportLinkUnicastUniversal { pub(super) fn start_tx( &mut self, + transport: TransportUnicastUniversal, + consumer: TransmissionPipelineConsumer, executor: &TransportExecutor, keep_alive: Duration, - priority_tx: &[TransportPriorityTx], ) { - if self.handle_tx.is_none() { - let config = TransmissionPipelineConf { - batch: BatchConfig { - mtu: self.link.config.batch.mtu, - is_streamed: self.link.link.is_streamed(), - #[cfg(feature = "transport_compression")] - is_compression: self.link.config.batch.is_compression, - }, - queue_size: self.transport.manager.config.queue_size, - backoff: self.transport.manager.config.queue_backoff, - }; - - // The pipeline - let (producer, consumer) = TransmissionPipeline::make(config, priority_tx); - self.pipeline = Some(producer); - + let mut guard = zwrite!(self.tasks.handle_tx); + if guard.is_none() { // Spawn the TX task - let c_link = self.link.clone(); - let c_transport = self.transport.clone(); + let mut tx = self.link.tx(); let handle = executor.spawn(async move { let res = tx_task( consumer, - c_link.tx(), + &mut tx, keep_alive, #[cfg(feature = "stats")] - c_transport.stats.clone(), + transport.stats.clone(), ) .await; if let Err(e) = res { log::debug!("{}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle - task::spawn(async move { c_transport.del_link(&c_link).await }); + task::spawn(async move { 
transport.del_link(tx.inner.link()).await }); } }); - self.handle_tx = Some(Arc::new(handle)); + *guard = Some(handle); } } pub(super) fn stop_tx(&mut self) { - if let Some(pl) = self.pipeline.as_ref() { - pl.disable(); - } + self.pipeline.disable(); } - pub(super) fn start_rx(&mut self, lease: Duration) { - if self.handle_rx.is_none() { + pub(super) fn start_rx(&mut self, transport: TransportUnicastUniversal, lease: Duration) { + let mut guard = zwrite!(self.tasks.handle_rx); + if guard.is_none() { // Spawn the RX task - let c_link = self.link.clone(); - let c_transport = self.transport.clone(); - let c_signal = self.signal_rx.clone(); - let c_rx_buffer_size = self.transport.manager.config.link_rx_buffer_size; + let mut rx = self.link.rx(); + let c_signal = self.tasks.signal_rx.clone(); let handle = task::spawn(async move { // Start the consume task let res = rx_task( - c_link.rx(), - c_transport.clone(), + &mut rx, + transport.clone(), lease, c_signal.clone(), - c_rx_buffer_size, + transport.manager.config.link_rx_buffer_size, ) .await; c_signal.trigger(); @@ -137,31 +152,30 @@ impl TransportLinkUnicastUniversal { log::debug!("{}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle - task::spawn(async move { c_transport.del_link(&c_link).await }); + task::spawn(async move { transport.del_link((&rx.link).into()).await }); } }); - self.handle_rx = Some(Arc::new(handle)); + *guard = Some(handle); } } pub(super) fn stop_rx(&mut self) { - self.signal_rx.trigger(); + self.tasks.signal_rx.trigger(); } pub(super) async fn close(mut self) -> ZResult<()> { log::trace!("{}: closing", self.link); + self.stop_tx(); self.stop_rx(); - if let Some(handle) = self.handle_rx.take() { - // SAFETY: it is safe to unwrap the Arc since we have the ownership of the whole link - let handle_rx = Arc::try_unwrap(handle).unwrap(); - handle_rx.await; + + let handle_tx = zwrite!(self.tasks.handle_tx).take(); + if let Some(handle) = 
handle_tx { + handle.await; } - self.stop_tx(); - if let Some(handle) = self.handle_tx.take() { - // SAFETY: it is safe to unwrap the Arc since we have the ownership of the whole link - let handle_tx = Arc::try_unwrap(handle).unwrap(); - handle_tx.await; + let handle_rx = zwrite!(self.tasks.handle_rx).take(); + if let Some(handle) = handle_rx { + handle.await; } self.link.close(None).await @@ -173,7 +187,7 @@ impl TransportLinkUnicastUniversal { /*************************************/ async fn tx_task( mut pipeline: TransmissionPipelineConsumer, - mut link: TransportLinkUnicastTx, + link: &mut TransportLinkUnicastTx, keep_alive: Duration, #[cfg(feature = "stats")] stats: Arc, ) -> ZResult<()> { @@ -227,7 +241,7 @@ async fn tx_task( } async fn rx_task( - mut link: TransportLinkUnicastRx, + link: &mut TransportLinkUnicastRx, transport: TransportUnicastUniversal, lease: Duration, signal: Signal, @@ -259,16 +273,17 @@ async fn rx_task( } // The pool of buffers - let mtu = link.inner.config.batch.max_buffer_size(); + let mtu = link.batch.max_buffer_size(); let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; } let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); + let l = (&link.link).into(); while !signal.is_triggered() { // Async read from the underlying link - let action = read(&mut link, &pool) + let action = read(link, &pool) .race(stop(signal.clone())) .timeout(lease) .await @@ -279,7 +294,7 @@ async fn rx_task( { transport.stats.inc_rx_bytes(2 + n); // Account for the batch len encoding (16 bits) } - transport.read_messages(batch, &link.inner)?; + transport.read_messages(batch, &l)?; } Action::Stop => break, } diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 459998ddcf..935a1814b0 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -17,11 +17,13 @@ use crate::{ batch::{Decode, RBatch}, 
priority::TransportChannelRx, }, - unicast::{link::TransportLinkUnicast, transport_unicast_inner::TransportUnicastTrait}, + unicast::transport_unicast_inner::TransportUnicastTrait, + TransportPeerEventHandler, }; use async_std::task; use std::sync::MutexGuard; use zenoh_core::{zlock, zread}; +use zenoh_link::Link; use zenoh_protocol::{ core::{Priority, Reliability}, network::NetworkMessage, @@ -35,35 +37,22 @@ use zenoh_result::{bail, zerror, ZResult}; impl TransportUnicastUniversal { fn trigger_callback( &self, + callback: &dyn TransportPeerEventHandler, #[allow(unused_mut)] // shared-memory feature requires mut mut msg: NetworkMessage, ) -> ZResult<()> { - let callback = zread!(self.callback).clone(); - if let Some(callback) = callback.as_ref() { - #[cfg(feature = "shared-memory")] - { - if self.config.is_shm { - crate::shm::map_zmsg_to_shmbuf( - &mut msg, - &self.manager.state.unicast.shm.reader, - )?; - } + #[cfg(feature = "shared-memory")] + { + if self.config.is_shm { + crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.unicast.shm.reader)?; } - callback.handle_message(msg) - } else { - log::debug!( - "Transport: {}. No callback available, dropping message: {}", - self.config.zid, - msg - ); - Ok(()) } + callback.handle_message(msg) } - fn handle_close(&self, link: &TransportLinkUnicast, _reason: u8, session: bool) -> ZResult<()> { + fn handle_close(&self, link: &Link, _reason: u8, session: bool) -> ZResult<()> { // Stop now rx and tx tasks before doing the proper cleanup - let _ = self.stop_rx(link); - let _ = self.stop_tx(link); + let _ = self.stop_rx_tx(link); // Delete and clean up let c_transport = self.clone(); @@ -74,7 +63,7 @@ impl TransportUnicastUniversal { if session { let _ = c_transport.delete().await; } else { - let _ = c_transport.del_link(&c_link).await; + let _ = c_transport.del_link(c_link).await; } }); @@ -109,8 +98,17 @@ impl TransportUnicastUniversal { self.verify_sn(sn, &mut guard)?; - for msg in payload.drain(..) 
{ - self.trigger_callback(msg)?; + let callback = zread!(self.callback).clone(); + if let Some(callback) = callback.as_ref() { + for msg in payload.drain(..) { + self.trigger_callback(callback.as_ref(), msg)?; + } + } else { + log::debug!( + "Transport: {}. No callback available, dropping messages: {:?}", + self.config.zid, + payload + ); } Ok(()) } @@ -153,7 +151,17 @@ impl TransportUnicastUniversal { .defrag .defragment() .ok_or_else(|| zerror!("Transport: {}. Defragmentation error.", self.config.zid))?; - return self.trigger_callback(msg); + + let callback = zread!(self.callback).clone(); + if let Some(callback) = callback.as_ref() { + return self.trigger_callback(callback.as_ref(), msg); + } else { + log::debug!( + "Transport: {}. No callback available, dropping messages: {:?}", + self.config.zid, + msg + ); + } } Ok(()) @@ -164,7 +172,7 @@ impl TransportUnicastUniversal { sn: TransportSn, guard: &mut MutexGuard<'_, TransportChannelRx>, ) -> ZResult<()> { - let precedes = guard.sn.precedes(sn)?; + let precedes = guard.sn.roll(sn)?; if !precedes { log::debug!( "Transport: {}. Frame with invalid SN dropped: {}. 
Expected: {}.", @@ -180,18 +188,10 @@ impl TransportUnicastUniversal { return Ok(()); } - // Set will always return OK because we have already checked - // with precedes() that the sn has the right resolution - let _ = guard.sn.set(sn); - Ok(()) } - pub(super) fn read_messages( - &self, - mut batch: RBatch, - link: &TransportLinkUnicast, - ) -> ZResult<()> { + pub(super) fn read_messages(&self, mut batch: RBatch, link: &Link) -> ZResult<()> { while !batch.is_empty() { let msg: TransportMessage = batch .decode() diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index a920ac90b9..942b723365 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -16,12 +16,12 @@ use crate::stats::TransportStats; use crate::{ common::priority::{TransportPriorityRx, TransportPriorityTx}, unicast::{ - link::{TransportLinkUnicast, TransportLinkUnicastDirection}, - transport_unicast_inner::TransportUnicastTrait, + link::{LinkUnicastWithOpenAck, TransportLinkUnicastDirection}, + transport_unicast_inner::{AddLinkResult, TransportUnicastTrait}, universal::link::TransportLinkUnicastUniversal, TransportConfigUnicast, }, - TransportExecutor, TransportManager, TransportPeerEventHandler, + TransportManager, TransportPeerEventHandler, }; use async_std::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use async_trait::async_trait; @@ -33,28 +33,28 @@ use zenoh_link::Link; use zenoh_protocol::{ core::{Priority, WhatAmI, ZenohId}, network::NetworkMessage, - transport::{Close, PrioritySn, TransportMessage, TransportSn}, + transport::{close, Close, PrioritySn, TransportMessage, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; macro_rules! 
zlinkget { ($guard:expr, $link:expr) => { // Compare LinkUnicast link to not compare TransportLinkUnicast direction - $guard.iter().find(|tl| &tl.link.link == &$link.link) + $guard.iter().find(|tl| tl.link == $link) }; } macro_rules! zlinkgetmut { ($guard:expr, $link:expr) => { // Compare LinkUnicast link to not compare TransportLinkUnicast direction - $guard.iter_mut().find(|tl| &tl.link.link == &$link.link) + $guard.iter_mut().find(|tl| tl.link == $link) }; } macro_rules! zlinkindex { ($guard:expr, $link:expr) => { // Compare LinkUnicast link to not compare TransportLinkUnicast direction - $guard.iter().position(|tl| &tl.link.link == &$link.link) + $guard.iter().position(|tl| tl.link == $link) }; } @@ -75,6 +75,8 @@ pub(crate) struct TransportUnicastUniversal { pub(super) links: Arc>>, // The callback pub(super) callback: Arc>>>, + // Lock used to ensure no race in add_link method + add_link_lock: Arc>, // Mutex for notification pub(super) alive: Arc>, // Transport statistics @@ -86,7 +88,7 @@ impl TransportUnicastUniversal { pub fn make( manager: TransportManager, config: TransportConfigUnicast, - ) -> ZResult { + ) -> ZResult> { let mut priority_tx = vec![]; let mut priority_rx = vec![]; @@ -113,17 +115,18 @@ impl TransportUnicastUniversal { #[cfg(feature = "stats")] let stats = Arc::new(TransportStats::new(Some(manager.get_stats().clone()))); - let t = TransportUnicastUniversal { + let t = Arc::new(TransportUnicastUniversal { manager, config, priority_tx: priority_tx.into_boxed_slice().into(), priority_rx: priority_rx.into_boxed_slice().into(), links: Arc::new(RwLock::new(vec![].into_boxed_slice())), + add_link_lock: Arc::new(AsyncMutex::new(())), callback: Arc::new(RwLock::new(None)), alive: Arc::new(AsyncMutex::new(false)), #[cfg(feature = "stats")] stats, - }; + }); Ok(t) } @@ -170,7 +173,7 @@ impl TransportUnicastUniversal { Ok(()) } - pub(crate) async fn del_link(&self, link: &TransportLinkUnicast) -> ZResult<()> { + pub(crate) async fn del_link(&self, 
link: Link) -> ZResult<()> { enum Target { Transport, Link(Box), @@ -205,7 +208,7 @@ impl TransportUnicastUniversal { // Notify the callback if let Some(callback) = zread!(self.callback).as_ref() { - callback.del_link(Link::from(link)); + callback.del_link(link); } match target { @@ -214,16 +217,17 @@ impl TransportUnicastUniversal { } } - pub(crate) fn stop_tx(&self, link: &TransportLinkUnicast) -> ZResult<()> { + pub(crate) fn stop_rx_tx(&self, link: &Link) -> ZResult<()> { let mut guard = zwrite!(self.links); - match zlinkgetmut!(guard, link) { + match zlinkgetmut!(guard, *link) { Some(l) => { + l.stop_rx(); l.stop_tx(); Ok(()) } None => { bail!( - "Can not stop Link TX {} with peer: {}", + "Can not stop Link RX {} with peer: {}", link, self.config.zid ) @@ -231,21 +235,27 @@ impl TransportUnicastUniversal { } } - pub(crate) fn stop_rx(&self, link: &TransportLinkUnicast) -> ZResult<()> { - let mut guard = zwrite!(self.links); - match zlinkgetmut!(guard, link) { - Some(l) => { - l.stop_rx(); - Ok(()) - } - None => { - bail!( - "Can not stop Link RX {} with peer: {}", - link, - self.config.zid - ) - } + async fn sync(&self, initial_sn_rx: TransportSn) -> ZResult<()> { + // Mark the transport as alive and keep the lock + // to avoid concurrent new_transport and closing/closed notifications + let mut a_guard = zasynclock!(self.alive); + if *a_guard { + let e = zerror!("Transport already synched with peer: {}", self.config.zid); + log::trace!("{}", e); + return Err(e.into()); } + + *a_guard = true; + + let csn = PrioritySn { + reliable: initial_sn_rx, + best_effort: initial_sn_rx, + }; + for c in self.priority_rx.iter() { + c.sync(csn)?; + } + + Ok(()) } } @@ -254,54 +264,88 @@ impl TransportUnicastTrait for TransportUnicastUniversal { /*************************************/ /* LINK */ /*************************************/ - async fn add_link(&self, link: TransportLinkUnicast) -> ZResult<()> { - // Add the link to the channel - let mut guard = zwrite!(self.links); 
+ async fn add_link( + &self, + link: LinkUnicastWithOpenAck, + other_initial_sn: TransportSn, + other_lease: Duration, + ) -> AddLinkResult { + let add_link_guard = zasynclock!(self.add_link_lock); // Check if we can add more inbound links - if let TransportLinkUnicastDirection::Inbound = link.config.direction { - let count = guard - .iter() - .filter(|l| l.link.config.direction == link.config.direction) - .count(); - - let limit = zcondfeat!( - "transport_multilink", - match self.config.multilink { - Some(_) => self.manager.config.unicast.max_links, - None => 1, - }, - 1 - ); - - if count >= limit { - let e = zerror!( - "Can not add Link {} with peer {}: max num of links reached {}/{}", - link, - self.config.zid, - count, - limit + { + let guard = zread!(self.links); + if let TransportLinkUnicastDirection::Inbound = link.inner_config().direction { + let count = guard + .iter() + .filter(|l| l.link.config.direction == link.inner_config().direction) + .count(); + + let limit = zcondfeat!( + "transport_multilink", + match self.config.multilink { + Some(_) => self.manager.config.unicast.max_links, + None => 1, + }, + 1 ); - return Err(e.into()); + + if count >= limit { + let e = zerror!( + "Can not add Link {} with peer {}: max num of links reached {}/{}", + link, + self.config.zid, + count, + limit + ); + return Err((e.into(), link.fail(), close::reason::MAX_LINKS)); + } } } - let link = TransportLinkUnicastUniversal::new(self.clone(), link); + // sync the RX sequence number + let _ = self.sync(other_initial_sn).await; + + // Wrap the link + let (link, ack) = link.unpack(); + let (mut link, consumer) = + TransportLinkUnicastUniversal::new(self, link, &self.priority_tx); + // Add the link to the channel + let mut guard = zwrite!(self.links); let mut links = Vec::with_capacity(guard.len() + 1); links.extend_from_slice(&guard); - links.push(link); + links.push(link.clone()); *guard = links.into_boxed_slice(); - Ok(()) + drop(guard); + drop(add_link_guard); + + // 
create a callback to start the link + let transport = self.clone(); + let start_link = Box::new(move || { + // Start the TX loop + let keep_alive = + self.manager.config.unicast.lease / self.manager.config.unicast.keep_alive as u32; + link.start_tx( + transport.clone(), + consumer, + &self.manager.tx_executor, + keep_alive, + ); + + // Start the RX loop + link.start_rx(transport, other_lease); + }); + + Ok((start_link, ack)) } /*************************************/ /* ACCESSORS */ /*************************************/ fn set_callback(&self, callback: Arc) { - let mut guard = zwrite!(self.callback); - *guard = Some(callback); + *zwrite!(self.callback) = Some(callback); } async fn get_alive(&self) -> AsyncMutexGuard<'_, bool> { @@ -338,52 +382,25 @@ impl TransportUnicastTrait for TransportUnicastUniversal { self.stats.clone() } - /*************************************/ - /* INITIATION */ - /*************************************/ - async fn sync(&self, initial_sn_rx: TransportSn) -> ZResult<()> { - // Mark the transport as alive and keep the lock - // to avoid concurrent new_transport and closing/closed notifications - let mut a_guard = zasynclock!(self.alive); - if *a_guard { - let e = zerror!("Transport already synched with peer: {}", self.config.zid); - log::trace!("{}", e); - return Err(e.into()); - } - - *a_guard = true; - - let csn = PrioritySn { - reliable: initial_sn_rx, - best_effort: initial_sn_rx, - }; - for c in self.priority_rx.iter() { - c.sync(csn)?; - } - - Ok(()) - } - /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, link: &TransportLinkUnicast, reason: u8) -> ZResult<()> { + async fn close_link(&self, link: Link, reason: u8) -> ZResult<()> { log::trace!("Closing link {} with peer: {}", link, self.config.zid); - let mut pipeline = zlinkget!(zread!(self.links), link) - .map(|l| l.pipeline.clone()) - .ok_or_else(|| zerror!("Cannot close Link {:?}: not found", link))?; + let 
transport_link_pipeline = zlinkget!(zread!(self.links), link) + .ok_or_else(|| zerror!("Cannot close Link {:?}: not found", link))? + .pipeline + .clone(); - if let Some(p) = pipeline.take() { - // Close message to be sent on the target link - let msg: TransportMessage = Close { - reason, - session: false, - } - .into(); - - p.push_transport_message(msg, Priority::Background); + // Close message to be sent on the target link + let msg: TransportMessage = Close { + reason, + session: false, } + .into(); + + transport_link_pipeline.push_transport_message(msg, Priority::Background); // Remove the link from the channel self.del_link(link).await @@ -394,7 +411,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { let mut pipelines = zread!(self.links) .iter() - .filter_map(|sl| sl.pipeline.clone()) + .map(|sl| sl.pipeline.clone()) .collect::>(); for p in pipelines.drain(..) { // Close message to be sent on all the links @@ -413,8 +430,8 @@ impl TransportUnicastTrait for TransportUnicastUniversal { self.delete().await } - fn get_links(&self) -> Vec { - zread!(self.links).iter().map(|l| l.link.clone()).collect() + fn get_links(&self) -> Vec { + zread!(self.links).iter().map(|l| l.link.link()).collect() } /*************************************/ @@ -427,46 +444,6 @@ impl TransportUnicastTrait for TransportUnicastUniversal { } } - fn start_tx( - &self, - link: &TransportLinkUnicast, - executor: &TransportExecutor, - keep_alive: Duration, - ) -> ZResult<()> { - let mut guard = zwrite!(self.links); - match zlinkgetmut!(guard, link) { - Some(l) => { - assert!(!self.priority_tx.is_empty()); - l.start_tx(executor, keep_alive, &self.priority_tx); - Ok(()) - } - None => { - bail!( - "Can not start Link TX {} with ZID: {}", - link, - self.config.zid, - ) - } - } - } - - fn start_rx(&self, link: &TransportLinkUnicast, lease: Duration) -> ZResult<()> { - let mut guard = zwrite!(self.links); - match zlinkgetmut!(guard, link) { - Some(l) => { - l.start_rx(lease); - Ok(()) - } 
- None => { - bail!( - "Can not start Link RX {} with peer: {}", - link, - self.config.zid - ) - } - } - } - fn add_debug_fields<'a, 'b: 'a, 'c>( &self, s: &'c mut DebugStruct<'a, 'b>, diff --git a/io/zenoh-transport/src/unicast/universal/tx.rs b/io/zenoh-transport/src/unicast/universal/tx.rs index bf5be7e702..eb41e2611c 100644 --- a/io/zenoh-transport/src/unicast/universal/tx.rs +++ b/io/zenoh-transport/src/unicast/universal/tx.rs @@ -35,7 +35,7 @@ impl TransportUnicastUniversal { .iter() .filter_map(|tl| { if msg.is_reliable() == tl.link.link.is_reliable() { - tl.pipeline.as_ref() + Some(&tl.pipeline) } else { None } @@ -46,7 +46,7 @@ impl TransportUnicastUniversal { } // No best match found, take the first available link - if let Some(pl) = guard.iter().filter_map(|tl| tl.pipeline.as_ref()).next() { + if let Some(pl) = guard.iter().map(|tl| &tl.pipeline).next() { zpush!(guard, pl, msg); } diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 500a174daf..e27acfe3c3 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -14,6 +14,7 @@ #[cfg(feature = "shared-memory")] mod tests { use async_std::{prelude::FutureExt, task}; + use rand::{Rng, SeedableRng}; use std::{ any::Any, convert::TryFrom, @@ -25,6 +26,7 @@ mod tests { }; use zenoh_buffers::buffer::SplitBuffer; use zenoh_core::zasync_executor_init; + use zenoh_crypto::PseudoRng; use zenoh_link::Link; use zenoh_protocol::{ core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, @@ -34,7 +36,7 @@ mod tests { }, zenoh::{PushBody, Put}, }; - use zenoh_result::ZResult; + use zenoh_result::{zerror, ZResult}; use zenoh_shm::{SharedMemoryBuf, SharedMemoryManager}; use zenoh_transport::{ multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, @@ -157,10 +159,22 @@ mod tests { let peer_shm02 = ZenohId::try_from([2]).unwrap(); let peer_net01 = ZenohId::try_from([3]).unwrap(); - // Create 
the SharedMemoryManager - let mut shm01 = - SharedMemoryManager::make(format!("peer_shm01_{}", endpoint.protocol()), 2 * MSG_SIZE) - .unwrap(); + let mut tries = 100; + let mut prng = PseudoRng::from_entropy(); + let mut shm01 = loop { + // Create the SharedMemoryManager + if let Ok(shm01) = SharedMemoryManager::make( + format!("peer_shm01_{}_{}", endpoint.protocol(), prng.gen::()), + 2 * MSG_SIZE, + ) { + break Ok(shm01); + } + tries -= 1; + if tries == 0 { + break Err(zerror!("Unable to create SharedMemoryManager!")); + } + } + .unwrap(); // Create a peer manager with shared-memory authenticator enabled let peer_shm01_handler = Arc::new(SHPeer::new(true)); diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index ac35090cdb..9b25bb26c8 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -481,6 +481,7 @@ async fn test_transport( .into(), } .into(); + for _ in 0..MSG_COUNT { let _ = client_transport.schedule(message.clone()); } diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 7219bf5ff2..3b10f12f03 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -319,7 +319,7 @@ fn gossip() -> Result<()> { async_std::task::block_on(async { zasync_executor_init!(); - let locator = String::from("tcp/127.0.0.1:17449"); + let locator = String::from("tcp/127.0.0.1:17446"); let ke = String::from("testKeyExprGossip"); let msg_size = 8; From 49011f1fd55125609ee2115ed53e0793c406f8dc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 14 Dec 2023 15:33:50 +0100 Subject: [PATCH 27/29] Add protocol extensions for user attachment (#590) Co-authored-by: Pierre Avital Co-authored-by: Pierre Avital --- commons/zenoh-buffers/src/zbuf.rs | 108 ++++++- commons/zenoh-codec/benches/codec.rs | 6 + commons/zenoh-codec/src/common/extension.rs | 22 +- commons/zenoh-codec/src/core/property.rs | 6 +- commons/zenoh-codec/src/core/shm.rs | 15 +- 
commons/zenoh-codec/src/core/wire_expr.rs | 10 +- commons/zenoh-codec/src/network/declare.rs | 140 +++++---- commons/zenoh-codec/src/network/mod.rs | 10 +- commons/zenoh-codec/src/network/oam.rs | 21 +- commons/zenoh-codec/src/network/push.rs | 32 ++- commons/zenoh-codec/src/network/request.rs | 57 ++-- commons/zenoh-codec/src/network/response.rs | 49 ++-- commons/zenoh-codec/src/scouting/hello.rs | 23 +- commons/zenoh-codec/src/scouting/mod.rs | 4 +- commons/zenoh-codec/src/scouting/scout.rs | 10 +- commons/zenoh-codec/src/transport/close.rs | 6 +- commons/zenoh-codec/src/transport/fragment.rs | 37 ++- commons/zenoh-codec/src/transport/frame.rs | 29 +- commons/zenoh-codec/src/transport/init.rs | 4 +- commons/zenoh-codec/src/transport/join.rs | 57 ++-- .../zenoh-codec/src/transport/keepalive.rs | 4 +- commons/zenoh-codec/src/transport/mod.rs | 9 +- commons/zenoh-codec/src/transport/oam.rs | 14 +- commons/zenoh-codec/src/transport/open.rs | 4 +- commons/zenoh-codec/src/zenoh/ack.rs | 16 +- commons/zenoh-codec/src/zenoh/del.rs | 30 +- commons/zenoh-codec/src/zenoh/err.rs | 28 +- commons/zenoh-codec/src/zenoh/mod.rs | 82 ++++-- commons/zenoh-codec/src/zenoh/pull.rs | 6 +- commons/zenoh-codec/src/zenoh/put.rs | 50 +++- commons/zenoh-codec/src/zenoh/query.rs | 45 ++- commons/zenoh-codec/src/zenoh/reply.rs | 58 ++-- .../zenoh-collections/src/single_or_vec.rs | 65 ++++- commons/zenoh-crypto/src/cipher.rs | 2 +- commons/zenoh-protocol/src/lib.rs | 4 +- commons/zenoh-protocol/src/scouting/scout.rs | 2 +- commons/zenoh-protocol/src/transport/init.rs | 2 +- commons/zenoh-protocol/src/transport/join.rs | 2 +- commons/zenoh-protocol/src/zenoh/del.rs | 9 +- commons/zenoh-protocol/src/zenoh/mod.rs | 28 ++ commons/zenoh-protocol/src/zenoh/put.rs | 9 +- commons/zenoh-protocol/src/zenoh/query.rs | 9 +- commons/zenoh-protocol/src/zenoh/reply.rs | 9 +- commons/zenoh-shm/src/lib.rs | 1 - examples/examples/z_pub.rs | 22 +- examples/examples/z_pub_thr.rs | 42 ++- examples/src/lib.rs | 3 
+- io/zenoh-transport/src/common/batch.rs | 1 + io/zenoh-transport/src/common/pipeline.rs | 3 + .../tests/multicast_compression.rs | 1 + .../tests/multicast_transport.rs | 1 + .../tests/unicast_compression.rs | 1 + .../tests/unicast_concurrent.rs | 2 + .../tests/unicast_defragmentation.rs | 1 + .../tests/unicast_intermittent.rs | 1 + .../tests/unicast_priorities.rs | 1 + io/zenoh-transport/tests/unicast_shm.rs | 2 + .../tests/unicast_simultaneous.rs | 1 + io/zenoh-transport/tests/unicast_transport.rs | 1 + zenoh/src/admin.rs | 16 +- zenoh/src/liveliness.rs | 2 + zenoh/src/net/routing/queries.rs | 1 + zenoh/src/net/runtime/adminspace.rs | 2 + zenoh/src/net/tests/tables.rs | 5 + zenoh/src/publication.rs | 89 +++++- zenoh/src/query.rs | 22 ++ zenoh/src/queryable.rs | 54 +++- zenoh/src/sample.rs | 268 ++++++++++++++++-- zenoh/src/session.rs | 84 +++++- zenoh/tests/attachments.rs | 112 ++++++++ 70 files changed, 1504 insertions(+), 368 deletions(-) create mode 100644 zenoh/tests/attachments.rs diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index db62e26f54..1365397966 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -20,7 +20,7 @@ use crate::{ ZSlice, }; use alloc::{sync::Arc, vec::Vec}; -use core::{cmp, iter, mem, num::NonZeroUsize, ptr, slice}; +use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; use zenoh_collections::SingleOrVec; fn get_mut_unchecked(arc: &mut Arc) -> &mut T { @@ -55,6 +55,85 @@ impl ZBuf { self.slices.push(zslice); } } + + pub fn splice>(&mut self, erased: Range, replacement: &[u8]) { + let start = match erased.start_bound() { + core::ops::Bound::Included(n) => *n, + core::ops::Bound::Excluded(n) => n + 1, + core::ops::Bound::Unbounded => 0, + }; + let end = match erased.end_bound() { + core::ops::Bound::Included(n) => n + 1, + core::ops::Bound::Excluded(n) => *n, + core::ops::Bound::Unbounded => self.len(), + }; + if start != end { + self.remove(start, 
end); + } + self.insert(start, replacement); + } + fn remove(&mut self, mut start: usize, mut end: usize) { + assert!(start <= end); + assert!(end <= self.len()); + let mut start_slice_idx = 0; + let mut start_idx_in_start_slice = 0; + let mut end_slice_idx = 0; + let mut end_idx_in_end_slice = 0; + for (i, slice) in self.slices.as_mut().iter_mut().enumerate() { + if slice.len() > start { + start_slice_idx = i; + start_idx_in_start_slice = start; + } + if slice.len() >= end { + end_slice_idx = i; + end_idx_in_end_slice = end; + break; + } + start -= slice.len(); + end -= slice.len(); + } + let start_slice = &mut self.slices.as_mut()[start_slice_idx]; + start_slice.end = start_slice.start + start_idx_in_start_slice; + let drain_start = start_slice_idx + (start_slice.start < start_slice.end) as usize; + let end_slice = &mut self.slices.as_mut()[end_slice_idx]; + end_slice.start += end_idx_in_end_slice; + let drain_end = end_slice_idx + (end_slice.start >= end_slice.end) as usize; + self.slices.drain(drain_start..drain_end); + } + fn insert(&mut self, mut at: usize, slice: &[u8]) { + if slice.is_empty() { + return; + } + let old_at = at; + let mut slice_index = usize::MAX; + for (i, slice) in self.slices.as_ref().iter().enumerate() { + if at < slice.len() { + slice_index = i; + break; + } + if let Some(new_at) = at.checked_sub(slice.len()) { + at = new_at + } else { + panic!( + "Out of bounds insert attempted: at={old_at}, len={}", + self.len() + ) + } + } + if at != 0 { + let split = &self.slices.as_ref()[slice_index]; + let (l, r) = ( + split.subslice(0, at).unwrap(), + split.subslice(at, split.len()).unwrap(), + ); + self.slices.drain(slice_index..(slice_index + 1)); + self.slices.insert(slice_index, l); + self.slices.insert(slice_index + 1, Vec::from(slice).into()); + self.slices.insert(slice_index + 2, r); + } else { + self.slices.insert(slice_index, Vec::from(slice).into()) + } + } } // Buffer @@ -70,7 +149,7 @@ impl Buffer for ZBuf { // SplitBuffer impl 
SplitBuffer for ZBuf { - type Slices<'a> = iter::Map, fn(&'a ZSlice) -> &'a [u8]>; + type Slices<'a> = iter::Map, fn(&'a ZSlice) -> &'a [u8]>; fn slices(&self) -> Self::Slices<'_> { self.slices.as_ref().iter().map(ZSlice::as_slice) @@ -89,7 +168,7 @@ impl PartialEq for ZBuf { (None, _) | (_, None) => return false, (Some(l), Some(r)) => { let cmp_len = l.len().min(r.len()); - // SAFETY: cmp_len is the minimum lenght between l and r slices. + // SAFETY: cmp_len is the minimum length between l and r slices. let lhs = crate::unsafe_slice!(l, ..cmp_len); let rhs = crate::unsafe_slice!(r, ..cmp_len); if lhs != rhs { @@ -98,14 +177,14 @@ impl PartialEq for ZBuf { if cmp_len == l.len() { current_self = self_slices.next(); } else { - // SAFETY: cmp_len is the minimum lenght between l and r slices. + // SAFETY: cmp_len is the minimum length between l and r slices. let lhs = crate::unsafe_slice!(l, cmp_len..); current_self = Some(lhs); } if cmp_len == r.len() { current_other = other_slices.next(); } else { - // SAFETY: cmp_len is the minimum lenght between l and r slices. + // SAFETY: cmp_len is the minimum length between l and r slices. let rhs = crate::unsafe_slice!(r, cmp_len..); current_other = Some(rhs); } @@ -161,12 +240,12 @@ impl<'a> Reader for ZBufReader<'a> { // Take the minimum length among read and write slices let len = from.len().min(into.len()); // Copy the slice content - // SAFETY: len is the minimum lenght between from and into slices. + // SAFETY: len is the minimum length between from and into slices. let lhs = crate::unsafe_slice_mut!(into, ..len); let rhs = crate::unsafe_slice!(from, ..len); lhs.copy_from_slice(rhs); // Advance the write slice - // SAFETY: len is the minimum lenght between from and into slices. + // SAFETY: len is the minimum length between from and into slices. 
into = crate::unsafe_slice_mut!(into, len..); // Update the counter read += len; @@ -380,9 +459,20 @@ impl<'a> HasWriter for &'a mut ZBuf { type Writer = ZBufWriter<'a>; fn writer(self) -> Self::Writer { + let mut cache = None; + if let Some(ZSlice { buf, end, .. }) = self.slices.last_mut() { + // Verify the ZSlice is actually a Vec + if let Some(b) = buf.as_any().downcast_ref::>() { + // Check for the length + if *end == b.len() { + cache = Some(unsafe { Arc::from_raw(Arc::into_raw(buf.clone()).cast()) }) + } + } + } + ZBufWriter { inner: self, - cache: Arc::new(Vec::new()), + cache: cache.unwrap_or_else(|| Arc::new(Vec::new())), } } } @@ -433,7 +523,7 @@ impl Writer for ZBufWriter<'_> { } fn write_u8(&mut self, byte: u8) -> Result<(), DidntWrite> { - self.write_exact(slice::from_ref(&byte)) + self.write_exact(core::slice::from_ref(&byte)) } fn remaining(&self) -> usize { diff --git a/commons/zenoh-codec/benches/codec.rs b/commons/zenoh-codec/benches/codec.rs index 2c786a41db..1c46a700a7 100644 --- a/commons/zenoh-codec/benches/codec.rs +++ b/commons/zenoh-codec/benches/codec.rs @@ -91,6 +91,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::from(vec![0u8; 8]), }), @@ -136,6 +137,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::from(vec![0u8; 8]), }), @@ -176,6 +178,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::from(vec![0u8; 8]), }), @@ -216,6 +219,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::from(vec![0u8; 1_000_000]), }), @@ -243,6 +247,7 @@ fn 
criterion_benchmark(c: &mut Criterion) { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::from(vec![0u8; 1_000_000]), }), @@ -281,6 +286,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::from(vec![0u8; 1_000_000]), }), diff --git a/commons/zenoh-codec/src/common/extension.rs b/commons/zenoh-codec/src/common/extension.rs index 4215711815..b31cfc19bc 100644 --- a/commons/zenoh-codec/src/common/extension.rs +++ b/commons/zenoh-codec/src/common/extension.rs @@ -88,7 +88,9 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: (&ZExtUnit<{ ID }>, bool)) -> Self::Output { - let (_x, more) = x; + let (x, more) = x; + let ZExtUnit = x; + let mut header: u8 = ID; if more { header |= iext::FLAG_Z; @@ -134,12 +136,14 @@ where fn write(self, writer: &mut W, x: (&ZExtZ64<{ ID }>, bool)) -> Self::Output { let (x, more) = x; + let ZExtZ64 { value } = x; + let mut header: u8 = ID; if more { header |= iext::FLAG_Z; } self.write(&mut *writer, header)?; - self.write(&mut *writer, x.value)?; + self.write(&mut *writer, value)?; Ok(()) } } @@ -182,13 +186,15 @@ where fn write(self, writer: &mut W, x: (&ZExtZBuf<{ ID }>, bool)) -> Self::Output { let (x, more) = x; + let ZExtZBuf { value } = x; + let mut header: u8 = ID; if more { header |= iext::FLAG_Z; } self.write(&mut *writer, header)?; let bodec = Zenoh080Bounded::::new(); - bodec.write(&mut *writer, &x.value)?; + bodec.write(&mut *writer, value)?; Ok(()) } } @@ -231,13 +237,15 @@ where fn write(self, writer: &mut W, x: (&ZExtZBufHeader<{ ID }>, bool)) -> Self::Output { let (x, more) = x; + let ZExtZBufHeader { len } = x; + let mut header: u8 = ID; if more { header |= iext::FLAG_Z; } self.write(&mut *writer, header)?; let bodec = Zenoh080Bounded::::new(); - bodec.write(&mut *writer, x.len)?; + bodec.write(&mut 
*writer, *len)?; Ok(()) } } @@ -284,11 +292,13 @@ where fn write(self, writer: &mut W, x: (&ZExtUnknown, bool)) -> Self::Output { let (x, more) = x; - let mut header: u8 = x.id; + let ZExtUnknown { id, body } = x; + + let mut header: u8 = *id; if more { header |= iext::FLAG_Z; } - match &x.body { + match body { ZExtBody::Unit => self.write(&mut *writer, header)?, ZExtBody::Z64(u64) => { self.write(&mut *writer, header)?; diff --git a/commons/zenoh-codec/src/core/property.rs b/commons/zenoh-codec/src/core/property.rs index 02536ccd82..bb7f760208 100644 --- a/commons/zenoh-codec/src/core/property.rs +++ b/commons/zenoh-codec/src/core/property.rs @@ -26,8 +26,10 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Property) -> Self::Output { - self.write(&mut *writer, x.key)?; - self.write(&mut *writer, x.value.as_slice())?; + let Property { key, value } = x; + + self.write(&mut *writer, key)?; + self.write(&mut *writer, value.as_slice())?; Ok(()) } } diff --git a/commons/zenoh-codec/src/core/shm.rs b/commons/zenoh-codec/src/core/shm.rs index 1ab6976ebe..69c5c59ce0 100644 --- a/commons/zenoh-codec/src/core/shm.rs +++ b/commons/zenoh-codec/src/core/shm.rs @@ -25,10 +25,17 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &SharedMemoryBufInfo) -> Self::Output { - self.write(&mut *writer, x.offset)?; - self.write(&mut *writer, x.length)?; - self.write(&mut *writer, x.shm_manager.as_str())?; - self.write(&mut *writer, x.kind)?; + let SharedMemoryBufInfo { + offset, + length, + shm_manager, + kind, + } = x; + + self.write(&mut *writer, offset)?; + self.write(&mut *writer, length)?; + self.write(&mut *writer, shm_manager.as_str())?; + self.write(&mut *writer, kind)?; Ok(()) } } diff --git a/commons/zenoh-codec/src/core/wire_expr.rs b/commons/zenoh-codec/src/core/wire_expr.rs index bc484149ce..6caba6c8c7 100644 --- a/commons/zenoh-codec/src/core/wire_expr.rs +++ b/commons/zenoh-codec/src/core/wire_expr.rs @@ -29,12 
+29,18 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &WireExpr<'_>) -> Self::Output { + let WireExpr { + scope, + suffix, + mapping: _, + } = x; + let zodec = Zenoh080Bounded::::new(); - zodec.write(&mut *writer, x.scope)?; + zodec.write(&mut *writer, *scope)?; if x.has_suffix() { let zodec = Zenoh080Bounded::::new(); - zodec.write(&mut *writer, x.suffix.as_ref())?; + zodec.write(&mut *writer, suffix.as_ref())?; } Ok(()) } diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index ae3a3dd77c..20916dc359 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -93,32 +93,39 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Declare) -> Self::Output { + let Declare { + ext_qos, + ext_tstamp, + ext_nodeid, + body, + } = x; + // Header let mut header = id::DECLARE; - let mut n_exts = ((x.ext_qos != declare::ext::QoSType::default()) as u8) - + (x.ext_tstamp.is_some() as u8) - + ((x.ext_nodeid != declare::ext::NodeIdType::default()) as u8); + let mut n_exts = ((ext_qos != &declare::ext::QoSType::default()) as u8) + + (ext_tstamp.is_some() as u8) + + ((ext_nodeid != &declare::ext::NodeIdType::default()) as u8); if n_exts != 0 { header |= declare::flag::Z; } self.write(&mut *writer, header)?; // Extensions - if x.ext_qos != declare::ext::QoSType::default() { + if ext_qos != &declare::ext::QoSType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_qos, n_exts != 0))?; + self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } - if let Some(ts) = x.ext_tstamp.as_ref() { + if let Some(ts) = ext_tstamp.as_ref() { n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if x.ext_nodeid != declare::ext::NodeIdType::default() { + if ext_nodeid != &declare::ext::NodeIdType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_nodeid, n_exts != 0))?; + self.write(&mut *writer, (*ext_nodeid, n_exts != 
0))?; } // Body - self.write(&mut *writer, &x.body)?; + self.write(&mut *writer, body)?; Ok(()) } @@ -200,16 +207,18 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &keyexpr::DeclareKeyExpr) -> Self::Output { + let keyexpr::DeclareKeyExpr { id, wire_expr } = x; + // Header let mut header = declare::id::D_KEYEXPR; - if x.wire_expr.has_suffix() { + if wire_expr.has_suffix() { header |= keyexpr::flag::N; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; - self.write(&mut *writer, &x.wire_expr)?; + self.write(&mut *writer, id)?; + self.write(&mut *writer, wire_expr)?; Ok(()) } @@ -262,12 +271,14 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &keyexpr::UndeclareKeyExpr) -> Self::Output { + let keyexpr::UndeclareKeyExpr { id } = x; + // Header let header = declare::id::U_KEYEXPR; self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; + self.write(&mut *writer, id)?; Ok(()) } @@ -321,28 +332,34 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &subscriber::DeclareSubscriber) -> Self::Output { + let subscriber::DeclareSubscriber { + id, + wire_expr, + ext_info, + } = x; + // Header let mut header = declare::id::D_SUBSCRIBER; - let mut n_exts = (x.ext_info != subscriber::ext::SubscriberInfo::default()) as u8; + let mut n_exts = (ext_info != &subscriber::ext::SubscriberInfo::default()) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } - if x.wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::default() { header |= subscriber::flag::M; } - if x.wire_expr.has_suffix() { + if wire_expr.has_suffix() { header |= subscriber::flag::N; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; - self.write(&mut *writer, &x.wire_expr)?; + self.write(&mut *writer, id)?; + self.write(&mut *writer, wire_expr)?; // Extensions - if x.ext_info != subscriber::ext::SubscriberInfo::default() 
{ + if ext_info != &subscriber::ext::SubscriberInfo::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_info, n_exts != 0))?; + self.write(&mut *writer, (*ext_info, n_exts != 0))?; } Ok(()) @@ -420,15 +437,17 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &subscriber::UndeclareSubscriber) -> Self::Output { + let subscriber::UndeclareSubscriber { id, ext_wire_expr } = x; + // Header let header = declare::id::U_SUBSCRIBER | subscriber::flag::Z; self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; + self.write(&mut *writer, id)?; // Extension - self.write(&mut *writer, (&x.ext_wire_expr, false))?; + self.write(&mut *writer, (ext_wire_expr, false))?; Ok(()) } @@ -497,26 +516,32 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &queryable::DeclareQueryable) -> Self::Output { + let queryable::DeclareQueryable { + id, + wire_expr, + ext_info, + } = x; + // Header let mut header = declare::id::D_QUERYABLE; - let mut n_exts = (x.ext_info != queryable::ext::QueryableInfo::default()) as u8; + let mut n_exts = (ext_info != &queryable::ext::QueryableInfo::default()) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } - if x.wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::default() { header |= subscriber::flag::M; } - if x.wire_expr.has_suffix() { + if wire_expr.has_suffix() { header |= subscriber::flag::N; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; - self.write(&mut *writer, &x.wire_expr)?; - if x.ext_info != queryable::ext::QueryableInfo::default() { + self.write(&mut *writer, id)?; + self.write(&mut *writer, wire_expr)?; + if ext_info != &queryable::ext::QueryableInfo::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_info, n_exts != 0))?; + self.write(&mut *writer, (*ext_info, n_exts != 0))?; } Ok(()) @@ -594,15 +619,17 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: 
&queryable::UndeclareQueryable) -> Self::Output { + let queryable::UndeclareQueryable { id, ext_wire_expr } = x; + // Header let header = declare::id::U_QUERYABLE | queryable::flag::Z; self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; + self.write(&mut *writer, id)?; // Extension - self.write(&mut *writer, (&x.ext_wire_expr, false))?; + self.write(&mut *writer, (ext_wire_expr, false))?; Ok(()) } @@ -668,19 +695,21 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &token::DeclareToken) -> Self::Output { + let token::DeclareToken { id, wire_expr } = x; + // Header let mut header = declare::id::D_TOKEN; - if x.wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::default() { header |= subscriber::flag::M; } - if x.wire_expr.has_suffix() { + if wire_expr.has_suffix() { header |= subscriber::flag::N; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; - self.write(&mut *writer, &x.wire_expr)?; + self.write(&mut *writer, id)?; + self.write(&mut *writer, wire_expr)?; Ok(()) } @@ -738,15 +767,17 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &token::UndeclareToken) -> Self::Output { + let token::UndeclareToken { id, ext_wire_expr } = x; + // Header let header = declare::id::U_TOKEN | token::flag::Z; self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; + self.write(&mut *writer, id)?; // Extension - self.write(&mut *writer, (&x.ext_wire_expr, false))?; + self.write(&mut *writer, (ext_wire_expr, false))?; Ok(()) } @@ -812,20 +843,26 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &interest::DeclareInterest) -> Self::Output { + let interest::DeclareInterest { + id, + wire_expr, + interest, + } = x; + // Header let mut header = declare::id::D_INTEREST; - if x.wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::default() { header |= subscriber::flag::M; } 
- if x.wire_expr.has_suffix() { + if wire_expr.has_suffix() { header |= subscriber::flag::N; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; - self.write(&mut *writer, &x.wire_expr)?; - self.write(&mut *writer, x.interest.as_u8())?; + self.write(&mut *writer, id)?; + self.write(&mut *writer, wire_expr)?; + self.write(&mut *writer, interest.as_u8())?; Ok(()) } @@ -888,12 +925,14 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &interest::FinalInterest) -> Self::Output { + let interest::FinalInterest { id } = x; + // Header let header = declare::id::F_INTEREST; self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; + self.write(&mut *writer, id)?; Ok(()) } @@ -945,15 +984,17 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &interest::UndeclareInterest) -> Self::Output { + let interest::UndeclareInterest { id, ext_wire_expr } = x; + // Header let header = declare::id::U_INTEREST | interest::flag::Z; self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; + self.write(&mut *writer, id)?; // Extension - self.write(&mut *writer, (&x.ext_wire_expr, false))?; + self.write(&mut *writer, (ext_wire_expr, false))?; Ok(()) } @@ -1020,6 +1061,7 @@ where fn write(self, writer: &mut W, x: (&common::ext::WireExprType, bool)) -> Self::Output { let (x, more) = x; + let common::ext::WireExprType { wire_expr } = x; let codec = Zenoh080::new(); let mut value = ZBuf::empty(); @@ -1029,14 +1071,14 @@ where if x.wire_expr.has_suffix() { flags |= 1; } - if let Mapping::Receiver = x.wire_expr.mapping { + if let Mapping::Receiver = wire_expr.mapping { flags |= 1 << 1; } codec.write(&mut zriter, flags)?; - codec.write(&mut zriter, x.wire_expr.scope)?; - if x.wire_expr.has_suffix() { - zriter.write_exact(x.wire_expr.suffix.as_bytes())?; + codec.write(&mut zriter, wire_expr.scope)?; + if wire_expr.has_suffix() { + 
zriter.write_exact(wire_expr.suffix.as_bytes())?; } let ext = common::ext::WireExprExt { value }; diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index 7263c3fe27..c1f2489b88 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -27,7 +27,7 @@ use zenoh_buffers::{ use zenoh_protocol::{ common::{imsg, ZExtZ64, ZExtZBufHeader}, core::{Reliability, ZenohId}, - network::*, + network::{ext::EntityIdType, *}, }; // NetworkMessage @@ -38,7 +38,9 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &NetworkMessage) -> Self::Output { - match &x.body { + let NetworkMessage { body, .. } = x; + + match body { NetworkBody::Push(b) => self.write(&mut *writer, b), NetworkBody::Request(b) => self.write(&mut *writer, b), NetworkBody::Response(b) => self.write(&mut *writer, b), @@ -218,7 +220,9 @@ where // Extension: EntityId impl LCodec<&ext::EntityIdType<{ ID }>> for Zenoh080 { fn w_len(self, x: &ext::EntityIdType<{ ID }>) -> usize { - 1 + self.w_len(&x.zid) + self.w_len(x.eid) + let EntityIdType { zid, eid } = x; + + 1 + self.w_len(zid) + self.w_len(*eid) } } diff --git a/commons/zenoh-codec/src/network/oam.rs b/commons/zenoh-codec/src/network/oam.rs index 0e59421ba8..ff6daeb020 100644 --- a/commons/zenoh-codec/src/network/oam.rs +++ b/commons/zenoh-codec/src/network/oam.rs @@ -32,9 +32,16 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Oam) -> Self::Output { + let Oam { + id, + body, + ext_qos, + ext_tstamp, + } = x; + // Header let mut header = id::OAM; - match &x.body { + match &body { ZExtBody::Unit => { header |= iext::ENC_UNIT; } @@ -46,27 +53,27 @@ where } } let mut n_exts = - ((x.ext_qos != ext::QoSType::default()) as u8) + (x.ext_tstamp.is_some() as u8); + ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - 
self.write(&mut *writer, x.id)?; + self.write(&mut *writer, id)?; // Extensions - if x.ext_qos != ext::QoSType::default() { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_qos, n_exts != 0))?; + self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } - if let Some(ts) = x.ext_tstamp.as_ref() { + if let Some(ts) = ext_tstamp.as_ref() { n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } // Payload - match &x.body { + match body { ZExtBody::Unit => {} ZExtBody::Z64(u64) => { self.write(&mut *writer, u64)?; diff --git a/commons/zenoh-codec/src/network/push.rs b/commons/zenoh-codec/src/network/push.rs index f6d4ee7f0c..10a8489b29 100644 --- a/commons/zenoh-codec/src/network/push.rs +++ b/commons/zenoh-codec/src/network/push.rs @@ -34,41 +34,49 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Push) -> Self::Output { + let Push { + wire_expr, + ext_qos, + ext_tstamp, + ext_nodeid, + payload, + } = x; + // Header let mut header = id::PUSH; - let mut n_exts = ((x.ext_qos != ext::QoSType::default()) as u8) - + (x.ext_tstamp.is_some() as u8) - + ((x.ext_nodeid != ext::NodeIdType::default()) as u8); + let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + + (ext_tstamp.is_some() as u8) + + ((ext_nodeid != &ext::NodeIdType::default()) as u8); if n_exts != 0 { header |= flag::Z; } - if x.wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::default() { header |= flag::M; } - if x.wire_expr.has_suffix() { + if wire_expr.has_suffix() { header |= flag::N; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, &x.wire_expr)?; + self.write(&mut *writer, wire_expr)?; // Extensions - if x.ext_qos != ext::QoSType::default() { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_qos, n_exts != 0))?; + self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } - if let Some(ts) = x.ext_tstamp.as_ref() { + if let Some(ts) = 
ext_tstamp.as_ref() { n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if x.ext_nodeid != ext::NodeIdType::default() { + if ext_nodeid != &ext::NodeIdType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_nodeid, n_exts != 0))?; + self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } // Payload - self.write(&mut *writer, &x.payload)?; + self.write(&mut *writer, payload)?; Ok(()) } diff --git a/commons/zenoh-codec/src/network/request.rs b/commons/zenoh-codec/src/network/request.rs index 088c9e79f8..19711ff147 100644 --- a/commons/zenoh-codec/src/network/request.rs +++ b/commons/zenoh-codec/src/network/request.rs @@ -37,8 +37,9 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: (&ext::TargetType, bool)) -> Self::Output { - let (rt, more) = x; - let v = match rt { + let (x, more) = x; + + let v = match x { ext::TargetType::BestMatching => 0, ext::TargetType::All => 1, ext::TargetType::AllComplete => 2, @@ -78,59 +79,71 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Request) -> Self::Output { + let Request { + id, + wire_expr, + ext_qos, + ext_tstamp, + ext_nodeid, + ext_target, + ext_budget, + ext_timeout, + payload, + } = x; + // Header let mut header = id::REQUEST; - let mut n_exts = ((x.ext_qos != ext::QoSType::default()) as u8) - + (x.ext_tstamp.is_some() as u8) - + ((x.ext_target != ext::TargetType::default()) as u8) - + (x.ext_budget.is_some() as u8) - + (x.ext_timeout.is_some() as u8) - + ((x.ext_nodeid != ext::NodeIdType::default()) as u8); + let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + + (ext_tstamp.is_some() as u8) + + ((ext_target != &ext::TargetType::default()) as u8) + + (ext_budget.is_some() as u8) + + (ext_timeout.is_some() as u8) + + ((ext_nodeid != &ext::NodeIdType::default()) as u8); if n_exts != 0 { header |= flag::Z; } - if x.wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::default() { header |= flag::M; } - if 
x.wire_expr.has_suffix() { + if wire_expr.has_suffix() { header |= flag::N; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; - self.write(&mut *writer, &x.wire_expr)?; + self.write(&mut *writer, id)?; + self.write(&mut *writer, wire_expr)?; // Extensions - if x.ext_qos != ext::QoSType::default() { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_qos, n_exts != 0))?; + self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } - if let Some(ts) = x.ext_tstamp.as_ref() { + if let Some(ts) = ext_tstamp.as_ref() { n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if x.ext_target != ext::TargetType::default() { + if ext_target != &ext::TargetType::default() { n_exts -= 1; - self.write(&mut *writer, (&x.ext_target, n_exts != 0))?; + self.write(&mut *writer, (ext_target, n_exts != 0))?; } - if let Some(l) = x.ext_budget.as_ref() { + if let Some(l) = ext_budget.as_ref() { n_exts -= 1; let e = ext::Budget::new(l.get() as u64); self.write(&mut *writer, (&e, n_exts != 0))?; } - if let Some(to) = x.ext_timeout.as_ref() { + if let Some(to) = ext_timeout.as_ref() { n_exts -= 1; let e = ext::Timeout::new(to.as_millis() as u64); self.write(&mut *writer, (&e, n_exts != 0))?; } - if x.ext_nodeid != ext::NodeIdType::default() { + if ext_nodeid != &ext::NodeIdType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_nodeid, n_exts != 0))?; + self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } // Payload - self.write(&mut *writer, &x.payload)?; + self.write(&mut *writer, payload)?; Ok(()) } diff --git a/commons/zenoh-codec/src/network/response.rs b/commons/zenoh-codec/src/network/response.rs index 59d97fefda..bec7df2967 100644 --- a/commons/zenoh-codec/src/network/response.rs +++ b/commons/zenoh-codec/src/network/response.rs @@ -37,42 +37,51 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Response) -> Self::Output { + let Response { + rid, + wire_expr, + 
payload, + ext_qos, + ext_tstamp, + ext_respid, + } = x; + // Header let mut header = id::RESPONSE; - let mut n_exts = ((x.ext_qos != ext::QoSType::default()) as u8) - + (x.ext_tstamp.is_some() as u8) - + (x.ext_respid.is_some() as u8); + let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + + (ext_tstamp.is_some() as u8) + + (ext_respid.is_some() as u8); if n_exts != 0 { header |= flag::Z; } - if x.wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::default() { header |= flag::M; } - if x.wire_expr.has_suffix() { + if wire_expr.has_suffix() { header |= flag::N; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.rid)?; - self.write(&mut *writer, &x.wire_expr)?; + self.write(&mut *writer, rid)?; + self.write(&mut *writer, wire_expr)?; // Extensions - if x.ext_qos != ext::QoSType::default() { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_qos, n_exts != 0))?; + self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } - if let Some(ts) = x.ext_tstamp.as_ref() { + if let Some(ts) = ext_tstamp.as_ref() { n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if let Some(ri) = x.ext_respid.as_ref() { + if let Some(ri) = ext_respid.as_ref() { n_exts -= 1; self.write(&mut *writer, (ri, n_exts != 0))?; } // Payload - self.write(&mut *writer, &x.payload)?; + self.write(&mut *writer, payload)?; Ok(()) } @@ -166,24 +175,30 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &ResponseFinal) -> Self::Output { + let ResponseFinal { + rid, + ext_qos, + ext_tstamp, + } = x; + // Header let mut header = id::RESPONSE_FINAL; let mut n_exts = - ((x.ext_qos != ext::QoSType::default()) as u8) + (x.ext_tstamp.is_some() as u8); + ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.rid)?; + self.write(&mut *writer, rid)?; 
// Extensions - if x.ext_qos != ext::QoSType::default() { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_qos, n_exts != 0))?; + self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } - if let Some(ts) = x.ext_tstamp.as_ref() { + if let Some(ts) = ext_tstamp.as_ref() { n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } diff --git a/commons/zenoh-codec/src/scouting/hello.rs b/commons/zenoh-codec/src/scouting/hello.rs index 1793676cde..430201133e 100644 --- a/commons/zenoh-codec/src/scouting/hello.rs +++ b/commons/zenoh-codec/src/scouting/hello.rs @@ -33,31 +33,38 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Hello) -> Self::Output { + let Hello { + version, + whatami, + zid, + locators, + } = x; + // Header let mut header = id::HELLO; - if !x.locators.is_empty() { + if !locators.is_empty() { header |= flag::L; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.version)?; + self.write(&mut *writer, version)?; let mut flags: u8 = 0; - let whatami: u8 = match x.whatami { + let whatami: u8 = match whatami { WhatAmI::Router => 0b00, WhatAmI::Peer => 0b01, WhatAmI::Client => 0b10, }; flags |= whatami & 0b11; - flags |= ((x.zid.size() - 1) as u8) << 4; + flags |= ((zid.size() - 1) as u8) << 4; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(x.zid.size()); - lodec.write(&mut *writer, &x.zid)?; + let lodec = Zenoh080Length::new(zid.size()); + lodec.write(&mut *writer, zid)?; - if !x.locators.is_empty() { - self.write(&mut *writer, x.locators.as_slice())?; + if !locators.is_empty() { + self.write(&mut *writer, locators.as_slice())?; } Ok(()) diff --git a/commons/zenoh-codec/src/scouting/mod.rs b/commons/zenoh-codec/src/scouting/mod.rs index 70f6fb8065..bbedce4282 100644 --- a/commons/zenoh-codec/src/scouting/mod.rs +++ b/commons/zenoh-codec/src/scouting/mod.rs @@ -31,7 +31,9 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut 
W, x: &ScoutingMessage) -> Self::Output { - match &x.body { + let ScoutingMessage { body, .. } = x; + + match body { ScoutingBody::Scout(s) => self.write(&mut *writer, s), ScoutingBody::Hello(h) => self.write(&mut *writer, h), } diff --git a/commons/zenoh-codec/src/scouting/scout.rs b/commons/zenoh-codec/src/scouting/scout.rs index 941c455866..02d5294047 100644 --- a/commons/zenoh-codec/src/scouting/scout.rs +++ b/commons/zenoh-codec/src/scouting/scout.rs @@ -33,22 +33,24 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Scout) -> Self::Output { + let Scout { version, what, zid } = x; + // Header let header = id::SCOUT; self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.version)?; + self.write(&mut *writer, version)?; let mut flags: u8 = 0; - let what: u8 = x.what.into(); + let what: u8 = (*what).into(); flags |= what & 0b111; - if let Some(zid) = x.zid.as_ref() { + if let Some(zid) = zid.as_ref() { flags |= (((zid.size() - 1) as u8) << 4) | flag::I; }; self.write(&mut *writer, flags)?; - if let Some(zid) = x.zid.as_ref() { + if let Some(zid) = zid.as_ref() { let lodec = Zenoh080Length::new(zid.size()); lodec.write(&mut *writer, zid)?; } diff --git a/commons/zenoh-codec/src/transport/close.rs b/commons/zenoh-codec/src/transport/close.rs index 86b54f8688..9771b9e1e9 100644 --- a/commons/zenoh-codec/src/transport/close.rs +++ b/commons/zenoh-codec/src/transport/close.rs @@ -31,15 +31,17 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Close) -> Self::Output { + let Close { reason, session } = x; + // Header let mut header = id::CLOSE; - if x.session { + if *session { header |= flag::S; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.reason)?; + self.write(&mut *writer, reason)?; Ok(()) } diff --git a/commons/zenoh-codec/src/transport/fragment.rs b/commons/zenoh-codec/src/transport/fragment.rs index 7cc827d378..b66f395df1 100644 --- 
a/commons/zenoh-codec/src/transport/fragment.rs +++ b/commons/zenoh-codec/src/transport/fragment.rs @@ -33,25 +33,32 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &FragmentHeader) -> Self::Output { + let FragmentHeader { + reliability, + more, + sn, + ext_qos, + } = x; + // Header let mut header = id::FRAGMENT; - if let Reliability::Reliable = x.reliability { + if let Reliability::Reliable = reliability { header |= flag::R; } - if x.more { + if *more { header |= flag::M; } - if x.ext_qos != ext::QoSType::default() { + if ext_qos != &ext::QoSType::default() { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.sn)?; + self.write(&mut *writer, sn)?; // Extensions - if x.ext_qos != ext::QoSType::default() { - self.write(&mut *writer, (x.ext_qos, false))?; + if ext_qos != &ext::QoSType::default() { + self.write(&mut *writer, (*ext_qos, false))?; } Ok(()) @@ -125,17 +132,25 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Fragment) -> Self::Output { + let Fragment { + reliability, + more, + sn, + payload, + ext_qos, + } = x; + // Header let header = FragmentHeader { - reliability: x.reliability, - more: x.more, - sn: x.sn, - ext_qos: x.ext_qos, + reliability: *reliability, + more: *more, + sn: *sn, + ext_qos: *ext_qos, }; self.write(&mut *writer, &header)?; // Body - writer.write_zslice(&x.payload)?; + writer.write_zslice(payload)?; Ok(()) } diff --git a/commons/zenoh-codec/src/transport/frame.rs b/commons/zenoh-codec/src/transport/frame.rs index 1293dc950c..8d39aabcdb 100644 --- a/commons/zenoh-codec/src/transport/frame.rs +++ b/commons/zenoh-codec/src/transport/frame.rs @@ -35,21 +35,27 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &FrameHeader) -> Self::Output { + let FrameHeader { + reliability, + sn, + ext_qos, + } = x; + // Header let mut header = id::FRAME; - if let Reliability::Reliable = x.reliability { + if let 
Reliability::Reliable = reliability { header |= flag::R; } - if x.ext_qos != ext::QoSType::default() { + if ext_qos != &ext::QoSType::default() { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.sn)?; + self.write(&mut *writer, sn)?; // Extensions - if x.ext_qos != ext::QoSType::default() { + if ext_qos != &ext::QoSType::default() { self.write(&mut *writer, (x.ext_qos, false))?; } @@ -122,16 +128,23 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Frame) -> Self::Output { + let Frame { + reliability, + sn, + payload, + ext_qos, + } = x; + // Header let header = FrameHeader { - reliability: x.reliability, - sn: x.sn, - ext_qos: x.ext_qos, + reliability: *reliability, + sn: *sn, + ext_qos: *ext_qos, }; self.write(&mut *writer, &header)?; // Body - for m in x.payload.iter() { + for m in payload.iter() { self.write(&mut *writer, m)?; } diff --git a/commons/zenoh-codec/src/transport/init.rs b/commons/zenoh-codec/src/transport/init.rs index 5f98c77e5b..d3a92165ea 100644 --- a/commons/zenoh-codec/src/transport/init.rs +++ b/commons/zenoh-codec/src/transport/init.rs @@ -53,7 +53,7 @@ where // Header let mut header = id::INIT; - if *resolution != Resolution::default() || *batch_size != batch_size::UNICAST { + if resolution != &Resolution::default() || batch_size != &batch_size::UNICAST { header |= flag::S; } let mut n_exts = (ext_qos.is_some() as u8) @@ -253,7 +253,7 @@ where // Header let mut header = id::INIT | flag::A; - if *resolution != Resolution::default() || *batch_size != batch_size::UNICAST { + if resolution != &Resolution::default() || batch_size != &batch_size::UNICAST { header |= flag::S; } let mut n_exts = (ext_qos.is_some() as u8) diff --git a/commons/zenoh-codec/src/transport/join.rs b/commons/zenoh-codec/src/transport/join.rs index 197190946a..80c1663413 100644 --- a/commons/zenoh-codec/src/transport/join.rs +++ b/commons/zenoh-codec/src/transport/join.rs @@ -30,7 +30,11 @@ use 
zenoh_protocol::{ impl LCodec<&PrioritySn> for Zenoh080 { fn w_len(self, p: &PrioritySn) -> usize { - self.w_len(p.reliable) + self.w_len(p.best_effort) + let PrioritySn { + reliable, + best_effort, + } = p; + self.w_len(*reliable) + self.w_len(*best_effort) } } @@ -41,8 +45,13 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &PrioritySn) -> Self::Output { - self.write(&mut *writer, x.reliable)?; - self.write(&mut *writer, x.best_effort)?; + let PrioritySn { + reliable, + best_effort, + } = x; + + self.write(&mut *writer, reliable)?; + self.write(&mut *writer, best_effort)?; Ok(()) } } @@ -129,52 +138,64 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Join) -> Self::Output { + let Join { + version, + whatami, + zid, + resolution, + batch_size, + lease, + next_sn, + ext_qos, + ext_shm, + } = x; + // Header let mut header = id::JOIN; - if x.lease.as_millis() % 1_000 == 0 { + if lease.as_millis() % 1_000 == 0 { header |= flag::T; } - if x.resolution != Resolution::default() || x.batch_size != batch_size::MULTICAST { + if resolution != &Resolution::default() || batch_size != &batch_size::MULTICAST { header |= flag::S; } - let mut n_exts = (x.ext_qos.is_some() as u8) + (x.ext_shm.is_some() as u8); + let mut n_exts = (ext_qos.is_some() as u8) + (ext_shm.is_some() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.version)?; + self.write(&mut *writer, version)?; - let whatami: u8 = match x.whatami { + let whatami: u8 = match whatami { WhatAmI::Router => 0b00, WhatAmI::Peer => 0b01, WhatAmI::Client => 0b10, }; - let flags: u8 = ((x.zid.size() as u8 - 1) << 4) | whatami; + let flags: u8 = ((zid.size() as u8 - 1) << 4) | whatami; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(x.zid.size()); - lodec.write(&mut *writer, &x.zid)?; + let lodec = Zenoh080Length::new(zid.size()); + lodec.write(&mut *writer, zid)?; if 
imsg::has_flag(header, flag::S) { - self.write(&mut *writer, x.resolution.as_u8())?; - self.write(&mut *writer, x.batch_size.to_le_bytes())?; + self.write(&mut *writer, resolution.as_u8())?; + self.write(&mut *writer, batch_size.to_le_bytes())?; } if imsg::has_flag(header, flag::T) { - self.write(&mut *writer, x.lease.as_secs())?; + self.write(&mut *writer, lease.as_secs())?; } else { - self.write(&mut *writer, x.lease.as_millis() as u64)?; + self.write(&mut *writer, lease.as_millis() as u64)?; } - self.write(&mut *writer, &x.next_sn)?; + self.write(&mut *writer, next_sn)?; // Extensions - if let Some(qos) = x.ext_qos.as_ref() { + if let Some(qos) = ext_qos.as_ref() { n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } - if let Some(shm) = x.ext_shm.as_ref() { + if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; } diff --git a/commons/zenoh-codec/src/transport/keepalive.rs b/commons/zenoh-codec/src/transport/keepalive.rs index ce432e63a6..aa6726f50b 100644 --- a/commons/zenoh-codec/src/transport/keepalive.rs +++ b/commons/zenoh-codec/src/transport/keepalive.rs @@ -30,7 +30,9 @@ where { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, _x: &KeepAlive) -> Self::Output { + fn write(self, writer: &mut W, x: &KeepAlive) -> Self::Output { + let KeepAlive = x; + // Header let header = id::KEEP_ALIVE; self.write(&mut *writer, header)?; diff --git a/commons/zenoh-codec/src/transport/mod.rs b/commons/zenoh-codec/src/transport/mod.rs index 4ddf872551..559b5b5fda 100644 --- a/commons/zenoh-codec/src/transport/mod.rs +++ b/commons/zenoh-codec/src/transport/mod.rs @@ -40,7 +40,9 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &TransportMessageLowLatency) -> Self::Output { - match &x.body { + let TransportMessageLowLatency { body } = x; + + match body { TransportBodyLowLatency::Network(b) => self.write(&mut *writer, b), TransportBodyLowLatency::KeepAlive(b) => 
self.write(&mut *writer, b), TransportBodyLowLatency::Close(b) => self.write(&mut *writer, b), @@ -79,7 +81,9 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &TransportMessage) -> Self::Output { - match &x.body { + let TransportMessage { body, .. } = x; + + match body { TransportBody::Frame(b) => self.write(&mut *writer, b), TransportBody::Fragment(b) => self.write(&mut *writer, b), TransportBody::KeepAlive(b) => self.write(&mut *writer, b), @@ -142,6 +146,7 @@ where fn write(self, writer: &mut W, x: (ext::QoSType<{ ID }>, bool)) -> Self::Output { let (x, more) = x; let ext: ZExtZ64<{ ID }> = x.into(); + self.write(&mut *writer, (&ext, more)) } } diff --git a/commons/zenoh-codec/src/transport/oam.rs b/commons/zenoh-codec/src/transport/oam.rs index 46fe63345e..e2f905abf8 100644 --- a/commons/zenoh-codec/src/transport/oam.rs +++ b/commons/zenoh-codec/src/transport/oam.rs @@ -32,9 +32,11 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Oam) -> Self::Output { + let Oam { id, body, ext_qos } = x; + // Header let mut header = id::OAM; - match &x.body { + match &body { ZExtBody::Unit => { header |= iext::ENC_UNIT; } @@ -45,23 +47,23 @@ where header |= iext::ENC_ZBUF; } } - let mut n_exts = (x.ext_qos != ext::QoSType::default()) as u8; + let mut n_exts = (ext_qos != &ext::QoSType::default()) as u8; if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.id)?; + self.write(&mut *writer, id)?; // Extensions - if x.ext_qos != ext::QoSType::default() { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_qos, n_exts != 0))?; + self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } // Payload - match &x.body { + match &body { ZExtBody::Unit => {} ZExtBody::Z64(u64) => { self.write(&mut *writer, u64)?; diff --git a/commons/zenoh-codec/src/transport/open.rs b/commons/zenoh-codec/src/transport/open.rs index 
17482b1610..f895942ea1 100644 --- a/commons/zenoh-codec/src/transport/open.rs +++ b/commons/zenoh-codec/src/transport/open.rs @@ -36,8 +36,8 @@ where fn write(self, writer: &mut W, x: &OpenSyn) -> Self::Output { let OpenSyn { - initial_sn, lease, + initial_sn, cookie, ext_qos, ext_shm, @@ -208,8 +208,8 @@ where fn write(self, writer: &mut W, x: &OpenAck) -> Self::Output { let OpenAck { - initial_sn, lease, + initial_sn, ext_qos, ext_shm, ext_auth, diff --git a/commons/zenoh-codec/src/zenoh/ack.rs b/commons/zenoh-codec/src/zenoh/ack.rs index 0b940eb877..78cbca2987 100644 --- a/commons/zenoh-codec/src/zenoh/ack.rs +++ b/commons/zenoh-codec/src/zenoh/ack.rs @@ -32,28 +32,34 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Ack) -> Self::Output { + let Ack { + timestamp, + ext_sinfo, + ext_unknown, + } = x; + // Header let mut header = id::ACK; - if x.timestamp.is_some() { + if timestamp.is_some() { header |= flag::T; } - let mut n_exts = ((x.ext_sinfo.is_some()) as u8) + (x.ext_unknown.len() as u8); + let mut n_exts = ((ext_sinfo.is_some()) as u8) + (ext_unknown.len() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - if let Some(ts) = x.timestamp.as_ref() { + if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } // Extensions - if let Some(sinfo) = x.ext_sinfo.as_ref() { + if let Some(sinfo) = ext_sinfo.as_ref() { n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - for u in x.ext_unknown.iter() { + for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } diff --git a/commons/zenoh-codec/src/zenoh/del.rs b/commons/zenoh-codec/src/zenoh/del.rs index cdd5c332d8..3d0a64f428 100644 --- a/commons/zenoh-codec/src/zenoh/del.rs +++ b/commons/zenoh-codec/src/zenoh/del.rs @@ -32,28 +32,41 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Del) -> Self::Output { + let Del { + timestamp, + ext_sinfo, + 
ext_attachment, + ext_unknown, + } = x; + // Header let mut header = id::DEL; - if x.timestamp.is_some() { + if timestamp.is_some() { header |= flag::T; } - let mut n_exts = (x.ext_sinfo.is_some()) as u8 + (x.ext_unknown.len() as u8); + let mut n_exts = (ext_sinfo.is_some()) as u8 + + (ext_attachment.is_some()) as u8 + + (ext_unknown.len() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - if let Some(ts) = x.timestamp.as_ref() { + if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } // Extensions - if let Some(sinfo) = x.ext_sinfo.as_ref() { + if let Some(sinfo) = ext_sinfo.as_ref() { n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - for u in x.ext_unknown.iter() { + if let Some(att) = ext_attachment.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (att, n_exts != 0))?; + } + for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } @@ -94,6 +107,7 @@ where // Extensions let mut ext_sinfo: Option = None; + let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); @@ -106,6 +120,11 @@ where ext_sinfo = Some(s); has_ext = ext; } + ext::Attachment::ID => { + let (a, ext): (ext::AttachmentType, bool) = eodec.read(&mut *reader)?; + ext_attachment = Some(a); + has_ext = ext; + } _ => { let (u, ext) = extension::read(reader, "Del", ext)?; ext_unknown.push(u); @@ -117,6 +136,7 @@ where Ok(Del { timestamp, ext_sinfo, + ext_attachment, ext_unknown, }) } diff --git a/commons/zenoh-codec/src/zenoh/err.rs b/commons/zenoh-codec/src/zenoh/err.rs index 425044402c..5cef1a6389 100644 --- a/commons/zenoh-codec/src/zenoh/err.rs +++ b/commons/zenoh-codec/src/zenoh/err.rs @@ -32,38 +32,46 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Err) -> Self::Output { + let Err { + code, + is_infrastructure, + timestamp, + ext_sinfo, + ext_body, + ext_unknown, + } = x; + // Header 
let mut header = id::ERR; - if x.timestamp.is_some() { + if timestamp.is_some() { header |= flag::T; } - if x.is_infrastructure { + if *is_infrastructure { header |= flag::I; } - let mut n_exts = (x.ext_sinfo.is_some() as u8) - + (x.ext_body.is_some() as u8) - + (x.ext_unknown.len() as u8); + let mut n_exts = + (ext_sinfo.is_some() as u8) + (ext_body.is_some() as u8) + (ext_unknown.len() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.code)?; - if let Some(ts) = x.timestamp.as_ref() { + self.write(&mut *writer, code)?; + if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } // Extensions - if let Some(sinfo) = x.ext_sinfo.as_ref() { + if let Some(sinfo) = ext_sinfo.as_ref() { n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - if let Some(body) = x.ext_body.as_ref() { + if let Some(body) = ext_body.as_ref() { n_exts -= 1; self.write(&mut *writer, (body, n_exts != 0))?; } - for u in x.ext_unknown.iter() { + for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index dea0b7c495..2e3ea48be7 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -153,7 +153,9 @@ where // Extension: SourceInfo impl LCodec<&ext::SourceInfoType<{ ID }>> for Zenoh080 { fn w_len(self, x: &ext::SourceInfoType<{ ID }>) -> usize { - 1 + self.w_len(&x.zid) + self.w_len(x.eid) + self.w_len(x.sn) + let ext::SourceInfoType { zid, eid, sn } = x; + + 1 + self.w_len(zid) + self.w_len(*eid) + self.w_len(*sn) } } @@ -165,17 +167,19 @@ where fn write(self, writer: &mut W, x: (&ext::SourceInfoType<{ ID }>, bool)) -> Self::Output { let (x, more) = x; + let ext::SourceInfoType { zid, eid, sn } = x; + let header: ZExtZBufHeader<{ ID }> = ZExtZBufHeader::new(self.w_len(x)); self.write(&mut *writer, (&header, more))?; - let flags: u8 = 
(x.zid.size() as u8 - 1) << 4; + let flags: u8 = (zid.size() as u8 - 1) << 4; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(x.zid.size()); - lodec.write(&mut *writer, &x.zid)?; + let lodec = Zenoh080Length::new(zid.size()); + lodec.write(&mut *writer, zid)?; - self.write(&mut *writer, x.eid)?; - self.write(&mut *writer, x.sn)?; + self.write(&mut *writer, eid)?; + self.write(&mut *writer, sn)?; Ok(()) } } @@ -211,7 +215,9 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: (&ext::ShmType<{ ID }>, bool)) -> Self::Output { - let (_, more) = x; + let (x, more) = x; + let ext::ShmType = x; + let header: ZExtUnit<{ ID }> = ZExtUnit::new(); self.write(&mut *writer, (&header, more))?; Ok(()) @@ -241,25 +247,31 @@ where fn write(self, writer: &mut W, x: (&ext::ValueType<{ VID }, { SID }>, bool)) -> Self::Output { let (x, more) = x; + let ext::ValueType { + encoding, + payload, + #[cfg(feature = "shared-memory")] + ext_shm, + } = x; #[cfg(feature = "shared-memory")] // Write Shm extension if present - if let Some(eshm) = x.ext_shm.as_ref() { + if let Some(eshm) = ext_shm.as_ref() { self.write(&mut *writer, (eshm, true))?; } // Compute extension length - let mut len = self.w_len(&x.encoding); + let mut len = self.w_len(encoding); #[cfg(feature = "shared-memory")] { - let codec = Zenoh080Sliced::::new(x.ext_shm.is_some()); - len += codec.w_len(&x.payload); + let codec = Zenoh080Sliced::::new(ext_shm.is_some()); + len += codec.w_len(payload); } #[cfg(not(feature = "shared-memory"))] { let codec = Zenoh080Bounded::::new(); - len += codec.w_len(&x.payload); + len += codec.w_len(payload); } // Write ZExtBuf header @@ -267,7 +279,7 @@ where self.write(&mut *writer, (&header, more))?; // Write encoding - self.write(&mut *writer, &x.encoding)?; + self.write(&mut *writer, encoding)?; // Write payload fn write(writer: &mut W, payload: &ZBuf) -> Result<(), DidntWrite> @@ -283,17 +295,17 @@ where #[cfg(feature = "shared-memory")] { - if 
x.ext_shm.is_some() { + if ext_shm.is_some() { let codec = Zenoh080Sliced::::new(true); - codec.write(&mut *writer, &x.payload)?; + codec.write(&mut *writer, payload)?; } else { - write(&mut *writer, &x.payload)?; + write(&mut *writer, payload)?; } } #[cfg(not(feature = "shared-memory"))] { - write(&mut *writer, &x.payload)?; + write(&mut *writer, payload)?; } Ok(()) @@ -367,3 +379,39 @@ where )) } } + +// Extension: Attachment +impl WCodec<(&ext::AttachmentType<{ ID }>, bool), &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: (&ext::AttachmentType<{ ID }>, bool)) -> Self::Output { + let (x, more) = x; + let ext::AttachmentType { buffer } = x; + + let header: ZExtZBufHeader<{ ID }> = ZExtZBufHeader::new(self.w_len(buffer)); + self.write(&mut *writer, (&header, more))?; + for s in buffer.zslices() { + writer.write_zslice(s)?; + } + + Ok(()) + } +} + +impl RCodec<(ext::AttachmentType<{ ID }>, bool), &mut R> for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result<(ext::AttachmentType<{ ID }>, bool), Self::Error> { + let (h, more): (ZExtZBufHeader<{ ID }>, bool) = self.read(&mut *reader)?; + let mut buffer = ZBuf::empty(); + reader.read_zslices(h.len, |s| buffer.push_zslice(s))?; + + Ok((ext::AttachmentType { buffer }, more)) + } +} diff --git a/commons/zenoh-codec/src/zenoh/pull.rs b/commons/zenoh-codec/src/zenoh/pull.rs index 2b2a3a61e0..dc71901d58 100644 --- a/commons/zenoh-codec/src/zenoh/pull.rs +++ b/commons/zenoh-codec/src/zenoh/pull.rs @@ -33,16 +33,18 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Pull) -> Self::Output { + let Pull { ext_unknown } = x; + // Header let mut header = id::PULL; - let mut n_exts = x.ext_unknown.len() as u8; + let mut n_exts = ext_unknown.len() as u8; if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Extensions - for u in x.ext_unknown.iter() { + 
for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } diff --git a/commons/zenoh-codec/src/zenoh/put.rs b/commons/zenoh-codec/src/zenoh/put.rs index 6358a533a1..ebc364cf9b 100644 --- a/commons/zenoh-codec/src/zenoh/put.rs +++ b/commons/zenoh-codec/src/zenoh/put.rs @@ -38,18 +38,31 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Put) -> Self::Output { + let Put { + timestamp, + encoding, + ext_sinfo, + ext_attachment, + #[cfg(feature = "shared-memory")] + ext_shm, + ext_unknown, + payload, + }: &Put = x; + // Header let mut header = id::PUT; - if x.timestamp.is_some() { + if timestamp.is_some() { header |= flag::T; } - if x.encoding != Encoding::default() { + if encoding != &Encoding::default() { header |= flag::E; } - let mut n_exts = (x.ext_sinfo.is_some()) as u8 + (x.ext_unknown.len() as u8); + let mut n_exts = (ext_sinfo.is_some()) as u8 + + (ext_attachment.is_some()) as u8 + + (ext_unknown.len() as u8); #[cfg(feature = "shared-memory")] { - n_exts += x.ext_shm.is_some() as u8; + n_exts += ext_shm.is_some() as u8; } if n_exts != 0 { header |= flag::Z; @@ -57,24 +70,28 @@ where self.write(&mut *writer, header)?; // Body - if let Some(ts) = x.timestamp.as_ref() { + if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } - if x.encoding != Encoding::default() { - self.write(&mut *writer, &x.encoding)?; + if encoding != &Encoding::default() { + self.write(&mut *writer, encoding)?; } // Extensions - if let Some(sinfo) = x.ext_sinfo.as_ref() { + if let Some(sinfo) = ext_sinfo.as_ref() { n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } #[cfg(feature = "shared-memory")] - if let Some(eshm) = x.ext_shm.as_ref() { + if let Some(eshm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (eshm, n_exts != 0))?; } - for u in x.ext_unknown.iter() { + if let Some(att) = ext_attachment.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (att, n_exts != 0))?; + } + for u in 
ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } @@ -82,14 +99,14 @@ where // Payload #[cfg(feature = "shared-memory")] { - let codec = Zenoh080Sliced::::new(x.ext_shm.is_some()); - codec.write(&mut *writer, &x.payload)?; + let codec = Zenoh080Sliced::::new(ext_shm.is_some()); + codec.write(&mut *writer, payload)?; } #[cfg(not(feature = "shared-memory"))] { let bodec = Zenoh080Bounded::::new(); - bodec.write(&mut *writer, &x.payload)?; + bodec.write(&mut *writer, payload)?; } Ok(()) @@ -135,6 +152,7 @@ where let mut ext_sinfo: Option = None; #[cfg(feature = "shared-memory")] let mut ext_shm: Option = None; + let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); @@ -153,6 +171,11 @@ where ext_shm = Some(s); has_ext = ext; } + ext::Attachment::ID => { + let (a, ext): (ext::AttachmentType, bool) = eodec.read(&mut *reader)?; + ext_attachment = Some(a); + has_ext = ext; + } _ => { let (u, ext) = extension::read(reader, "Put", ext)?; ext_unknown.push(u); @@ -182,6 +205,7 @@ where ext_sinfo, #[cfg(feature = "shared-memory")] ext_shm, + ext_attachment, ext_unknown, payload, }) diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index 0844e16df4..09b01b2266 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -74,39 +74,53 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Query) -> Self::Output { + let Query { + parameters, + ext_sinfo, + ext_consolidation, + ext_body, + ext_attachment, + ext_unknown, + } = x; + // Header let mut header = id::QUERY; - if !x.parameters.is_empty() { + if !parameters.is_empty() { header |= flag::P; } - let mut n_exts = (x.ext_sinfo.is_some() as u8) - + ((x.ext_consolidation != ext::ConsolidationType::default()) as u8) - + (x.ext_body.is_some() as u8) - + (x.ext_unknown.len() as u8); + let mut n_exts = 
(ext_sinfo.is_some() as u8) + + ((ext_consolidation != &ext::ConsolidationType::default()) as u8) + + (ext_body.is_some() as u8) + + (ext_attachment.is_some() as u8) + + (ext_unknown.len() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - if !x.parameters.is_empty() { - self.write(&mut *writer, &x.parameters)?; + if !parameters.is_empty() { + self.write(&mut *writer, parameters)?; } // Extensions - if let Some(sinfo) = x.ext_sinfo.as_ref() { + if let Some(sinfo) = ext_sinfo.as_ref() { n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - if x.ext_consolidation != ext::ConsolidationType::default() { + if ext_consolidation != &ext::ConsolidationType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_consolidation, n_exts != 0))?; + self.write(&mut *writer, (*ext_consolidation, n_exts != 0))?; } - if let Some(body) = x.ext_body.as_ref() { + if let Some(body) = ext_body.as_ref() { n_exts -= 1; self.write(&mut *writer, (body, n_exts != 0))?; } - for u in x.ext_unknown.iter() { + if let Some(att) = ext_attachment.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (att, n_exts != 0))?; + } + for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } @@ -149,6 +163,7 @@ where let mut ext_sinfo: Option = None; let mut ext_consolidation = ext::ConsolidationType::default(); let mut ext_body: Option = None; + let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); @@ -171,6 +186,11 @@ where ext_body = Some(s); has_ext = ext; } + ext::Attachment::ID => { + let (a, ext): (ext::AttachmentType, bool) = eodec.read(&mut *reader)?; + ext_attachment = Some(a); + has_ext = ext; + } _ => { let (u, ext) = extension::read(reader, "Query", ext)?; ext_unknown.push(u); @@ -184,6 +204,7 @@ where ext_sinfo, ext_consolidation, ext_body, + ext_attachment, ext_unknown, }) } diff --git a/commons/zenoh-codec/src/zenoh/reply.rs 
b/commons/zenoh-codec/src/zenoh/reply.rs index 1aef954220..d98c72b341 100644 --- a/commons/zenoh-codec/src/zenoh/reply.rs +++ b/commons/zenoh-codec/src/zenoh/reply.rs @@ -38,20 +38,33 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Reply) -> Self::Output { + let Reply { + timestamp, + encoding, + ext_sinfo, + ext_consolidation, + #[cfg(feature = "shared-memory")] + ext_shm, + ext_attachment, + ext_unknown, + payload, + } = x; + // Header let mut header = id::REPLY; - if x.timestamp.is_some() { + if timestamp.is_some() { header |= flag::T; } - if x.encoding != Encoding::default() { + if encoding != &Encoding::default() { header |= flag::E; } - let mut n_exts = (x.ext_sinfo.is_some()) as u8 - + ((x.ext_consolidation != ext::ConsolidationType::default()) as u8) - + (x.ext_unknown.len() as u8); + let mut n_exts = (ext_sinfo.is_some()) as u8 + + ((ext_consolidation != &ext::ConsolidationType::default()) as u8) + + (ext_attachment.is_some()) as u8 + + (ext_unknown.len() as u8); #[cfg(feature = "shared-memory")] { - n_exts += x.ext_shm.is_some() as u8; + n_exts += ext_shm.is_some() as u8; } if n_exts != 0 { header |= flag::Z; @@ -59,28 +72,32 @@ where self.write(&mut *writer, header)?; // Body - if let Some(ts) = x.timestamp.as_ref() { + if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } - if x.encoding != Encoding::default() { - self.write(&mut *writer, &x.encoding)?; + if encoding != &Encoding::default() { + self.write(&mut *writer, encoding)?; } // Extensions - if let Some(sinfo) = x.ext_sinfo.as_ref() { + if let Some(sinfo) = ext_sinfo.as_ref() { n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - if x.ext_consolidation != ext::ConsolidationType::default() { + if ext_consolidation != &ext::ConsolidationType::default() { n_exts -= 1; - self.write(&mut *writer, (x.ext_consolidation, n_exts != 0))?; + self.write(&mut *writer, (*ext_consolidation, n_exts != 0))?; } #[cfg(feature = "shared-memory")] - if 
let Some(eshm) = x.ext_shm.as_ref() { + if let Some(eshm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (eshm, n_exts != 0))?; } - for u in x.ext_unknown.iter() { + if let Some(att) = ext_attachment.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (att, n_exts != 0))?; + } + for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } @@ -88,14 +105,14 @@ where // Payload #[cfg(feature = "shared-memory")] { - let codec = Zenoh080Sliced::::new(x.ext_shm.is_some()); - codec.write(&mut *writer, &x.payload)?; + let codec = Zenoh080Sliced::::new(ext_shm.is_some()); + codec.write(&mut *writer, payload)?; } #[cfg(not(feature = "shared-memory"))] { let bodec = Zenoh080Bounded::::new(); - bodec.write(&mut *writer, &x.payload)?; + bodec.write(&mut *writer, payload)?; } Ok(()) @@ -142,6 +159,7 @@ where let mut ext_consolidation = ext::ConsolidationType::default(); #[cfg(feature = "shared-memory")] let mut ext_shm: Option = None; + let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); @@ -165,6 +183,11 @@ where ext_shm = Some(s); has_ext = ext; } + ext::Attachment::ID => { + let (a, ext): (ext::AttachmentType, bool) = eodec.read(&mut *reader)?; + ext_attachment = Some(a); + has_ext = ext; + } _ => { let (u, ext) = extension::read(reader, "Reply", ext)?; ext_unknown.push(u); @@ -195,6 +218,7 @@ where ext_consolidation, #[cfg(feature = "shared-memory")] ext_shm, + ext_attachment, ext_unknown, payload, }) diff --git a/commons/zenoh-collections/src/single_or_vec.rs b/commons/zenoh-collections/src/single_or_vec.rs index ea190395fb..0490a66a71 100644 --- a/commons/zenoh-collections/src/single_or_vec.rs +++ b/commons/zenoh-collections/src/single_or_vec.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // + use alloc::{vec, vec::Vec}; use core::{ cmp::PartialEq, fmt, iter, - ops::{Index, IndexMut}, + ops::{Index, IndexMut, RangeBounds}, ptr, slice, 
}; @@ -112,6 +113,19 @@ impl SingleOrVec { matches!(&self.0, SingleOrVecInner::Vec(v) if v.is_empty()) } + fn vectorize(&mut self) -> &mut Vec { + if let SingleOrVecInner::Single(v) = &self.0 { + unsafe { + let v = core::ptr::read(v); + core::ptr::write(&mut self.0, SingleOrVecInner::Vec(vec![v])) + }; + } + let SingleOrVecInner::Vec(v) = &mut self.0 else { + unsafe { core::hint::unreachable_unchecked() } + }; + v + } + pub fn get(&self, index: usize) -> Option<&T> { match &self.0 { SingleOrVecInner::Single(v) => (index == 0).then_some(v), @@ -139,6 +153,55 @@ impl SingleOrVec { SingleOrVecInner::Vec(v) => v.last_mut(), } } + pub fn drain>(&mut self, range: Range) -> Drain { + match &mut self.0 { + this @ SingleOrVecInner::Single(_) if range.contains(&0) => Drain { + inner: DrainInner::Single(this), + }, + SingleOrVecInner::Vec(vec) => Drain { + inner: DrainInner::Vec(vec.drain(range)), + }, + _ => Drain { + inner: DrainInner::Done, + }, + } + } + pub fn insert(&mut self, at: usize, value: T) { + assert!(at <= self.len()); + self.vectorize().insert(at, value); + } +} +enum DrainInner<'a, T> { + Vec(alloc::vec::Drain<'a, T>), + Single(&'a mut SingleOrVecInner), + Done, +} +pub struct Drain<'a, T> { + inner: DrainInner<'a, T>, +} +impl<'a, T> Iterator for Drain<'a, T> { + type Item = T; + + fn next(&mut self) -> Option { + match &mut self.inner { + DrainInner::Vec(drain) => drain.next(), + DrainInner::Single(inner) => match unsafe { core::ptr::read(*inner) } { + SingleOrVecInner::Single(value) => unsafe { + core::ptr::write(*inner, SingleOrVecInner::Vec(Vec::new())); + Some(value) + }, + SingleOrVecInner::Vec(_) => None, + }, + _ => None, + } + } +} +impl<'a, T> Drop for Drain<'a, T> { + fn drop(&mut self) { + if let DrainInner::Single(_) = self.inner { + self.next(); + } + } } impl Default for SingleOrVec { diff --git a/commons/zenoh-crypto/src/cipher.rs b/commons/zenoh-crypto/src/cipher.rs index 0345805423..3d12712e56 100644 --- a/commons/zenoh-crypto/src/cipher.rs 
+++ b/commons/zenoh-crypto/src/cipher.rs @@ -50,7 +50,7 @@ impl BlockCipher { pub fn decrypt(&self, mut bytes: Vec) -> ZResult> { if bytes.len() % Self::BLOCK_SIZE != 0 { - bail!("Invalid bytes lenght to decode: {}", bytes.len()); + bail!("Invalid bytes length to decode: {}", bytes.len()); } let mut start: usize = 0; diff --git a/commons/zenoh-protocol/src/lib.rs b/commons/zenoh-protocol/src/lib.rs index a18aeb766f..2e1a2fa7cf 100644 --- a/commons/zenoh-protocol/src/lib.rs +++ b/commons/zenoh-protocol/src/lib.rs @@ -48,7 +48,7 @@ pub const VERSION: u8 = 0x08; // # Variable length field // // The field size depends on the element definition and/or actual encoding. An example of variable -// lenght element is an array of bytes (e.g., a payload or a string). +// length element is an array of bytes (e.g., a payload or a string). // // ```text // 7 6 5 4 3 2 1 0 @@ -60,7 +60,7 @@ pub const VERSION: u8 = 0x08; // // # u64 field // -// A u64 is a specialized variable lenght field that is used to encode an unsigned integer. +// A u64 is a specialized variable length field that is used to encode an unsigned integer. // // ```text // 7 6 5 4 3 2 1 0 diff --git a/commons/zenoh-protocol/src/scouting/scout.rs b/commons/zenoh-protocol/src/scouting/scout.rs index 8cdb47d3cf..b7a51642df 100644 --- a/commons/zenoh-protocol/src/scouting/scout.rs +++ b/commons/zenoh-protocol/src/scouting/scout.rs @@ -56,7 +56,7 @@ use crate::core::{whatami::WhatAmIMatcher, ZenohId}; /// +---------------+ /// /// (#) ZID length. If Flag(I)==1 it indicates how many bytes are used for the ZenohID bytes. -/// A ZenohID is minimum 1 byte and maximum 16 bytes. Therefore, the actual lenght is computed as: +/// A ZenohID is minimum 1 byte and maximum 16 bytes. Therefore, the actual length is computed as: /// real_zid_len := 1 + zid_len /// /// (*) What. It indicates a bitmap of WhatAmI interests. 
diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index 0c60dd8a90..1327288471 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -76,7 +76,7 @@ use zenoh_buffers::ZSlice; /// - 0b11: Reserved /// /// (#) ZID length. It indicates how many bytes are used for the ZenohID bytes. -/// A ZenohID is minimum 1 byte and maximum 16 bytes. Therefore, the actual lenght is computed as: +/// A ZenohID is minimum 1 byte and maximum 16 bytes. Therefore, the actual length is computed as: /// real_zid_len := 1 + zid_len /// /// (+) Sequence Number/ID resolution. It indicates the resolution and consequently the wire overhead diff --git a/commons/zenoh-protocol/src/transport/join.rs b/commons/zenoh-protocol/src/transport/join.rs index 00920c17ee..c5fbb98430 100644 --- a/commons/zenoh-protocol/src/transport/join.rs +++ b/commons/zenoh-protocol/src/transport/join.rs @@ -74,7 +74,7 @@ use core::time::Duration; /// - 0b11: Reserved /// /// (#) ZID length. It indicates how many bytes are used for the ZenohID bytes. -/// A ZenohID is minimum 1 byte and maximum 16 bytes. Therefore, the actual lenght is computed as: +/// A ZenohID is minimum 1 byte and maximum 16 bytes. Therefore, the actual length is computed as: /// real_zid_len := 1 + zid_len /// /// (+) Sequence Number/ID resolution. 
It indicates the resolution and consequently the wire overhead diff --git a/commons/zenoh-protocol/src/zenoh/del.rs b/commons/zenoh-protocol/src/zenoh/del.rs index 0de867ce51..84fec5bc08 100644 --- a/commons/zenoh-protocol/src/zenoh/del.rs +++ b/commons/zenoh-protocol/src/zenoh/del.rs @@ -42,6 +42,7 @@ pub mod flag { pub struct Del { pub timestamp: Option, pub ext_sinfo: Option, + pub ext_attachment: Option, pub ext_unknown: Vec, } @@ -52,6 +53,10 @@ pub mod ext { /// Used to carry additional information about the source of data pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; + + /// # User attachment + pub type Attachment = zextzbuf!(0x2, false); + pub type AttachmentType = crate::zenoh::ext::AttachmentType<{ Attachment::ID }>; } impl Del { @@ -67,10 +72,11 @@ impl Del { Timestamp::new(time, id) }); let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); + let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::SourceInfo::ID) + 1, + iext::mid(ext::Attachment::ID) + 1, false, )); } @@ -78,6 +84,7 @@ impl Del { Self { timestamp, ext_sinfo, + ext_attachment, ext_unknown, } } diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 740c7e8b0d..e67576e673 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -256,4 +256,32 @@ pub mod ext { } } } + + /// ```text + /// 7 6 5 4 3 2 1 0 + /// +-+-+-+-+-+-+-+-+ + /// % num elems % + /// +-------+-+-+---+ + /// ~ key: ~ + /// +---------------+ + /// ~ val: ~ + /// +---------------+ + /// ... 
-- N times (key, value) tuples + /// ``` + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct AttachmentType { + pub buffer: ZBuf, + } + + impl AttachmentType<{ ID }> { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + + Self { + buffer: ZBuf::rand(rng.gen_range(3..=1_024)), + } + } + } } diff --git a/commons/zenoh-protocol/src/zenoh/put.rs b/commons/zenoh-protocol/src/zenoh/put.rs index 30b8ef837a..14674e9ad9 100644 --- a/commons/zenoh-protocol/src/zenoh/put.rs +++ b/commons/zenoh-protocol/src/zenoh/put.rs @@ -48,6 +48,7 @@ pub struct Put { pub timestamp: Option, pub encoding: Encoding, pub ext_sinfo: Option, + pub ext_attachment: Option, #[cfg(feature = "shared-memory")] pub ext_shm: Option, pub ext_unknown: Vec, @@ -70,6 +71,10 @@ pub mod ext { pub type Shm = zextunit!(0x2, true); #[cfg(feature = "shared-memory")] pub type ShmType = crate::zenoh::ext::ShmType<{ Shm::ID }>; + + /// # User attachment + pub type Attachment = zextzbuf!(0x3, false); + pub type AttachmentType = crate::zenoh::ext::AttachmentType<{ Attachment::ID }>; } impl Put { @@ -88,10 +93,11 @@ impl Put { let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); #[cfg(feature = "shared-memory")] let ext_shm = rng.gen_bool(0.5).then_some(ext::ShmType::rand()); + let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::SourceInfo::ID) + 1, + iext::mid(ext::Attachment::ID) + 1, false, )); } @@ -103,6 +109,7 @@ impl Put { ext_sinfo, #[cfg(feature = "shared-memory")] ext_shm, + ext_attachment, ext_unknown, payload, } diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index 17a2aa1d59..7432840492 100644 --- a/commons/zenoh-protocol/src/zenoh/query.rs +++ b/commons/zenoh-protocol/src/zenoh/query.rs @@ -94,6 +94,7 @@ pub struct Query { pub 
ext_sinfo: Option, pub ext_consolidation: Consolidation, pub ext_body: Option, + pub ext_attachment: Option, pub ext_unknown: Vec, } @@ -117,6 +118,10 @@ pub mod ext { /// Shared Memory extension is automatically defined by ValueType extension if /// #[cfg(feature = "shared-memory")] is defined. pub type QueryBodyType = crate::zenoh::ext::ValueType<{ ZExtZBuf::<0x03>::id(false) }, 0x04>; + + /// # User attachment + pub type Attachment = zextzbuf!(0x5, false); + pub type AttachmentType = crate::zenoh::ext::AttachmentType<{ Attachment::ID }>; } impl Query { @@ -141,10 +146,11 @@ impl Query { let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); let ext_consolidation = Consolidation::rand(); let ext_body = rng.gen_bool(0.5).then_some(ext::QueryBodyType::rand()); + let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::QueryBodyType::SID) + 1, + iext::mid(ext::Attachment::ID) + 1, false, )); } @@ -154,6 +160,7 @@ impl Query { ext_sinfo, ext_consolidation, ext_body, + ext_attachment, ext_unknown, } } diff --git a/commons/zenoh-protocol/src/zenoh/reply.rs b/commons/zenoh-protocol/src/zenoh/reply.rs index d6b65f88c0..2395e1e9b2 100644 --- a/commons/zenoh-protocol/src/zenoh/reply.rs +++ b/commons/zenoh-protocol/src/zenoh/reply.rs @@ -51,6 +51,7 @@ pub struct Reply { pub ext_consolidation: ext::ConsolidationType, #[cfg(feature = "shared-memory")] pub ext_shm: Option, + pub ext_attachment: Option, pub ext_unknown: Vec, pub payload: ZBuf, } @@ -78,6 +79,10 @@ pub mod ext { pub type Shm = zextunit!(0x3, true); #[cfg(feature = "shared-memory")] pub type ShmType = crate::zenoh::ext::ShmType<{ Shm::ID }>; + + /// # User attachment + pub type Attachment = zextzbuf!(0x4, false); + pub type AttachmentType = crate::zenoh::ext::AttachmentType<{ Attachment::ID }>; } impl Reply { @@ -97,10 +102,11 @@ impl Reply { let 
ext_consolidation = Consolidation::rand(); #[cfg(feature = "shared-memory")] let ext_shm = rng.gen_bool(0.5).then_some(ext::ShmType::rand()); + let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::Consolidation::ID) + 1, + iext::mid(ext::Attachment::ID) + 1, false, )); } @@ -113,6 +119,7 @@ impl Reply { ext_consolidation, #[cfg(feature = "shared-memory")] ext_shm, + ext_attachment, ext_unknown, payload, } diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index 61a7ea9be3..33409ce20a 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -65,7 +65,6 @@ impl PartialEq for Chunk { /// Informations about a [`SharedMemoryBuf`]. /// /// This that can be serialized and can be used to retrieve the [`SharedMemoryBuf`] in a remote process. -#[non_exhaustive] #[derive(Clone, Debug, PartialEq, Eq)] pub struct SharedMemoryBufInfo { /// The index of the beginning of the buffer in the shm segment. 
diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index aebca309ad..097b686de9 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -23,7 +23,7 @@ async fn main() { // Initiate logging env_logger::init(); - let (config, key_expr, value) = parse_args(); + let (config, key_expr, value, attachment) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); @@ -35,7 +35,16 @@ async fn main() { sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {value}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); - publisher.put(buf).res().await.unwrap(); + let mut put = publisher.put(buf); + if let Some(attachment) = &attachment { + put = put.with_attachment( + attachment + .split('&') + .map(|pair| pair.as_bytes().split_at(pair.find('=').unwrap_or(0))) + .collect(), + ) + } + put.res().await.unwrap(); } } @@ -47,11 +56,16 @@ struct Args { #[arg(short, long, default_value = "Pub from Rust!")] /// The value to write. value: String, + #[arg(short, long)] + /// The attachments to add to each put. + /// + /// The key-value pairs are &-separated, and = serves as the separator between key and value. 
+ attach: Option, #[command(flatten)] common: CommonArgs, } -fn parse_args() -> (Config, KeyExpr<'static>, String) { +fn parse_args() -> (Config, KeyExpr<'static>, String, Option) { let args = Args::parse(); - (args.common.into(), args.key, args.value) + (args.common.into(), args.key, args.value, args.attach) } diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 433444b8de..3e130e0608 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // + use clap::Parser; use std::convert::TryInto; -use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh_examples::CommonArgs; @@ -21,14 +21,21 @@ use zenoh_examples::CommonArgs; fn main() { // initiate logging env_logger::init(); - let (config, size, prio, print, number) = parse_args(); + let args = Args::parse(); + + let mut prio = Priority::default(); + if let Some(p) = args.priority { + prio = p.try_into().unwrap(); + } + + let payload_size = args.payload_size; - let data: Value = (0usize..size) + let data: Value = (0..payload_size) .map(|i| (i % 10) as u8) .collect::>() .into(); - let session = zenoh::open(config).res().unwrap(); + let session = zenoh::open(args.common).res().unwrap(); let publisher = session .declare_publisher("test/thr") @@ -42,8 +49,8 @@ fn main() { loop { publisher.put(data.clone()).res().unwrap(); - if print { - if count < number { + if args.print { + if count < args.number { count += 1; } else { let thpt = count as f64 / start.elapsed().as_secs_f64(); @@ -57,34 +64,17 @@ fn main() { #[derive(Parser, Clone, PartialEq, Eq, Hash, Debug)] struct Args { - #[arg(short, long)] /// Priority for sending data + #[arg(short, long)] priority: Option, - #[arg(short = 't', long)] /// Print the statistics + #[arg(short = 't', long)] print: bool, - #[arg(short, long, default_value = "100000")] /// Number of messages in each throughput measurements + 
#[arg(short, long, default_value = "100000")] number: usize, /// Sets the size of the payload to publish payload_size: usize, #[command(flatten)] common: CommonArgs, } - -fn parse_args() -> (Config, usize, Priority, bool, usize) { - let args = Args::parse(); - - let mut prio = Priority::default(); - if let Some(p) = args.priority { - prio = p.try_into().unwrap(); - } - - ( - args.common.into(), - args.payload_size, - prio, - args.print, - args.number, - ) -} diff --git a/examples/src/lib.rs b/examples/src/lib.rs index a766bd0695..255ac01917 100644 --- a/examples/src/lib.rs +++ b/examples/src/lib.rs @@ -4,9 +4,8 @@ //! use zenoh::config::Config; -#[derive(clap::ValueEnum, Default, Clone, Copy, PartialEq, Eq, Hash, Debug)] +#[derive(clap::ValueEnum, Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum Wai { - #[default] Peer, Client, Router, diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 488e357236..4139a65a05 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -583,6 +583,7 @@ mod tests { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::from(vec![0u8; 8]), }), diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 256dfbef47..2e3af61d64 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -758,6 +758,7 @@ mod tests { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload, }), @@ -887,6 +888,7 @@ mod tests { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload, }), @@ -998,6 +1000,7 @@ mod tests { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload, }), diff --git 
a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index fafb28e642..f8e56a5484 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -277,6 +277,7 @@ mod tests { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], } .into(), diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index 0822d08f58..ebb290af1e 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -273,6 +273,7 @@ mod tests { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], } .into(), diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index be979fef23..323c6f529e 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -305,6 +305,7 @@ mod tests { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], } .into(), diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index 64516f6f26..d13f763b68 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -204,6 +204,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec, endpoint02: Vec, client_transport: TransportUn ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], } .into(), diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index e27acfe3c3..f9180849af 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -280,6 +280,7 @@ mod tests { encoding: 
Encoding::default(), ext_sinfo: None, ext_shm: None, + ext_attachment: None, ext_unknown: vec![], } .into(), @@ -327,6 +328,7 @@ mod tests { encoding: Encoding::default(), ext_sinfo: None, ext_shm: None, + ext_attachment: None, ext_unknown: vec![], } .into(), diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index dad4b6f775..19380eb49e 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -86,6 +86,7 @@ mod tests { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], } .into(), diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 9b25bb26c8..11839aef2a 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -476,6 +476,7 @@ async fn test_transport( ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], } .into(), diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index a8aad9c809..8cdf638af5 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -153,6 +153,8 @@ impl TransportMulticastEventHandler for Handler { &expr, Some(info), serde_json::to_vec(&peer).unwrap().into(), + #[cfg(feature = "unstable")] + None, ); Ok(Arc::new(PeerHandler { expr, @@ -200,6 +202,8 @@ impl TransportPeerEventHandler for PeerHandler { .with_suffix(&format!("/link/{}", s.finish())), Some(info), serde_json::to_vec(&link).unwrap().into(), + #[cfg(feature = "unstable")] + None, ); } @@ -218,6 +222,8 @@ impl TransportPeerEventHandler for PeerHandler { .with_suffix(&format!("/link/{}", s.finish())), Some(info), vec![0u8; 0].into(), + #[cfg(feature = "unstable")] + None, ); } @@ -228,8 +234,14 @@ impl TransportPeerEventHandler for PeerHandler { kind: SampleKind::Delete, ..Default::default() }; - self.session - .handle_data(true, 
&self.expr, Some(info), vec![0u8; 0].into()); + self.session.handle_data( + true, + &self.expr, + Some(info), + vec![0u8; 0].into(), + #[cfg(feature = "unstable")] + None, + ); } fn as_any(&self) -> &dyn std::any::Any { diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 1e36cb8f69..a29d4b5d4a 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -749,6 +749,8 @@ where Locality::default(), self.timeout, None, + #[cfg(feature = "unstable")] + None, callback, ) .map(|_| receiver) diff --git a/zenoh/src/net/routing/queries.rs b/zenoh/src/net/routing/queries.rs index 06b81a998b..c2496b5ff8 100644 --- a/zenoh/src/net/routing/queries.rs +++ b/zenoh/src/net/routing/queries.rs @@ -2121,6 +2121,7 @@ pub fn route_query( ext_consolidation: ConsolidationType::default(), #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, // @TODO: expose it in the API ext_unknown: vec![], payload, }); diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 08b00c5047..96ea85f6b4 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -378,6 +378,8 @@ impl Primitives for AdminSpace { qid: msg.id, zid, primitives, + #[cfg(feature = "unstable")] + attachment: query.ext_attachment.map(Into::into), }), }; diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 933a2e46a4..518ec7e551 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -604,6 +604,7 @@ fn client_test() { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::empty(), }), @@ -636,6 +637,7 @@ fn client_test() { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::empty(), }), @@ -668,6 +670,7 @@ fn client_test() { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, 
ext_unknown: vec![], payload: ZBuf::empty(), }), @@ -700,6 +703,7 @@ fn client_test() { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::empty(), }), @@ -732,6 +736,7 @@ fn client_test() { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment: None, ext_unknown: vec![], payload: ZBuf::empty(), }), diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index be439b6f2d..8a84e49566 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -19,6 +19,8 @@ use crate::handlers::Callback; use crate::handlers::DefaultHandler; use crate::net::transport::primitives::Primitives; use crate::prelude::*; +#[zenoh_macros::unstable] +use crate::sample::Attachment; use crate::sample::DataInfo; use crate::Encoding; use crate::SessionRef; @@ -78,6 +80,8 @@ pub struct PutBuilder<'a, 'b> { pub(crate) publisher: PublisherBuilder<'a, 'b>, pub(crate) value: Value, pub(crate) kind: SampleKind, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, } impl PutBuilder<'_, '_> { @@ -117,6 +121,12 @@ impl PutBuilder<'_, '_> { self.kind = kind; self } + + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.attachment = Some(attachment); + self + } } impl Resolvable for PutBuilder<'_, '_> { @@ -130,6 +140,8 @@ impl SyncResolve for PutBuilder<'_, '_> { publisher, value, kind, + #[cfg(feature = "unstable")] + attachment, } = self; let key_expr = publisher.key_expr?; log::trace!("write({:?}, [...])", &key_expr); @@ -151,20 +163,42 @@ impl SyncResolve for PutBuilder<'_, '_> { ext_tstamp: None, ext_nodeid: ext::NodeIdType::default(), payload: match kind { - SampleKind::Put => PushBody::Put(Put { - timestamp, - encoding: value.encoding.clone(), - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_unknown: vec![], - payload: value.payload.clone(), - }), - SampleKind::Delete => 
PushBody::Del(Del { - timestamp, - ext_sinfo: None, - ext_unknown: vec![], - }), + SampleKind::Put => { + #[allow(unused_mut)] + let mut ext_attachment = None; + #[cfg(feature = "unstable")] + { + if let Some(attachment) = attachment.clone() { + ext_attachment = Some(attachment.into()); + } + } + PushBody::Put(Put { + timestamp, + encoding: value.encoding.clone(), + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment, + ext_unknown: vec![], + payload: value.payload.clone(), + }) + } + SampleKind::Delete => { + #[allow(unused_mut)] + let mut ext_attachment = None; + #[cfg(feature = "unstable")] + { + if let Some(attachment) = attachment.clone() { + ext_attachment = Some(attachment.into()); + } + } + PushBody::Del(Del { + timestamp, + ext_sinfo: None, + ext_attachment, + ext_unknown: vec![], + }) + } }, }); } @@ -181,6 +215,8 @@ impl SyncResolve for PutBuilder<'_, '_> { &key_expr.to_wire(&publisher.session), Some(data_info), value.payload, + #[cfg(feature = "unstable")] + attachment, ); } Ok(()) @@ -337,6 +373,8 @@ impl<'a> Publisher<'a> { publisher: self, value, kind, + #[cfg(feature = "unstable")] + attachment: None, } } @@ -621,6 +659,16 @@ pub struct Publication<'a> { publisher: &'a Publisher<'a>, value: Value, kind: SampleKind, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} + +impl<'a> Publication<'a> { + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.attachment = Some(attachment); + self + } } impl Resolvable for Publication<'_> { @@ -633,6 +681,8 @@ impl SyncResolve for Publication<'_> { publisher, value, kind, + #[cfg(feature = "unstable")] + attachment, } = self; log::trace!("write({:?}, [...])", publisher.key_expr); let primitives = zread!(publisher.session.state) @@ -643,6 +693,14 @@ impl SyncResolve for Publication<'_> { let timestamp = publisher.session.runtime.new_timestamp(); if publisher.destination != Locality::SessionLocal { + 
#[allow(unused_mut)] + let mut ext_attachment = None; + #[cfg(feature = "unstable")] + { + if let Some(attachment) = attachment.clone() { + ext_attachment = Some(attachment.into()); + } + } primitives.send_push(Push { wire_expr: publisher.key_expr.to_wire(&publisher.session).to_owned(), ext_qos: ext::QoSType::new( @@ -658,6 +716,7 @@ impl SyncResolve for Publication<'_> { ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment, ext_unknown: vec![], payload: value.payload.clone(), }), @@ -675,6 +734,8 @@ impl SyncResolve for Publication<'_> { &publisher.key_expr.to_wire(&publisher.session), Some(data_info), value.payload, + #[cfg(feature = "unstable")] + attachment, ); } Ok(()) diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 18cb7e882e..c4f3fb35e9 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -16,6 +16,8 @@ use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; +#[zenoh_macros::unstable] +use crate::sample::Attachment; use crate::Session; use std::collections::HashMap; use std::future::Ready; @@ -126,6 +128,8 @@ pub struct GetBuilder<'a, 'b, Handler> { pub(crate) timeout: Duration, pub(crate) handler: Handler, pub(crate) value: Option, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, } impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { @@ -159,6 +163,8 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { destination, timeout, value, + #[cfg(feature = "unstable")] + attachment, handler: _, } = self; GetBuilder { @@ -170,6 +176,8 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { destination, timeout, value, + #[cfg(feature = "unstable")] + attachment, handler: callback, } } @@ -238,6 +246,8 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { destination, timeout, value, + #[cfg(feature = "unstable")] + attachment, handler: _, } = self; GetBuilder { @@ -249,6 +259,8 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { destination, timeout, value, + #[cfg(feature = 
"unstable")] + attachment, handler, } } @@ -294,6 +306,12 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { self } + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.attachment = Some(attachment); + self + } + /// By default, `get` guarantees that it will only receive replies whose key expressions intersect /// with the queried key expression. /// @@ -310,6 +328,7 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { destination, timeout, value, + attachment, handler, } = self; Self { @@ -321,6 +340,7 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { destination, timeout, value, + attachment, handler, } } @@ -369,6 +389,8 @@ where self.destination, self.timeout, self.value, + #[cfg(feature = "unstable")] + self.attachment, callback, ) .map(|_| receiver) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 4881de6ec1..914684f76f 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,6 +18,9 @@ use crate::handlers::{locked, DefaultHandler}; use crate::prelude::*; #[zenoh_macros::unstable] use crate::query::ReplyKeyExpr; +#[zenoh_macros::unstable] +use crate::sample::Attachment; +use crate::sample::DataInfo; use crate::SessionRef; use crate::Undeclarable; @@ -45,6 +48,8 @@ pub(crate) struct QueryInner { pub(crate) qid: RequestId, pub(crate) zid: ZenohId, pub(crate) primitives: Arc, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, } impl Drop for QueryInner { @@ -91,6 +96,11 @@ impl Query { self.inner.value.as_ref() } + #[zenoh_macros::unstable] + pub fn attachment(&self) -> Option<&Attachment> { + self.inner.attachment.as_ref() + } + /// Sends a reply to this Query. /// /// By default, queries only accept replies whose key expression intersects with the query's. 
@@ -150,6 +160,20 @@ pub struct ReplyBuilder<'a> { result: Result, } +impl<'a> ReplyBuilder<'a> { + #[allow(clippy::result_large_err)] + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Result { + match &mut self.result { + Ok(sample) => { + sample.attachment = Some(attachment); + Ok(self) + } + Err(_) => Err((self, attachment)), + } + } +} + impl<'a> Resolvable for ReplyBuilder<'a> { type To = ZResult<()>; } @@ -163,7 +187,34 @@ impl SyncResolve for ReplyBuilder<'_> { { bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.query.key_expr()) } - let (key_expr, payload, data_info) = sample.split(); + let Sample { + key_expr, + value: Value { payload, encoding }, + kind, + timestamp, + #[cfg(feature = "unstable")] + source_info, + #[cfg(feature = "unstable")] + attachment, + } = sample; + #[allow(unused_mut)] + let mut data_info = DataInfo { + kind, + encoding: Some(encoding), + timestamp, + source_id: None, + source_sn: None, + }; + #[allow(unused_mut)] + let mut ext_attachment = None; + #[cfg(feature = "unstable")] + { + data_info.source_id = source_info.source_id; + data_info.source_sn = source_info.source_sn; + if let Some(attachment) = attachment { + ext_attachment = Some(attachment.into()); + } + } self.query.inner.primitives.send_response(Response { rid: self.query.inner.qid, wire_expr: WireExpr { @@ -187,6 +238,7 @@ impl SyncResolve for ReplyBuilder<'_> { ext_consolidation: ConsolidationType::default(), #[cfg(feature = "shared-memory")] ext_shm: None, + ext_attachment, ext_unknown: vec![], payload, }), diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 1d3c168e40..083e6fced5 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -98,6 +98,221 @@ impl From> for SourceInfo { } } +mod attachment { + #[zenoh_macros::unstable] + use zenoh_buffers::{ + reader::{HasReader, Reader}, + writer::HasWriter, 
+ ZBuf, ZBufReader, ZSlice, + }; + #[zenoh_macros::unstable] + use zenoh_codec::{RCodec, WCodec, Zenoh080}; + #[zenoh_macros::unstable] + use zenoh_protocol::zenoh::ext::AttachmentType; + + /// A builder for [`Attachment`] + #[zenoh_macros::unstable] + pub struct AttachmentBuilder { + pub(crate) inner: Vec, + } + #[zenoh_macros::unstable] + impl Default for AttachmentBuilder { + fn default() -> Self { + Self::new() + } + } + #[zenoh_macros::unstable] + impl AttachmentBuilder { + pub fn new() -> Self { + Self { inner: Vec::new() } + } + fn _insert(&mut self, key: &[u8], value: &[u8]) { + let codec = Zenoh080; + let mut writer = self.inner.writer(); + codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure + codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure + } + /// Inserts a key-value pair to the attachment. + /// + /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. 
+ pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( + &mut self, + key: &Key, + value: &Value, + ) { + self._insert(key.as_ref(), value.as_ref()) + } + pub fn build(self) -> Attachment { + Attachment { + inner: self.inner.into(), + } + } + } + #[zenoh_macros::unstable] + impl From for Attachment { + fn from(value: AttachmentBuilder) -> Self { + Attachment { + inner: value.inner.into(), + } + } + } + #[zenoh_macros::unstable] + #[derive(Clone)] + pub struct Attachment { + pub(crate) inner: ZBuf, + } + #[zenoh_macros::unstable] + impl Default for Attachment { + fn default() -> Self { + Self::new() + } + } + #[zenoh_macros::unstable] + impl From for AttachmentType { + fn from(this: Attachment) -> Self { + AttachmentType { buffer: this.inner } + } + } + #[zenoh_macros::unstable] + impl From> for Attachment { + fn from(this: AttachmentType) -> Self { + Attachment { inner: this.buffer } + } + } + #[zenoh_macros::unstable] + impl Attachment { + pub fn new() -> Self { + Self { + inner: ZBuf::empty(), + } + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn len(&self) -> usize { + self.iter().count() + } + pub fn iter(&self) -> AttachmentIterator { + self.into_iter() + } + fn _get(&self, key: &[u8]) -> Option { + self.iter() + .find_map(|(k, v)| (k.as_slice() == key).then_some(v)) + } + pub fn get>(&self, key: &Key) -> Option { + self._get(key.as_ref()) + } + fn _insert(&mut self, key: &[u8], value: &[u8]) { + let codec = Zenoh080; + let mut writer = self.inner.writer(); + codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure + codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure + } + /// Inserts a key-value pair to the attachment. + /// + /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. 
+ /// + /// [`Attachment`] is not very efficient at inserting, so if you wish to perform multiple inserts, it's generally better to [`Attachment::extend`] after performing the inserts on an [`AttachmentBuilder`] + pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( + &mut self, + key: &Key, + value: &Value, + ) { + self._insert(key.as_ref(), value.as_ref()) + } + fn _extend(&mut self, with: Self) -> &mut Self { + for slice in with.inner.zslices().cloned() { + self.inner.push_zslice(slice); + } + self + } + pub fn extend(&mut self, with: impl Into) -> &mut Self { + let with = with.into(); + self._extend(with) + } + } + #[zenoh_macros::unstable] + pub struct AttachmentIterator<'a> { + reader: ZBufReader<'a>, + } + #[zenoh_macros::unstable] + impl<'a> core::iter::IntoIterator for &'a Attachment { + type Item = (ZSlice, ZSlice); + type IntoIter = AttachmentIterator<'a>; + fn into_iter(self) -> Self::IntoIter { + AttachmentIterator { + reader: self.inner.reader(), + } + } + } + #[zenoh_macros::unstable] + impl core::fmt::Debug for Attachment { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{{")?; + for (key, value) in self { + let key = key.as_slice(); + let value = value.as_slice(); + match core::str::from_utf8(key) { + Ok(key) => write!(f, "\"{key}\": ")?, + Err(_) => { + write!(f, "0x")?; + for byte in key { + write!(f, "{byte:02X}")? + } + } + } + match core::str::from_utf8(value) { + Ok(value) => write!(f, "\"{value}\", ")?, + Err(_) => { + write!(f, "0x")?; + for byte in value { + write!(f, "{byte:02X}")? + } + write!(f, ", ")? 
+ } + } + } + write!(f, "}}") + } + } + #[zenoh_macros::unstable] + impl<'a> core::iter::Iterator for AttachmentIterator<'a> { + type Item = (ZSlice, ZSlice); + fn next(&mut self) -> Option { + let key = Zenoh080.read(&mut self.reader).ok()?; + let value = Zenoh080.read(&mut self.reader).ok()?; + Some((key, value)) + } + fn size_hint(&self) -> (usize, Option) { + ( + (self.reader.remaining() != 0) as usize, + Some(self.reader.remaining() / 2), + ) + } + } + #[zenoh_macros::unstable] + impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for AttachmentBuilder { + fn from_iter>(iter: T) -> Self { + let codec = Zenoh080; + let mut buffer: Vec = Vec::new(); + let mut writer = buffer.writer(); + for (key, value) in iter { + codec.write(&mut writer, key).unwrap(); // Infallible, barring allocation failures + codec.write(&mut writer, value).unwrap(); // Infallible, barring allocation failures + } + Self { inner: buffer } + } + } + #[zenoh_macros::unstable] + impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for Attachment { + fn from_iter>(iter: T) -> Self { + AttachmentBuilder::from_iter(iter).into() + } + } +} +#[zenoh_macros::unstable] +pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; + /// A zenoh sample. #[non_exhaustive] #[derive(Clone, Debug)] @@ -120,6 +335,16 @@ pub struct Sample { /// /// Infos on the source of this Sample. pub source_info: SourceInfo, + + #[cfg(feature = "unstable")] + ///
+ /// 🔬 + /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. + /// To use it, you must enable zenoh's unstable feature flag. + ///
+ /// + /// A map of key-value pairs, where each key and value are byte-slices. + pub attachment: Option, } impl Sample { @@ -137,6 +362,8 @@ impl Sample { timestamp: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, } } /// Creates a new Sample. @@ -157,6 +384,8 @@ impl Sample { timestamp: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, }) } @@ -179,6 +408,8 @@ impl Sample { timestamp: data_info.timestamp, #[cfg(feature = "unstable")] source_info: data_info.into(), + #[cfg(feature = "unstable")] + attachment: None, } } else { Sample { @@ -188,28 +419,12 @@ impl Sample { timestamp: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, } } } - #[inline] - pub(crate) fn split(self) -> (KeyExpr<'static>, ZBuf, DataInfo) { - let info = DataInfo { - kind: self.kind, - encoding: Some(self.value.encoding), - timestamp: self.timestamp, - #[cfg(feature = "unstable")] - source_id: self.source_info.source_id, - #[cfg(not(feature = "unstable"))] - source_id: None, - #[cfg(feature = "unstable")] - source_sn: self.source_info.source_sn, - #[cfg(not(feature = "unstable"))] - source_sn: None, - }; - (self.key_expr, self.value.payload, info) - } - /// Gets the timestamp of this Sample. 
#[inline] pub fn get_timestamp(&self) -> Option<&Timestamp> { @@ -244,6 +459,23 @@ impl Sample { self.timestamp.as_ref().unwrap() } } + + #[zenoh_macros::unstable] + pub fn attachment(&self) -> Option<&Attachment> { + self.attachment.as_ref() + } + + #[zenoh_macros::unstable] + pub fn attachment_mut(&mut self) -> &mut Option { + &mut self.attachment + } + + #[allow(clippy::result_large_err)] + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.attachment = Some(attachment); + self + } } impl std::ops::Deref for Sample { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 23369e5790..6609d1361d 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -28,6 +28,8 @@ use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; use crate::query::*; use crate::queryable::*; +#[cfg(feature = "unstable")] +use crate::sample::Attachment; use crate::sample::DataInfo; use crate::selector::TIME_RANGE_KEY; use crate::subscriber::*; @@ -721,6 +723,8 @@ impl Session { publisher: self.declare_publisher(key_expr), value: value.into(), kind: SampleKind::Put, + #[cfg(feature = "unstable")] + attachment: None, } } @@ -752,6 +756,8 @@ impl Session { publisher: self.declare_publisher(key_expr), value: Value::empty(), kind: SampleKind::Delete, + #[cfg(feature = "unstable")] + attachment: None, } } /// Query data from the matching queryables in the system. 
@@ -794,6 +800,8 @@ impl Session { destination: Locality::default(), timeout: Duration::from_millis(unwrap_or_default!(conf.queries_default_timeout())), value: None, + #[cfg(feature = "unstable")] + attachment: None, handler: DefaultHandler, } } @@ -1606,6 +1614,7 @@ impl Session { key_expr: &WireExpr, info: Option, payload: ZBuf, + #[cfg(feature = "unstable")] attachment: Option, ) { let mut callbacks = SingleOrVec::default(); let state = zread!(self.state); @@ -1706,10 +1715,22 @@ impl Session { drop(state); let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain { - cb(Sample::with_info(key_expr, payload.clone(), info.clone())); + #[allow(unused_mut)] + let mut sample = Sample::with_info(key_expr, payload.clone(), info.clone()); + #[cfg(feature = "unstable")] + { + sample.attachment = attachment.clone(); + } + cb(sample); } if let Some((cb, key_expr)) = last { - cb(Sample::with_info(key_expr, payload, info)); + #[allow(unused_mut)] + let mut sample = Sample::with_info(key_expr, payload, info); + #[cfg(feature = "unstable")] + { + sample.attachment = attachment; + } + cb(sample); } } @@ -1746,6 +1767,7 @@ impl Session { destination: Locality, timeout: Duration, value: Option, + #[cfg(feature = "unstable")] attachment: Option, callback: Callback<'static, Reply>, ) -> ZResult<()> { log::trace!("get({}, {:?}, {:?})", selector, target, consolidation); @@ -1813,6 +1835,14 @@ impl Session { drop(state); if destination != Locality::SessionLocal { + #[allow(unused_mut)] + let mut ext_attachment = None; + #[cfg(feature = "unstable")] + { + if let Some(attachment) = attachment.clone() { + ext_attachment = Some(attachment.into()); + } + } primitives.send_request(Request { id: qid, wire_expr: wexpr.clone(), @@ -1832,6 +1862,7 @@ impl Session { encoding: v.encoding.clone(), payload: v.payload.clone(), }), + ext_attachment, ext_unknown: vec![], }), }); @@ -1850,6 +1881,8 @@ impl Session { encoding: v.encoding.clone(), 
payload: v.payload.clone(), }), + #[cfg(feature = "unstable")] + attachment, ); } Ok(()) @@ -1865,6 +1898,7 @@ impl Session { _target: TargetType, _consolidation: ConsolidationType, body: Option, + #[cfg(feature = "unstable")] attachment: Option, ) { let (primitives, key_expr, callbacks) = { let state = zread!(self.state); @@ -1925,6 +1959,8 @@ impl Session { } else { primitives }, + #[cfg(feature = "unstable")] + attachment, }), }; for callback in callbacks.iter() { @@ -2123,7 +2159,14 @@ impl Primitives for Session { .starts_with(crate::liveliness::PREFIX_LIVELINESS) { drop(state); - self.handle_data(false, &m.wire_expr, None, ZBuf::default()); + self.handle_data( + false, + &m.wire_expr, + None, + ZBuf::default(), + #[cfg(feature = "unstable")] + None, + ); } } Err(err) => { @@ -2155,6 +2198,8 @@ impl Primitives for Session { &m.ext_wire_expr.wire_expr, Some(data_info), ZBuf::default(), + #[cfg(feature = "unstable")] + None, ); } } @@ -2189,7 +2234,14 @@ impl Primitives for Session { source_id: m.ext_sinfo.as_ref().map(|i| i.zid), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; - self.handle_data(false, &msg.wire_expr, Some(info), m.payload) + self.handle_data( + false, + &msg.wire_expr, + Some(info), + m.payload, + #[cfg(feature = "unstable")] + m.ext_attachment.map(Into::into), + ) } PushBody::Del(m) => { let info = DataInfo { @@ -2199,7 +2251,14 @@ impl Primitives for Session { source_id: m.ext_sinfo.as_ref().map(|i| i.zid), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; - self.handle_data(false, &msg.wire_expr, Some(info), ZBuf::empty()) + self.handle_data( + false, + &msg.wire_expr, + Some(info), + ZBuf::empty(), + #[cfg(feature = "unstable")] + m.ext_attachment.map(Into::into), + ) } } } @@ -2215,6 +2274,8 @@ impl Primitives for Session { msg.ext_target, m.ext_consolidation, m.ext_body, + #[cfg(feature = "unstable")] + m.ext_attachment.map(Into::into), ), RequestBody::Put(_) => (), RequestBody::Del(_) => (), @@ -2326,12 +2387,15 @@ impl 
Primitives for Session { source_id: m.ext_sinfo.as_ref().map(|i| i.zid), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; + #[allow(unused_mut)] + let mut sample = + Sample::with_info(key_expr.into_owned(), m.payload, Some(info)); + #[cfg(feature = "unstable")] + { + sample.attachment = m.ext_attachment.map(Into::into); + } let new_reply = Reply { - sample: Ok(Sample::with_info( - key_expr.into_owned(), - m.payload, - Some(info), - )), + sample: Ok(sample), replier_id: ZenohId::rand(), // TODO }; let callback = diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs new file mode 100644 index 0000000000..d1fbd1086a --- /dev/null +++ b/zenoh/tests/attachments.rs @@ -0,0 +1,112 @@ +#[cfg(feature = "unstable")] +#[test] +fn pubsub() { + use zenoh::prelude::sync::*; + + let zenoh = zenoh::open(Config::default()).res().unwrap(); + let _sub = zenoh + .declare_subscriber("test/attachment") + .callback(|sample| { + println!( + "{}", + std::str::from_utf8(&sample.payload.contiguous()).unwrap() + ); + for (k, v) in &sample.attachment.unwrap() { + assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) + } + }) + .res() + .unwrap(); + let publisher = zenoh.declare_publisher("test/attachment").res().unwrap(); + for i in 0..10 { + let mut backer = [( + [0; std::mem::size_of::()], + [0; std::mem::size_of::()], + ); 10]; + for (j, backer) in backer.iter_mut().enumerate() { + *backer = ((i * 10 + j).to_le_bytes(), (i * 10 + j).to_be_bytes()) + } + zenoh + .put("test/attachment", "put") + .with_attachment( + backer + .iter() + .map(|b| (b.0.as_slice(), b.1.as_slice())) + .collect(), + ) + .res() + .unwrap(); + publisher + .put("publisher") + .with_attachment( + backer + .iter() + .map(|b| (b.0.as_slice(), b.1.as_slice())) + .collect(), + ) + .res() + .unwrap(); + } +} +#[cfg(feature = "unstable")] +#[test] +fn queries() { + use zenoh::{prelude::sync::*, sample::Attachment}; + + let zenoh = zenoh::open(Config::default()).res().unwrap(); + let _sub = zenoh 
+ .declare_queryable("test/attachment") + .callback(|query| { + println!( + "{}", + std::str::from_utf8( + &query + .value() + .map(|q| q.payload.contiguous()) + .unwrap_or_default() + ) + .unwrap() + ); + let mut attachment = Attachment::new(); + for (k, v) in query.attachment().unwrap() { + assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)); + attachment.insert(&k, &k); + } + query + .reply(Ok(Sample::new( + query.key_expr().clone(), + query.value().unwrap().clone(), + ) + .with_attachment(attachment))) + .res() + .unwrap(); + }) + .res() + .unwrap(); + for i in 0..10 { + let mut backer = [( + [0; std::mem::size_of::()], + [0; std::mem::size_of::()], + ); 10]; + for (j, backer) in backer.iter_mut().enumerate() { + *backer = ((i * 10 + j).to_le_bytes(), (i * 10 + j).to_be_bytes()) + } + let get = zenoh + .get("test/attachment") + .with_value("query") + .with_attachment( + backer + .iter() + .map(|b| (b.0.as_slice(), b.1.as_slice())) + .collect(), + ) + .res() + .unwrap(); + while let Ok(reply) = get.recv() { + let response = reply.sample.as_ref().unwrap(); + for (k, v) in response.attachment().unwrap() { + assert_eq!(k, v) + } + } + } +} From 202b8bb3ead1226f14df1021f556f12f45c80dab Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Fri, 15 Dec 2023 12:00:58 +0300 Subject: [PATCH 28/29] Tests: use blocking executor for non-async code (#627) --- io/zenoh-transport/src/common/pipeline.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 2e3af61d64..954c656280 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -855,7 +855,7 @@ mod tests { }); let c_ps = *ps; - let t_s = task::spawn(async move { + let t_s = task::spawn_blocking(move || { schedule(producer, num_msg, c_ps); }); @@ -972,7 +972,7 @@ mod tests { let size = 
Arc::new(AtomicUsize::new(0)); let c_size = size.clone(); - task::spawn(async move { + task::spawn_blocking(move || { loop { let payload_sizes: [usize; 16] = [ 8, 16, 32, 64, 128, 256, 512, 1_024, 2_048, 4_096, 8_192, 16_384, 32_768, From 780ec606ba4000a17b2da0e2b0b668e11d37c65e Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Fri, 15 Dec 2023 12:01:37 +0300 Subject: [PATCH 29/29] Use TX executor for multicast TX (#626) --- io/zenoh-transport/src/multicast/link.rs | 7 +++++-- io/zenoh-transport/src/multicast/transport.rs | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 8e1d17fefe..21ed0b3fdf 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -23,7 +23,9 @@ use crate::{ priority::TransportPriorityTx, }, multicast::transport::TransportMulticastInner, + TransportExecutor, }; +use async_executor::Task; use async_std::{ prelude::FutureExt, task::{self, JoinHandle}, @@ -269,7 +271,7 @@ pub(super) struct TransportLinkMulticastUniversal { // The transport this link is associated to transport: TransportMulticastInner, // The signals to stop TX/RX tasks - handle_tx: Option>>, + handle_tx: Option>>, signal_rx: Signal, handle_rx: Option>>, } @@ -295,6 +297,7 @@ impl TransportLinkMulticastUniversal { &mut self, config: TransportLinkMulticastConfigUniversal, priority_tx: Arc<[TransportPriorityTx]>, + executor: &TransportExecutor, ) { let initial_sns: Vec = priority_tx .iter() @@ -331,7 +334,7 @@ impl TransportLinkMulticastUniversal { // Spawn the TX task let c_link = self.link.clone(); let ctransport = self.transport.clone(); - let handle = task::spawn(async move { + let handle = executor.spawn(async move { let res = tx_task( consumer, c_link.tx(), diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index 
ca6cddaf2b..d5a1da14d4 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -257,7 +257,7 @@ impl TransportMulticastInner { sn_resolution: self.manager.config.resolution.get(Field::FrameSN), batch_size, }; - l.start_tx(config, self.priority_tx.clone()); + l.start_tx(config, self.priority_tx.clone(), &self.manager.tx_executor); Ok(()) } None => {