From 1dc31d47f86396f6817ba34125446d001960de51 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 30 Nov 2023 15:29:06 +0100 Subject: [PATCH] Hop-to-hop compression (#585) --- Cargo.lock | 1 + DEFAULT_CONFIG.json5 | 27 +- commons/zenoh-buffers/src/bbuf.rs | 65 +- commons/zenoh-buffers/src/lib.rs | 74 +-- commons/zenoh-buffers/src/slice.rs | 33 +- commons/zenoh-buffers/src/vec.rs | 31 +- commons/zenoh-buffers/src/zbuf.rs | 26 +- commons/zenoh-buffers/src/zslice.rs | 34 +- commons/zenoh-codec/src/core/zbuf.rs | 3 +- commons/zenoh-codec/src/transport/batch.rs | 255 ++++++++ commons/zenoh-codec/src/transport/init.rs | 127 ++-- commons/zenoh-codec/src/transport/mod.rs | 1 + commons/zenoh-codec/src/transport/open.rs | 105 +++- commons/zenoh-config/src/defaults.rs | 27 +- commons/zenoh-config/src/lib.rs | 40 +- commons/zenoh-protocol/src/transport/frame.rs | 2 +- commons/zenoh-protocol/src/transport/init.rs | 10 + commons/zenoh-protocol/src/transport/open.rs | 10 + io/zenoh-link-commons/Cargo.toml | 4 + io/zenoh-link-commons/src/unicast.rs | 83 +-- io/zenoh-transport/src/common/batch.rs | 564 +++++++++++------- .../src/common/defragmentation.rs | 2 +- io/zenoh-transport/src/common/mod.rs | 2 +- io/zenoh-transport/src/common/pipeline.rs | 55 +- io/zenoh-transport/src/lib.rs | 12 +- io/zenoh-transport/src/manager.rs | 4 +- .../src/multicast/establishment.rs | 15 +- io/zenoh-transport/src/multicast/link.rs | 345 +++++++++-- io/zenoh-transport/src/multicast/manager.rs | 22 +- io/zenoh-transport/src/multicast/mod.rs | 8 +- io/zenoh-transport/src/multicast/rx.rs | 43 +- io/zenoh-transport/src/multicast/transport.rs | 24 +- .../src/unicast/establishment/accept.rs | 304 ++++++---- .../src/unicast/establishment/cookie.rs | 10 + .../src/unicast/establishment/ext/auth/mod.rs | 150 +---- .../unicast/establishment/ext/auth/pubkey.rs | 20 +- .../unicast/establishment/ext/auth/usrpwd.rs | 20 +- .../unicast/establishment/ext/compression.rs | 196 ++++++ 
.../unicast/establishment/ext/lowlatency.rs | 20 +- .../src/unicast/establishment/ext/mod.rs | 2 + .../unicast/establishment/ext/multilink.rs | 20 +- .../src/unicast/establishment/ext/qos.rs | 20 +- .../src/unicast/establishment/ext/shm.rs | 20 +- .../src/unicast/establishment/mod.rs | 50 +- .../src/unicast/establishment/open.rs | 326 ++++++---- io/zenoh-transport/src/unicast/link.rs | 275 +++++++++ .../src/unicast/lowlatency/link.rs | 65 +- .../src/unicast/lowlatency/transport.rs | 45 +- io/zenoh-transport/src/unicast/manager.rs | 56 +- io/zenoh-transport/src/unicast/mod.rs | 3 +- .../src/unicast/test_helpers.rs | 4 +- .../src/unicast/transport_unicast_inner.rs | 21 +- .../src/unicast/universal/link.rs | 535 ++--------------- .../src/unicast/universal/rx.rs | 32 +- .../src/unicast/universal/transport.rs | 67 ++- .../src/unicast/universal/tx.rs | 2 +- io/zenoh-transport/tests/endpoints.rs | 4 +- .../tests/multicast_compression.rs | 376 ++++++++++++ .../tests/multicast_transport.rs | 8 +- .../tests/transport_whitelist.rs | 4 +- .../tests/unicast_authenticator.rs | 27 +- .../tests/unicast_compression.rs | 553 +++++++++++++++++ .../tests/unicast_concurrent.rs | 4 +- .../tests/unicast_intermittent.rs | 7 +- io/zenoh-transport/tests/unicast_multilink.rs | 6 +- io/zenoh-transport/tests/unicast_openclose.rs | 7 +- .../tests/unicast_priorities.rs | 8 +- io/zenoh-transport/tests/unicast_shm.rs | 6 +- .../tests/unicast_simultaneous.rs | 4 +- io/zenoh-transport/tests/unicast_transport.rs | 31 +- zenoh/Cargo.toml | 2 + zenoh/src/admin.rs | 4 +- zenoh/src/key_expr.rs | 2 +- zenoh/src/net/routing/face.rs | 2 +- zenoh/src/net/routing/network.rs | 2 +- zenoh/src/net/routing/router.rs | 6 +- zenoh/src/net/runtime/adminspace.rs | 4 +- zenoh/src/net/runtime/mod.rs | 5 +- zenoh/src/net/tests/tables.rs | 2 +- zenoh/src/prelude.rs | 6 +- zenoh/src/publication.rs | 3 +- zenoh/src/queryable.rs | 2 +- zenoh/src/session.rs | 2 +- zenoh/tests/liveliness.rs | 15 +- zenoh/tests/routing.rs | 2 
+- zenoh/tests/session.rs | 3 + 86 files changed, 3641 insertions(+), 1783 deletions(-) create mode 100644 commons/zenoh-codec/src/transport/batch.rs create mode 100644 io/zenoh-transport/src/unicast/establishment/ext/compression.rs create mode 100644 io/zenoh-transport/src/unicast/link.rs create mode 100644 io/zenoh-transport/tests/multicast_compression.rs create mode 100644 io/zenoh-transport/tests/unicast_compression.rs diff --git a/Cargo.lock b/Cargo.lock index 1cc18137ad..01378d3015 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4671,6 +4671,7 @@ dependencies = [ "async-std", "async-trait", "flume", + "lz4_flex", "serde", "typenum", "zenoh-buffers", diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index dae3ebc9aa..4a0179fb71 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -133,9 +133,20 @@ /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to /// enable 'lowlatency' you need to explicitly disable 'qos'. lowlatency: false, + qos: { + enabled: true, + }, + compression: { + enabled: false, + }, }, - qos: { - enabled: true, + multicast: { + qos: { + enabled: true, + }, + compression: { + enabled: false, + }, }, link: { /// An optional whitelist of protocols to be used for accepting and opening sessions. @@ -183,6 +194,9 @@ /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. /// Higher values lead to a more aggressive batching but it will introduce additional latency. backoff: 100, + // Number of threads dedicated to transmission + // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // threads: 4, }, }, /// Configure the zenoh RX parameters of a link @@ -220,15 +234,6 @@ // ca to verify that the server at baz.com is actually baz.com, let this be true (default). server_name_verification: null, }, - - /// **Experimental** compression feature. - /// Will compress the batches hop to hop (as opposed to end to end). 
- /// The features "transport_compression" and "unstable" need to be enabled to handle - /// compression on the integrality of the network. - compression: { - /// When 'enabled' is true, batches will be sent compressed. - enabled: false, - }, }, /// Shared memory configuration shared_memory: { diff --git a/commons/zenoh-buffers/src/bbuf.rs b/commons/zenoh-buffers/src/bbuf.rs index bdb9e9a056..2f5c24d6a0 100644 --- a/commons/zenoh-buffers/src/bbuf.rs +++ b/commons/zenoh-buffers/src/bbuf.rs @@ -12,14 +12,16 @@ // ZettaScale Zenoh Team, // use crate::{ + buffer::{Buffer, SplitBuffer}, reader::HasReader, vec, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, + ZSlice, }; -use alloc::boxed::Box; -use core::num::NonZeroUsize; +use alloc::{boxed::Box, sync::Arc}; +use core::{fmt, num::NonZeroUsize, option}; -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq)] pub struct BBuf { buffer: Box<[u8]>, len: usize, @@ -39,16 +41,6 @@ impl BBuf { self.buffer.len() } - #[must_use] - pub const fn len(&self) -> usize { - self.len - } - - #[must_use] - pub const fn is_empty(&self) -> bool { - self.len == 0 - } - #[must_use] pub fn as_slice(&self) -> &[u8] { // SAFETY: self.len is ensured by the writer to be smaller than buffer length. 
@@ -70,6 +62,40 @@ impl BBuf { } } +impl fmt::Debug for BBuf { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:02x?}", self.as_slice()) + } +} + +// Buffer +impl Buffer for BBuf { + fn len(&self) -> usize { + self.len + } +} + +impl Buffer for &BBuf { + fn len(&self) -> usize { + self.len + } +} + +impl Buffer for &mut BBuf { + fn len(&self) -> usize { + self.len + } +} + +// SplitBuffer +impl SplitBuffer for BBuf { + type Slices<'a> = option::IntoIter<&'a [u8]>; + + fn slices(&self) -> Self::Slices<'_> { + Some(self.as_slice()).into_iter() + } +} + // Writer impl HasWriter for &mut BBuf { type Writer = Self; @@ -152,6 +178,19 @@ impl<'a> HasReader for &'a BBuf { } } +// From impls +impl From for ZSlice { + fn from(value: BBuf) -> Self { + ZSlice { + buf: Arc::new(value.buffer), + start: 0, + end: value.len, + #[cfg(feature = "shared-memory")] + kind: crate::ZSliceKind::Raw, + } + } +} + #[cfg(feature = "test")] impl BBuf { pub fn rand(len: usize) -> Self { diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index 718f486def..4dee599ea7 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -28,7 +28,6 @@ pub mod vec; mod zbuf; mod zslice; -use alloc::{borrow::Cow, vec::Vec}; pub use bbuf::*; pub use zbuf::*; pub use zslice::*; @@ -73,6 +72,45 @@ macro_rules! unsafe_slice_mut { }; } +pub mod buffer { + use alloc::{borrow::Cow, vec::Vec}; + + pub trait Buffer { + /// Returns the number of bytes in the buffer. + fn len(&self) -> usize; + + /// Returns `true` if the buffer has a length of 0. + fn is_empty(&self) -> bool { + self.len() == 0 + } + } + + /// A trait for buffers that can be composed of multiple non contiguous slices. + pub trait SplitBuffer: Buffer { + type Slices<'a>: Iterator + ExactSizeIterator + where + Self: 'a; + + /// Gets all the slices of this buffer. + fn slices(&self) -> Self::Slices<'_>; + + /// Returns all the bytes of this buffer in a contiguous slice. 
+ /// This may require allocation and copy if the original buffer + /// is not contiguous. + fn contiguous(&self) -> Cow<'_, [u8]> { + let mut slices = self.slices(); + match slices.len() { + 0 => Cow::Borrowed(b""), + 1 => Cow::Borrowed(slices.next().unwrap()), + _ => Cow::Owned(slices.fold(Vec::new(), |mut acc, it| { + acc.extend(it); + acc + })), + } + } + } +} + pub mod writer { use crate::ZSlice; use core::num::NonZeroUsize; @@ -100,6 +138,7 @@ pub mod writer { where F: FnOnce(&mut [u8]) -> usize; } + pub trait BacktrackableWriter: Writer { type Mark; @@ -175,36 +214,3 @@ pub mod reader { fn reader(self) -> Self::Reader; } } - -/// A trait for buffers that can be composed of multiple non contiguous slices. -pub trait SplitBuffer<'a> { - type Slices: Iterator + ExactSizeIterator; - - /// Gets all the slices of this buffer. - fn slices(&'a self) -> Self::Slices; - - /// Returns `true` if the buffer has a length of 0. - fn is_empty(&'a self) -> bool { - self.slices().all(<[u8]>::is_empty) - } - - /// Returns the number of bytes in the buffer. - fn len(&'a self) -> usize { - self.slices().fold(0, |acc, it| acc + it.len()) - } - - /// Returns all the bytes of this buffer in a conitguous slice. - /// This may require allocation and copy if the original buffer - /// is not contiguous. 
- fn contiguous(&'a self) -> Cow<'a, [u8]> { - let mut slices = self.slices(); - match slices.len() { - 0 => Cow::Borrowed(b""), - 1 => Cow::Borrowed(slices.next().unwrap()), - _ => Cow::Owned(slices.fold(Vec::new(), |mut acc, it| { - acc.extend(it); - acc - })), - } - } -} diff --git a/commons/zenoh-buffers/src/slice.rs b/commons/zenoh-buffers/src/slice.rs index 6056bb9606..a652c6930e 100644 --- a/commons/zenoh-buffers/src/slice.rs +++ b/commons/zenoh-buffers/src/slice.rs @@ -12,11 +12,42 @@ // ZettaScale Zenoh Team, // use crate::{ + buffer::{Buffer, SplitBuffer}, reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, ZSlice, }; -use core::{marker::PhantomData, mem, num::NonZeroUsize, slice}; +use core::{ + marker::PhantomData, + mem, + num::NonZeroUsize, + option, + slice::{self}, +}; + +// Buffer +impl Buffer for &[u8] { + #[inline(always)] + fn len(&self) -> usize { + <[u8]>::len(self) + } +} + +impl Buffer for &mut [u8] { + #[inline(always)] + fn len(&self) -> usize { + <[u8]>::len(self) + } +} + +// SplitBuffer +impl<'b> SplitBuffer for &'b [u8] { + type Slices<'a> = option::IntoIter<&'a [u8]> where 'b: 'a; + + fn slices(&self) -> Self::Slices<'_> { + Some(*self).into_iter() + } +} // Writer impl HasWriter for &mut [u8] { diff --git a/commons/zenoh-buffers/src/vec.rs b/commons/zenoh-buffers/src/vec.rs index cbe1ee5801..cf5a3ad9b4 100644 --- a/commons/zenoh-buffers/src/vec.rs +++ b/commons/zenoh-buffers/src/vec.rs @@ -12,11 +12,12 @@ // ZettaScale Zenoh Team, // use crate::{ + buffer::{Buffer, SplitBuffer}, reader::HasReader, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, }; use alloc::vec::Vec; -use core::{mem, num::NonZeroUsize}; +use core::{mem, num::NonZeroUsize, option}; /// Allocate a vector with a given capacity and sets the length to that capacity. 
#[must_use] @@ -30,6 +31,34 @@ pub fn uninit(capacity: usize) -> Vec { vbuf } +// Buffer +impl Buffer for Vec { + fn len(&self) -> usize { + Vec::len(self) + } +} + +impl Buffer for &Vec { + fn len(&self) -> usize { + Vec::len(self) + } +} + +impl Buffer for &mut Vec { + fn len(&self) -> usize { + Vec::len(self) + } +} + +// SplitBuffer +impl SplitBuffer for Vec { + type Slices<'a> = option::IntoIter<&'a [u8]>; + + fn slices(&self) -> Self::Slices<'_> { + Some(self.as_slice()).into_iter() + } +} + // Writer impl<'a> HasWriter for &'a mut Vec { type Writer = Self; diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 3f941f48e3..db62e26f54 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -14,9 +14,10 @@ #[cfg(feature = "shared-memory")] use crate::ZSliceKind; use crate::{ + buffer::{Buffer, SplitBuffer}, reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - SplitBuffer, ZSlice, + ZSlice, }; use alloc::{sync::Arc, vec::Vec}; use core::{cmp, iter, mem, num::NonZeroUsize, ptr, slice}; @@ -56,18 +57,8 @@ impl ZBuf { } } -impl<'a> SplitBuffer<'a> for ZBuf { - type Slices = iter::Map, fn(&'a ZSlice) -> &'a [u8]>; - - fn slices(&'a self) -> Self::Slices { - self.slices.as_ref().iter().map(ZSlice::as_slice) - } - - #[inline(always)] - fn is_empty(&self) -> bool { - self.len() == 0 - } - +// Buffer +impl Buffer for ZBuf { #[inline(always)] fn len(&self) -> usize { self.slices @@ -77,6 +68,15 @@ impl<'a> SplitBuffer<'a> for ZBuf { } } +// SplitBuffer +impl SplitBuffer for ZBuf { + type Slices<'a> = iter::Map, fn(&'a ZSlice) -> &'a [u8]>; + + fn slices(&self) -> Self::Slices<'_> { + self.slices.as_ref().iter().map(ZSlice::as_slice) + } +} + impl PartialEq for ZBuf { fn eq(&self, other: &Self) -> bool { let mut self_slices = self.slices(); diff --git a/commons/zenoh-buffers/src/zslice.rs 
b/commons/zenoh-buffers/src/zslice.rs index 294092e682..e53e6f3334 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -11,7 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::reader::{BacktrackableReader, DidntRead, HasReader, Reader}; +use crate::{ + buffer::{Buffer, SplitBuffer}, + reader::{BacktrackableReader, DidntRead, HasReader, Reader}, +}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use core::{ any::Any, @@ -19,6 +22,7 @@ use core::{ fmt, num::NonZeroUsize, ops::{Deref, Index, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}, + option, }; /*************************************/ @@ -272,6 +276,34 @@ where } } +// Buffer +impl Buffer for ZSlice { + fn len(&self) -> usize { + ZSlice::len(self) + } +} + +impl Buffer for &ZSlice { + fn len(&self) -> usize { + ZSlice::len(self) + } +} + +impl Buffer for &mut ZSlice { + fn len(&self) -> usize { + ZSlice::len(self) + } +} + +// SplitBuffer +impl SplitBuffer for ZSlice { + type Slices<'a> = option::IntoIter<&'a [u8]>; + + fn slices(&self) -> Self::Slices<'_> { + Some(self.as_slice()).into_iter() + } +} + // Reader impl HasReader for &mut ZSlice { type Reader = Self; diff --git a/commons/zenoh-codec/src/core/zbuf.rs b/commons/zenoh-codec/src/core/zbuf.rs index ccf5d595ce..137030e66c 100644 --- a/commons/zenoh-codec/src/core/zbuf.rs +++ b/commons/zenoh-codec/src/core/zbuf.rs @@ -13,9 +13,10 @@ // use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ + buffer::Buffer, reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, - SplitBuffer, ZBuf, + ZBuf, }; // ZBuf bounded diff --git a/commons/zenoh-codec/src/transport/batch.rs b/commons/zenoh-codec/src/transport/batch.rs new file mode 100644 index 0000000000..525336d6e8 --- /dev/null +++ b/commons/zenoh-codec/src/transport/batch.rs @@ -0,0 +1,255 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are 
made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::{RCodec, WCodec, Zenoh080}; +use core::num::NonZeroUsize; +use zenoh_buffers::reader::{BacktrackableReader, DidntRead, Reader, SiphonableReader}; +use zenoh_buffers::writer::{BacktrackableWriter, DidntWrite, Writer}; +use zenoh_buffers::ZBufReader; +use zenoh_protocol::core::Reliability; +use zenoh_protocol::network::NetworkMessage; +use zenoh_protocol::transport::{ + Fragment, FragmentHeader, Frame, FrameHeader, TransportBody, TransportMessage, TransportSn, +}; + +#[derive(Clone, Copy, Debug)] +#[repr(u8)] +pub enum CurrentFrame { + Reliable, + BestEffort, + None, +} + +#[derive(Clone, Copy, Debug)] +pub struct LatestSn { + pub reliable: Option, + pub best_effort: Option, +} + +impl LatestSn { + const fn new() -> Self { + Self { + reliable: None, + best_effort: None, + } + } +} + +#[derive(Clone, Debug)] +pub struct Zenoh080Batch { + // The current frame being serialized: BestEffort/Reliable + pub current_frame: CurrentFrame, + // The latest SN + pub latest_sn: LatestSn, +} + +impl Zenoh080Batch { + pub const fn new() -> Self { + Self { + current_frame: CurrentFrame::None, + latest_sn: LatestSn::new(), + } + } + + pub fn clear(&mut self) { + self.current_frame = CurrentFrame::None; + self.latest_sn = LatestSn::new(); + } +} + +#[repr(u8)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum BatchError { + NewFrame, + DidntWrite, +} + +impl WCodec<&TransportMessage, &mut W> for &mut Zenoh080Batch +where + W: Writer + BacktrackableWriter, + ::Mark: Copy, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &TransportMessage) -> Self::Output { + // Mark the write operation + let mark 
= writer.mark(); + + let codec = Zenoh080::new(); + codec.write(&mut *writer, x).map_err(|e| { + // Revert the write operation + writer.rewind(mark); + e + })?; + + // Reset the current frame value + self.current_frame = CurrentFrame::None; + + Ok(()) + } +} + +impl WCodec<&NetworkMessage, &mut W> for &mut Zenoh080Batch +where + W: Writer + BacktrackableWriter, + ::Mark: Copy, +{ + type Output = Result<(), BatchError>; + + fn write(self, writer: &mut W, x: &NetworkMessage) -> Self::Output { + // Eventually update the current frame and sn based on the current status + if let (CurrentFrame::Reliable, false) + | (CurrentFrame::BestEffort, true) + | (CurrentFrame::None, _) = (self.current_frame, x.is_reliable()) + { + // We are not serializing on the right frame. + return Err(BatchError::NewFrame); + } + + // Mark the write operation + let mark = writer.mark(); + + let codec = Zenoh080::new(); + codec.write(&mut *writer, x).map_err(|_| { + // Revert the write operation + writer.rewind(mark); + BatchError::DidntWrite + }) + } +} + +impl WCodec<(&NetworkMessage, &FrameHeader), &mut W> for &mut Zenoh080Batch +where + W: Writer + BacktrackableWriter, + ::Mark: Copy, +{ + type Output = Result<(), BatchError>; + + fn write(self, writer: &mut W, x: (&NetworkMessage, &FrameHeader)) -> Self::Output { + let (m, f) = x; + + // @TODO: m.is_reliable() always return true for the time being + // if let (Reliability::Reliable, false) | (Reliability::BestEffort, true) = + // (f.reliability, m.is_reliable()) + // { + // // We are not serializing on the right frame. 
+ // return Err(BatchError::NewFrame); + // } + + // Mark the write operation + let mark = writer.mark(); + + let codec = Zenoh080::new(); + // Write the frame header + codec.write(&mut *writer, f).map_err(|_| { + // Revert the write operation + writer.rewind(mark); + BatchError::DidntWrite + })?; + // Write the zenoh message + codec.write(&mut *writer, m).map_err(|_| { + // Revert the write operation + writer.rewind(mark); + BatchError::DidntWrite + })?; + // Update the frame + self.current_frame = match f.reliability { + Reliability::Reliable => { + self.latest_sn.reliable = Some(f.sn); + CurrentFrame::Reliable + } + Reliability::BestEffort => { + self.latest_sn.best_effort = Some(f.sn); + CurrentFrame::BestEffort + } + }; + Ok(()) + } +} + +impl WCodec<(&mut ZBufReader<'_>, &mut FragmentHeader), &mut W> for &mut Zenoh080Batch +where + W: Writer + BacktrackableWriter, + ::Mark: Copy, +{ + type Output = Result; + + fn write(self, writer: &mut W, x: (&mut ZBufReader<'_>, &mut FragmentHeader)) -> Self::Output { + let (r, f) = x; + + // Mark the buffer for the writing operation + let mark = writer.mark(); + + let codec = Zenoh080::new(); + // Write the fragment header + codec.write(&mut *writer, &*f).map_err(|e| { + // Revert the write operation + writer.rewind(mark); + e + })?; + + // Check if it is really the final fragment + if r.remaining() <= writer.remaining() { + // Revert the buffer + writer.rewind(mark); + // It is really the final fragment, reserialize the header + f.more = false; + // Write the fragment header + codec.write(&mut *writer, &*f).map_err(|e| { + // Revert the write operation + writer.rewind(mark); + e + })?; + } + + // Write the fragment + r.siphon(&mut *writer).map_err(|_| { + // Revert the write operation + writer.rewind(mark); + DidntWrite + }) + } +} + +impl RCodec for &mut Zenoh080Batch +where + R: Reader + BacktrackableReader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let codec = Zenoh080::new(); + 
let x: TransportMessage = codec.read(reader)?; + + match &x.body { + TransportBody::Frame(Frame { + reliability, sn, .. + }) + | TransportBody::Fragment(Fragment { + reliability, sn, .. + }) => match reliability { + Reliability::Reliable => { + self.current_frame = CurrentFrame::Reliable; + self.latest_sn.reliable = Some(*sn); + } + Reliability::BestEffort => { + self.current_frame = CurrentFrame::BestEffort; + self.latest_sn.best_effort = Some(*sn); + } + }, + _ => self.current_frame = CurrentFrame::None, + } + + Ok(x) + } +} diff --git a/commons/zenoh-codec/src/transport/init.rs b/commons/zenoh-codec/src/transport/init.rs index db37c8fc03..5f98c77e5b 100644 --- a/commons/zenoh-codec/src/transport/init.rs +++ b/commons/zenoh-codec/src/transport/init.rs @@ -37,61 +37,80 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &InitSyn) -> Self::Output { + let InitSyn { + version, + whatami, + zid, + resolution, + batch_size, + ext_qos, + ext_shm, + ext_auth, + ext_mlink, + ext_lowlatency, + ext_compression, + } = x; + // Header let mut header = id::INIT; - if x.resolution != Resolution::default() || x.batch_size != batch_size::UNICAST { + if *resolution != Resolution::default() || *batch_size != batch_size::UNICAST { header |= flag::S; } - let mut n_exts = (x.ext_qos.is_some() as u8) - + (x.ext_shm.is_some() as u8) - + (x.ext_auth.is_some() as u8) - + (x.ext_mlink.is_some() as u8) - + (x.ext_lowlatency.is_some() as u8); + let mut n_exts = (ext_qos.is_some() as u8) + + (ext_shm.is_some() as u8) + + (ext_auth.is_some() as u8) + + (ext_mlink.is_some() as u8) + + (ext_lowlatency.is_some() as u8) + + (ext_compression.is_some() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.version)?; + self.write(&mut *writer, version)?; - let whatami: u8 = match x.whatami { + let whatami: u8 = match whatami { WhatAmI::Router => 0b00, WhatAmI::Peer => 0b01, WhatAmI::Client => 0b10, }; - let 
flags: u8 = ((x.zid.size() as u8 - 1) << 4) | whatami; + let flags: u8 = ((zid.size() as u8 - 1) << 4) | whatami; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(x.zid.size()); - lodec.write(&mut *writer, &x.zid)?; + let lodec = Zenoh080Length::new(zid.size()); + lodec.write(&mut *writer, zid)?; if imsg::has_flag(header, flag::S) { - self.write(&mut *writer, x.resolution.as_u8())?; - self.write(&mut *writer, x.batch_size.to_le_bytes())?; + self.write(&mut *writer, resolution.as_u8())?; + self.write(&mut *writer, batch_size.to_le_bytes())?; } // Extensions - if let Some(qos) = x.ext_qos.as_ref() { + if let Some(qos) = ext_qos.as_ref() { n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } - if let Some(shm) = x.ext_shm.as_ref() { + if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; } - if let Some(auth) = x.ext_auth.as_ref() { + if let Some(auth) = ext_auth.as_ref() { n_exts -= 1; self.write(&mut *writer, (auth, n_exts != 0))?; } - if let Some(mlink) = x.ext_mlink.as_ref() { + if let Some(mlink) = ext_mlink.as_ref() { n_exts -= 1; self.write(&mut *writer, (mlink, n_exts != 0))?; } - if let Some(lowlatency) = x.ext_lowlatency.as_ref() { + if let Some(lowlatency) = ext_lowlatency.as_ref() { n_exts -= 1; self.write(&mut *writer, (lowlatency, n_exts != 0))?; } + if let Some(compression) = ext_compression.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (compression, n_exts != 0))?; + } Ok(()) } @@ -150,6 +169,7 @@ where let mut ext_auth = None; let mut ext_mlink = None; let mut ext_lowlatency = None; + let mut ext_compression = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { @@ -181,6 +201,11 @@ where ext_lowlatency = Some(q); has_ext = ext; } + ext::Compression::ID => { + let (q, ext): (ext::Compression, bool) = eodec.read(&mut *reader)?; + ext_compression = Some(q); + has_ext = ext; + } _ => { has_ext = extension::skip(reader, "InitSyn", ext)?; } @@ -198,6 
+223,7 @@ where ext_auth, ext_mlink, ext_lowlatency, + ext_compression, }) } } @@ -210,64 +236,84 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &InitAck) -> Self::Output { + let InitAck { + version, + whatami, + zid, + resolution, + batch_size, + cookie, + ext_qos, + ext_shm, + ext_auth, + ext_mlink, + ext_lowlatency, + ext_compression, + } = x; + // Header let mut header = id::INIT | flag::A; - if x.resolution != Resolution::default() || x.batch_size != batch_size::UNICAST { + if *resolution != Resolution::default() || *batch_size != batch_size::UNICAST { header |= flag::S; } - let mut n_exts = (x.ext_qos.is_some() as u8) - + (x.ext_shm.is_some() as u8) - + (x.ext_auth.is_some() as u8) - + (x.ext_mlink.is_some() as u8) - + (x.ext_lowlatency.is_some() as u8); + let mut n_exts = (ext_qos.is_some() as u8) + + (ext_shm.is_some() as u8) + + (ext_auth.is_some() as u8) + + (ext_mlink.is_some() as u8) + + (ext_lowlatency.is_some() as u8) + + (ext_compression.is_some() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.version)?; + self.write(&mut *writer, version)?; - let whatami: u8 = match x.whatami { + let whatami: u8 = match whatami { WhatAmI::Router => 0b00, WhatAmI::Peer => 0b01, WhatAmI::Client => 0b10, }; - let flags: u8 = ((x.zid.size() as u8 - 1) << 4) | whatami; + let flags: u8 = ((zid.size() as u8 - 1) << 4) | whatami; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(x.zid.size()); - lodec.write(&mut *writer, &x.zid)?; + let lodec = Zenoh080Length::new(zid.size()); + lodec.write(&mut *writer, zid)?; if imsg::has_flag(header, flag::S) { - self.write(&mut *writer, x.resolution.as_u8())?; - self.write(&mut *writer, x.batch_size.to_le_bytes())?; + self.write(&mut *writer, resolution.as_u8())?; + self.write(&mut *writer, batch_size.to_le_bytes())?; } let zodec = Zenoh080Bounded::::new(); - zodec.write(&mut *writer, &x.cookie)?; + zodec.write(&mut 
*writer, cookie)?; // Extensions - if let Some(qos) = x.ext_qos.as_ref() { + if let Some(qos) = ext_qos.as_ref() { n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } - if let Some(shm) = x.ext_shm.as_ref() { + if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; } - if let Some(auth) = x.ext_auth.as_ref() { + if let Some(auth) = ext_auth.as_ref() { n_exts -= 1; self.write(&mut *writer, (auth, n_exts != 0))?; } - if let Some(mlink) = x.ext_mlink.as_ref() { + if let Some(mlink) = ext_mlink.as_ref() { n_exts -= 1; self.write(&mut *writer, (mlink, n_exts != 0))?; } - if let Some(lowlatency) = x.ext_lowlatency.as_ref() { + if let Some(lowlatency) = ext_lowlatency.as_ref() { n_exts -= 1; self.write(&mut *writer, (lowlatency, n_exts != 0))?; } + if let Some(compression) = ext_compression.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (compression, n_exts != 0))?; + } Ok(()) } @@ -329,6 +375,7 @@ where let mut ext_auth = None; let mut ext_mlink = None; let mut ext_lowlatency = None; + let mut ext_compression = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { @@ -360,6 +407,11 @@ where ext_lowlatency = Some(q); has_ext = ext; } + ext::Compression::ID => { + let (q, ext): (ext::Compression, bool) = eodec.read(&mut *reader)?; + ext_compression = Some(q); + has_ext = ext; + } _ => { has_ext = extension::skip(reader, "InitAck", ext)?; } @@ -378,6 +430,7 @@ where ext_auth, ext_mlink, ext_lowlatency, + ext_compression, }) } } diff --git a/commons/zenoh-codec/src/transport/mod.rs b/commons/zenoh-codec/src/transport/mod.rs index 3aa6423eb6..4ddf872551 100644 --- a/commons/zenoh-codec/src/transport/mod.rs +++ b/commons/zenoh-codec/src/transport/mod.rs @@ -11,6 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // +pub mod batch; mod close; mod fragment; mod frame; diff --git a/commons/zenoh-codec/src/transport/open.rs b/commons/zenoh-codec/src/transport/open.rs index bbcb43de98..17482b1610 
100644 --- a/commons/zenoh-codec/src/transport/open.rs +++ b/commons/zenoh-codec/src/transport/open.rs @@ -35,16 +35,29 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &OpenSyn) -> Self::Output { + let OpenSyn { + initial_sn, + lease, + cookie, + ext_qos, + ext_shm, + ext_auth, + ext_mlink, + ext_lowlatency, + ext_compression, + } = x; + // Header let mut header = id::OPEN; - if x.lease.as_millis() % 1_000 == 0 { + if lease.as_millis() % 1_000 == 0 { header |= flag::T; } - let mut n_exts = (x.ext_qos.is_some() as u8) - + (x.ext_shm.is_some() as u8) - + (x.ext_auth.is_some() as u8) - + (x.ext_mlink.is_some() as u8) - + (x.ext_lowlatency.is_some() as u8); + let mut n_exts = (ext_qos.is_some() as u8) + + (ext_shm.is_some() as u8) + + (ext_auth.is_some() as u8) + + (ext_mlink.is_some() as u8) + + (ext_lowlatency.is_some() as u8) + + (ext_compression.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -52,34 +65,38 @@ where // Body if imsg::has_flag(header, flag::T) { - self.write(&mut *writer, x.lease.as_secs())?; + self.write(&mut *writer, lease.as_secs())?; } else { - self.write(&mut *writer, x.lease.as_millis() as u64)?; + self.write(&mut *writer, lease.as_millis() as u64)?; } - self.write(&mut *writer, x.initial_sn)?; - self.write(&mut *writer, &x.cookie)?; + self.write(&mut *writer, initial_sn)?; + self.write(&mut *writer, cookie)?; // Extensions - if let Some(qos) = x.ext_qos.as_ref() { + if let Some(qos) = ext_qos.as_ref() { n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } - if let Some(shm) = x.ext_shm.as_ref() { + if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; } - if let Some(auth) = x.ext_auth.as_ref() { + if let Some(auth) = ext_auth.as_ref() { n_exts -= 1; self.write(&mut *writer, (auth, n_exts != 0))?; } - if let Some(mlink) = x.ext_mlink.as_ref() { + if let Some(mlink) = ext_mlink.as_ref() { n_exts -= 1; self.write(&mut *writer, (mlink, n_exts != 0))?; 
} - if let Some(lowlatency) = x.ext_lowlatency.as_ref() { + if let Some(lowlatency) = ext_lowlatency.as_ref() { n_exts -= 1; self.write(&mut *writer, (lowlatency, n_exts != 0))?; } + if let Some(compression) = ext_compression.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (compression, n_exts != 0))?; + } Ok(()) } @@ -125,6 +142,7 @@ where let mut ext_auth = None; let mut ext_mlink = None; let mut ext_lowlatency = None; + let mut ext_compression = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { @@ -156,6 +174,11 @@ where ext_lowlatency = Some(q); has_ext = ext; } + ext::Compression::ID => { + let (q, ext): (ext::Compression, bool) = eodec.read(&mut *reader)?; + ext_compression = Some(q); + has_ext = ext; + } _ => { has_ext = extension::skip(reader, "OpenSyn", ext)?; } @@ -171,6 +194,7 @@ where ext_auth, ext_mlink, ext_lowlatency, + ext_compression, }) } } @@ -183,18 +207,30 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &OpenAck) -> Self::Output { + let OpenAck { + initial_sn, + lease, + ext_qos, + ext_shm, + ext_auth, + ext_mlink, + ext_lowlatency, + ext_compression, + } = x; + // Header let mut header = id::OPEN; header |= flag::A; // Verify that the timeout is expressed in seconds, i.e. subsec part is 0. 
- if x.lease.subsec_nanos() == 0 { + if lease.subsec_nanos() == 0 { header |= flag::T; } - let mut n_exts = (x.ext_qos.is_some() as u8) - + (x.ext_shm.is_some() as u8) - + (x.ext_auth.is_some() as u8) - + (x.ext_mlink.is_some() as u8) - + (x.ext_lowlatency.is_some() as u8); + let mut n_exts = (ext_qos.is_some() as u8) + + (ext_shm.is_some() as u8) + + (ext_auth.is_some() as u8) + + (ext_mlink.is_some() as u8) + + (ext_lowlatency.is_some() as u8) + + (ext_compression.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -202,33 +238,37 @@ where // Body if imsg::has_flag(header, flag::T) { - self.write(&mut *writer, x.lease.as_secs())?; + self.write(&mut *writer, lease.as_secs())?; } else { - self.write(&mut *writer, x.lease.as_millis() as u64)?; + self.write(&mut *writer, lease.as_millis() as u64)?; } - self.write(&mut *writer, x.initial_sn)?; + self.write(&mut *writer, initial_sn)?; // Extensions - if let Some(qos) = x.ext_qos.as_ref() { + if let Some(qos) = ext_qos.as_ref() { n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } - if let Some(shm) = x.ext_shm.as_ref() { + if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; } - if let Some(auth) = x.ext_auth.as_ref() { + if let Some(auth) = ext_auth.as_ref() { n_exts -= 1; self.write(&mut *writer, (auth, n_exts != 0))?; } - if let Some(mlink) = x.ext_mlink.as_ref() { + if let Some(mlink) = ext_mlink.as_ref() { n_exts -= 1; self.write(&mut *writer, (mlink, n_exts != 0))?; } - if let Some(lowlatency) = x.ext_lowlatency.as_ref() { + if let Some(lowlatency) = ext_lowlatency.as_ref() { n_exts -= 1; self.write(&mut *writer, (lowlatency, n_exts != 0))?; } + if let Some(compression) = ext_compression.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (compression, n_exts != 0))?; + } Ok(()) } @@ -273,6 +313,7 @@ where let mut ext_auth = None; let mut ext_mlink = None; let mut ext_lowlatency = None; + let mut ext_compression = None; let mut has_ext = 
imsg::has_flag(self.header, flag::Z); while has_ext { @@ -304,6 +345,11 @@ where ext_lowlatency = Some(q); has_ext = ext; } + ext::Compression::ID => { + let (q, ext): (ext::Compression, bool) = eodec.read(&mut *reader)?; + ext_compression = Some(q); + has_ext = ext; + } _ => { has_ext = extension::skip(reader, "OpenAck", ext)?; } @@ -318,6 +364,7 @@ where ext_auth, ext_mlink, ext_lowlatency, + ext_compression, }) } } diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 5b4d3da835..8d1a5dbc0f 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -107,6 +107,8 @@ impl Default for TransportUnicastConf { max_sessions: 1_000, max_links: 1, lowlatency: false, + qos: QoSUnicastConf::default(), + compression: CompressionUnicastConf::default(), } } } @@ -116,16 +118,39 @@ impl Default for TransportMulticastConf { Self { join_interval: Some(2500), max_sessions: Some(1000), + qos: QoSMulticastConf::default(), + compression: CompressionMulticastConf::default(), } } } -impl Default for QoSConf { +impl Default for QoSUnicastConf { fn default() -> Self { Self { enabled: true } } } +#[allow(clippy::derivable_impls)] +impl Default for QoSMulticastConf { + fn default() -> Self { + Self { enabled: false } + } +} + +#[allow(clippy::derivable_impls)] +impl Default for CompressionUnicastConf { + fn default() -> Self { + Self { enabled: false } + } +} + +#[allow(clippy::derivable_impls)] +impl Default for CompressionMulticastConf { + fn default() -> Self { + Self { enabled: false } + } +} + impl Default for LinkTxConf { #[allow(clippy::unnecessary_cast)] fn default() -> Self { diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 53ac033506..c3a633b0e2 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -247,17 +247,32 @@ validated_struct::validator! 
{ /// This option does not make LowLatency transport mandatory, the actual implementation of transport /// used will depend on Establish procedure and other party's settings lowlatency: bool, + pub qos: QoSUnicastConf { + /// Whether QoS is enabled or not. + /// If set to `false`, the QoS will be disabled. (default `true`). + enabled: bool + }, + pub compression: CompressionUnicastConf { + /// You must compile zenoh with "transport_compression" feature to be able to enable compression. + /// When enabled is true, batches will be sent compressed. (default `false`). + enabled: bool, + }, }, pub multicast: TransportMulticastConf { /// Link join interval duration in milliseconds (default: 2500) join_interval: Option, /// Maximum number of multicast sessions (default: 1000) max_sessions: Option, - }, - pub qos: QoSConf { - /// Whether QoS is enabled or not. - /// If set to `false`, the QoS will be disabled. (default `true`). - enabled: bool + pub qos: QoSMulticastConf { + /// Whether QoS is enabled or not. + /// If set to `false`, the QoS will be disabled. (default `false`). + enabled: bool + }, + pub compression: CompressionMulticastConf { + /// You must compile zenoh with "transport_compression" feature to be able to enable compression. + /// When enabled is true, batches will be sent compressed. (default `false`). + enabled: bool, + }, }, pub link: #[derive(Default)] TransportLinkConf { @@ -329,24 +344,11 @@ validated_struct::validator! { client_private_key_base64 : Option, #[serde(skip_serializing)] client_certificate_base64 : Option, - } - , + }, pub unixpipe: #[derive(Default)] UnixPipeConf { file_access_mask: Option }, - pub compression: #[derive(Default)] - /// **Experimental** compression feature. - /// Will compress the batches hop to hop (as opposed to end to end). May cause errors when - /// the batches's complexity is too high, causing the resulting compression to be bigger in - /// size than the MTU. 
- /// You must use the features "transport_compression" and "unstable" to enable this. - CompressionConf { - /// When enabled is true, batches will be sent compressed. It does not affect the - /// reception, which always expects compressed batches when built with thes features - /// "transport_compression" and "unstable". - enabled: bool, - } }, pub shared_memory: SharedMemoryConf { diff --git a/commons/zenoh-protocol/src/transport/frame.rs b/commons/zenoh-protocol/src/transport/frame.rs index bcd01e7965..184784f9f1 100644 --- a/commons/zenoh-protocol/src/transport/frame.rs +++ b/commons/zenoh-protocol/src/transport/frame.rs @@ -70,8 +70,8 @@ pub mod flag { pub struct Frame { pub reliability: Reliability, pub sn: TransportSn, - pub payload: Vec, pub ext_qos: ext::QoSType, + pub payload: Vec, } // Extensions diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index d553799fd1..0c60dd8a90 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -118,6 +118,7 @@ pub struct InitSyn { pub ext_auth: Option, pub ext_mlink: Option, pub ext_lowlatency: Option, + pub ext_compression: Option, } // Extensions @@ -146,6 +147,10 @@ pub mod ext { /// # LowLatency extension /// Used to negotiate the use of lowlatency transport pub type LowLatency = zextunit!(0x5, false); + + /// # Compression extension + /// Used to negotiate the use of compression on the link + pub type Compression = zextunit!(0x6, false); } impl InitSyn { @@ -166,6 +171,7 @@ impl InitSyn { let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_lowlatency = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + let ext_compression = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); Self { version, @@ -178,6 +184,7 @@ impl InitSyn { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } } } @@ -195,6 +202,7 @@ pub struct InitAck { pub 
ext_auth: Option, pub ext_mlink: Option, pub ext_lowlatency: Option, + pub ext_compression: Option, } impl InitAck { @@ -220,6 +228,7 @@ impl InitAck { let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_lowlatency = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + let ext_compression = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); Self { version, @@ -233,6 +242,7 @@ impl InitAck { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } } } diff --git a/commons/zenoh-protocol/src/transport/open.rs b/commons/zenoh-protocol/src/transport/open.rs index b7ec56da62..d793671b06 100644 --- a/commons/zenoh-protocol/src/transport/open.rs +++ b/commons/zenoh-protocol/src/transport/open.rs @@ -82,6 +82,7 @@ pub struct OpenSyn { pub ext_auth: Option, pub ext_mlink: Option, pub ext_lowlatency: Option, + pub ext_compression: Option, } // Extensions @@ -111,6 +112,10 @@ pub mod ext { /// # LowLatency extension /// Used to negotiate the use of lowlatency transport pub type LowLatency = zextunit!(0x5, false); + + /// # Compression extension + /// Used to negotiate the use of compression on the link + pub type Compression = zextunit!(0x6, false); } impl OpenSyn { @@ -137,6 +142,7 @@ impl OpenSyn { let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_lowlatency = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + let ext_compression = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); Self { lease, @@ -147,6 +153,7 @@ impl OpenSyn { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } } } @@ -160,6 +167,7 @@ pub struct OpenAck { pub ext_auth: Option, pub ext_mlink: Option, pub ext_lowlatency: Option, + pub ext_compression: Option, } impl OpenAck { @@ -182,6 +190,7 @@ impl OpenAck { let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); let ext_lowlatency 
= rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + let ext_compression = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); Self { lease, @@ -191,6 +200,7 @@ impl OpenAck { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } } } diff --git a/io/zenoh-link-commons/Cargo.toml b/io/zenoh-link-commons/Cargo.toml index 51db4d671c..36e39eceed 100644 --- a/io/zenoh-link-commons/Cargo.toml +++ b/io/zenoh-link-commons/Cargo.toml @@ -24,10 +24,14 @@ categories = { workspace = true } description = "Internal crate for zenoh." # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +compression = [] + [dependencies] async-std = { workspace = true } async-trait = { workspace = true } flume = { workspace = true } +lz4_flex = { workspace = true } serde = { workspace = true, features = ["default"] } typenum = { workspace = true } zenoh-buffers = { workspace = true } diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index 7f3eb43518..d44686ff50 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -14,21 +14,12 @@ use alloc::{boxed::Box, sync::Arc, vec::Vec}; use async_trait::async_trait; use core::{ - convert::TryFrom, fmt, hash::{Hash, Hasher}, ops::Deref, }; -use zenoh_buffers::{ - reader::HasReader, - writer::{HasWriter, Writer}, -}; -use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::{ - core::{EndPoint, Locator}, - transport::{BatchSize, TransportMessage}, -}; -use zenoh_result::{zerror, ZResult}; +use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_result::ZResult; pub type LinkManagerUnicast = Arc; #[async_trait] @@ -44,12 +35,6 @@ pub trait ConstructibleLinkManagerUnicast: Sized { fn new(new_link_sender: NewLinkChannelSender, config: T) -> ZResult; } -#[derive(Clone, PartialEq, Eq)] -pub enum LinkUnicastDirection { - Inbound, - Outbound, -} - #[derive(Clone)] pub struct LinkUnicast(pub Arc); @@ -67,70 +52,6 @@ pub trait 
LinkUnicastTrait: Send + Sync { async fn close(&self) -> ZResult<()>; } -impl LinkUnicast { - pub async fn send(&self, msg: &TransportMessage) -> ZResult { - const ERR: &str = "Write error on link: "; - - // Create the buffer for serializing the message - let mut buff = Vec::new(); - let mut writer = buff.writer(); - let codec = Zenoh080::new(); - - // Reserve 16 bits to write the length - if self.is_streamed() { - writer - .write_exact(BatchSize::MIN.to_le_bytes().as_slice()) - .map_err(|_| zerror!("{ERR}{self}"))?; - } - // Serialize the message - codec - .write(&mut writer, msg) - .map_err(|_| zerror!("{ERR}{self}"))?; - - // Write the length - if self.is_streamed() { - let num = BatchSize::MIN.to_le_bytes().len(); - let len = - BatchSize::try_from(writer.len() - num).map_err(|_| zerror!("{ERR}{self}"))?; - buff[..num].copy_from_slice(len.to_le_bytes().as_slice()); - } - - // Send the message on the link - self.0.write_all(buff.as_slice()).await?; - - Ok(buff.len()) - } - - pub async fn recv(&self) -> ZResult { - // Read from the link - let buffer = if self.is_streamed() { - // Read and decode the message length - let mut length_bytes = BatchSize::MIN.to_le_bytes(); - self.read_exact(&mut length_bytes).await?; - let to_read = BatchSize::from_le_bytes(length_bytes) as usize; - // Read the message - let mut buffer = zenoh_buffers::vec::uninit(to_read); - self.read_exact(&mut buffer).await?; - buffer - } else { - // Read the message - let mut buffer = zenoh_buffers::vec::uninit(self.get_mtu() as usize); - let n = self.read(&mut buffer).await?; - buffer.truncate(n); - buffer - }; - - let mut reader = buffer.reader(); - let codec = Zenoh080::new(); - - let msg: TransportMessage = codec - .read(&mut reader) - .map_err(|_| zerror!("Read error on link: {}", self))?; - - Ok(msg) - } -} - impl Deref for LinkUnicast { type Target = Arc; diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 6fc2051242..cd029a9435 100644 --- 
a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -11,67 +11,112 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::num::NonZeroUsize; +use std::num::{NonZeroU8, NonZeroUsize}; use zenoh_buffers::{ - reader::{Reader, SiphonableReader}, - writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - BBuf, ZBufReader, + buffer::Buffer, + reader::{DidntRead, HasReader}, + writer::{DidntWrite, HasWriter, Writer}, + BBuf, ZBufReader, ZSlice, ZSliceBuffer, +}; +use zenoh_codec::{ + transport::batch::{BatchError, Zenoh080Batch}, + RCodec, WCodec, }; -use zenoh_codec::{WCodec, Zenoh080}; use zenoh_protocol::{ - core::Reliability, network::NetworkMessage, - transport::{ - fragment::FragmentHeader, frame::FrameHeader, BatchSize, TransportMessage, TransportSn, - }, + transport::{fragment::FragmentHeader, frame::FrameHeader, BatchSize, TransportMessage}, }; - -const LENGTH_BYTES: [u8; 2] = u16::MIN.to_be_bytes(); - -pub(crate) trait Encode { - type Output; - fn encode(self, message: Message) -> Self::Output; +use zenoh_result::ZResult; +#[cfg(feature = "transport_compression")] +use {std::sync::Arc, zenoh_protocol::common::imsg, zenoh_result::zerror}; + +// Split the inner buffer into (length, header, payload) inmutable slices +#[cfg(feature = "transport_compression")] +macro_rules! 
zsplit { + ($slice:expr, $header:expr) => {{ + match $header.get() { + Some(_) => $slice.split_at(BatchHeader::INDEX + 1), + None => (&[], $slice), + } + }}; } -pub(crate) trait Decode { - type Error; - fn decode(self) -> Result; +// Batch config +#[derive(Copy, Clone, Debug)] +pub struct BatchConfig { + pub mtu: BatchSize, + #[cfg(feature = "transport_compression")] + pub is_compression: bool, } -#[derive(Clone, Copy, Debug)] -#[repr(u8)] -pub(crate) enum CurrentFrame { - Reliable, - BestEffort, - None, +impl BatchConfig { + fn header(&self) -> BatchHeader { + #[allow(unused_mut)] // No need for mut when "transport_compression" is disabled + let mut h = 0; + #[cfg(feature = "transport_compression")] + if self.is_compression { + h |= BatchHeader::COMPRESSION; + } + BatchHeader::new(h) + } } -#[derive(Clone, Copy, Debug)] -pub(crate) struct LatestSn { - pub(crate) reliable: Option, - pub(crate) best_effort: Option, -} +// Batch header +#[repr(transparent)] +#[derive(Copy, Clone, Debug)] +pub struct BatchHeader(Option); -impl LatestSn { - fn clear(&mut self) { - self.reliable = None; - self.best_effort = None; +impl BatchHeader { + #[cfg(feature = "transport_compression")] + const INDEX: usize = 0; + #[cfg(feature = "transport_compression")] + const COMPRESSION: u8 = 1; + + fn new(h: u8) -> Self { + Self(NonZeroU8::new(h)) + } + + #[cfg(feature = "transport_compression")] + const fn is_empty(&self) -> bool { + self.0.is_none() + } + + const fn get(&self) -> Option { + self.0 + } + + /// Verify that the [`WBatch`][WBatch] is for a stream-based protocol, i.e., the first + /// 2 bytes are reserved to encode the total amount of serialized bytes as 16-bits little endian. 
+ #[cfg(feature = "transport_compression")] + #[inline(always)] + pub fn is_compression(&self) -> bool { + self.0 + .is_some_and(|h| imsg::has_flag(h.get(), Self::COMPRESSION)) } } +// WRITE BATCH #[cfg(feature = "stats")] #[derive(Clone, Copy, Debug, Default)] -pub(crate) struct SerializationBatchStats { - pub(crate) t_msgs: usize, +pub struct WBatchStats { + pub t_msgs: usize, } #[cfg(feature = "stats")] -impl SerializationBatchStats { +impl WBatchStats { fn clear(&mut self) { self.t_msgs = 0; } } +#[repr(u8)] +#[derive(Debug)] +pub enum Finalize { + Batch, + #[cfg(feature = "transport_compression")] + Buffer, +} + /// Write Batch /// /// A [`WBatch`][WBatch] is a non-expandable and contiguous region of memory @@ -80,44 +125,38 @@ impl SerializationBatchStats { /// [`TransportMessage`][TransportMessage] are always serialized on the batch as they are, while /// [`ZenohMessage`][ZenohMessage] are always serializaed on the batch as part of a [`TransportMessage`] /// [TransportMessage] Frame. Reliable and Best Effort Frames can be interleaved on the same -/// [`SerializationBatch`][SerializationBatch] as long as they fit in the remaining buffer capacity. +/// [`WBatch`][WBatch] as long as they fit in the remaining buffer capacity. /// -/// In the serialized form, the [`SerializationBatch`][SerializationBatch] always contains one or more +/// In the serialized form, the [`WBatch`][WBatch] always contains one or more /// [`TransportMessage`][TransportMessage]. In the particular case of [`TransportMessage`][TransportMessage] Frame, /// its payload is either (i) one or more complete [`ZenohMessage`][ZenohMessage] or (ii) a fragment of a /// a [`ZenohMessage`][ZenohMessage]. 
/// -/// As an example, the content of the [`SerializationBatch`][SerializationBatch] in memory could be: +/// As an example, the content of the [`WBatch`][WBatch] in memory could be: /// /// | Keep Alive | Frame Reliable | Frame Best Effort | /// -#[derive(Debug)] -pub(crate) struct WBatch { +#[derive(Clone, Debug)] +pub struct WBatch { // The buffer to perform the batching on - buffer: BBuf, - // It is a streamed batch - is_streamed: bool, - // The current frame being serialized: BestEffort/Reliable - current_frame: CurrentFrame, - // The latest SN - pub(crate) latest_sn: LatestSn, + pub buffer: BBuf, + // The batch codec + pub codec: Zenoh080Batch, + // It contains 1 byte as additional header, e.g. to signal the batch is compressed + pub header: BatchHeader, // Statistics related to this batch #[cfg(feature = "stats")] - pub(crate) stats: SerializationBatchStats, + pub stats: WBatchStats, } impl WBatch { - pub(crate) fn new(size: BatchSize, is_streamed: bool) -> Self { + pub fn new(config: BatchConfig) -> Self { let mut batch = Self { - buffer: BBuf::with_capacity(size as usize), - is_streamed, - current_frame: CurrentFrame::None, - latest_sn: LatestSn { - reliable: None, - best_effort: None, - }, + buffer: BBuf::with_capacity(config.mtu as usize), + codec: Zenoh080Batch::new(), + header: config.header(), #[cfg(feature = "stats")] - stats: SerializationBatchStats::default(), + stats: WBatchStats::default(), }; // Bring the batch in a clear state @@ -126,237 +165,302 @@ impl WBatch { batch } - /// Verify that the [`SerializationBatch`][SerializationBatch] has no serialized bytes. + /// Verify that the [`WBatch`][WBatch] has no serialized bytes. #[inline(always)] - pub(crate) fn is_empty(&self) -> bool { + pub fn is_empty(&self) -> bool { self.len() == 0 } - /// Get the total number of bytes that have been serialized on the [`SerializationBatch`][SerializationBatch]. + /// Get the total number of bytes that have been serialized on the [`WBatch`][WBatch]. 
#[inline(always)] - pub(crate) fn len(&self) -> BatchSize { - let len = self.buffer.len() as BatchSize; - if self.is_streamed() { - len - (LENGTH_BYTES.len() as BatchSize) - } else { - len - } + pub fn len(&self) -> BatchSize { + self.buffer.len() as BatchSize } - /// Verify that the [`SerializationBatch`][SerializationBatch] is for a stream-based protocol, i.e., the first - /// 2 bytes are reserved to encode the total amount of serialized bytes as 16-bits little endian. + /// Clear the [`WBatch`][WBatch] memory buffer and related internal state. #[inline(always)] - pub(crate) fn is_streamed(&self) -> bool { - self.is_streamed - } - - /// Clear the [`SerializationBatch`][SerializationBatch] memory buffer and related internal state. - #[inline(always)] - pub(crate) fn clear(&mut self) { + pub fn clear(&mut self) { self.buffer.clear(); - self.current_frame = CurrentFrame::None; - self.latest_sn.clear(); - #[cfg(feature = "stats")] - { - self.stats.clear(); - } - if self.is_streamed() { + self.codec.clear(); + if let Some(h) = self.header.get() { let mut writer = self.buffer.writer(); - let _ = writer.write_exact(&LENGTH_BYTES[..]); + let _ = writer.write_u8(h.get()); } } - /// In case the [`SerializationBatch`][SerializationBatch] is for a stream-based protocol, use the first 2 bytes - /// to encode the total amount of serialized bytes as 16-bits little endian. + /// Get a `&[u8]` to access the internal memory buffer, usually for transmitting it on the network. #[inline(always)] - pub(crate) fn write_len(&mut self) { - if self.is_streamed() { - let length = self.len(); - self.buffer.as_mut_slice()[..LENGTH_BYTES.len()].copy_from_slice(&length.to_le_bytes()); - } + pub fn as_slice(&self) -> &[u8] { + self.buffer.as_slice() } - /// Get a `&[u8]` to access the internal memory buffer, usually for transmitting it on the network. 
+ // Split (length, header, payload) internal buffer slice #[inline(always)] - pub(crate) fn as_bytes(&self) -> &[u8] { - self.buffer.as_slice() + #[cfg(feature = "transport_compression")] + fn split(&self) -> (&[u8], &[u8]) { + zsplit!(self.buffer.as_slice(), self.header) + } + + pub fn finalize( + &mut self, + #[cfg(feature = "transport_compression")] buffer: Option<&mut BBuf>, + ) -> ZResult { + #[cfg(feature = "transport_compression")] + if self.header.is_compression() { + let buffer = buffer.ok_or_else(|| zerror!("Support buffer not provided"))?; + buffer.clear(); + return self.compress(buffer); + } + + Ok(Finalize::Batch) + } + + #[cfg(feature = "transport_compression")] + fn compress(&mut self, support: &mut BBuf) -> ZResult { + // Write the initial bytes for the batch + let mut writer = support.writer(); + if let Some(h) = self.header.get() { + let _ = writer.write_u8(h.get()); + } + + // Compress the actual content + let (_header, payload) = self.split(); + writer + .with_slot(writer.remaining(), |b| { + lz4_flex::block::compress_into(payload, b).unwrap_or(0) + }) + .map_err(|_| zerror!("Compression error"))?; + + // Verify wether the resulting compressed data is smaller than the initial input + if support.len() < self.buffer.len() { + Ok(Finalize::Buffer) + } else { + // Keep the original uncompressed buffer and unset the compression flag from the header + let h = self + .buffer + .as_mut_slice() + .get_mut(BatchHeader::INDEX) + .ok_or_else(|| zerror!("Header not present"))?; + *h &= !BatchHeader::COMPRESSION; + Ok(Finalize::Batch) + } } } +pub trait Encode { + type Output; + + fn encode(self, x: Message) -> Self::Output; +} + impl Encode<&TransportMessage> for &mut WBatch { type Output = Result<(), DidntWrite>; - /// Try to serialize a [`TransportMessage`][TransportMessage] on the [`SerializationBatch`][SerializationBatch]. - /// - /// # Arguments - /// * `message` - The [`TransportMessage`][TransportMessage] to serialize. 
- /// - fn encode(self, message: &TransportMessage) -> Self::Output { - // Mark the write operation + fn encode(self, x: &TransportMessage) -> Self::Output { let mut writer = self.buffer.writer(); - let mark = writer.mark(); - - let codec = Zenoh080::new(); - codec.write(&mut writer, message).map_err(|e| { - // Revert the write operation - writer.rewind(mark); - e - })?; - - // Reset the current frame value - self.current_frame = CurrentFrame::None; - #[cfg(feature = "stats")] - { - self.stats.t_msgs += 1; - } - Ok(()) + self.codec.write(&mut writer, x) } } -#[repr(u8)] -pub(crate) enum WError { - NewFrame, - DidntWrite, -} - impl Encode<&NetworkMessage> for &mut WBatch { - type Output = Result<(), WError>; - - /// Try to serialize a [`NetworkMessage`][NetworkMessage] on the [`SerializationBatch`][SerializationBatch]. - /// - /// # Arguments - /// * `message` - The [`NetworkMessage`][NetworkMessage] to serialize. - /// - fn encode(self, message: &NetworkMessage) -> Self::Output { - // Eventually update the current frame and sn based on the current status - if let (CurrentFrame::Reliable, false) - | (CurrentFrame::BestEffort, true) - | (CurrentFrame::None, _) = (self.current_frame, message.is_reliable()) - { - // We are not serializing on the right frame. 
- return Err(WError::NewFrame); - }; + type Output = Result<(), BatchError>; - // Mark the write operation + fn encode(self, x: &NetworkMessage) -> Self::Output { let mut writer = self.buffer.writer(); - let mark = writer.mark(); - - let codec = Zenoh080::new(); - codec.write(&mut writer, message).map_err(|_| { - // Revert the write operation - writer.rewind(mark); - WError::DidntWrite - }) + self.codec.write(&mut writer, x) } } -impl Encode<(&NetworkMessage, FrameHeader)> for &mut WBatch { - type Output = Result<(), DidntWrite>; +impl Encode<(&NetworkMessage, &FrameHeader)> for &mut WBatch { + type Output = Result<(), BatchError>; + + fn encode(self, x: (&NetworkMessage, &FrameHeader)) -> Self::Output { + let mut writer = self.buffer.writer(); + self.codec.write(&mut writer, x) + } +} - /// Try to serialize a [`NetworkMessage`][NetworkMessage] on the [`SerializationBatch`][SerializationBatch]. - /// - /// # Arguments - /// * `message` - The [`NetworkMessage`][NetworkMessage] to serialize. 
- /// - fn encode(self, message: (&NetworkMessage, FrameHeader)) -> Self::Output { - let (message, frame) = message; +impl Encode<(&mut ZBufReader<'_>, &mut FragmentHeader)> for &mut WBatch { + type Output = Result; - // Mark the write operation + fn encode(self, x: (&mut ZBufReader<'_>, &mut FragmentHeader)) -> Self::Output { let mut writer = self.buffer.writer(); - let mark = writer.mark(); - - let codec = Zenoh080::new(); - // Write the frame header - codec.write(&mut writer, &frame).map_err(|e| { - // Revert the write operation - writer.rewind(mark); - e - })?; - // Write the zenoh message - codec.write(&mut writer, message).map_err(|e| { - // Revert the write operation - writer.rewind(mark); - e - })?; - // Update the frame - self.current_frame = match frame.reliability { - Reliability::Reliable => { - self.latest_sn.reliable = Some(frame.sn); - CurrentFrame::Reliable - } - Reliability::BestEffort => { - self.latest_sn.best_effort = Some(frame.sn); - CurrentFrame::BestEffort + self.codec.write(&mut writer, x) + } +} + +// Read batch +#[derive(Debug)] +pub struct RBatch { + // The buffer to perform deserializationn from + buffer: ZSlice, + // The batch codec + codec: Zenoh080Batch, + // It contains 1 byte as additional header, e.g. 
to signal the batch is compressed + #[cfg(feature = "transport_compression")] + header: BatchHeader, +} + +impl RBatch { + pub fn new(#[allow(unused_variables)] config: BatchConfig, buffer: T) -> Self + where + T: Into, + { + Self { + buffer: buffer.into(), + codec: Zenoh080Batch::new(), + #[cfg(feature = "transport_compression")] + header: config.header(), + } + } + + #[inline(always)] + pub const fn is_empty(&self) -> bool { + self.buffer.is_empty() + } + + // Split (length, header, payload) internal buffer slice + #[inline(always)] + #[cfg(feature = "transport_compression")] + fn split(&self) -> (&[u8], &[u8]) { + zsplit!(self.buffer.as_slice(), self.header) + } + + pub fn initialize(&mut self, #[allow(unused_variables)] buff: C) -> ZResult<()> + where + C: Fn() -> T + Copy, + T: ZSliceBuffer + 'static, + { + #[cfg(feature = "transport_compression")] + if !self.header.is_empty() { + let h = *self + .buffer + .get(BatchHeader::INDEX) + .ok_or_else(|| zerror!("Batch header not present"))?; + let header = BatchHeader::new(h); + + if header.is_compression() { + self.decompress(buff)?; + } else { + self.buffer = self + .buffer + .subslice(BatchHeader::INDEX + 1, self.buffer.len()) + .ok_or_else(|| zerror!("Invalid batch length"))?; } - }; + } + + Ok(()) + } + + #[cfg(feature = "transport_compression")] + fn decompress(&mut self, mut buff: impl FnMut() -> T) -> ZResult<()> + where + T: ZSliceBuffer + 'static, + { + let (_h, p) = self.split(); + + let mut into = (buff)(); + let n = lz4_flex::block::decompress_into(p, into.as_mut_slice()) + .map_err(|_| zerror!("Decompression error"))?; + self.buffer = ZSlice::make(Arc::new(into), 0, n) + .map_err(|_| zerror!("Invalid decompression buffer length"))?; + Ok(()) } } -impl Encode<(&mut ZBufReader<'_>, FragmentHeader)> for &mut WBatch { - type Output = Result; +pub trait Decode { + type Error; - /// Try to serialize a [`ZenohMessage`][ZenohMessage] on the [`SerializationBatch`][SerializationBatch]. 
- /// - /// # Arguments - /// * `message` - The [`ZenohMessage`][ZenohMessage] to serialize. - /// - fn encode(self, message: (&mut ZBufReader<'_>, FragmentHeader)) -> Self::Output { - let (reader, mut fragment) = message; + fn decode(self) -> Result; +} - let mut writer = self.buffer.writer(); - let codec = Zenoh080::new(); - - // Mark the buffer for the writing operation - let mark = writer.mark(); - - // Write the frame header - codec.write(&mut writer, &fragment).map_err(|e| { - // Revert the write operation - writer.rewind(mark); - e - })?; - - // Check if it is really the final fragment - if reader.remaining() <= writer.remaining() { - // Revert the buffer - writer.rewind(mark); - // It is really the finally fragment, reserialize the header - fragment.more = false; - // Write the frame header - codec.write(&mut writer, &fragment).map_err(|e| { - // Revert the write operation - writer.rewind(mark); - e - })?; - } +impl Decode for &mut RBatch { + type Error = DidntRead; - // Write the fragment - reader.siphon(&mut writer).map_err(|_| { - // Revert the write operation - writer.rewind(mark); - DidntWrite - }) + fn decode(self) -> Result { + let mut reader = self.buffer.reader(); + self.codec.read(&mut reader) } } #[cfg(test)] mod tests { + use std::vec; + use super::*; + use rand::Rng; use zenoh_buffers::ZBuf; use zenoh_protocol::{ core::{CongestionControl, Encoding, Priority, Reliability, WireExpr}, network::{ext, Push}, transport::{ frame::{self, FrameHeader}, - KeepAlive, TransportMessage, + Fragment, KeepAlive, TransportMessage, }, zenoh::{PushBody, Put}, }; + #[test] + fn rw_batch() { + let mut rng = rand::thread_rng(); + + for _ in 0..1_000 { + let msg_ins: [TransportMessage; 2] = [TransportMessage::rand(), { + let mut msg_in = Fragment::rand(); + msg_in.payload = vec![0u8; rng.gen_range(8..1_024)].into(); + msg_in.into() + }]; + for msg_in in msg_ins { + let config = BatchConfig { + mtu: BatchSize::MAX, + #[cfg(feature = "transport_compression")] + 
is_compression: rng.gen_bool(0.5), + }; + let mut wbatch = WBatch::new(config); + wbatch.encode(&msg_in).unwrap(); + println!("Encoded WBatch: {:?}", wbatch); + + #[cfg(feature = "transport_compression")] + let mut buffer = config.is_compression.then_some(BBuf::with_capacity( + lz4_flex::block::get_maximum_output_size(wbatch.as_slice().len()), + )); + + let res = wbatch + .finalize( + #[cfg(feature = "transport_compression")] + buffer.as_mut(), + ) + .unwrap(); + let bytes = match res { + Finalize::Batch => wbatch.as_slice(), + #[cfg(feature = "transport_compression")] + Finalize::Buffer => buffer.as_mut().unwrap().as_slice(), + }; + println!("Finalized WBatch: {:02x?}", bytes); + + let mut rbatch = RBatch::new(config, bytes.to_vec().into_boxed_slice()); + println!("Decoded RBatch: {:?}", rbatch); + rbatch + .initialize(|| { + zenoh_buffers::vec::uninit(config.mtu as usize).into_boxed_slice() + }) + .unwrap(); + println!("Initialized RBatch: {:?}", rbatch); + let msg_out: TransportMessage = rbatch.decode().unwrap(); + assert_eq!(msg_in, msg_out); + } + } + } + #[test] fn serialization_batch() { - let mut batch = WBatch::new(u16::MAX, true); + let config = BatchConfig { + mtu: BatchSize::MAX, + #[cfg(feature = "transport_compression")] + is_compression: false, + }; + let mut batch = WBatch::new(config); let tmsg: TransportMessage = KeepAlive.into(); let nmsg: NetworkMessage = Push { @@ -391,12 +495,12 @@ mod tests { }; // Serialize with a frame - batch.encode((&nmsg, frame)).unwrap(); + batch.encode((&nmsg, &frame)).unwrap(); assert_ne!(batch.len(), 0); nmsgs_in.push(nmsg.clone()); frame.reliability = Reliability::BestEffort; - batch.encode((&nmsg, frame)).unwrap(); + batch.encode((&nmsg, &frame)).unwrap(); assert_ne!(batch.len(), 0); nmsgs_in.push(nmsg.clone()); @@ -410,7 +514,7 @@ mod tests { // Serialize with a frame frame.sn = 1; - batch.encode((&nmsg, frame)).unwrap(); + batch.encode((&nmsg, &frame)).unwrap(); assert_ne!(batch.len(), 0); 
nmsgs_in.push(nmsg.clone()); } diff --git a/io/zenoh-transport/src/common/defragmentation.rs b/io/zenoh-transport/src/common/defragmentation.rs index be734cad45..8fab075fe4 100644 --- a/io/zenoh-transport/src/common/defragmentation.rs +++ b/io/zenoh-transport/src/common/defragmentation.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use super::seq_num::SeqNum; -use zenoh_buffers::{reader::HasReader, SplitBuffer, ZBuf, ZSlice}; +use zenoh_buffers::{buffer::Buffer, reader::HasReader, ZBuf, ZSlice}; use zenoh_codec::{RCodec, Zenoh080Reliability}; use zenoh_protocol::{ core::{Bits, Reliability}, diff --git a/io/zenoh-transport/src/common/mod.rs b/io/zenoh-transport/src/common/mod.rs index 0837ced4f7..c7de8a64ce 100644 --- a/io/zenoh-transport/src/common/mod.rs +++ b/io/zenoh-transport/src/common/mod.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -pub(crate) mod batch; +pub mod batch; pub(crate) mod defragmentation; pub(crate) mod pipeline; pub(crate) mod priority; diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 47c5ef4a4d..19e7a47289 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -1,3 +1,5 @@ +use crate::common::batch::BatchConfig; + // // Copyright (c) 2023 ZettaScale Technology // @@ -11,8 +13,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::batch::{Encode, WBatch, WError}; -use super::priority::{TransportChannelTx, TransportPriorityTx}; +use super::{ + batch::{Encode, WBatch}, + priority::{TransportChannelTx, TransportPriorityTx}, +}; use async_std::prelude::FutureExt; use flume::{bounded, Receiver, Sender}; use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; @@ -25,7 +29,7 @@ use zenoh_buffers::{ writer::HasWriter, ZBuf, }; -use zenoh_codec::{WCodec, Zenoh080}; +use zenoh_codec::{transport::batch::BatchError, WCodec, Zenoh080}; use zenoh_config::QueueSizeConf; use zenoh_core::zlock; use 
zenoh_protocol::core::Reliability; @@ -187,11 +191,11 @@ impl StageIn { ext_qos: frame::ext::QoSType::new(priority), }; - if let WError::NewFrame = e { + if let BatchError::NewFrame = e { // Attempt a serialization with a new frame - if batch.encode((&*msg, frame)).is_ok() { + if batch.encode((&*msg, &frame)).is_ok() { zretok!(batch); - }; + } } if !batch.is_empty() { @@ -201,9 +205,9 @@ impl StageIn { } // Attempt a second serialization on fully empty batch - if batch.encode((&*msg, frame)).is_ok() { + if batch.encode((&*msg, &frame)).is_ok() { zretok!(batch); - }; + } // The second serialization attempt has failed. This means that the message is // too large for the current batch size: we need to fragment. @@ -231,7 +235,7 @@ impl StageIn { batch = zgetbatch_rets!(true); // Serialize the message fragmnet - match batch.encode((&mut reader, fragment)) { + match batch.encode((&mut reader, &mut fragment)) { Ok(_) => { // Update the SN fragment.sn = tch.sn.get(); @@ -378,8 +382,7 @@ struct StageOutIn { impl StageOutIn { #[inline] fn try_pull(&mut self) -> Pull { - if let Some(mut batch) = self.s_out_r.pull() { - batch.write_len(); + if let Some(batch) = self.s_out_r.pull() { self.backoff.stop(); return Pull::Some(batch); } @@ -397,16 +400,14 @@ impl StageOutIn { // No new bytes have been written on the batch, try to pull if let Ok(mut g) = self.current.try_lock() { // First try to pull from stage OUT - if let Some(mut batch) = self.s_out_r.pull() { - batch.write_len(); + if let Some(batch) = self.s_out_r.pull() { self.backoff.stop(); return Pull::Some(batch); } // An incomplete (non-empty) batch is available in the state IN pipeline. 
match g.take() { - Some(mut batch) => { - batch.write_len(); + Some(batch) => { self.backoff.stop(); return Pull::Some(batch); } @@ -420,8 +421,7 @@ impl StageOutIn { } std::cmp::Ordering::Less => { // There should be a new batch in Stage OUT - if let Some(mut batch) = self.s_out_r.pull() { - batch.write_len(); + if let Some(batch) = self.s_out_r.pull() { self.backoff.stop(); return Pull::Some(batch); } @@ -469,8 +469,7 @@ impl StageOut { fn drain(&mut self, guard: &mut MutexGuard<'_, Option>) -> Vec { let mut batches = vec![]; // Empty the ring buffer - while let Some(mut batch) = self.s_in.s_out_r.pull() { - batch.write_len(); + while let Some(batch) = self.s_in.s_out_r.pull() { batches.push(batch); } // Take the current batch @@ -484,6 +483,8 @@ impl StageOut { #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct TransmissionPipelineConf { pub(crate) is_streamed: bool, + #[cfg(feature = "transport_compression")] + pub(crate) is_compression: bool, pub(crate) batch_size: BatchSize, pub(crate) queue_size: [usize; Priority::NUM], pub(crate) backoff: Duration, @@ -493,6 +494,8 @@ impl Default for TransmissionPipelineConf { fn default() -> Self { Self { is_streamed: false, + #[cfg(feature = "transport_compression")] + is_compression: false, batch_size: BatchSize::MAX, queue_size: [1; Priority::NUM], backoff: Duration::from_micros(1), @@ -530,9 +533,13 @@ impl TransmissionPipeline { let (mut s_ref_w, s_ref_r) = RingBuffer::::init(); // Fill the refill ring buffer with batches for _ in 0..*num { - assert!(s_ref_w - .push(WBatch::new(config.batch_size, config.is_streamed)) - .is_none()); + let bc = BatchConfig { + mtu: config.batch_size, + #[cfg(feature = "transport_compression")] + is_compression: config.is_compression, + }; + let batch = WBatch::new(bc); + assert!(s_ref_w.push(batch).is_none()); } // Create the channel for notifying that new batches are in the refill ring buffer // This is a SPSC channel @@ -730,6 +737,8 @@ mod tests { const CONFIG: 
TransmissionPipelineConf = TransmissionPipelineConf { is_streamed: true, + #[cfg(feature = "transport_compression")] + is_compression: true, batch_size: BatchSize::MAX, queue_size: [1; Priority::NUM], backoff: Duration::from_micros(1), @@ -782,7 +791,7 @@ mod tests { batches += 1; bytes += batch.len() as usize; // Create a ZBuf for deserialization starting from the batch - let bytes = batch.as_bytes(); + let bytes = batch.as_slice(); // Deserialize the messages let mut reader = bytes.reader(); let codec = Zenoh080::new(); diff --git a/io/zenoh-transport/src/lib.rs b/io/zenoh-transport/src/lib.rs index 05240710f6..5432394756 100644 --- a/io/zenoh-transport/src/lib.rs +++ b/io/zenoh-transport/src/lib.rs @@ -17,10 +17,10 @@ //! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -mod common; -mod manager; -mod multicast; -mod primitives; +pub mod common; +pub mod manager; +pub mod multicast; +pub mod primitives; pub mod unicast; #[cfg(feature = "stats")] @@ -29,13 +29,11 @@ pub use common::stats; #[cfg(feature = "shared-memory")] mod shm; +use crate::{multicast::TransportMulticast, unicast::TransportUnicast}; pub use manager::*; -pub use multicast::*; -pub use primitives::*; use serde::Serialize; use std::any::Any; use std::sync::Arc; -pub use unicast::*; use zenoh_link::Link; use zenoh_protocol::core::{WhatAmI, ZenohId}; use zenoh_protocol::network::NetworkMessage; diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 6847b12dd8..3c225274aa 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -49,14 +49,14 @@ use zenoh_result::{bail, ZResult}; /// impl TransportEventHandler for MySH { /// fn new_unicast(&self, /// _peer: TransportPeer, -/// _transport: TransportUnicast +/// _transport: unicast::TransportUnicast /// ) -> ZResult> { /// Ok(Arc::new(DummyTransportPeerEventHandler)) /// } /// /// fn new_multicast( /// &self, -/// 
_transport: TransportMulticast, +/// _transport: multicast::TransportMulticast, /// ) -> ZResult> { /// Ok(Arc::new(DummyTransportMulticastEventHandler)) /// } diff --git a/io/zenoh-transport/src/multicast/establishment.rs b/io/zenoh-transport/src/multicast/establishment.rs index fc4ad21da3..e31ab05d30 100644 --- a/io/zenoh-transport/src/multicast/establishment.rs +++ b/io/zenoh-transport/src/multicast/establishment.rs @@ -13,8 +13,12 @@ // use crate::{ common::seq_num, - multicast::{transport::TransportMulticastInner, TransportMulticast}, - TransportConfigMulticast, TransportManager, + multicast::{ + link::{TransportLinkMulticast, TransportLinkMulticastConfig}, + transport::TransportMulticastInner, + TransportConfigMulticast, TransportMulticast, + }, + TransportManager, }; use rand::Rng; use std::sync::Arc; @@ -57,6 +61,13 @@ pub(crate) async fn open_link( // Create the transport let locator = link.get_dst().to_owned(); + let config = TransportLinkMulticastConfig { + mtu: link.get_mtu(), + #[cfg(feature = "transport_compression")] + is_compression: manager.config.multicast.is_compression, + }; + let link = TransportLinkMulticast::new(link, config); + let config = TransportConfigMulticast { link, sn_resolution, diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index b430e7efb1..937216dd08 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -11,31 +11,256 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::common::{pipeline::TransmissionPipeline, priority::TransportPriorityTx}; -use super::transport::TransportMulticastInner; -use crate::common::batch::WBatch; -use crate::common::pipeline::{ - TransmissionPipelineConf, TransmissionPipelineConsumer, TransmissionPipelineProducer, -}; #[cfg(feature = "stats")] use crate::stats::TransportStats; -use async_std::prelude::FutureExt; -use async_std::task; -use async_std::task::JoinHandle; -use std::convert::TryInto; 
-use std::sync::Arc; -use std::time::{Duration, Instant}; -use zenoh_buffers::ZSlice; +use crate::{ + common::{ + batch::{BatchConfig, Encode, Finalize, RBatch, WBatch}, + pipeline::{ + TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, + TransmissionPipelineProducer, + }, + priority::TransportPriorityTx, + }, + multicast::transport::TransportMulticastInner, +}; +use async_std::{ + prelude::FutureExt, + task::{self, JoinHandle}, +}; +use std::{ + convert::TryInto, + fmt, + sync::Arc, + time::{Duration, Instant}, +}; +#[cfg(feature = "transport_compression")] +use zenoh_buffers::BBuf; +use zenoh_buffers::{ZSlice, ZSliceBuffer}; use zenoh_core::zlock; -use zenoh_link::{LinkMulticast, Locator}; +use zenoh_link::{Link, LinkMulticast, Locator}; use zenoh_protocol::{ core::{Bits, Priority, Resolution, WhatAmI, ZenohId}, - transport::{BatchSize, Join, PrioritySn, TransportMessage, TransportSn}, + transport::{BatchSize, Close, Join, PrioritySn, TransportMessage, TransportSn}, }; -use zenoh_result::{bail, zerror, ZResult}; -use zenoh_sync::{RecyclingObjectPool, Signal}; +use zenoh_result::{zerror, ZResult}; +use zenoh_sync::{RecyclingObject, RecyclingObjectPool, Signal}; + +/****************************/ +/* TRANSPORT MULTICAST LINK */ +/****************************/ +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub(crate) struct TransportLinkMulticastConfig { + // MTU + pub(crate) mtu: BatchSize, + // Compression is active on the link + #[cfg(feature = "transport_compression")] + pub(crate) is_compression: bool, +} + +#[derive(Clone, PartialEq, Eq)] +pub(crate) struct TransportLinkMulticast { + pub(crate) link: LinkMulticast, + pub(crate) config: TransportLinkMulticastConfig, +} + +impl TransportLinkMulticast { + pub(crate) fn new(link: LinkMulticast, mut config: TransportLinkMulticastConfig) -> Self { + config.mtu = link.get_mtu().min(config.mtu); + Self { link, config } + } + + const fn batch_config(&self) -> BatchConfig { + BatchConfig { + 
mtu: self.config.mtu, + #[cfg(feature = "transport_compression")] + is_compression: self.config.is_compression, + } + } + + pub(crate) fn tx(&self) -> TransportLinkMulticastTx { + TransportLinkMulticastTx { + inner: self.clone(), + #[cfg(feature = "transport_compression")] + buffer: self.config.is_compression.then_some(BBuf::with_capacity( + lz4_flex::block::get_maximum_output_size(self.config.mtu as usize), + )), + } + } + + pub(crate) fn rx(&self) -> TransportLinkMulticastRx { + TransportLinkMulticastRx { + inner: self.clone(), + } + } + + pub(crate) async fn send(&self, msg: &TransportMessage) -> ZResult { + let mut link = self.tx(); + link.send(msg).await + } + + // pub(crate) async fn recv(&self) -> ZResult<(TransportMessage, Locator)> { + // let mut link = self.rx(); + // link.recv().await + // } + + pub(crate) async fn close(&self, reason: Option) -> ZResult<()> { + if let Some(reason) = reason { + // Build the close message + let message: TransportMessage = Close { + reason, + session: false, + } + .into(); + // Send the close message on the link + let _ = self.send(&message).await; + } + self.link.close().await + } +} + +impl fmt::Display for TransportLinkMulticast { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.link) + } +} + +impl fmt::Debug for TransportLinkMulticast { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TransportLinkMulticast") + .field("link", &self.link) + .field("config", &self.config) + .finish() + } +} + +impl From<&TransportLinkMulticast> for Link { + fn from(link: &TransportLinkMulticast) -> Self { + Link::from(&link.link) + } +} + +impl From for Link { + fn from(link: TransportLinkMulticast) -> Self { + Link::from(link.link) + } +} + +pub(crate) struct TransportLinkMulticastTx { + pub(crate) inner: TransportLinkMulticast, + #[cfg(feature = "transport_compression")] + pub(crate) buffer: Option, +} + +impl TransportLinkMulticastTx { + pub(crate) async fn 
send_batch(&mut self, batch: &mut WBatch) -> ZResult<()> { + const ERR: &str = "Write error on link: "; + + let res = batch + .finalize( + #[cfg(feature = "transport_compression")] + self.buffer.as_mut(), + ) + .map_err(|_| zerror!("{ERR}{self}"))?; + + let bytes = match res { + Finalize::Batch => batch.as_slice(), + #[cfg(feature = "transport_compression")] + Finalize::Buffer => self + .buffer + .as_ref() + .ok_or_else(|| zerror!("Invalid buffer finalization"))? + .as_slice(), + }; + + // Send the message on the link + self.inner.link.write_all(bytes).await?; + + Ok(()) + } + + pub(crate) async fn send(&mut self, msg: &TransportMessage) -> ZResult { + const ERR: &str = "Write error on link: "; + + // Create the batch for serializing the message + let mut batch = WBatch::new(self.inner.batch_config()); + batch.encode(msg).map_err(|_| zerror!("{ERR}{self}"))?; + let len = batch.len() as usize; + self.send_batch(&mut batch).await?; + Ok(len) + } +} + +impl fmt::Display for TransportLinkMulticastTx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} + +impl fmt::Debug for TransportLinkMulticastTx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut s = f.debug_struct("TransportLinkMulticastRx"); + s.field("link", &self.inner.link) + .field("config", &self.inner.config); + #[cfg(feature = "transport_compression")] + { + s.field("buffer", &self.buffer.as_ref().map(|b| b.capacity())); + } + s.finish() + } +} + +pub(crate) struct TransportLinkMulticastRx { + pub(crate) inner: TransportLinkMulticast, +} + +impl TransportLinkMulticastRx { + pub async fn recv_batch(&self, buff: C) -> ZResult<(RBatch, Locator)> + where + C: Fn() -> T + Copy, + T: ZSliceBuffer + 'static, + { + const ERR: &str = "Read error from link: "; + + let mut into = (buff)(); + let (n, locator) = self.inner.link.read(into.as_mut_slice()).await?; + let buffer = ZSlice::make(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; + let mut 
batch = RBatch::new(self.inner.batch_config(), buffer); + batch.initialize(buff).map_err(|_| zerror!("{ERR}{self}"))?; + Ok((batch, locator.into_owned())) + } + + // pub async fn recv(&mut self) -> ZResult<(TransportMessage, Locator)> { + // let mtu = self.inner.config.mtu as usize; + // let (mut batch, locator) = self + // .recv_batch(|| zenoh_buffers::vec::uninit(mtu).into_boxed_slice()) + // .await?; + // let msg = batch + // .decode() + // .map_err(|_| zerror!("Decode error on link: {}", self))?; + // Ok((msg, locator)) + // } +} + +impl fmt::Display for TransportLinkMulticastRx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} -pub(super) struct TransportLinkMulticastConfig { +impl fmt::Debug for TransportLinkMulticastRx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TransportLinkMulticastRx") + .field("link", &self.inner.link) + .field("config", &self.inner.config) + .finish() + } +} + +/**************************************/ +/* TRANSPORT MULTICAST LINK UNIVERSAL */ +/**************************************/ +pub(super) struct TransportLinkMulticastConfigUniversal { pub(super) version: u8, pub(super) zid: ZenohId, pub(super) whatami: WhatAmI, @@ -46,9 +271,9 @@ pub(super) struct TransportLinkMulticastConfig { } #[derive(Clone)] -pub(super) struct TransportLinkMulticast { +pub(super) struct TransportLinkMulticastUniversal { // The underlying link - pub(super) link: LinkMulticast, + pub(super) link: TransportLinkMulticast, // The transmission pipeline pub(super) pipeline: Option, // The transport this link is associated to @@ -59,12 +284,12 @@ pub(super) struct TransportLinkMulticast { handle_rx: Option>>, } -impl TransportLinkMulticast { +impl TransportLinkMulticastUniversal { pub(super) fn new( transport: TransportMulticastInner, - link: LinkMulticast, - ) -> TransportLinkMulticast { - TransportLinkMulticast { + link: TransportLinkMulticast, + ) -> 
TransportLinkMulticastUniversal { + TransportLinkMulticastUniversal { transport, link, pipeline: None, @@ -75,10 +300,10 @@ impl TransportLinkMulticast { } } -impl TransportLinkMulticast { +impl TransportLinkMulticastUniversal { pub(super) fn start_tx( &mut self, - config: TransportLinkMulticastConfig, + config: TransportLinkMulticastConfigUniversal, priority_tx: Arc<[TransportPriorityTx]>, ) { let initial_sns: Vec = priority_tx @@ -106,6 +331,8 @@ impl TransportLinkMulticast { if self.handle_tx.is_none() { let tpc = TransmissionPipelineConf { is_streamed: false, + #[cfg(feature = "transport_compression")] + is_compression: self.link.config.is_compression, batch_size: config.batch_size, queue_size: self.transport.manager.config.queue_size, backoff: self.transport.manager.config.queue_backoff, @@ -120,7 +347,7 @@ impl TransportLinkMulticast { let handle = task::spawn(async move { let res = tx_task( consumer, - c_link.clone(), + c_link.tx(), config, initial_sns, #[cfg(feature = "stats")] @@ -155,7 +382,7 @@ impl TransportLinkMulticast { let handle = task::spawn(async move { // Start the consume task let res = rx_task( - c_link.clone(), + c_link.rx(), ctransport.clone(), c_signal.clone(), c_rx_buffer_size, @@ -194,7 +421,7 @@ impl TransportLinkMulticast { handle_tx.await; } - self.link.close().await + self.link.close(None).await } } @@ -203,8 +430,8 @@ impl TransportLinkMulticast { /*************************************/ async fn tx_task( mut pipeline: TransmissionPipelineConsumer, - link: LinkMulticast, - config: TransportLinkMulticastConfig, + mut link: TransportLinkMulticastTx, + config: TransportLinkMulticastConfigUniversal, mut last_sns: Vec, #[cfg(feature = "stats")] stats: Arc, ) -> ZResult<()> { @@ -237,15 +464,14 @@ async fn tx_task( .race(join(last_join, config.join_interval)) .await { - Action::Pull((batch, priority)) => { + Action::Pull((mut batch, priority)) => { // Send the buffer on the link - let bytes = batch.as_bytes(); - 
link.write_all(bytes).await?; + link.send_batch(&mut batch).await?; // Keep track of next SNs - if let Some(sn) = batch.latest_sn.reliable { + if let Some(sn) = batch.codec.latest_sn.reliable { last_sns[priority].reliable = sn; } - if let Some(sn) = batch.latest_sn.best_effort { + if let Some(sn) = batch.codec.latest_sn.best_effort { last_sns[priority].best_effort = sn; } #[cfg(feature = "stats")] @@ -297,8 +523,8 @@ async fn tx_task( Action::Stop => { // Drain the transmission pipeline and write remaining bytes on the wire let mut batches = pipeline.drain(); - for (b, _) in batches.drain(..) { - link.write_all(b.as_bytes()) + for (mut b, _) in batches.drain(..) { + link.send_batch(&mut b) .timeout(config.join_interval) .await .map_err(|_| { @@ -324,20 +550,30 @@ async fn tx_task( } async fn rx_task( - link: LinkMulticast, + mut link: TransportLinkMulticastRx, transport: TransportMulticastInner, signal: Signal, rx_buffer_size: usize, batch_size: BatchSize, ) -> ZResult<()> { enum Action { - Read((usize, Locator)), + Read((RBatch, Locator)), Stop, } - async fn read(link: &LinkMulticast, buffer: &mut [u8]) -> ZResult { - let (n, loc) = link.read(buffer).await?; - Ok(Action::Read((n, loc.into_owned()))) + async fn read( + link: &mut TransportLinkMulticastRx, + pool: &RecyclingObjectPool, + ) -> ZResult + where + T: ZSliceBuffer + 'static, + F: Fn() -> T, + RecyclingObject: ZSliceBuffer, + { + let (rbatch, locator) = link + .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) + .await?; + Ok(Action::Read((rbatch, locator))) } async fn stop(signal: Signal) -> ZResult { @@ -346,35 +582,26 @@ async fn rx_task( } // The pool of buffers - let mtu = link.get_mtu() as usize; + let mtu = link.inner.config.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; } + let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); while !signal.is_triggered() { - // Retrieve one buffer - let mut buffer = 
pool.try_take().unwrap_or_else(|| pool.alloc()); // Async read from the underlying link - let action = read(&link, &mut buffer).race(stop(signal.clone())).await?; + let action = read(&mut link, &pool).race(stop(signal.clone())).await?; match action { - Action::Read((n, loc)) => { - if n == 0 { - // Reading 0 bytes means error - bail!("{}: zero bytes reading", link); - } - + Action::Read((batch, locator)) => { #[cfg(feature = "stats")] - transport.stats.inc_rx_bytes(n); + transport.stats.inc_rx_bytes(zslice.len()); // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), 0, n) - .map_err(|_| zerror!("Read {} bytes but buffer is {} bytes", n, mtu))?; transport.read_messages( - zslice, - &link, + batch, + locator, batch_size, - &loc, #[cfg(feature = "stats")] &transport, )?; diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index 5d996d25ad..7cda3d8eb3 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -19,6 +19,8 @@ use async_std::sync::Mutex; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; +#[cfg(feature = "transport_compression")] +use zenoh_config::CompressionMulticastConf; #[cfg(feature = "shared-memory")] use zenoh_config::SharedMemoryConf; use zenoh_config::{Config, LinkTxConf}; @@ -36,6 +38,8 @@ pub struct TransportManagerConfigMulticast { pub is_qos: bool, #[cfg(feature = "shared-memory")] pub is_shm: bool, + #[cfg(feature = "transport_compression")] + pub is_compression: bool, } pub struct TransportManagerBuilderMulticast { @@ -46,6 +50,8 @@ pub struct TransportManagerBuilderMulticast { is_qos: bool, #[cfg(feature = "shared-memory")] is_shm: bool, + #[cfg(feature = "transport_compression")] + is_compression: bool, } pub struct TransportManagerStateMulticast { @@ -95,6 +101,12 @@ impl TransportManagerBuilderMulticast { self } + #[cfg(feature = "transport_compression")] + pub fn 
compression(mut self, is_compression: bool) -> Self { + self.is_compression = is_compression; + self + } + pub async fn from_config( mut self, config: &Config, @@ -107,9 +119,7 @@ impl TransportManagerBuilderMulticast { config.transport().multicast().join_interval().unwrap(), )); self = self.max_sessions(config.transport().multicast().max_sessions().unwrap()); - // @TODO: Force QoS deactivation in multicast since it is not supported - // self = self.qos(*config.transport().qos().enabled()); - self = self.qos(false); + self = self.qos(*config.transport().multicast().qos().enabled()); #[cfg(feature = "shared-memory")] { self = self.shm(*config.transport().shared_memory().enabled()); @@ -127,6 +137,8 @@ impl TransportManagerBuilderMulticast { is_qos: self.is_qos, #[cfg(feature = "shared-memory")] is_shm: self.is_shm, + #[cfg(feature = "transport_compression")] + is_compression: self.is_compression, }; let state = TransportManagerStateMulticast { @@ -147,6 +159,8 @@ impl Default for TransportManagerBuilderMulticast { let link_tx = LinkTxConf::default(); #[cfg(feature = "shared-memory")] let shm = SharedMemoryConf::default(); + #[cfg(feature = "transport_compression")] + let compression = CompressionMulticastConf::default(); let tmb = TransportManagerBuilderMulticast { lease: Duration::from_millis(*link_tx.lease()), @@ -156,6 +170,8 @@ impl Default for TransportManagerBuilderMulticast { is_qos: false, #[cfg(feature = "shared-memory")] is_shm: *shm.enabled(), + #[cfg(feature = "transport_compression")] + is_compression: *compression.enabled(), }; async_std::task::block_on(tmb.from_config(&Config::default())).unwrap() } diff --git a/io/zenoh-transport/src/multicast/mod.rs b/io/zenoh-transport/src/multicast/mod.rs index 9c1d8646f3..3ce0856df3 100644 --- a/io/zenoh-transport/src/multicast/mod.rs +++ b/io/zenoh-transport/src/multicast/mod.rs @@ -20,7 +20,9 @@ pub(crate) mod transport; pub(crate) mod tx; use super::common; -use crate::{TransportMulticastEventHandler, 
TransportPeer}; +use crate::{ + multicast::link::TransportLinkMulticast, TransportMulticastEventHandler, TransportPeer, +}; pub use manager::{ TransportManagerBuilderMulticast, TransportManagerConfigMulticast, TransportManagerParamsMulticast, @@ -31,7 +33,7 @@ use std::{ }; use transport::TransportMulticastInner; use zenoh_core::{zcondfeat, zread}; -use zenoh_link::{Link, LinkMulticast}; +use zenoh_link::Link; use zenoh_protocol::{ core::Bits, network::NetworkMessage, @@ -46,7 +48,7 @@ use zenoh_result::{zerror, ZResult}; pub(crate) struct TransportConfigMulticast { pub(crate) sn_resolution: Bits, pub(crate) initial_sns: Box<[PrioritySn]>, - pub(crate) link: LinkMulticast, + pub(crate) link: TransportLinkMulticast, #[cfg(feature = "shared-memory")] pub(crate) is_shm: bool, } diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 8dd4882ded..14f2fd619c 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -12,18 +12,20 @@ // ZettaScale Zenoh Team, // use super::transport::{TransportMulticastInner, TransportMulticastPeer}; -use crate::common::priority::TransportChannelRx; +use crate::common::{ + batch::{Decode, RBatch}, + priority::TransportChannelRx, +}; use std::sync::MutexGuard; -use zenoh_buffers::reader::{HasReader, Reader}; -use zenoh_buffers::ZSlice; -use zenoh_codec::{RCodec, Zenoh080}; use zenoh_core::{zlock, zread}; -use zenoh_link::LinkMulticast; -use zenoh_protocol::core::{Priority, Reliability}; -use zenoh_protocol::transport::{ - BatchSize, Close, Fragment, Frame, Join, KeepAlive, TransportBody, TransportSn, +use zenoh_protocol::{ + core::{Locator, Priority, Reliability}, + network::NetworkMessage, + transport::{ + BatchSize, Close, Fragment, Frame, Join, KeepAlive, TransportBody, TransportMessage, + TransportSn, + }, }; -use zenoh_protocol::{core::Locator, network::NetworkMessage, transport::TransportMessage}; use zenoh_result::{bail, zerror, ZResult}; 
/*************************************/ @@ -115,7 +117,7 @@ impl TransportMulticastInner { locator, join.zid, join.batch_size, - batch_size, + batch_size ); return Ok(()); } @@ -247,18 +249,15 @@ impl TransportMulticastInner { pub(super) fn read_messages( &self, - mut zslice: ZSlice, - link: &LinkMulticast, + mut batch: RBatch, + locator: Locator, batch_size: BatchSize, - locator: &Locator, #[cfg(feature = "stats")] transport: &TransportMulticastInner, ) -> ZResult<()> { - let codec = Zenoh080::new(); - let mut reader = zslice.reader(); - while reader.can_read() { - let msg: TransportMessage = codec - .read(&mut reader) - .map_err(|_| zerror!("{}: decoding error", link))?; + while !batch.is_empty() { + let msg: TransportMessage = batch + .decode() + .map_err(|_| zerror!("{}: decoding error", locator))?; log::trace!("Received: {:?}", msg); @@ -268,7 +267,7 @@ impl TransportMulticastInner { } let r_guard = zread!(self.peers); - match r_guard.get(locator) { + match r_guard.get(&locator) { Some(peer) => { peer.active(); match msg.body { @@ -280,7 +279,7 @@ impl TransportMulticastInner { TransportBody::KeepAlive(KeepAlive { .. }) => {} TransportBody::Close(Close { reason, .. 
}) => { drop(r_guard); - self.del_peer(locator, reason)?; + self.del_peer(&locator, reason)?; } _ => { log::debug!( @@ -294,7 +293,7 @@ impl TransportMulticastInner { None => { drop(r_guard); if let TransportBody::Join(join) = msg.body { - self.handle_join_from_unknown(join, locator, batch_size)?; + self.handle_join_from_unknown(join, &locator, batch_size)?; } } } diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index c4412447cf..ca6cddaf2b 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -12,12 +12,14 @@ // ZettaScale Zenoh Team, // use super::common::priority::{TransportPriorityRx, TransportPriorityTx}; -use super::link::{TransportLinkMulticast, TransportLinkMulticastConfig}; +use super::link::{TransportLinkMulticastConfigUniversal, TransportLinkMulticastUniversal}; #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ - TransportConfigMulticast, TransportManager, TransportMulticastEventHandler, TransportPeer, - TransportPeerEventHandler, + multicast::{ + link::TransportLinkMulticast, TransportConfigMulticast, TransportMulticastEventHandler, + }, + TransportManager, TransportPeer, TransportPeerEventHandler, }; use async_trait::async_trait; use std::{ @@ -29,7 +31,7 @@ use std::{ time::Duration, }; use zenoh_core::{zcondfeat, zread, zwrite}; -use zenoh_link::{Link, LinkMulticast, Locator}; +use zenoh_link::{Link, Locator}; use zenoh_protocol::core::Resolution; use zenoh_protocol::transport::{batch_size, Close, TransportMessage}; use zenoh_protocol::{ @@ -96,7 +98,7 @@ pub(crate) struct TransportMulticastInner { // The multicast locator - Convenience for logging pub(super) locator: Locator, // The multicast link - pub(super) link: Arc>>, + pub(super) link: Arc>>, // The callback pub(super) callback: Arc>>>, // The timer for peer leases @@ -129,7 +131,7 @@ impl TransportMulticastInner { manager, priority_tx: 
priority_tx.into_boxed_slice().into(), peers: Arc::new(RwLock::new(HashMap::new())), - locator: config.link.get_dst().to_owned(), + locator: config.link.link.get_dst().to_owned(), link: Arc::new(RwLock::new(None)), callback: Arc::new(RwLock::new(None)), timer: Arc::new(Timer::new(false)), @@ -137,7 +139,7 @@ impl TransportMulticastInner { stats, }; - let link = TransportLinkMulticast::new(ti.clone(), config.link); + let link = TransportLinkMulticastUniversal::new(ti.clone(), config.link); let mut guard = zwrite!(ti.link); *guard = Some(link); drop(guard); @@ -170,7 +172,7 @@ impl TransportMulticastInner { zread!(self.callback).clone() } - pub(crate) fn get_link(&self) -> LinkMulticast { + pub(crate) fn get_link(&self) -> TransportLinkMulticast { zread!(self.link).as_ref().unwrap().link.clone() } @@ -244,9 +246,9 @@ impl TransportMulticastInner { .manager .config .batch_size - .min(l.link.get_mtu()) + .min(l.link.link.get_mtu()) .min(batch_size::MULTICAST); - let config = TransportLinkMulticastConfig { + let config = TransportLinkMulticastConfigUniversal { version: self.manager.config.version, zid: self.manager.config.zid, whatami: self.manager.config.whatami, @@ -295,7 +297,7 @@ impl TransportMulticastInner { .manager .config .batch_size - .min(l.link.get_mtu()) + .min(l.link.link.get_mtu()) .min(batch_size::MULTICAST); l.start_rx(batch_size); Ok(()) diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 412affd4ea..112b471b9e 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -14,11 +14,14 @@ #[cfg(feature = "shared-memory")] use crate::unicast::shared_memory_unicast::Challenge; use crate::{ - unicast::establishment::{ - close_link, compute_sn, ext, finalize_transport, AcceptFsm, Cookie, InputFinalize, - Zenoh080Cookie, + unicast::{ + establishment::{ + compute_sn, ext, finalize_transport, AcceptFsm, Cookie, 
InputFinalize, Zenoh080Cookie, + }, + link::{TransportLinkUnicast, TransportLinkUnicastConfig, TransportLinkUnicastDirection}, + TransportConfigUnicast, }, - TransportConfigUnicast, TransportManager, + TransportManager, }; use async_std::sync::Mutex; use async_trait::async_trait; @@ -28,7 +31,7 @@ use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZSlice}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_core::{zasynclock, zcondfeat, zerror}; use zenoh_crypto::{BlockCipher, PseudoRng}; -use zenoh_link::{LinkUnicast, LinkUnicastDirection}; +use zenoh_link::LinkUnicast; use zenoh_protocol::{ core::{Field, Resolution, WhatAmI, ZenohId}, transport::{ @@ -41,21 +44,29 @@ use zenoh_result::ZResult; pub(super) type AcceptError = (zenoh_result::Error, Option); -struct StateZenoh { +struct StateTransport { batch_size: BatchSize, resolution: Resolution, -} - -struct State { - zenoh: StateZenoh, ext_qos: ext::qos::StateAccept, #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::StateAccept, #[cfg(feature = "shared-memory")] ext_shm: ext::shm::StateAccept, + ext_lowlatency: ext::lowlatency::StateAccept, +} + +#[cfg(any(feature = "transport_auth", feature = "transport_compression"))] +struct StateLink { #[cfg(feature = "transport_auth")] ext_auth: ext::auth::StateAccept, - ext_lowlatency: ext::lowlatency::StateAccept, + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::StateAccept, +} + +struct State { + transport: StateTransport, + #[cfg(any(feature = "transport_auth", feature = "transport_compression"))] + link: StateLink, } // InitSyn @@ -106,7 +117,7 @@ struct SendOpenAckOut { // Fsm struct AcceptLink<'a> { - link: &'a LinkUnicast, + link: &'a mut TransportLinkUnicast, prng: &'a Mutex, cipher: &'a BlockCipher, ext_qos: ext::qos::QoSFsm<'a>, @@ -117,16 +128,18 @@ struct AcceptLink<'a> { #[cfg(feature = "transport_auth")] ext_auth: ext::auth::AuthFsm<'a>, ext_lowlatency: ext::lowlatency::LowLatencyFsm<'a>, + 
#[cfg(feature = "transport_compression")] + ext_compression: ext::compression::CompressionFsm<'a>, } #[async_trait] -impl<'a> AcceptFsm for AcceptLink<'a> { +impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { type Error = AcceptError; type RecvInitSynIn = (&'a mut State, RecvInitSynIn); type RecvInitSynOut = RecvInitSynOut; async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { let (state, input) = input; @@ -160,38 +173,32 @@ impl<'a> AcceptFsm for AcceptLink<'a> { } // Compute the minimum SN resolution - state.zenoh.resolution = { + state.transport.resolution = { let mut res = Resolution::default(); // Frame SN let i_fsn_res = init_syn.resolution.get(Field::FrameSN); - let m_fsn_res = state.zenoh.resolution.get(Field::FrameSN); + let m_fsn_res = state.transport.resolution.get(Field::FrameSN); res.set(Field::FrameSN, i_fsn_res.min(m_fsn_res)); // Request ID let i_rid_res = init_syn.resolution.get(Field::RequestID); - let m_rid_res = state.zenoh.resolution.get(Field::RequestID); + let m_rid_res = state.transport.resolution.get(Field::RequestID); res.set(Field::RequestID, i_rid_res.min(m_rid_res)); res }; // Compute the minimum batch size - state.zenoh.batch_size = state - .zenoh + state.transport.batch_size = state + .transport .batch_size .min(init_syn.batch_size) .min(batch_size::UNICAST); // Extension QoS self.ext_qos - .recv_init_syn((&mut state.ext_qos, init_syn.ext_qos)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - self.ext_lowlatency - .recv_init_syn((&mut state.ext_lowlatency, init_syn.ext_lowlatency)) + .recv_init_syn((&mut state.transport.ext_qos, init_syn.ext_qos)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -199,21 +206,34 @@ impl<'a> AcceptFsm for AcceptLink<'a> { #[cfg(feature = "shared-memory")] let ext_shm = self .ext_shm - .recv_init_syn((&mut state.ext_shm, init_syn.ext_shm)) + .recv_init_syn((&mut state.transport.ext_shm, init_syn.ext_shm)) .await 
.map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Auth #[cfg(feature = "transport_auth")] self.ext_auth - .recv_init_syn((&mut state.ext_auth, init_syn.ext_auth)) + .recv_init_syn((&mut state.link.ext_auth, init_syn.ext_auth)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension MultiLink #[cfg(feature = "transport_multilink")] self.ext_mlink - .recv_init_syn((&mut state.ext_mlink, init_syn.ext_mlink)) + .recv_init_syn((&mut state.transport.ext_mlink, init_syn.ext_mlink)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension LowLatency + self.ext_lowlatency + .recv_init_syn((&mut state.transport.ext_lowlatency, init_syn.ext_lowlatency)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + #[cfg(feature = "transport_compression")] + self.ext_compression + .recv_init_syn((&mut state.link.ext_compression, init_syn.ext_compression)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -229,7 +249,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { type SendInitAckIn = (State, SendInitAckIn); type SendInitAckOut = SendInitAckOut; async fn send_init_ack( - &self, + self, input: Self::SendInitAckIn, ) -> Result { #[allow(unused_mut)] // Required for "shared-memory" feature @@ -238,14 +258,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { // Extension QoS let ext_qos = self .ext_qos - .send_init_ack(&state.ext_qos) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - let ext_lowlatency = self - .ext_lowlatency - .send_init_ack(&state.ext_lowlatency) + .send_init_ack(&state.transport.ext_qos) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -253,7 +266,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_shm = zcondfeat!( "shared-memory", self.ext_shm - .send_init_ack((&mut state.ext_shm, input.ext_shm)) + .send_init_ack((&mut state.transport.ext_shm, input.ext_shm)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -263,7 
+276,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_auth = zcondfeat!( "transport_auth", self.ext_auth - .send_init_ack(&state.ext_auth) + .send_init_ack(&state.link.ext_auth) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -273,7 +286,24 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_mlink = zcondfeat!( "transport_multilink", self.ext_mlink - .send_init_ack(&state.ext_mlink) + .send_init_ack(&state.transport.ext_mlink) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None + ); + + // Extension LowLatency + let ext_lowlatency = self + .ext_lowlatency + .send_init_ack(&state.transport.ext_lowlatency) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + let ext_compression = zcondfeat!( + "transport_compression", + self.ext_compression + .send_init_ack(&state.link.ext_compression) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None ); @@ -284,17 +314,19 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let cookie = Cookie { zid: input.other_zid, whatami: input.other_whatami, - resolution: state.zenoh.resolution, - batch_size: state.zenoh.batch_size, + resolution: state.transport.resolution, + batch_size: state.transport.batch_size, nonce: cookie_nonce, - ext_qos: state.ext_qos, + ext_qos: state.transport.ext_qos, #[cfg(feature = "transport_multilink")] - ext_mlink: state.ext_mlink, + ext_mlink: state.transport.ext_mlink, #[cfg(feature = "shared-memory")] - ext_shm: state.ext_shm, + ext_shm: state.transport.ext_shm, #[cfg(feature = "transport_auth")] - ext_auth: state.ext_auth, - ext_lowlatency: state.ext_lowlatency, + ext_auth: state.link.ext_auth, + ext_lowlatency: state.transport.ext_lowlatency, + #[cfg(feature = "transport_compression")] + ext_compression: state.link.ext_compression, }; let mut encrypted = vec![]; @@ -317,14 +349,15 @@ impl<'a> AcceptFsm for AcceptLink<'a> { version: input.mine_version, whatami: input.mine_whatami, zid: input.mine_zid, - resolution: 
state.zenoh.resolution, - batch_size: state.zenoh.batch_size, + resolution: state.transport.resolution, + batch_size: state.transport.batch_size, cookie, ext_qos, ext_shm, ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } .into(); @@ -341,7 +374,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { type RecvOpenSynIn = RecvOpenSynIn; type RecvOpenSynOut = (State, RecvOpenSynOut); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { let msg = self @@ -400,50 +433,62 @@ impl<'a> AcceptFsm for AcceptLink<'a> { // Rebuild the state from the cookie let mut state = State { - zenoh: StateZenoh { + transport: StateTransport { batch_size: cookie.batch_size, resolution: cookie.resolution, + ext_qos: cookie.ext_qos, + #[cfg(feature = "transport_multilink")] + ext_mlink: cookie.ext_mlink, + #[cfg(feature = "shared-memory")] + ext_shm: cookie.ext_shm, + ext_lowlatency: cookie.ext_lowlatency, + }, + #[cfg(any(feature = "transport_auth", feature = "transport_compression"))] + link: StateLink { + #[cfg(feature = "transport_auth")] + ext_auth: cookie.ext_auth, + #[cfg(feature = "transport_compression")] + ext_compression: cookie.ext_compression, }, - ext_qos: cookie.ext_qos, - #[cfg(feature = "transport_multilink")] - ext_mlink: cookie.ext_mlink, - #[cfg(feature = "shared-memory")] - ext_shm: cookie.ext_shm, - #[cfg(feature = "transport_auth")] - ext_auth: cookie.ext_auth, - ext_lowlatency: cookie.ext_lowlatency, }; // Extension QoS self.ext_qos - .recv_open_syn((&mut state.ext_qos, open_syn.ext_qos)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - self.ext_lowlatency - .recv_open_syn((&mut state.ext_lowlatency, open_syn.ext_lowlatency)) + .recv_open_syn((&mut state.transport.ext_qos, open_syn.ext_qos)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm #[cfg(feature = "shared-memory")] self.ext_shm - .recv_open_syn((&mut state.ext_shm, open_syn.ext_shm)) + .recv_open_syn((&mut 
state.transport.ext_shm, open_syn.ext_shm)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Auth #[cfg(feature = "transport_auth")] self.ext_auth - .recv_open_syn((&mut state.ext_auth, open_syn.ext_auth)) + .recv_open_syn((&mut state.link.ext_auth, open_syn.ext_auth)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension MultiLink #[cfg(feature = "transport_multilink")] self.ext_mlink - .recv_open_syn((&mut state.ext_mlink, open_syn.ext_mlink)) + .recv_open_syn((&mut state.transport.ext_mlink, open_syn.ext_mlink)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension LowLatency + self.ext_lowlatency + .recv_open_syn((&mut state.transport.ext_lowlatency, open_syn.ext_lowlatency)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + #[cfg(feature = "transport_compression")] + self.ext_compression + .recv_open_syn((&mut state.link.ext_compression, open_syn.ext_compression)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -459,7 +504,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { type SendOpenAckIn = (&'a mut State, SendOpenAckIn); type SendOpenAckOut = SendOpenAckOut; async fn send_open_ack( - &self, + self, input: Self::SendOpenAckIn, ) -> Result { let (state, input) = input; @@ -467,14 +512,14 @@ impl<'a> AcceptFsm for AcceptLink<'a> { // Extension QoS let ext_qos = self .ext_qos - .send_open_ack(&state.ext_qos) + .send_open_ack(&state.transport.ext_qos) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension LowLatency let ext_lowlatency = self .ext_lowlatency - .send_open_ack(&state.ext_lowlatency) + .send_open_ack(&state.transport.ext_lowlatency) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -482,7 +527,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_shm = zcondfeat!( "shared-memory", self.ext_shm - .send_open_ack(&mut state.ext_shm) + .send_open_ack(&mut state.transport.ext_shm) .await .map_err(|e| (e, 
Some(close::reason::GENERIC)))?, None @@ -492,7 +537,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_auth = zcondfeat!( "transport_auth", self.ext_auth - .send_open_ack(&state.ext_auth) + .send_open_ack(&state.link.ext_auth) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -502,14 +547,25 @@ impl<'a> AcceptFsm for AcceptLink<'a> { let ext_mlink = zcondfeat!( "transport_multilink", self.ext_mlink - .send_open_ack(&state.ext_mlink) + .send_open_ack(&state.transport.ext_mlink) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None + ); + + // Extension Compression + let ext_compression = zcondfeat!( + "transport_compression", + self.ext_compression + .send_open_ack(&state.link.ext_compression) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None ); // Build OpenAck message - let mine_initial_sn = compute_sn(input.mine_zid, input.other_zid, state.zenoh.resolution); + let mine_initial_sn = + compute_sn(input.mine_zid, input.other_zid, state.transport.resolution); let open_ack = OpenAck { lease: input.mine_lease, initial_sn: mine_initial_sn, @@ -518,6 +574,7 @@ impl<'a> AcceptFsm for AcceptLink<'a> { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, }; // Do not send the OpenAck right now since we might still incur in MAX_LINKS error @@ -528,8 +585,16 @@ impl<'a> AcceptFsm for AcceptLink<'a> { } pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) -> ZResult<()> { - let fsm = AcceptLink { - link, + let mtu = link.get_mtu(); + let config = TransportLinkUnicastConfig { + mtu, + direction: TransportLinkUnicastDirection::Inbound, + #[cfg(feature = "transport_compression")] + is_compression: false, + }; + let mut link = TransportLinkUnicast::new(link.clone(), config); + let mut fsm = AcceptLink { + link: &mut link, prng: &manager.prng, cipher: &manager.cipher, ext_qos: ext::qos::QoSFsm::new(), @@ -540,6 +605,8 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) 
#[cfg(feature = "transport_auth")] ext_auth: manager.state.unicast.authenticator.fsm(&manager.prng), ext_lowlatency: ext::lowlatency::LowLatencyFsm::new(), + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::CompressionFsm::new(), }; // Init handshake @@ -549,7 +616,7 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) Ok(output) => output, Err((e, reason)) => { log::debug!("{}", e); - close_link(link, reason).await; + let _ = link.close(reason).await; return Err(e); } } @@ -558,26 +625,35 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) let iack_out = { let mut state = State { - zenoh: StateZenoh { - batch_size: manager.config.batch_size, + transport: StateTransport { + batch_size: manager.config.batch_size.min(batch_size::UNICAST).min(mtu), resolution: manager.config.resolution, + ext_qos: ext::qos::StateAccept::new(manager.config.unicast.is_qos), + #[cfg(feature = "transport_multilink")] + ext_mlink: manager + .state + .unicast + .multilink + .accept(manager.config.unicast.max_links > 1), + #[cfg(feature = "shared-memory")] + ext_shm: ext::shm::StateAccept::new(manager.config.unicast.is_shm), + ext_lowlatency: ext::lowlatency::StateAccept::new( + manager.config.unicast.is_lowlatency, + ), + }, + #[cfg(any(feature = "transport_auth", feature = "transport_compression"))] + link: StateLink { + #[cfg(feature = "transport_auth")] + ext_auth: manager + .state + .unicast + .authenticator + .accept(&mut *zasynclock!(manager.prng)), + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::StateAccept::new( + manager.config.unicast.is_compression, + ), }, - ext_qos: ext::qos::StateAccept::new(manager.config.unicast.is_qos), - ext_lowlatency: ext::lowlatency::StateAccept::new(manager.config.unicast.is_lowlatency), - #[cfg(feature = "transport_multilink")] - ext_mlink: manager - .state - .unicast - .multilink - .accept(manager.config.unicast.max_links > 1), 
- #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::StateAccept::new(manager.config.unicast.is_shm), - #[cfg(feature = "transport_auth")] - ext_auth: manager - .state - .unicast - .authenticator - .accept(&mut *zasynclock!(manager.prng)), }; // Let's scope the Init phase in such a way memory is freed by Rust @@ -618,21 +694,25 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) let config = TransportConfigUnicast { zid: osyn_out.other_zid, whatami: osyn_out.other_whatami, - sn_resolution: state.zenoh.resolution.get(Field::FrameSN), + sn_resolution: state.transport.resolution.get(Field::FrameSN), tx_initial_sn: oack_out.open_ack.initial_sn, - is_qos: state.ext_qos.is_qos(), + is_qos: state.transport.ext_qos.is_qos(), #[cfg(feature = "transport_multilink")] - multilink: state.ext_mlink.multilink(), + multilink: state.transport.ext_mlink.multilink(), #[cfg(feature = "shared-memory")] - is_shm: state.ext_shm.is_shm(), - is_lowlatency: state.ext_lowlatency.is_lowlatency(), + is_shm: state.transport.ext_shm.is_shm(), + is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), }; - let transport = step!( - manager - .init_transport_unicast(config, link.clone(), LinkUnicastDirection::Inbound) - .await - ); + let a_config = TransportLinkUnicastConfig { + mtu: state.transport.batch_size, + direction: TransportLinkUnicastDirection::Inbound, + #[cfg(feature = "transport_compression")] + is_compression: state.link.ext_compression.is_compression(), + }; + let a_link = TransportLinkUnicast::new(link.link.clone(), a_config); + let s_link = format!("{:?}", a_link); + let transport = step!(manager.init_transport_unicast(config, a_link).await); // Send the open_ack on the link step!(link @@ -651,18 +731,16 @@ pub(crate) async fn accept_link(link: &LinkUnicast, manager: &TransportManager) let input = InputFinalize { transport: transport.clone(), other_lease: osyn_out.other_lease, - agreed_batch_size: state.zenoh.batch_size, }; - 
step!(finalize_transport(link, manager, input) + step!(finalize_transport(&link, manager, input) .await .map_err(|e| (e, Some(close::reason::INVALID)))); log::debug!( - "New transport link accepted from {} to {}: {}. Batch size: {}.", + "New transport link accepted from {} to {}: {}.", osyn_out.other_zid, manager.config.zid, - link, - state.zenoh.batch_size, + s_link, ); Ok(()) diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index 0c6b5519e8..e9916be7e6 100644 --- a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -38,6 +38,8 @@ pub(crate) struct Cookie { #[cfg(feature = "transport_auth")] pub(crate) ext_auth: ext::auth::StateAccept, pub(crate) ext_lowlatency: ext::lowlatency::StateAccept, + #[cfg(feature = "transport_compression")] + pub(crate) ext_compression: ext::compression::StateAccept, } impl WCodec<&Cookie, &mut W> for Zenoh080 @@ -62,6 +64,8 @@ where #[cfg(feature = "transport_auth")] self.write(&mut *writer, &x.ext_auth)?; self.write(&mut *writer, &x.ext_lowlatency)?; + #[cfg(feature = "transport_compression")] + self.write(&mut *writer, &x.ext_compression)?; Ok(()) } @@ -90,6 +94,8 @@ where #[cfg(feature = "transport_auth")] let ext_auth: ext::auth::StateAccept = self.read(&mut *reader)?; let ext_lowlatency: ext::lowlatency::StateAccept = self.read(&mut *reader)?; + #[cfg(feature = "transport_compression")] + let ext_compression: ext::compression::StateAccept = self.read(&mut *reader)?; let cookie = Cookie { zid, @@ -105,6 +111,8 @@ where #[cfg(feature = "transport_auth")] ext_auth, ext_lowlatency, + #[cfg(feature = "transport_compression")] + ext_compression, }; Ok(cookie) @@ -174,6 +182,8 @@ impl Cookie { #[cfg(feature = "transport_auth")] ext_auth: ext::auth::StateAccept::rand(), ext_lowlatency: ext::lowlatency::StateAccept::rand(), + #[cfg(feature = "transport_compression")] + ext_compression: 
ext::compression::StateAccept::rand(), } } } diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs index 0e9c385e46..99a11ee3a9 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs @@ -288,13 +288,13 @@ macro_rules! ztake { /* OPEN */ /*************************************/ #[async_trait] -impl<'a> OpenFsm for AuthFsm<'a> { +impl<'a> OpenFsm for &'a AuthFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, state: Self::SendInitSynIn, ) -> Result { const S: &str = "Auth extension - Send InitSyn."; @@ -341,7 +341,7 @@ impl<'a> OpenFsm for AuthFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { const S: &str = "Auth extension - Recv InitAck."; @@ -385,7 +385,7 @@ impl<'a> OpenFsm for AuthFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, state: Self::SendOpenSynIn, ) -> Result { const S: &str = "Auth extension - Send OpenSyn."; @@ -432,7 +432,7 @@ impl<'a> OpenFsm for AuthFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result { const S: &str = "Auth extension - Recv OpenAck."; @@ -478,13 +478,13 @@ impl<'a> OpenFsm for AuthFsm<'a> { /* ACCEPT */ /*************************************/ #[async_trait] -impl<'a> AcceptFsm for AuthFsm<'a> { +impl<'a> AcceptFsm for &'a AuthFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "Auth extension - Recv InitSyn."; @@ -528,7 +528,7 @@ 
impl<'a> AcceptFsm for AuthFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, state: Self::SendInitAckIn, ) -> Result { const S: &str = "Auth extension - Send InitAck."; @@ -575,7 +575,7 @@ impl<'a> AcceptFsm for AuthFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { const S: &str = "Auth extension - Recv OpenSyn."; @@ -619,7 +619,7 @@ impl<'a> AcceptFsm for AuthFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, state: Self::SendOpenAckIn, ) -> Result { const S: &str = "Auth extension - Send OpenAck."; @@ -663,133 +663,3 @@ impl<'a> AcceptFsm for AuthFsm<'a> { Ok(output) } } - -// #[derive(Clone)] -// pub struct TransportAuthenticator(Arc); - -// impl TransportAuthenticator { -// pub async fn from_config(_config: &Config) -> ZResult> { -// #[allow(unused_mut)] -// let mut pas = HashSet::new(); - -// #[cfg(feature = "auth_pubkey")] -// { -// let mut res = PubKeyAuthenticator::from_config(_config).await?; -// if let Some(pa) = res.take() { -// pas.insert(pa.into()); -// } -// } - -// #[cfg(feature = "auth_usrpwd")] -// { -// let mut res = UserPasswordAuthenticator::from_config(_config).await?; -// if let Some(pa) = res.take() { -// pas.insert(pa.into()); -// } -// } - -// Ok(pas) -// } -// } - -/*************************************/ -/* ACCEPT */ -/*************************************/ - -// Return the attachment to be included in the InitSyn message. -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] the initial InitSyn message will be sent on -// -// * `node_id` - The [`ZenohId`][ZenohId] of the sender of the InitSyn, i.e., the peer -// initiating a new transport. 
-// -// async fn get_init_syn_properties( -// &self, -// link: &AuthenticatedLink, -// node_id: &ZenohId, -// ) -> ZResult>>; - -// Return the attachment to be included in the InitAck message to be sent -// in response of the authenticated InitSyn. -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] the InitSyn message was received on -// -// * `cookie` - The Cookie containing the internal state -// -// * `property` - The optional `Property` included in the InitSyn message -// -// async fn handle_init_syn( -// &self, -// link: &AuthenticatedLink, -// cookie: &Cookie, -// property: Option>, -// ) -> ZResult<(Option>, Option>)>; // (Attachment, Cookie) - -// Return the attachment to be included in the OpenSyn message to be sent -// in response of the authenticated InitAck. -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] the InitSyn message was received on -// -// * `node_id` - The [`ZenohId`][ZenohId] of the sender of the InitAck message -// -// * `sn_resolution` - The sn_resolution negotiated by the sender of the InitAck message -// -// * `properties` - The optional `Property` included in the InitAck message -// -// async fn handle_init_ack( -// &self, -// link: &AuthenticatedLink, -// node_id: &ZenohId, -// sn_resolution: u64, -// property: Option>, -// ) -> ZResult>>; - -// Return the attachment to be included in the OpenAck message to be sent -// in response of the authenticated OpenSyn. -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] the OpenSyn message was received on -// -// * `properties` - The optional `Property` included in the OpenSyn message -// -// * `cookie` - The optional `Property` included in the OpenSyn message -// -// async fn handle_open_syn( -// &self, -// link: &AuthenticatedLink, -// cookie: &Cookie, -// property: (Option>, Option>), // (Attachment, Cookie) -// ) -> ZResult>>; - -// Auhtenticate the OpenAck. 
No message is sent back in response to an OpenAck -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] the OpenAck message was received on -// -// * `properties` - The optional `Property` included in the OpenAck message -// -// async fn handle_open_ack( -// &self, -// link: &AuthenticatedLink, -// property: Option>, -// ) -> ZResult>>; - -// Handle any error on a link. This callback is mainly used to clean-up any internal state -// of the authenticator in such a way no unnecessary data is left around -// -// # Arguments -// * `link` - The [`AuthenticatedPeerLink`][AuthenticatedPeerLink] generating the error -// -// async fn handle_link_err(&self, link: &AuthenticatedLink); - -// Handle any error on a link. This callback is mainly used to clean-up any internal state -// of the authenticator in such a way no unnecessary data is left around -// -// # Arguments -// * `peerd_id` - The [`ZenohId`][ZenohId] of the transport being closed. -// -// async fn handle_close(&self, node_id: &ZenohId); -// } diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs index d34480fded..25ecc0e24e 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs @@ -363,13 +363,13 @@ impl StateOpen { } #[async_trait] -impl<'a> OpenFsm for AuthPubKeyFsm<'a> { +impl<'a> OpenFsm for &'a AuthPubKeyFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, _input: Self::SendInitSynIn, ) -> Result { const S: &str = "PubKey extension - Send InitSyn."; @@ -392,7 +392,7 @@ impl<'a> OpenFsm for AuthPubKeyFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { const S: &str = "PubKey extension - Recv 
InitAck."; @@ -438,7 +438,7 @@ impl<'a> OpenFsm for AuthPubKeyFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, state: Self::SendOpenSynIn, ) -> Result { const S: &str = "PubKey extension - Send OpenSyn."; @@ -461,7 +461,7 @@ impl<'a> OpenFsm for AuthPubKeyFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result { const S: &str = "PubKey extension - Recv OpenAck."; @@ -539,13 +539,13 @@ impl PartialEq for StateAccept { } #[async_trait] -impl<'a> AcceptFsm for AuthPubKeyFsm<'a> { +impl<'a> AcceptFsm for &'a AuthPubKeyFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "PubKey extension - Recv InitSyn."; @@ -583,7 +583,7 @@ impl<'a> AcceptFsm for AuthPubKeyFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, state: Self::SendInitAckIn, ) -> Result { const S: &str = "PubKey extension - Send InitAck."; @@ -607,7 +607,7 @@ impl<'a> AcceptFsm for AuthPubKeyFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { const S: &str = "PubKey extension - Recv OpenSyn."; @@ -646,7 +646,7 @@ impl<'a> AcceptFsm for AuthPubKeyFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, _input: Self::SendOpenAckIn, ) -> Result { const S: &str = "PubKey extension - Send OpenAck."; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index 521986ae00..d66a4a02c7 100644 --- 
a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -276,13 +276,13 @@ where /// ZExtUnit #[async_trait] -impl<'a> OpenFsm for AuthUsrPwdFsm<'a> { +impl<'a> OpenFsm for &'a AuthUsrPwdFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, _input: Self::SendInitSynIn, ) -> Result { let output = zasyncread!(self.inner) @@ -295,7 +295,7 @@ impl<'a> OpenFsm for AuthUsrPwdFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { const S: &str = "UsrPwd extension - Recv InitSyn."; @@ -316,7 +316,7 @@ impl<'a> OpenFsm for AuthUsrPwdFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, state: Self::SendOpenSynIn, ) -> Result { const S: &str = "UsrPwd extension - Send OpenSyn."; @@ -352,7 +352,7 @@ impl<'a> OpenFsm for AuthUsrPwdFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result { const S: &str = "UsrPwd extension - Recv OpenAck."; @@ -370,13 +370,13 @@ impl<'a> OpenFsm for AuthUsrPwdFsm<'a> { /* ACCEPT */ /*************************************/ #[async_trait] -impl<'a> AcceptFsm for AuthUsrPwdFsm<'a> { +impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "UsrPwd extension - Recv InitSyn."; @@ -392,7 +392,7 @@ impl<'a> AcceptFsm for AuthUsrPwdFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, state: Self::SendInitAckIn, ) -> Result { 
Ok(Some(ZExtZ64::new(state.nonce))) @@ -401,7 +401,7 @@ impl<'a> AcceptFsm for AuthUsrPwdFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { const S: &str = "UsrPwd extension - Recv OpenSyn."; @@ -436,7 +436,7 @@ impl<'a> AcceptFsm for AuthUsrPwdFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, _input: Self::SendOpenAckIn, ) -> Result { Ok(Some(ZExtUnit::new())) diff --git a/io/zenoh-transport/src/unicast/establishment/ext/compression.rs b/io/zenoh-transport/src/unicast/establishment/ext/compression.rs new file mode 100644 index 0000000000..2b57eb85db --- /dev/null +++ b/io/zenoh-transport/src/unicast/establishment/ext/compression.rs @@ -0,0 +1,196 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; +use async_trait::async_trait; +use core::marker::PhantomData; +use zenoh_buffers::{ + reader::{DidntRead, Reader}, + writer::{DidntWrite, Writer}, +}; +use zenoh_codec::{RCodec, WCodec, Zenoh080}; +use zenoh_protocol::transport::{init, open}; +use zenoh_result::Error as ZError; + +// Extension Fsm +pub(crate) struct CompressionFsm<'a> { + _a: PhantomData<&'a ()>, +} + +impl<'a> CompressionFsm<'a> { + pub(crate) const fn new() -> Self { + Self { _a: PhantomData } + } +} + +/*************************************/ +/* OPEN */ +/*************************************/ +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) struct StateOpen { + is_compression: bool, +} + +impl StateOpen { + pub(crate) const fn new(is_compression: bool) -> Self { + Self { is_compression } + } + + pub(crate) const fn is_compression(&self) -> bool { + self.is_compression + } +} + +#[async_trait] +impl<'a> OpenFsm for &'a CompressionFsm<'a> { + type Error = ZError; + + type SendInitSynIn = &'a StateOpen; + type SendInitSynOut = Option; + async fn send_init_syn( + self, + state: Self::SendInitSynIn, + ) -> Result { + let output = state + .is_compression + .then_some(init::ext::Compression::new()); + Ok(output) + } + + type RecvInitAckIn = (&'a mut StateOpen, Option); + type RecvInitAckOut = (); + async fn recv_init_ack( + self, + input: Self::RecvInitAckIn, + ) -> Result { + let (state, other_ext) = input; + state.is_compression &= other_ext.is_some(); + Ok(()) + } + + type SendOpenSynIn = &'a StateOpen; + type SendOpenSynOut = Option; + async fn send_open_syn( + self, + _state: Self::SendOpenSynIn, + ) -> Result { + Ok(None) + } + + type RecvOpenAckIn = (&'a mut StateOpen, Option); + type RecvOpenAckOut = (); + async fn recv_open_ack( + self, + _state: Self::RecvOpenAckIn, + ) -> Result { + Ok(()) + } +} + 
+/*************************************/ +/* ACCEPT */ +/*************************************/ +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) struct StateAccept { + is_compression: bool, +} + +impl StateAccept { + pub(crate) const fn new(is_compression: bool) -> Self { + Self { is_compression } + } + + pub(crate) const fn is_compression(&self) -> bool { + self.is_compression + } + + #[cfg(test)] + pub(crate) fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + Self::new(rng.gen_bool(0.5)) + } +} + +// Codec +impl WCodec<&StateAccept, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &StateAccept) -> Self::Output { + let is_compression = u8::from(x.is_compression); + self.write(&mut *writer, is_compression)?; + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let is_compression: u8 = self.read(&mut *reader)?; + let is_compression = is_compression == 1; + Ok(StateAccept { is_compression }) + } +} + +#[async_trait] +impl<'a> AcceptFsm for &'a CompressionFsm<'a> { + type Error = ZError; + + type RecvInitSynIn = (&'a mut StateAccept, Option); + type RecvInitSynOut = (); + async fn recv_init_syn( + self, + input: Self::RecvInitSynIn, + ) -> Result { + let (state, other_ext) = input; + state.is_compression &= other_ext.is_some(); + Ok(()) + } + + type SendInitAckIn = &'a StateAccept; + type SendInitAckOut = Option; + async fn send_init_ack( + self, + state: Self::SendInitAckIn, + ) -> Result { + let output = state + .is_compression + .then_some(init::ext::Compression::new()); + Ok(output) + } + + type RecvOpenSynIn = (&'a mut StateAccept, Option); + type RecvOpenSynOut = (); + async fn recv_open_syn( + self, + _state: Self::RecvOpenSynIn, + ) -> Result { + Ok(()) + } + + type SendOpenAckIn = &'a StateAccept; + type SendOpenAckOut = Option; + async fn send_open_ack( + self, + 
_state: Self::SendOpenAckIn, + ) -> Result { + Ok(None) + } +} diff --git a/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs b/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs index 25edbde2e1..9dda9175b1 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs @@ -52,13 +52,13 @@ impl StateOpen { } #[async_trait] -impl<'a> OpenFsm for LowLatencyFsm<'a> { +impl<'a> OpenFsm for &'a LowLatencyFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, state: Self::SendInitSynIn, ) -> Result { let output = state.is_lowlatency.then_some(init::ext::LowLatency::new()); @@ -68,7 +68,7 @@ impl<'a> OpenFsm for LowLatencyFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { let (state, other_ext) = input; @@ -79,7 +79,7 @@ impl<'a> OpenFsm for LowLatencyFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, _state: Self::SendOpenSynIn, ) -> Result { Ok(None) @@ -88,7 +88,7 @@ impl<'a> OpenFsm for LowLatencyFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, _state: Self::RecvOpenAckIn, ) -> Result { Ok(()) @@ -148,13 +148,13 @@ where } #[async_trait] -impl<'a> AcceptFsm for LowLatencyFsm<'a> { +impl<'a> AcceptFsm for &'a LowLatencyFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { let (state, other_ext) = input; @@ -165,7 +165,7 @@ impl<'a> AcceptFsm for LowLatencyFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, state: 
Self::SendInitAckIn, ) -> Result { let output = state.is_lowlatency.then_some(init::ext::LowLatency::new()); @@ -175,7 +175,7 @@ impl<'a> AcceptFsm for LowLatencyFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, _state: Self::RecvOpenSynIn, ) -> Result { Ok(()) @@ -184,7 +184,7 @@ impl<'a> AcceptFsm for LowLatencyFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, _state: Self::SendOpenAckIn, ) -> Result { Ok(None) diff --git a/io/zenoh-transport/src/unicast/establishment/ext/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/mod.rs index 956a8c5112..f4aafa832c 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/mod.rs @@ -13,6 +13,8 @@ // #[cfg(feature = "transport_auth")] pub mod auth; +#[cfg(feature = "transport_compression")] +pub(crate) mod compression; pub(crate) mod lowlatency; #[cfg(feature = "transport_multilink")] pub(crate) mod multilink; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs index 7a3f0d9f30..9c3c584c70 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs @@ -92,13 +92,13 @@ impl StateOpen { } #[async_trait] -impl<'a> OpenFsm for MultiLinkFsm<'a> { +impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, input: Self::SendInitSynIn, ) -> Result { let pubkey = match input.pubkey.as_ref() { @@ -117,7 +117,7 @@ impl<'a> OpenFsm for MultiLinkFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { const S: &str = "MultiLink 
extension - Recv InitAck."; @@ -152,7 +152,7 @@ impl<'a> OpenFsm for MultiLinkFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, input: Self::SendOpenSynIn, ) -> Result { let pubkey = match input.pubkey.as_ref() { @@ -171,7 +171,7 @@ impl<'a> OpenFsm for MultiLinkFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result { let (state, mut ext) = input; @@ -267,13 +267,13 @@ where } #[async_trait] -impl<'a> AcceptFsm for MultiLinkFsm<'a> { +impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "MultiLink extension - Recv InitSyn."; @@ -309,7 +309,7 @@ impl<'a> AcceptFsm for MultiLinkFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, input: Self::SendInitAckIn, ) -> Result { let pubkey = match input.pubkey.as_ref() { @@ -328,7 +328,7 @@ impl<'a> AcceptFsm for MultiLinkFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { let (state, ext) = input; @@ -345,7 +345,7 @@ impl<'a> AcceptFsm for MultiLinkFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, input: Self::SendOpenAckIn, ) -> Result { let pubkey = match input.pubkey.as_ref() { diff --git a/io/zenoh-transport/src/unicast/establishment/ext/qos.rs b/io/zenoh-transport/src/unicast/establishment/ext/qos.rs index b72e34c636..4626ec5998 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/qos.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/qos.rs @@ -52,13 +52,13 @@ impl StateOpen { } 
#[async_trait] -impl<'a> OpenFsm for QoSFsm<'a> { +impl<'a> OpenFsm for &'a QoSFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, state: Self::SendInitSynIn, ) -> Result { let output = state.is_qos.then_some(init::ext::QoS::new()); @@ -68,7 +68,7 @@ impl<'a> OpenFsm for QoSFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = (); async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { let (state, other_ext) = input; @@ -79,7 +79,7 @@ impl<'a> OpenFsm for QoSFsm<'a> { type SendOpenSynIn = &'a StateOpen; type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, _state: Self::SendOpenSynIn, ) -> Result { Ok(None) @@ -88,7 +88,7 @@ impl<'a> OpenFsm for QoSFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, _state: Self::RecvOpenAckIn, ) -> Result { Ok(()) @@ -148,13 +148,13 @@ where } #[async_trait] -impl<'a> AcceptFsm for QoSFsm<'a> { +impl<'a> AcceptFsm for &'a QoSFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = (); async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { let (state, other_ext) = input; @@ -165,7 +165,7 @@ impl<'a> AcceptFsm for QoSFsm<'a> { type SendInitAckIn = &'a StateAccept; type SendInitAckOut = Option; async fn send_init_ack( - &self, + self, state: Self::SendInitAckIn, ) -> Result { let output = state.is_qos.then_some(init::ext::QoS::new()); @@ -175,7 +175,7 @@ impl<'a> AcceptFsm for QoSFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, _state: Self::RecvOpenSynIn, ) -> Result { Ok(()) @@ -184,7 +184,7 @@ impl<'a> AcceptFsm for QoSFsm<'a> { type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, _state: 
Self::SendOpenAckIn, ) -> Result { Ok(None) diff --git a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index 131c0b5186..f2d6fe4dd0 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -152,13 +152,13 @@ impl StateOpen { } #[async_trait] -impl<'a> OpenFsm for ShmFsm<'a> { +impl<'a> OpenFsm for &'a ShmFsm<'a> { type Error = ZError; type SendInitSynIn = &'a StateOpen; type SendInitSynOut = Option; async fn send_init_syn( - &self, + self, state: Self::SendInitSynIn, ) -> Result { const S: &str = "Shm extension - Send InitSyn."; @@ -184,7 +184,7 @@ impl<'a> OpenFsm for ShmFsm<'a> { type RecvInitAckIn = (&'a mut StateOpen, Option); type RecvInitAckOut = Challenge; async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result { const S: &str = "Shm extension - Recv InitAck."; @@ -256,7 +256,7 @@ impl<'a> OpenFsm for ShmFsm<'a> { type SendOpenSynIn = (&'a StateOpen, Self::RecvInitAckOut); type SendOpenSynOut = Option; async fn send_open_syn( - &self, + self, input: Self::SendOpenSynIn, ) -> Result { // const S: &str = "Shm extension - Send OpenSyn."; @@ -272,7 +272,7 @@ impl<'a> OpenFsm for ShmFsm<'a> { type RecvOpenAckIn = (&'a mut StateOpen, Option); type RecvOpenAckOut = (); async fn recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result { const S: &str = "Shm extension - Recv OpenAck."; @@ -352,13 +352,13 @@ where } #[async_trait] -impl<'a> AcceptFsm for ShmFsm<'a> { +impl<'a> AcceptFsm for &'a ShmFsm<'a> { type Error = ZError; type RecvInitSynIn = (&'a mut StateAccept, Option); type RecvInitSynOut = Challenge; async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "Shm extension - Recv InitSyn."; @@ -409,7 +409,7 @@ impl<'a> AcceptFsm for ShmFsm<'a> { type SendInitAckIn = (&'a StateAccept, Self::RecvInitSynOut); type SendInitAckOut = Option; 
async fn send_init_ack( - &self, + self, input: Self::SendInitAckIn, ) -> Result { const S: &str = "Shm extension - Send InitAck."; @@ -437,7 +437,7 @@ impl<'a> AcceptFsm for ShmFsm<'a> { type RecvOpenSynIn = (&'a mut StateAccept, Option); type RecvOpenSynOut = (); async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result { const S: &str = "Shm extension - Recv OpenSyn."; @@ -480,7 +480,7 @@ impl<'a> AcceptFsm for ShmFsm<'a> { type SendOpenAckIn = &'a mut StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( - &self, + self, state: Self::SendOpenAckIn, ) -> Result { // const S: &str = "Shm extension - Send OpenAck."; diff --git a/io/zenoh-transport/src/unicast/establishment/mod.rs b/io/zenoh-transport/src/unicast/establishment/mod.rs index 6bc8c898e8..523e6e9d22 100644 --- a/io/zenoh-transport/src/unicast/establishment/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/mod.rs @@ -17,7 +17,7 @@ pub mod ext; pub(crate) mod open; use super::{TransportPeer, TransportUnicast}; -use crate::{common::seq_num, TransportManager}; +use crate::{common::seq_num, unicast::link::TransportLinkUnicast, TransportManager}; use async_trait::async_trait; use cookie::*; use sha3::{ @@ -25,10 +25,10 @@ use sha3::{ Shake128, }; use std::time::Duration; -use zenoh_link::{Link, LinkUnicast}; +use zenoh_link::Link; use zenoh_protocol::{ core::{Field, Resolution, ZenohId}, - transport::{BatchSize, Close, TransportMessage, TransportSn}, + transport::TransportSn, }; use zenoh_result::ZResult; @@ -42,28 +42,28 @@ pub trait OpenFsm { type SendInitSynIn; type SendInitSynOut; async fn send_init_syn( - &self, + self, input: Self::SendInitSynIn, ) -> Result; type RecvInitAckIn; type RecvInitAckOut; async fn recv_init_ack( - &self, + self, input: Self::RecvInitAckIn, ) -> Result; type SendOpenSynIn; type SendOpenSynOut; async fn send_open_syn( - &self, + self, input: Self::SendOpenSynIn, ) -> Result; type RecvOpenAckIn; type RecvOpenAckOut; async fn 
recv_open_ack( - &self, + self, input: Self::RecvOpenAckIn, ) -> Result; } @@ -75,28 +75,28 @@ pub trait AcceptFsm { type RecvInitSynIn; type RecvInitSynOut; async fn recv_init_syn( - &self, + self, input: Self::RecvInitSynIn, ) -> Result; type SendInitAckIn; type SendInitAckOut; async fn send_init_ack( - &self, + self, input: Self::SendInitAckIn, ) -> Result; type RecvOpenSynIn; type RecvOpenSynOut; async fn recv_open_syn( - &self, + self, input: Self::RecvOpenSynIn, ) -> Result; type SendOpenAckIn; type SendOpenAckOut; async fn send_open_ack( - &self, + self, input: Self::SendOpenAckIn, ) -> Result; } @@ -116,30 +116,13 @@ pub(super) fn compute_sn(zid1: ZenohId, zid2: ZenohId, resolution: Resolution) - TransportSn::from_le_bytes(array) & seq_num::get_mask(resolution.get(Field::FrameSN)) } -pub(super) async fn close_link(link: &LinkUnicast, reason: Option) { - if let Some(reason) = reason { - // Build the close message - let message: TransportMessage = Close { - reason, - session: false, - } - .into(); - // Send the close message on the link - let _ = link.send(&message).await; - } - - // Close the link - let _ = link.close().await; -} - pub(super) struct InputFinalize { pub(super) transport: TransportUnicast, pub(super) other_lease: Duration, - pub(super) agreed_batch_size: BatchSize, } // Finalize the transport, notify the callback and start the link tasks pub(super) async fn finalize_transport( - link: &LinkUnicast, + link: &TransportLinkUnicast, manager: &TransportManager, input: self::InputFinalize, ) -> ZResult<()> { @@ -148,12 +131,7 @@ pub(super) async fn finalize_transport( // Start the TX loop let keep_alive = manager.config.unicast.lease / manager.config.unicast.keep_alive as u32; - transport.start_tx( - link, - &manager.tx_executor, - keep_alive, - input.agreed_batch_size, - )?; + transport.start_tx(link, &manager.tx_executor, keep_alive)?; // Assign a callback if the transport is new // Keep the lock to avoid concurrent new_transport and 
closing/closed notifications @@ -185,7 +163,7 @@ pub(super) async fn finalize_transport( drop(a_guard); // Start the RX loop - transport.start_rx(link, input.other_lease, input.agreed_batch_size)?; + transport.start_rx(link, input.other_lease)?; Ok(()) } diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index dbd4872c3e..4c1314dd29 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -14,10 +14,12 @@ #[cfg(feature = "shared-memory")] use crate::unicast::shared_memory_unicast::Challenge; use crate::{ - unicast::establishment::{ - close_link, compute_sn, ext, finalize_transport, InputFinalize, OpenFsm, + unicast::{ + establishment::{compute_sn, ext, finalize_transport, InputFinalize, OpenFsm}, + link::{TransportLinkUnicast, TransportLinkUnicastConfig, TransportLinkUnicastDirection}, + TransportConfigUnicast, TransportUnicast, }, - TransportConfigUnicast, TransportManager, TransportUnicast, + TransportManager, }; use async_trait::async_trait; use std::time::Duration; @@ -25,7 +27,7 @@ use zenoh_buffers::ZSlice; #[cfg(feature = "transport_auth")] use zenoh_core::zasynclock; use zenoh_core::{zcondfeat, zerror}; -use zenoh_link::{LinkUnicast, LinkUnicastDirection}; +use zenoh_link::LinkUnicast; use zenoh_protocol::{ core::{Field, Resolution, WhatAmI, ZenohId}, transport::{ @@ -37,21 +39,29 @@ use zenoh_result::ZResult; type OpenError = (zenoh_result::Error, Option); -struct StateZenoh { +struct StateTransport { batch_size: BatchSize, resolution: Resolution, -} - -struct State { - zenoh: StateZenoh, ext_qos: ext::qos::StateOpen, #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::StateOpen, #[cfg(feature = "shared-memory")] ext_shm: ext::shm::StateOpen, + ext_lowlatency: ext::lowlatency::StateOpen, +} + +#[cfg(any(feature = "transport_auth", feature = "transport_compression"))] +struct StateLink { #[cfg(feature = 
"transport_auth")] ext_auth: ext::auth::StateOpen, - ext_lowlatency: ext::lowlatency::StateOpen, + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::StateOpen, +} + +struct State { + transport: StateTransport, + #[cfg(any(feature = "transport_auth", feature = "transport_compression"))] + link: StateLink, } // InitSyn @@ -92,7 +102,6 @@ struct RecvOpenAckOut { // FSM struct OpenLink<'a> { - link: &'a LinkUnicast, ext_qos: ext::qos::QoSFsm<'a>, #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::MultiLinkFsm<'a>, @@ -101,31 +110,26 @@ struct OpenLink<'a> { #[cfg(feature = "transport_auth")] ext_auth: ext::auth::AuthFsm<'a>, ext_lowlatency: ext::lowlatency::LowLatencyFsm<'a>, + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::CompressionFsm<'a>, } #[async_trait] -impl<'a> OpenFsm for OpenLink<'a> { +impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { type Error = OpenError; - type SendInitSynIn = (&'a mut State, SendInitSynIn); + type SendInitSynIn = (&'a mut TransportLinkUnicast, &'a mut State, SendInitSynIn); type SendInitSynOut = (); async fn send_init_syn( - &self, + self, input: Self::SendInitSynIn, ) -> Result { - let (state, input) = input; + let (link, state, input) = input; // Extension QoS let ext_qos = self .ext_qos - .send_init_syn(&state.ext_qos) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - let ext_lowlatency = self - .ext_lowlatency - .send_init_syn(&state.ext_lowlatency) + .send_init_syn(&state.transport.ext_qos) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -133,7 +137,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_shm = zcondfeat!( "shared-memory", self.ext_shm - .send_init_syn(&state.ext_shm) + .send_init_syn(&state.transport.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -143,7 +147,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_auth = zcondfeat!( "transport_auth", self.ext_auth - 
.send_init_syn(&state.ext_auth) + .send_init_syn(&state.link.ext_auth) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -153,7 +157,24 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_mlink = zcondfeat!( "transport_multilink", self.ext_mlink - .send_init_syn(&state.ext_mlink) + .send_init_syn(&state.transport.ext_mlink) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None + ); + + // Extension LowLatency + let ext_lowlatency = self + .ext_lowlatency + .send_init_syn(&state.transport.ext_lowlatency) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + let ext_compression = zcondfeat!( + "transport_compression", + self.ext_compression + .send_init_syn(&state.link.ext_compression) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -163,18 +184,18 @@ impl<'a> OpenFsm for OpenLink<'a> { version: input.mine_version, whatami: input.mine_whatami, zid: input.mine_zid, - batch_size: state.zenoh.batch_size, - resolution: state.zenoh.resolution, + batch_size: state.transport.batch_size, + resolution: state.transport.resolution, ext_qos, ext_shm, ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } .into(); - let _ = self - .link + let _ = link .send(&msg) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -182,14 +203,15 @@ impl<'a> OpenFsm for OpenLink<'a> { Ok(()) } - type RecvInitAckIn = &'a mut State; + type RecvInitAckIn = (&'a mut TransportLinkUnicast, &'a mut State); type RecvInitAckOut = RecvInitAckOut; async fn recv_init_ack( - &self, - state: Self::RecvInitAckIn, + self, + input: Self::RecvInitAckIn, ) -> Result { - let msg = self - .link + let (link, state) = input; + + let msg = link .recv() .await .map_err(|e| (e, Some(close::reason::INVALID)))?; @@ -200,7 +222,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let e = zerror!( "Received a close message (reason {}) in response to an InitSyn on: {}", close::reason_to_str(reason), - self.link, + link, ); match reason { 
close::reason::MAX_LINKS => log::debug!("{}", e), @@ -211,7 +233,7 @@ impl<'a> OpenFsm for OpenLink<'a> { _ => { let e = zerror!( "Received an invalid message in response to an InitSyn on {}: {:?}", - self.link, + link, msg.body ); log::error!("{}", e); @@ -220,17 +242,17 @@ impl<'a> OpenFsm for OpenLink<'a> { }; // Compute the minimum SN resolution - state.zenoh.resolution = { + state.transport.resolution = { let mut res = Resolution::default(); // Frame SN let i_fsn_res = init_ack.resolution.get(Field::FrameSN); - let m_fsn_res = state.zenoh.resolution.get(Field::FrameSN); + let m_fsn_res = state.transport.resolution.get(Field::FrameSN); if i_fsn_res > m_fsn_res { let e = zerror!( "Invalid FrameSN resolution on {}: {:?} > {:?}", - self.link, + link, i_fsn_res, m_fsn_res ); @@ -241,12 +263,12 @@ impl<'a> OpenFsm for OpenLink<'a> { // Request ID let i_rid_res = init_ack.resolution.get(Field::RequestID); - let m_rid_res = state.zenoh.resolution.get(Field::RequestID); + let m_rid_res = state.transport.resolution.get(Field::RequestID); if i_rid_res > m_rid_res { let e = zerror!( "Invalid RequestID resolution on {}: {:?} > {:?}", - self.link, + link, i_rid_res, m_rid_res ); @@ -259,17 +281,11 @@ impl<'a> OpenFsm for OpenLink<'a> { }; // Compute the minimum batch size - state.zenoh.batch_size = state.zenoh.batch_size.min(init_ack.batch_size); + state.transport.batch_size = state.transport.batch_size.min(init_ack.batch_size); // Extension QoS self.ext_qos - .recv_init_ack((&mut state.ext_qos, init_ack.ext_qos)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - self.ext_lowlatency - .recv_init_ack((&mut state.ext_lowlatency, init_ack.ext_lowlatency)) + .recv_init_ack((&mut state.transport.ext_qos, init_ack.ext_qos)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -277,21 +293,34 @@ impl<'a> OpenFsm for OpenLink<'a> { #[cfg(feature = "shared-memory")] let shm_challenge = self .ext_shm - .recv_init_ack((&mut 
state.ext_shm, init_ack.ext_shm)) + .recv_init_ack((&mut state.transport.ext_shm, init_ack.ext_shm)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Auth #[cfg(feature = "transport_auth")] self.ext_auth - .recv_init_ack((&mut state.ext_auth, init_ack.ext_auth)) + .recv_init_ack((&mut state.link.ext_auth, init_ack.ext_auth)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension MultiLink #[cfg(feature = "transport_multilink")] self.ext_mlink - .recv_init_ack((&mut state.ext_mlink, init_ack.ext_mlink)) + .recv_init_ack((&mut state.transport.ext_mlink, init_ack.ext_mlink)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension LowLatency + self.ext_lowlatency + .recv_init_ack((&mut state.transport.ext_lowlatency, init_ack.ext_lowlatency)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + #[cfg(feature = "transport_compression")] + self.ext_compression + .recv_init_ack((&mut state.link.ext_compression, init_ack.ext_compression)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -305,25 +334,18 @@ impl<'a> OpenFsm for OpenLink<'a> { Ok(output) } - type SendOpenSynIn = (&'a mut State, SendOpenSynIn); + type SendOpenSynIn = (&'a mut TransportLinkUnicast, &'a mut State, SendOpenSynIn); type SendOpenSynOut = SendOpenSynOut; async fn send_open_syn( - &self, + self, input: Self::SendOpenSynIn, ) -> Result { - let (state, input) = input; + let (link, state, input) = input; // Extension QoS let ext_qos = self .ext_qos - .send_open_syn(&state.ext_qos) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - let ext_lowlatency = self - .ext_lowlatency - .send_open_syn(&state.ext_lowlatency) + .send_open_syn(&state.transport.ext_qos) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -331,7 +353,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_shm = zcondfeat!( "shared-memory", self.ext_shm - .send_open_syn((&state.ext_shm, 
input.ext_shm)) + .send_open_syn((&state.transport.ext_shm, input.ext_shm)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -341,7 +363,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_auth = zcondfeat!( "transport_auth", self.ext_auth - .send_open_syn(&state.ext_auth) + .send_open_syn(&state.link.ext_auth) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None @@ -351,14 +373,32 @@ impl<'a> OpenFsm for OpenLink<'a> { let ext_mlink = zcondfeat!( "transport_multilink", self.ext_mlink - .send_open_syn(&state.ext_mlink) + .send_open_syn(&state.transport.ext_mlink) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None + ); + + // Extension LowLatency + let ext_lowlatency = self + .ext_lowlatency + .send_open_syn(&state.transport.ext_lowlatency) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension Compression + let ext_compression = zcondfeat!( + "transport_compression", + self.ext_compression + .send_open_syn(&state.link.ext_compression) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, None ); // Build and send an OpenSyn message - let mine_initial_sn = compute_sn(input.mine_zid, input.other_zid, state.zenoh.resolution); + let mine_initial_sn = + compute_sn(input.mine_zid, input.other_zid, state.transport.resolution); let message: TransportMessage = OpenSyn { lease: input.mine_lease, initial_sn: mine_initial_sn, @@ -368,11 +408,11 @@ impl<'a> OpenFsm for OpenLink<'a> { ext_auth, ext_mlink, ext_lowlatency, + ext_compression, } .into(); - let _ = self - .link + let _ = link .send(&message) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -381,14 +421,15 @@ impl<'a> OpenFsm for OpenLink<'a> { Ok(output) } - type RecvOpenAckIn = &'a mut State; + type RecvOpenAckIn = (&'a mut TransportLinkUnicast, &'a mut State); type RecvOpenAckOut = RecvOpenAckOut; async fn recv_open_ack( - &self, - state: Self::RecvOpenAckIn, + self, + input: Self::RecvOpenAckIn, ) -> Result { - let msg = self - .link + let 
(link, state) = input; + + let msg = link .recv() .await .map_err(|e| (e, Some(close::reason::INVALID)))?; @@ -399,7 +440,7 @@ impl<'a> OpenFsm for OpenLink<'a> { let e = zerror!( "Received a close message (reason {}) in response to an OpenSyn on: {:?}", close::reason_to_str(reason), - self.link, + link, ); match reason { close::reason::MAX_LINKS => log::debug!("{}", e), @@ -410,7 +451,7 @@ impl<'a> OpenFsm for OpenLink<'a> { _ => { let e = zerror!( "Received an invalid message in response to an OpenSyn on {}: {:?}", - self.link, + link, msg.body ); log::error!("{}", e); @@ -420,34 +461,41 @@ impl<'a> OpenFsm for OpenLink<'a> { // Extension QoS self.ext_qos - .recv_open_ack((&mut state.ext_qos, open_ack.ext_qos)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - - // Extension LowLatency - self.ext_lowlatency - .recv_open_ack((&mut state.ext_lowlatency, open_ack.ext_lowlatency)) + .recv_open_ack((&mut state.transport.ext_qos, open_ack.ext_qos)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm #[cfg(feature = "shared-memory")] self.ext_shm - .recv_open_ack((&mut state.ext_shm, open_ack.ext_shm)) + .recv_open_ack((&mut state.transport.ext_shm, open_ack.ext_shm)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Auth #[cfg(feature = "transport_auth")] self.ext_auth - .recv_open_ack((&mut state.ext_auth, open_ack.ext_auth)) + .recv_open_ack((&mut state.link.ext_auth, open_ack.ext_auth)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension MultiLink #[cfg(feature = "transport_multilink")] self.ext_mlink - .recv_open_ack((&mut state.ext_mlink, open_ack.ext_mlink)) + .recv_open_ack((&mut state.transport.ext_mlink, open_ack.ext_mlink)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // Extension LowLatency + self.ext_lowlatency + .recv_open_ack((&mut state.transport.ext_lowlatency, open_ack.ext_lowlatency)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + + // 
Extension Compression + #[cfg(feature = "transport_compression")] + self.ext_compression + .recv_open_ack((&mut state.link.ext_compression, open_ack.ext_compression)) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; @@ -460,11 +508,17 @@ impl<'a> OpenFsm for OpenLink<'a> { } pub(crate) async fn open_link( - link: &LinkUnicast, + link: LinkUnicast, manager: &TransportManager, ) -> ZResult { - let fsm = OpenLink { - link, + let config = TransportLinkUnicastConfig { + direction: TransportLinkUnicastDirection::Outbound, + mtu: link.get_mtu(), + #[cfg(feature = "transport_compression")] + is_compression: false, // Perform the exchange Init/Open exchange with no compression + }; + let mut link = TransportLinkUnicast::new(link, config); + let mut fsm = OpenLink { ext_qos: ext::qos::QoSFsm::new(), #[cfg(feature = "transport_multilink")] ext_mlink: manager.state.unicast.multilink.fsm(&manager.prng), @@ -473,29 +527,43 @@ pub(crate) async fn open_link( #[cfg(feature = "transport_auth")] ext_auth: manager.state.unicast.authenticator.fsm(&manager.prng), ext_lowlatency: ext::lowlatency::LowLatencyFsm::new(), + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::CompressionFsm::new(), }; let mut state = State { - zenoh: StateZenoh { - batch_size: manager.config.batch_size.min(batch_size::UNICAST), + transport: StateTransport { + batch_size: manager + .config + .batch_size + .min(batch_size::UNICAST) + .min(link.config.mtu), resolution: manager.config.resolution, + ext_qos: ext::qos::StateOpen::new(manager.config.unicast.is_qos), + #[cfg(feature = "transport_multilink")] + ext_mlink: manager + .state + .unicast + .multilink + .open(manager.config.unicast.max_links > 1), + #[cfg(feature = "shared-memory")] + ext_shm: ext::shm::StateOpen::new(manager.config.unicast.is_shm), + + ext_lowlatency: ext::lowlatency::StateOpen::new(manager.config.unicast.is_lowlatency), + }, + #[cfg(any(feature = "transport_auth", feature = "transport_compression"))] + 
link: StateLink { + #[cfg(feature = "transport_auth")] + ext_auth: manager + .state + .unicast + .authenticator + .open(&mut *zasynclock!(manager.prng)), + #[cfg(feature = "transport_compression")] + ext_compression: ext::compression::StateOpen::new( + manager.config.unicast.is_compression, + ), }, - ext_qos: ext::qos::StateOpen::new(manager.config.unicast.is_qos), - #[cfg(feature = "transport_multilink")] - ext_mlink: manager - .state - .unicast - .multilink - .open(manager.config.unicast.max_links > 1), - #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::StateOpen::new(manager.config.unicast.is_shm), - #[cfg(feature = "transport_auth")] - ext_auth: manager - .state - .unicast - .authenticator - .open(&mut *zasynclock!(manager.prng)), - ext_lowlatency: ext::lowlatency::StateOpen::new(manager.config.unicast.is_lowlatency), }; // Init handshake @@ -504,7 +572,7 @@ pub(crate) async fn open_link( match $s { Ok(output) => output, Err((e, reason)) => { - close_link(link, reason).await; + let _ = link.close(reason).await; return Err(e); } } @@ -516,9 +584,9 @@ pub(crate) async fn open_link( mine_zid: manager.config.zid, mine_whatami: manager.config.whatami, }; - step!(fsm.send_init_syn((&mut state, isyn_in)).await); + step!(fsm.send_init_syn((&mut link, &mut state, isyn_in)).await); - let iack_out = step!(fsm.recv_init_ack(&mut state).await); + let iack_out = step!(fsm.recv_init_ack((&mut link, &mut state)).await); // Open handshake let osyn_in = SendOpenSynIn { @@ -529,29 +597,33 @@ pub(crate) async fn open_link( #[cfg(feature = "shared-memory")] ext_shm: iack_out.ext_shm, }; - let osyn_out = step!(fsm.send_open_syn((&mut state, osyn_in)).await); + let osyn_out = step!(fsm.send_open_syn((&mut link, &mut state, osyn_in)).await); - let oack_out = step!(fsm.recv_open_ack(&mut state).await); + let oack_out = step!(fsm.recv_open_ack((&mut link, &mut state)).await); // Initialize the transport let config = TransportConfigUnicast { zid: iack_out.other_zid, whatami: 
iack_out.other_whatami, - sn_resolution: state.zenoh.resolution.get(Field::FrameSN), + sn_resolution: state.transport.resolution.get(Field::FrameSN), tx_initial_sn: osyn_out.mine_initial_sn, - is_qos: state.ext_qos.is_qos(), + is_qos: state.transport.ext_qos.is_qos(), #[cfg(feature = "transport_multilink")] - multilink: state.ext_mlink.multilink(), + multilink: state.transport.ext_mlink.multilink(), #[cfg(feature = "shared-memory")] - is_shm: state.ext_shm.is_shm(), - is_lowlatency: state.ext_lowlatency.is_lowlatency(), + is_shm: state.transport.ext_shm.is_shm(), + is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), }; - let transport = step!( - manager - .init_transport_unicast(config, link.clone(), LinkUnicastDirection::Outbound) - .await - ); + let o_config = TransportLinkUnicastConfig { + mtu: state.transport.batch_size, + direction: TransportLinkUnicastDirection::Outbound, + #[cfg(feature = "transport_compression")] + is_compression: state.link.ext_compression.is_compression(), + }; + let o_link = TransportLinkUnicast::new(link.link.clone(), o_config); + let s_link = format!("{:?}", o_link); + let transport = step!(manager.init_transport_unicast(config, o_link).await); // Sync the RX sequence number let _ = step!(transport @@ -563,21 +635,19 @@ pub(crate) async fn open_link( let output = InputFinalize { transport, other_lease: oack_out.other_lease, - agreed_batch_size: state.zenoh.batch_size, }; let transport = output.transport.clone(); - let res = finalize_transport(link, manager, output).await; + let res = finalize_transport(&link, manager, output).await; if let Err(e) = res { let _ = transport.close().await; return Err(e); } log::debug!( - "New transport link opened from {} to {}: {}. 
Batch size: {}.", + "New transport link opened from {} to {}: {}.", manager.config.zid, iack_out.other_zid, - link, - state.zenoh.batch_size, + s_link, ); Ok(transport) diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs new file mode 100644 index 0000000000..afc12bc87d --- /dev/null +++ b/io/zenoh-transport/src/unicast/link.rs @@ -0,0 +1,275 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::common::batch::{BatchConfig, Decode, Encode, Finalize, RBatch, WBatch}; +use std::fmt; +use std::sync::Arc; +#[cfg(feature = "transport_compression")] +use zenoh_buffers::BBuf; +use zenoh_buffers::{ZSlice, ZSliceBuffer}; +use zenoh_link::{Link, LinkUnicast}; +use zenoh_protocol::transport::{BatchSize, Close, TransportMessage}; +use zenoh_result::{zerror, ZResult}; + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub(crate) enum TransportLinkUnicastDirection { + Inbound, + Outbound, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub(crate) struct TransportLinkUnicastConfig { + // Inbound / outbound + pub(crate) direction: TransportLinkUnicastDirection, + // MTU + pub(crate) mtu: BatchSize, + // Compression is active on the link + #[cfg(feature = "transport_compression")] + pub(crate) is_compression: bool, +} + +#[derive(Clone, PartialEq, Eq)] +pub(crate) struct TransportLinkUnicast { + pub(crate) link: LinkUnicast, + pub(crate) config: TransportLinkUnicastConfig, +} + +impl TransportLinkUnicast { + pub(crate) fn new(link: LinkUnicast, mut config: TransportLinkUnicastConfig) -> Self { + config.mtu = 
link.get_mtu().min(config.mtu); + Self { link, config } + } + + const fn batch_config(&self) -> BatchConfig { + BatchConfig { + mtu: self.config.mtu, + #[cfg(feature = "transport_compression")] + is_compression: self.config.is_compression, + } + } + + pub(crate) fn tx(&self) -> TransportLinkUnicastTx { + TransportLinkUnicastTx { + inner: self.clone(), + #[cfg(feature = "transport_compression")] + buffer: self.config.is_compression.then_some(BBuf::with_capacity( + lz4_flex::block::get_maximum_output_size(self.config.mtu as usize), + )), + } + } + + pub(crate) fn rx(&self) -> TransportLinkUnicastRx { + TransportLinkUnicastRx { + inner: self.clone(), + } + } + + pub(crate) async fn send(&self, msg: &TransportMessage) -> ZResult { + let mut link = self.tx(); + link.send(msg).await + } + + pub(crate) async fn recv(&self) -> ZResult { + let mut link = self.rx(); + link.recv().await + } + + pub(crate) async fn close(&self, reason: Option) -> ZResult<()> { + if let Some(reason) = reason { + // Build the close message + let message: TransportMessage = Close { + reason, + session: false, + } + .into(); + // Send the close message on the link + let _ = self.send(&message).await; + } + self.link.close().await + } +} + +impl fmt::Display for TransportLinkUnicast { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.link) + } +} + +impl fmt::Debug for TransportLinkUnicast { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TransportLinkUnicast") + .field("link", &self.link) + .field("config", &self.config) + .finish() + } +} + +impl From<&TransportLinkUnicast> for Link { + fn from(link: &TransportLinkUnicast) -> Self { + Link::from(&link.link) + } +} + +impl From for Link { + fn from(link: TransportLinkUnicast) -> Self { + Link::from(link.link) + } +} + +pub(crate) struct TransportLinkUnicastTx { + pub(crate) inner: TransportLinkUnicast, + #[cfg(feature = "transport_compression")] + pub(crate) buffer: Option, +} + 
+impl TransportLinkUnicastTx { + pub(crate) async fn send_batch(&mut self, batch: &mut WBatch) -> ZResult<()> { + const ERR: &str = "Write error on link: "; + + // log::trace!("WBatch: {:?}", batch); + + let res = batch + .finalize( + #[cfg(feature = "transport_compression")] + self.buffer.as_mut(), + ) + .map_err(|_| zerror!("{ERR}{self}"))?; + + let bytes = match res { + Finalize::Batch => batch.as_slice(), + #[cfg(feature = "transport_compression")] + Finalize::Buffer => self + .buffer + .as_ref() + .ok_or_else(|| zerror!("Invalid buffer finalization"))? + .as_slice(), + }; + + // log::trace!("WBytes: {:02x?}", bytes); + + // Send the message on the link + if self.inner.link.is_streamed() { + let len: BatchSize = bytes + .len() + .try_into() + .map_err(|_| zerror!("Invalid batch length"))?; + let len = len.to_le_bytes(); + self.inner.link.write_all(&len).await?; + } + self.inner.link.write_all(bytes).await?; + + Ok(()) + } + + pub(crate) async fn send(&mut self, msg: &TransportMessage) -> ZResult { + const ERR: &str = "Write error on link: "; + + // Create the batch for serializing the message + let mut batch = WBatch::new(self.inner.batch_config()); + batch.encode(msg).map_err(|_| zerror!("{ERR}{self}"))?; + let len = batch.len() as usize; + self.send_batch(&mut batch).await?; + Ok(len) + } +} + +impl fmt::Display for TransportLinkUnicastTx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} + +impl fmt::Debug for TransportLinkUnicastTx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut s = f.debug_struct("TransportLinkUnicastRx"); + s.field("link", &self.inner.link) + .field("config", &self.inner.config); + #[cfg(feature = "transport_compression")] + { + s.field("buffer", &self.buffer.as_ref().map(|b| b.capacity())); + } + s.finish() + } +} + +pub(crate) struct TransportLinkUnicastRx { + pub(crate) inner: TransportLinkUnicast, +} + +impl TransportLinkUnicastRx { + pub async fn 
recv_batch(&mut self, buff: C) -> ZResult + where + C: Fn() -> T + Copy, + T: ZSliceBuffer + 'static, + { + const ERR: &str = "Read error from link: "; + + let mut into = (buff)(); + let end = if self.inner.link.is_streamed() { + // Read and decode the message length + let mut len = BatchSize::MIN.to_le_bytes(); + self.inner.link.read_exact(&mut len).await?; + let len = BatchSize::from_le_bytes(len) as usize; + + // Read the bytes + let slice = into + .as_mut_slice() + .get_mut(..len) + .ok_or_else(|| zerror!("{ERR}{self}. Invalid batch length or buffer size."))?; + self.inner.link.read_exact(slice).await?; + len + } else { + // Read the bytes + self.inner.link.read(into.as_mut_slice()).await? + }; + + // log::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); + + let buffer = ZSlice::make(Arc::new(into), 0, end) + .map_err(|_| zerror!("{ERR}{self}. ZSlice index(es) out of bounds"))?; + let mut batch = RBatch::new(self.inner.batch_config(), buffer); + batch + .initialize(buff) + .map_err(|e| zerror!("{ERR}{self}. 
{e}."))?; + + // log::trace!("RBatch: {:?}", batch); + + Ok(batch) + } + + pub async fn recv(&mut self) -> ZResult { + let mtu = self.inner.config.mtu as usize; + let mut batch = self + .recv_batch(|| zenoh_buffers::vec::uninit(mtu).into_boxed_slice()) + .await?; + let msg = batch + .decode() + .map_err(|_| zerror!("Decode error on link: {}", self))?; + Ok(msg) + } +} + +impl fmt::Display for TransportLinkUnicastRx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} + +impl fmt::Debug for TransportLinkUnicastRx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TransportLinkUnicastRx") + .field("link", &self.inner.link) + .field("config", &self.inner.config) + .finish() + } +} diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 111936cb95..437e9c4fa4 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -14,29 +14,25 @@ use super::transport::TransportUnicastLowlatency; #[cfg(feature = "stats")] use crate::stats::TransportStats; -use crate::TransportExecutor; +use crate::{unicast::link::TransportLinkUnicast, TransportExecutor}; use async_std::task; use async_std::{prelude::FutureExt, sync::RwLock}; -use zenoh_codec::*; -use zenoh_core::{zasyncread, zasyncwrite}; - use std::sync::Arc; use std::time::Duration; use zenoh_buffers::{writer::HasWriter, ZSlice}; -use zenoh_link::LinkUnicast; -use zenoh_protocol::transport::{ - BatchSize, KeepAlive, TransportBodyLowLatency, TransportMessageLowLatency, -}; +use zenoh_codec::*; +use zenoh_core::{zasyncread, zasyncwrite}; +use zenoh_protocol::transport::{KeepAlive, TransportBodyLowLatency, TransportMessageLowLatency}; use zenoh_result::{zerror, ZResult}; use zenoh_sync::RecyclingObjectPool; pub(crate) async fn send_with_link( - link: &LinkUnicast, + link: &TransportLinkUnicast, msg: TransportMessageLowLatency, 
#[cfg(feature = "stats")] stats: &Arc, ) -> ZResult<()> { let len; - if link.is_streamed() { + if link.link.is_streamed() { let mut buffer = vec![0, 0, 0, 0]; let codec = Zenoh080::new(); let mut writer = buffer.writer(); @@ -49,7 +45,7 @@ pub(crate) async fn send_with_link( buffer[0..4].copy_from_slice(&le); - link.write_all(&buffer).await?; + link.link.write_all(&buffer).await?; } else { let mut buffer = vec![]; let codec = Zenoh080::new(); @@ -62,7 +58,7 @@ pub(crate) async fn send_with_link( { len = buffer.len() as u32; } - link.write_all(&buffer).await?; + link.link.write_all(&buffer).await?; } log::trace!("Sent: {:?}", msg); @@ -131,7 +127,7 @@ impl TransportUnicastLowlatency { } } - pub(super) fn internal_start_rx(&self, lease: Duration, batch_size: u16) { + pub(super) fn internal_start_rx(&self, lease: Duration) { let mut guard = async_std::task::block_on(async { zasyncwrite!(self.handle_rx) }); let c_transport = self.clone(); let handle = task::spawn(async move { @@ -141,7 +137,7 @@ impl TransportUnicastLowlatency { let rx_buffer_size = c_transport.manager.config.link_rx_buffer_size; // Start the rx task - let res = rx_task(link, c_transport.clone(), lease, batch_size, rx_buffer_size).await; + let res = rx_task(link, c_transport.clone(), lease, rx_buffer_size).await; log::debug!( "[{}] Rx task finished with result {:?}", c_transport.manager.config.zid, @@ -177,7 +173,7 @@ impl TransportUnicastLowlatency { /* TASKS */ /*************************************/ async fn keepalive_task( - link: Arc>, + link: Arc>, keep_alive: Duration, #[cfg(feature = "stats")] stats: Arc, ) -> ZResult<()> { @@ -201,27 +197,26 @@ async fn keepalive_task( } async fn rx_task_stream( - link: LinkUnicast, + link: TransportLinkUnicast, transport: TransportUnicastLowlatency, lease: Duration, - rx_batch_size: BatchSize, rx_buffer_size: usize, ) -> ZResult<()> { - async fn read(link: &LinkUnicast, buffer: &mut [u8]) -> ZResult { + async fn read(link: &TransportLinkUnicast, buffer: &mut 
[u8]) -> ZResult { // 16 bits for reading the batch length let mut length = [0_u8, 0_u8, 0_u8, 0_u8]; - link.read_exact(&mut length).await?; + link.link.read_exact(&mut length).await?; let n = u32::from_le_bytes(length) as usize; let len = buffer.len(); let b = buffer.get_mut(0..n).ok_or_else(|| { zerror!("Batch len is invalid. Received {n} but negotiated max len is {len}.") })?; - link.read_exact(b).await?; + link.link.read_exact(b).await?; Ok(n) } // The pool of buffers - let mtu = link.get_mtu().min(rx_batch_size) as usize; + let mtu = link.config.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; @@ -242,19 +237,18 @@ async fn rx_task_stream( // Deserialize all the messages from the current ZBuf let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); - transport.read_messages(zslice, &link).await?; + transport.read_messages(zslice, &link.link).await?; } } async fn rx_task_dgram( - link: LinkUnicast, + link: TransportLinkUnicast, transport: TransportUnicastLowlatency, lease: Duration, - rx_batch_size: BatchSize, rx_buffer_size: usize, ) -> ZResult<()> { // The pool of buffers - let mtu = link.get_mtu().min(rx_batch_size) as usize; + let mtu = link.config.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; @@ -266,30 +260,31 @@ async fn rx_task_dgram( let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); // Async read from the underlying link - let bytes = - link.read(&mut buffer).timeout(lease).await.map_err(|_| { - zerror!("{}: expired after {} milliseconds", link, lease.as_millis()) - })??; + let bytes = link + .link + .read(&mut buffer) + .timeout(lease) + .await + .map_err(|_| zerror!("{}: expired after {} milliseconds", link, lease.as_millis()))??; #[cfg(feature = "stats")] transport.stats.inc_rx_bytes(bytes); // Deserialize all the messages from the current ZBuf let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); - transport.read_messages(zslice, 
&link).await?; + transport.read_messages(zslice, &link.link).await?; } } async fn rx_task( - link: LinkUnicast, + link: TransportLinkUnicast, transport: TransportUnicastLowlatency, lease: Duration, - rx_batch_size: u16, rx_buffer_size: usize, ) -> ZResult<()> { - if link.is_streamed() { - rx_task_stream(link, transport, lease, rx_batch_size, rx_buffer_size).await + if link.link.is_streamed() { + rx_task_stream(link, transport, lease, rx_buffer_size).await } else { - rx_task_dgram(link, transport, lease, rx_batch_size, rx_buffer_size).await + rx_task_dgram(link, transport, lease, rx_buffer_size).await } } diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index ea97aa143b..d2d64a0310 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -15,10 +15,13 @@ use super::link::send_with_link; #[cfg(feature = "stats")] use crate::stats::TransportStats; -use crate::transport_unicast_inner::TransportUnicastTrait; -use crate::TransportConfigUnicast; -use crate::TransportManager; -use crate::{TransportExecutor, TransportPeerEventHandler}; +use crate::{ + unicast::{ + link::TransportLinkUnicast, transport_unicast_inner::TransportUnicastTrait, + TransportConfigUnicast, + }, + TransportExecutor, TransportManager, TransportPeerEventHandler, +}; use async_executor::Task; #[cfg(feature = "transport_unixpipe")] use async_std::sync::RwLockUpgradableReadGuard; @@ -29,17 +32,19 @@ use std::sync::{Arc, RwLock as SyncRwLock}; use std::time::Duration; #[cfg(feature = "transport_unixpipe")] use zenoh_core::zasyncread_upgradable; -use zenoh_core::{zasynclock, zasyncread, zread, zwrite}; +use zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; #[cfg(feature = "transport_unixpipe")] use zenoh_link::unixpipe::UNIXPIPE_LOCATOR_PREFIX; #[cfg(feature = "transport_unixpipe")] use zenoh_link::Link; -use zenoh_link::{LinkUnicast, 
LinkUnicastDirection}; -use zenoh_protocol::core::{WhatAmI, ZenohId}; use zenoh_protocol::network::NetworkMessage; use zenoh_protocol::transport::TransportBodyLowLatency; use zenoh_protocol::transport::TransportMessageLowLatency; use zenoh_protocol::transport::{Close, TransportSn}; +use zenoh_protocol::{ + core::{WhatAmI, ZenohId}, + transport::close, +}; #[cfg(not(feature = "transport_unixpipe"))] use zenoh_result::bail; use zenoh_result::{zerror, ZResult}; @@ -54,7 +59,7 @@ pub(crate) struct TransportUnicastLowlatency { // Transport config pub(super) config: TransportConfigUnicast, // The link associated to the transport - pub(super) link: Arc>, + pub(super) link: Arc>, // The callback pub(super) callback: Arc>>>, // Mutex for notification @@ -72,7 +77,7 @@ impl TransportUnicastLowlatency { pub fn make( manager: TransportManager, config: TransportConfigUnicast, - link: LinkUnicast, + link: TransportLinkUnicast, ) -> ZResult { #[cfg(feature = "stats")] let stats = Arc::new(TransportStats::new(Some(manager.get_stats().clone()))); @@ -137,7 +142,9 @@ impl TransportUnicastLowlatency { // Close and drop the link self.stop_keepalive().await; self.stop_rx().await; - let _ = zasyncread!(self.link).close().await; + let _ = zasyncwrite!(self.link) + .close(Some(close::reason::GENERIC)) + .await; // Notify the callback that we have closed the transport if let Some(cb) = callback.as_ref() { @@ -162,7 +169,7 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { zasynclock!(self.alive) } - fn get_links(&self) -> Vec { + fn get_links(&self) -> Vec { let guard = async_std::task::block_on(async { zasyncread!(self.link) }); [guard.clone()].to_vec() } @@ -206,24 +213,23 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { fn start_tx( &self, - _link: &LinkUnicast, + _link: &TransportLinkUnicast, executor: &TransportExecutor, keep_alive: Duration, - _batch_size: u16, ) -> ZResult<()> { self.start_keepalive(executor, keep_alive); Ok(()) } - fn start_rx(&self, 
_link: &LinkUnicast, lease: Duration, batch_size: u16) -> ZResult<()> { - self.internal_start_rx(lease, batch_size); + fn start_rx(&self, _link: &TransportLinkUnicast, lease: Duration) -> ZResult<()> { + self.internal_start_rx(lease); Ok(()) } /*************************************/ /* LINK */ /*************************************/ - async fn add_link(&self, link: LinkUnicast, _direction: LinkUnicastDirection) -> ZResult<()> { + async fn add_link(&self, link: TransportLinkUnicast) -> ZResult<()> { log::trace!("Adding link: {}", link); #[cfg(not(feature = "transport_unixpipe"))] @@ -237,8 +243,9 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { { let guard = zasyncread_upgradable!(self.link); - let existing_unixpipe = guard.get_dst().protocol().as_str() == UNIXPIPE_LOCATOR_PREFIX; - let new_unixpipe = link.get_dst().protocol().as_str() == UNIXPIPE_LOCATOR_PREFIX; + let existing_unixpipe = + guard.link.get_dst().protocol().as_str() == UNIXPIPE_LOCATOR_PREFIX; + let new_unixpipe = link.link.get_dst().protocol().as_str() == UNIXPIPE_LOCATOR_PREFIX; match (existing_unixpipe, new_unixpipe) { (false, true) => { // LowLatency transport suports only a single link, but code here also handles upgrade from non-unixpipe link to unixpipe link! 
@@ -308,7 +315,7 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, link: &LinkUnicast, reason: u8) -> ZResult<()> { + async fn close_link(&self, link: &TransportLinkUnicast, reason: u8) -> ZResult<()> { log::trace!("Closing link {} with peer: {}", link, self.config.zid); self.finalize(reason).await } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index d7d79d5387..da064e8f5b 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -18,17 +18,20 @@ use crate::unicast::establishment::ext::auth::Auth; #[cfg(feature = "transport_multilink")] use crate::unicast::establishment::ext::multilink::MultiLink; use crate::{ - lowlatency::transport::TransportUnicastLowlatency, - transport_unicast_inner::TransportUnicastTrait, - unicast::{TransportConfigUnicast, TransportUnicast}, - universal::transport::TransportUnicastUniversal, + unicast::{ + link::TransportLinkUnicast, lowlatency::transport::TransportUnicastLowlatency, + transport_unicast_inner::TransportUnicastTrait, + universal::transport::TransportUnicastUniversal, TransportConfigUnicast, TransportUnicast, + }, TransportManager, }; use async_std::{prelude::FutureExt, sync::Mutex, task}; use std::{collections::HashMap, sync::Arc, time::Duration}; +#[cfg(feature = "transport_compression")] +use zenoh_config::CompressionUnicastConf; #[cfg(feature = "shared-memory")] use zenoh_config::SharedMemoryConf; -use zenoh_config::{Config, LinkTxConf, QoSConf, TransportUnicastConf}; +use zenoh_config::{Config, LinkTxConf, QoSUnicastConf, TransportUnicastConf}; use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; use zenoh_link::*; @@ -53,8 +56,8 @@ pub struct TransportManagerConfigUnicast { pub max_links: usize, #[cfg(feature = "shared-memory")] pub is_shm: bool, - #[cfg(all(feature 
= "unstable", feature = "transport_compression"))] - pub is_compressed: bool, + #[cfg(feature = "transport_compression")] + pub is_compression: bool, } pub struct TransportManagerStateUnicast { @@ -96,11 +99,11 @@ pub struct TransportManagerBuilderUnicast { pub(super) max_links: usize, #[cfg(feature = "shared-memory")] pub(super) is_shm: bool, - #[cfg(feature = "transport_compression")] - pub(super) is_compressed: bool, #[cfg(feature = "transport_auth")] pub(super) authenticator: Auth, pub(super) is_lowlatency: bool, + #[cfg(feature = "transport_compression")] + pub(super) is_compression: bool, } impl TransportManagerBuilderUnicast { @@ -157,9 +160,9 @@ impl TransportManagerBuilderUnicast { self } - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - pub fn compression(mut self, is_compressed: bool) -> Self { - self.is_compressed = is_compressed; + #[cfg(feature = "transport_compression")] + pub fn compression(mut self, is_compression: bool) -> Self { + self.is_compression = is_compression; self } @@ -173,7 +176,7 @@ impl TransportManagerBuilderUnicast { )); self = self.accept_pending(*config.transport().unicast().accept_pending()); self = self.max_sessions(*config.transport().unicast().max_sessions()); - self = self.qos(*config.transport().qos().enabled()); + self = self.qos(*config.transport().unicast().qos().enabled()); self = self.lowlatency(*config.transport().unicast().lowlatency()); #[cfg(feature = "transport_multilink")] @@ -188,6 +191,10 @@ impl TransportManagerBuilderUnicast { { self = self.authenticator(Auth::from_config(config).await?); } + #[cfg(feature = "transport_compression")] + { + self = self.compression(*config.transport().unicast().compression().enabled()); + } Ok(self) } @@ -211,9 +218,9 @@ impl TransportManagerBuilderUnicast { max_links: self.max_links, #[cfg(feature = "shared-memory")] is_shm: self.is_shm, - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - is_compressed: self.is_compressed, 
is_lowlatency: self.is_lowlatency, + #[cfg(feature = "transport_compression")] + is_compression: self.is_compression, }; let state = TransportManagerStateUnicast { @@ -238,9 +245,11 @@ impl Default for TransportManagerBuilderUnicast { fn default() -> Self { let transport = TransportUnicastConf::default(); let link_tx = LinkTxConf::default(); - let qos = QoSConf::default(); + let qos = QoSUnicastConf::default(); #[cfg(feature = "shared-memory")] let shm = SharedMemoryConf::default(); + #[cfg(feature = "transport_compression")] + let compression = CompressionUnicastConf::default(); Self { lease: Duration::from_millis(*link_tx.lease()), @@ -253,11 +262,11 @@ impl Default for TransportManagerBuilderUnicast { max_links: *transport.max_links(), #[cfg(feature = "shared-memory")] is_shm: *shm.enabled(), - #[cfg(feature = "transport_compression")] - is_compressed: false, #[cfg(feature = "transport_auth")] authenticator: Auth::default(), is_lowlatency: *transport.lowlatency(), + #[cfg(feature = "transport_compression")] + is_compression: *compression.enabled(), } } } @@ -402,8 +411,7 @@ impl TransportManager { pub(super) async fn init_transport_unicast( &self, config: TransportConfigUnicast, - link: LinkUnicast, - direction: LinkUnicastDirection, + link: TransportLinkUnicast, ) -> Result)> { let mut guard = zasynclock!(self.state.unicast.transports); @@ -426,7 +434,7 @@ impl TransportManager { // Add the link to the transport transport - .add_link(link, direction) + .add_link(link) .await .map_err(|e| (e, Some(close::reason::MAX_LINKS)))?; @@ -462,7 +470,7 @@ impl TransportManager { .map_err(|e| (e, Some(close::reason::INVALID))) .map(|v| Arc::new(v) as Arc)?; // Add the link to the transport - t.add_link(link, direction) + t.add_link(link) .await .map_err(|e| (e, Some(close::reason::MAX_LINKS)))?; t @@ -538,7 +546,7 @@ impl TransportManager { // Create a new link associated by calling the Link Manager let link = manager.new_link(endpoint).await?; // Open the link - 
super::establishment::open::open_link(&link, self).await + super::establishment::open::open_link(link, self).await } pub async fn get_transport_unicast(&self, peer: &ZenohId) -> Option { @@ -587,7 +595,7 @@ impl TransportManager { } // A new link is available - log::trace!("New link waiting... {}", link); + log::trace!("Accepting link... {}", link); *guard += 1; drop(guard); diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index d2a14a0276..3385cbed6a 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // pub mod establishment; +pub(crate) mod link; pub(crate) mod lowlatency; pub(crate) mod manager; pub(crate) mod transport_unicast_inner; @@ -134,7 +135,7 @@ impl TransportUnicast { let link = transport .get_links() .into_iter() - .find(|l| l.get_src() == &link.src && l.get_dst() == &link.dst) + .find(|l| l.link.get_src() == &link.src && l.link.get_dst() == &link.dst) .ok_or_else(|| zerror!("Invalid link"))?; transport.close_link(&link, close::reason::GENERIC).await?; Ok(()) diff --git a/io/zenoh-transport/src/unicast/test_helpers.rs b/io/zenoh-transport/src/unicast/test_helpers.rs index 403384c851..42ed6db927 100644 --- a/io/zenoh-transport/src/unicast/test_helpers.rs +++ b/io/zenoh-transport/src/unicast/test_helpers.rs @@ -11,11 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // - +use crate::{unicast::TransportManagerBuilderUnicast, TransportManager}; use zenoh_core::zcondfeat; -use crate::{TransportManager, TransportManagerBuilderUnicast}; - pub fn make_transport_manager_builder( #[cfg(feature = "transport_multilink")] max_links: usize, #[cfg(feature = "shared-memory")] with_shm: bool, diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index acb6503c30..265607705b 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ 
b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -12,11 +12,13 @@ // ZettaScale Zenoh Team, // -use std::{fmt::DebugStruct, sync::Arc, time::Duration}; - +use crate::{ + unicast::{link::TransportLinkUnicast, TransportConfigUnicast}, + TransportExecutor, TransportPeerEventHandler, +}; use async_std::sync::MutexGuard as AsyncMutexGuard; use async_trait::async_trait; -use zenoh_link::{LinkUnicast, LinkUnicastDirection}; +use std::{fmt::DebugStruct, sync::Arc, time::Duration}; use zenoh_protocol::{ core::{WhatAmI, ZenohId}, network::NetworkMessage, @@ -24,8 +26,6 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{TransportConfigUnicast, TransportExecutor, TransportPeerEventHandler}; - /*************************************/ /* UNICAST TRANSPORT TRAIT */ /*************************************/ @@ -39,7 +39,7 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { fn get_zid(&self) -> ZenohId; fn get_whatami(&self) -> WhatAmI; fn get_callback(&self) -> Option>; - fn get_links(&self) -> Vec; + fn get_links(&self) -> Vec; #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool; fn is_qos(&self) -> bool; @@ -50,7 +50,7 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { /*************************************/ /* LINK */ /*************************************/ - async fn add_link(&self, link: LinkUnicast, direction: LinkUnicastDirection) -> ZResult<()>; + async fn add_link(&self, link: TransportLinkUnicast) -> ZResult<()>; /*************************************/ /* TX */ @@ -58,16 +58,15 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { fn schedule(&self, msg: NetworkMessage) -> ZResult<()>; fn start_tx( &self, - link: &LinkUnicast, + link: &TransportLinkUnicast, executor: &TransportExecutor, keep_alive: Duration, - batch_size: u16, ) -> ZResult<()>; /*************************************/ /* RX */ /*************************************/ - fn start_rx(&self, link: &LinkUnicast, lease: Duration, batch_size: u16) -> ZResult<()>; + 
fn start_rx(&self, link: &TransportLinkUnicast, lease: Duration) -> ZResult<()>; /*************************************/ /* INITIATION */ @@ -77,7 +76,7 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, link: &LinkUnicast, reason: u8) -> ZResult<()>; + async fn close_link(&self, link: &TransportLinkUnicast, reason: u8) -> ZResult<()>; async fn close(&self, reason: u8) -> ZResult<()>; fn add_debug_fields<'a, 'b: 'a, 'c>( diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index c4d19d2b66..3a7eafbc52 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -12,55 +12,33 @@ // ZettaScale Zenoh Team, // use super::transport::TransportUnicastUniversal; -use crate::common::pipeline::{ - TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, - TransmissionPipelineProducer, -}; -use crate::common::priority::TransportPriorityTx; #[cfg(feature = "stats")] use crate::common::stats::TransportStats; -use crate::TransportExecutor; +use crate::{ + common::{ + batch::RBatch, + pipeline::{ + TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, + TransmissionPipelineProducer, + }, + priority::TransportPriorityTx, + }, + unicast::link::{TransportLinkUnicast, TransportLinkUnicastRx, TransportLinkUnicastTx}, + TransportExecutor, +}; use async_std::prelude::FutureExt; use async_std::task; use async_std::task::JoinHandle; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -use std::convert::TryInto; -use std::sync::Arc; -use std::time::Duration; -use zenoh_buffers::ZSlice; -use zenoh_link::{LinkUnicast, LinkUnicastDirection}; -use zenoh_protocol::transport::{BatchSize, KeepAlive, TransportMessage}; -use zenoh_result::{bail, zerror, ZResult}; -use 
zenoh_sync::{RecyclingObjectPool, Signal}; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const HEADER_BYTES_SIZE: usize = 2; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const COMPRESSION_BYTE_INDEX_STREAMED: usize = 2; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const COMPRESSION_BYTE_INDEX: usize = 0; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const COMPRESSION_ENABLED: u8 = 1_u8; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const COMPRESSION_DISABLED: u8 = 0_u8; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const BATCH_PAYLOAD_START_INDEX: usize = 1; - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -const MAX_BATCH_SIZE: usize = u16::MAX as usize; +use std::{sync::Arc, time::Duration}; +use zenoh_buffers::ZSliceBuffer; +use zenoh_protocol::transport::{KeepAlive, TransportMessage}; +use zenoh_result::{zerror, ZResult}; +use zenoh_sync::{RecyclingObject, RecyclingObjectPool, Signal}; #[derive(Clone)] -pub(super) struct TransportLinkUnicast { - // Inbound / outbound - pub(super) direction: LinkUnicastDirection, +pub(super) struct TransportLinkUnicastUniversal { // The underlying link - pub(super) link: LinkUnicast, + pub(super) link: TransportLinkUnicast, // The transmission pipeline pub(super) pipeline: Option, // The transport this link is associated to @@ -71,17 +49,12 @@ pub(super) struct TransportLinkUnicast { handle_rx: Option>>, } -impl TransportLinkUnicast { - pub(super) fn new( - transport: TransportUnicastUniversal, - link: LinkUnicast, - direction: LinkUnicastDirection, - ) -> TransportLinkUnicast { - TransportLinkUnicast { - direction, - transport, +impl TransportLinkUnicastUniversal { + pub(super) fn new(transport: TransportUnicastUniversal, link: TransportLinkUnicast) -> Self { + Self { link, pipeline: None, + transport, handle_tx: None, signal_rx: Signal::new(), 
handle_rx: None, @@ -89,25 +62,23 @@ impl TransportLinkUnicast { } } -impl TransportLinkUnicast { +impl TransportLinkUnicastUniversal { pub(super) fn start_tx( &mut self, executor: &TransportExecutor, keep_alive: Duration, - batch_size: u16, priority_tx: &[TransportPriorityTx], ) { if self.handle_tx.is_none() { let config = TransmissionPipelineConf { - is_streamed: self.link.is_streamed(), - batch_size: batch_size.min(self.link.get_mtu()), + is_streamed: self.link.link.is_streamed(), + #[cfg(feature = "transport_compression")] + is_compression: self.link.config.is_compression, + batch_size: self.link.config.mtu, queue_size: self.transport.manager.config.queue_size, backoff: self.transport.manager.config.queue_backoff, }; - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - let is_compressed = self.transport.manager.config.unicast.is_compressed; - // The pipeline let (producer, consumer) = TransmissionPipeline::make(config, priority_tx); self.pipeline = Some(producer); @@ -118,12 +89,10 @@ impl TransportLinkUnicast { let handle = executor.spawn(async move { let res = tx_task( consumer, - c_link.clone(), + c_link.tx(), keep_alive, #[cfg(feature = "stats")] c_transport.stats.clone(), - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - is_compressed, ) .await; if let Err(e) = res { @@ -143,7 +112,7 @@ impl TransportLinkUnicast { } } - pub(super) fn start_rx(&mut self, lease: Duration, batch_size: u16) { + pub(super) fn start_rx(&mut self, lease: Duration) { if self.handle_rx.is_none() { // Spawn the RX task let c_link = self.link.clone(); @@ -154,11 +123,10 @@ impl TransportLinkUnicast { let handle = task::spawn(async move { // Start the consume task let res = rx_task( - c_link.clone(), + c_link.rx(), c_transport.clone(), lease, c_signal.clone(), - batch_size, c_rx_buffer_size, ) .await; @@ -194,7 +162,7 @@ impl TransportLinkUnicast { handle_tx.await; } - self.link.close().await + self.link.close(None).await } } @@ -203,35 
+171,15 @@ impl TransportLinkUnicast { /*************************************/ async fn tx_task( mut pipeline: TransmissionPipelineConsumer, - link: LinkUnicast, + mut link: TransportLinkUnicastTx, keep_alive: Duration, #[cfg(feature = "stats")] stats: Arc, - #[cfg(all(feature = "unstable", feature = "transport_compression"))] is_compressed: bool, ) -> ZResult<()> { - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - let mut compression_aux_buff: Box<[u8]> = - vec![0; lz4_flex::block::get_maximum_output_size(MAX_BATCH_SIZE)].into_boxed_slice(); - loop { match pipeline.pull().timeout(keep_alive).await { Ok(res) => match res { - Some((batch, priority)) => { - // Send the buffer on the link - #[allow(unused_mut)] - let mut bytes = batch.as_bytes(); - - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - { - let (batch_size, _) = tx_compressed( - is_compressed, - link.is_streamed(), - bytes, - &mut compression_aux_buff, - )?; - bytes = &compression_aux_buff[..batch_size]; - } - - link.write_all(bytes).await?; + Some((mut batch, priority)) => { + link.send_batch(&mut batch).await?; #[cfg(feature = "stats")] { @@ -260,8 +208,8 @@ async fn tx_task( // Drain the transmission pipeline and write remaining bytes on the wire let mut batches = pipeline.drain(); - for (b, _) in batches.drain(..) { - link.write_all(b.as_bytes()) + for (mut b, _) in batches.drain(..) 
{ + link.send_batch(&mut b) .timeout(keep_alive) .await .map_err(|_| zerror!("{}: flush failed after {} ms", link, keep_alive.as_millis()))??; @@ -276,30 +224,31 @@ async fn tx_task( Ok(()) } -async fn rx_task_stream( - link: LinkUnicast, +async fn rx_task( + mut link: TransportLinkUnicastRx, transport: TransportUnicastUniversal, lease: Duration, signal: Signal, - rx_batch_size: BatchSize, rx_buffer_size: usize, ) -> ZResult<()> { enum Action { - Read(usize), + Read(RBatch), Stop, } - async fn read(link: &LinkUnicast, buffer: &mut [u8]) -> ZResult { - // 16 bits for reading the batch length - let mut length = [0_u8, 0_u8]; - link.read_exact(&mut length).await?; - let n = BatchSize::from_le_bytes(length) as usize; - let len = buffer.len(); - let b = buffer.get_mut(0..n).ok_or_else(|| { - zerror!("Batch len is invalid. Received {n} but negotiated max len is {len}.") - })?; - link.read_exact(b).await?; - Ok(Action::Read(n)) + async fn read( + link: &mut TransportLinkUnicastRx, + pool: &RecyclingObjectPool, + ) -> ZResult + where + T: ZSliceBuffer + 'static, + F: Fn() -> T, + RecyclingObject: ZSliceBuffer, + { + let batch = link + .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) + .await?; + Ok(Action::Read(batch)) } async fn stop(signal: Signal) -> ZResult { @@ -308,7 +257,7 @@ async fn rx_task_stream( } // The pool of buffers - let mtu = link.get_mtu().min(rx_batch_size) as usize; + let mtu = link.inner.config.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; @@ -316,393 +265,23 @@ async fn rx_task_stream( let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); while !signal.is_triggered() { - // Retrieve one buffer - let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); // Async read from the underlying link - let action = read(&link, &mut buffer) + let action = read(&mut link, &pool) .race(stop(signal.clone())) .timeout(lease) .await .map_err(|_| zerror!("{}: expired after {} 
milliseconds", link, lease.as_millis()))??; match action { - Action::Read(n) => { + Action::Read(batch) => { #[cfg(feature = "stats")] { transport.stats.inc_rx_bytes(2 + n); // Account for the batch len encoding (16 bits) } - - #[allow(unused_mut)] - let mut end_pos = n; - - #[allow(unused_mut)] - let mut start_pos = 0; - - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - rx_decompress(&mut buffer, &pool, n, &mut start_pos, &mut end_pos)?; - - // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), start_pos, end_pos) - .map_err(|_| zerror!("Read {} bytes but buffer is {} bytes", n, mtu))?; - transport.read_messages(zslice, &link)?; + transport.read_messages(batch, &link.inner)?; } Action::Stop => break, } } - Ok(()) -} -async fn rx_task_dgram( - link: LinkUnicast, - transport: TransportUnicastUniversal, - lease: Duration, - signal: Signal, - rx_batch_size: BatchSize, - rx_buffer_size: usize, -) -> ZResult<()> { - enum Action { - Read(usize), - Stop, - } - - async fn read(link: &LinkUnicast, buffer: &mut [u8]) -> ZResult { - let n = link.read(buffer).await?; - Ok(Action::Read(n)) - } - - async fn stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) - } - - // The pool of buffers - let mtu = link.get_mtu().min(rx_batch_size) as usize; - let mut n = rx_buffer_size / mtu; - if rx_buffer_size % mtu != 0 { - n += 1; - } - let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); - - while !signal.is_triggered() { - // Retrieve one buffer - let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); - // Async read from the underlying link - let action = read(&link, &mut buffer) - .race(stop(signal.clone())) - .timeout(lease) - .await - .map_err(|_| zerror!("{}: expired after {} milliseconds", link, lease.as_millis()))??; - match action { - Action::Read(n) => { - if n == 0 { - // Reading 0 bytes means error - bail!("{}: zero bytes reading", link) - } - - 
#[cfg(feature = "stats")] - { - transport.stats.inc_rx_bytes(n); - } - - #[allow(unused_mut)] - let mut end_pos = n; - - #[allow(unused_mut)] - let mut start_pos = 0; - - #[cfg(all(feature = "unstable", feature = "transport_compression"))] - rx_decompress(&mut buffer, &pool, n, &mut start_pos, &mut end_pos)?; - - // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), start_pos, end_pos) - .map_err(|_| zerror!("Read {} bytes but buffer is {} bytes", n, mtu))?; - transport.read_messages(zslice, &link)?; - } - Action::Stop => break, - } - } Ok(()) } - -async fn rx_task( - link: LinkUnicast, - transport: TransportUnicastUniversal, - lease: Duration, - signal: Signal, - rx_batch_size: u16, - rx_buffer_size: usize, -) -> ZResult<()> { - if link.is_streamed() { - rx_task_stream( - link, - transport, - lease, - signal, - rx_batch_size, - rx_buffer_size, - ) - .await - } else { - rx_task_dgram( - link, - transport, - lease, - signal, - rx_batch_size, - rx_buffer_size, - ) - .await - } -} - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -/// Decompresses the received contents contained in the buffer. -fn rx_decompress( - buffer: &mut zenoh_sync::RecyclingObject>, - pool: &RecyclingObjectPool, impl Fn() -> Box<[u8]>>, - read_bytes: usize, - start_pos: &mut usize, - end_pos: &mut usize, -) -> ZResult<()> { - let is_compressed: bool = buffer[COMPRESSION_BYTE_INDEX] == COMPRESSION_ENABLED; - if is_compressed { - let mut aux_buff = pool.try_take().unwrap_or_else(|| pool.alloc()); - *end_pos = lz4_flex::block::decompress_into( - &buffer[BATCH_PAYLOAD_START_INDEX..read_bytes], - &mut aux_buff, - ) - .map_err(|e| zerror!("Decompression error: {:}", e))?; - *buffer = aux_buff; - } else { - *start_pos = BATCH_PAYLOAD_START_INDEX; - *end_pos = read_bytes; - } - Ok(()) -} - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -/// Compresses the batch into the output buffer. 
-/// -/// If the batch is streamed, the output contains a header of two bytes representing the size of -/// the resulting batch, otherwise it is not included. In any case, an extra byte is added (before -/// the payload and considered in the header value) representing if the batch is compressed or not. -/// If the resulting size of the compression no smaller than the original batch size, then -/// we send the original one. -/// -/// Returns a tuple containing the size of the resulting batch, along with a boolean representing -/// if the batch was indeed compressed or not. -fn tx_compressed( - is_compressed: bool, - is_streamed: bool, - batch: &[u8], - output: &mut [u8], -) -> ZResult<(/*batch_size=*/ usize, /*was_compressed=*/ bool)> { - if is_compressed { - let s_pos = if is_streamed { 3 } else { 1 }; - let payload = &batch[s_pos - 1..]; - let payload_size = payload.len(); - let compression_size = lz4_flex::block::compress_into(payload, &mut output[s_pos..]) - .map_err(|e| zerror!("Compression error: {:}", e))?; - if compression_size >= payload_size { - log::debug!( - "Compression discarded due to the original batch size being smaller than the compressed batch." - ); - return Ok(( - set_uncompressed_batch_header(batch, output, is_streamed)?, - false, - )); - } - Ok(( - set_compressed_batch_header(output, compression_size, is_streamed)?, - true, - )) - } else { - Ok(( - set_uncompressed_batch_header(batch, output, is_streamed)?, - false, - )) - } -} - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -/// Inserts the compresion byte for batches WITH compression. -/// The buffer is expected to contain the compression starting from byte 3 (if streamed) or 1 -/// (if not streamed). -/// -/// Arguments: -/// - buff: the buffer with the compression, with 3 or 1 bytes reserved at the beginning in case of -/// being streamed or not respectively. 
-/// - compression_size: the size of the compression -/// - is_streamed: if the batch is intended to be streamed or not -/// -/// Returns: the size of the compressed batch considering the header. -fn set_compressed_batch_header( - buff: &mut [u8], - compression_size: usize, - is_streamed: bool, -) -> ZResult { - let final_batch_size: usize; - let payload_size = 1 + compression_size; - if is_streamed { - let payload_size_u16: u16 = payload_size.try_into().map_err(|e| { - zerror!( - "Compression error: unable to convert batch size into u16: {}", - e - ) - })?; - buff[0..HEADER_BYTES_SIZE].copy_from_slice(&payload_size_u16.to_le_bytes()); - buff[COMPRESSION_BYTE_INDEX_STREAMED] = COMPRESSION_ENABLED; - final_batch_size = payload_size + HEADER_BYTES_SIZE; - } else { - buff[COMPRESSION_BYTE_INDEX] = COMPRESSION_ENABLED; - final_batch_size = payload_size; - } - if final_batch_size > MAX_BATCH_SIZE { - // May happen when the payload size is itself the MTU and adding the header exceeds it. - Err(zerror!("Failed to send uncompressed batch, batch size ({}) exceeds the maximum batch size of {}.", final_batch_size, MAX_BATCH_SIZE))? - } - Ok(final_batch_size) -} - -#[cfg(all(feature = "unstable", feature = "transport_compression"))] -/// Inserts the compression byte for batches without compression, that is inserting a 0 byte on the -/// third position of the buffer and increasing the batch size from the header by 1. -/// -/// Arguments: -/// - bytes: the source slice -/// - buff: the output slice -/// - is_streamed: if the batch is meant to be streamed or not, thus considering or not the 2 bytes -/// header specifying the size of the batch. -/// -/// Returns: the size of the batch considering the header. 
-fn set_uncompressed_batch_header( - bytes: &[u8], - buff: &mut [u8], - is_streamed: bool, -) -> ZResult { - let final_batch_size: usize; - if is_streamed { - let mut header = [0_u8, 0_u8]; - header[..HEADER_BYTES_SIZE].copy_from_slice(&bytes[..HEADER_BYTES_SIZE]); - let batch_size = if let Some(size) = u16::from_le_bytes(header).checked_add(1) { - size - } else { - bail!("Compression error: unable to convert compression size into u16",) - }; - buff[0..HEADER_BYTES_SIZE].copy_from_slice(&batch_size.to_le_bytes()); - buff[COMPRESSION_BYTE_INDEX_STREAMED] = COMPRESSION_DISABLED; - let batch_size: usize = batch_size.into(); - buff[3..batch_size + 2].copy_from_slice(&bytes[2..batch_size + 1]); - final_batch_size = batch_size + 2; - } else { - buff[COMPRESSION_BYTE_INDEX] = COMPRESSION_DISABLED; - let len = 1 + bytes.len(); - buff[1..1 + bytes.len()].copy_from_slice(bytes); - final_batch_size = len; - } - if final_batch_size > MAX_BATCH_SIZE { - // May happen when the payload size is itself the MTU and adding the header exceeds it. - Err(zerror!("Failed to send uncompressed batch, batch size ({}) exceeds the maximum batch size of {}.", final_batch_size, MAX_BATCH_SIZE))?; - } - Ok(final_batch_size) -} - -#[cfg(all(feature = "transport_compression", feature = "unstable"))] -#[test] -fn tx_compression_test() { - const COMPRESSION_BYTE: usize = 1; - let payload = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]; - let mut buff: Box<[u8]> = - vec![0; lz4_flex::block::get_maximum_output_size(MAX_BATCH_SIZE) + 3].into_boxed_slice(); - - // Compression done for the sake of comparing the result. 
- let payload_compression_size = lz4_flex::block::compress_into(&payload, &mut buff).unwrap(); - - fn get_header_value(buff: &[u8]) -> u16 { - let mut header = [0_u8, 0_u8]; - header[..HEADER_BYTES_SIZE].copy_from_slice(&buff[..HEADER_BYTES_SIZE]); - u16::from_le_bytes(header) - } - - // Streamed with compression enabled - let batch = [16, 0, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]; - let (batch_size, was_compressed) = tx_compressed(true, true, &batch, &mut buff).unwrap(); - let header = get_header_value(&buff); - assert!(was_compressed); - assert_eq!(header as usize, payload_compression_size + COMPRESSION_BYTE); - assert!(batch_size < batch.len() + COMPRESSION_BYTE); - assert_eq!(batch_size, payload_compression_size + 3); - - // Not streamed with compression enabled - let batch = payload; - let (batch_size, was_compressed) = tx_compressed(true, false, &batch, &mut buff).unwrap(); - assert!(was_compressed); - assert!(batch_size < batch.len() + COMPRESSION_BYTE); - assert_eq!(batch_size, payload_compression_size + COMPRESSION_BYTE); - - // Streamed with compression disabled - let batch = [16, 0, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]; - let (batch_size, was_compressed) = tx_compressed(false, true, &batch, &mut buff).unwrap(); - let header = get_header_value(&buff); - assert!(!was_compressed); - assert_eq!(header as usize, payload.len() + COMPRESSION_BYTE); - assert_eq!(batch_size, batch.len() + COMPRESSION_BYTE); - - // Not streamed and compression disabled - let batch = payload; - let (batch_size, was_compressed) = tx_compressed(false, false, &batch, &mut buff).unwrap(); - assert!(!was_compressed); - assert_eq!(batch_size, payload.len() + COMPRESSION_BYTE); - - // Verify that if the compression result is bigger than the original payload size, then the non compressed payload is returned. 
- let batch = [16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; // a non compressable payload with no repetitions - let (batch_size, was_compressed) = tx_compressed(true, true, &batch, &mut buff).unwrap(); - assert!(!was_compressed); - assert_eq!(batch_size, batch.len() + COMPRESSION_BYTE); -} - -#[cfg(all(feature = "transport_compression", feature = "unstable"))] -#[test] -fn rx_compression_test() { - let pool = RecyclingObjectPool::new(2, || vec![0_u8; MAX_BATCH_SIZE].into_boxed_slice()); - let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); - - // Compressed batch - let payload: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8]; - let compression_size = lz4_flex::block::compress_into(&payload, &mut buffer[1..]).unwrap(); - buffer[0] = 1; // is compressed byte - - let mut start_pos: usize = 0; - let mut end_pos: usize = 0; - - rx_decompress( - &mut buffer, - &pool, - compression_size + 1, - &mut start_pos, - &mut end_pos, - ) - .unwrap(); - - assert_eq!(start_pos, 0); - assert_eq!(end_pos, payload.len()); - assert_eq!(buffer[start_pos..end_pos], payload); - - // Non compressed batch - let mut start_pos: usize = 0; - let mut end_pos: usize = 0; - - buffer[0] = 0; - buffer[1..payload.len() + 1].copy_from_slice(&payload[..]); - rx_decompress( - &mut buffer, - &pool, - payload.len() + 1, - &mut start_pos, - &mut end_pos, - ) - .unwrap(); - - assert_eq!(start_pos, 1); - assert_eq!(end_pos, payload.len() + 1); - assert_eq!(buffer[start_pos..end_pos], payload); -} diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 5822b09931..459998ddcf 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -1,5 +1,3 @@ -use crate::transport_unicast_inner::TransportUnicastTrait; - // // Copyright (c) 2023 ZettaScale Technology // @@ -14,16 +12,16 @@ use crate::transport_unicast_inner::TransportUnicastTrait; // ZettaScale Zenoh Team, // use 
super::transport::TransportUnicastUniversal; -use crate::common::priority::TransportChannelRx; +use crate::{ + common::{ + batch::{Decode, RBatch}, + priority::TransportChannelRx, + }, + unicast::{link::TransportLinkUnicast, transport_unicast_inner::TransportUnicastTrait}, +}; use async_std::task; use std::sync::MutexGuard; -use zenoh_buffers::{ - reader::{HasReader, Reader}, - ZSlice, -}; -use zenoh_codec::{RCodec, Zenoh080}; use zenoh_core::{zlock, zread}; -use zenoh_link::LinkUnicast; use zenoh_protocol::{ core::{Priority, Reliability}, network::NetworkMessage, @@ -62,7 +60,7 @@ impl TransportUnicastUniversal { } } - fn handle_close(&self, link: &LinkUnicast, _reason: u8, session: bool) -> ZResult<()> { + fn handle_close(&self, link: &TransportLinkUnicast, _reason: u8, session: bool) -> ZResult<()> { // Stop now rx and tx tasks before doing the proper cleanup let _ = self.stop_rx(link); let _ = self.stop_tx(link); @@ -189,12 +187,14 @@ impl TransportUnicastUniversal { Ok(()) } - pub(super) fn read_messages(&self, mut zslice: ZSlice, link: &LinkUnicast) -> ZResult<()> { - let codec = Zenoh080::new(); - let mut reader = zslice.reader(); - while reader.can_read() { - let msg: TransportMessage = codec - .read(&mut reader) + pub(super) fn read_messages( + &self, + mut batch: RBatch, + link: &TransportLinkUnicast, + ) -> ZResult<()> { + while !batch.is_empty() { + let msg: TransportMessage = batch + .decode() .map_err(|_| zerror!("{}: decoding error", link))?; log::trace!("Received: {:?}", msg); diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 5c17b36827..a920ac90b9 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -11,42 +11,50 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::priority::{TransportPriorityRx, TransportPriorityTx}; #[cfg(feature = "stats")] use crate::stats::TransportStats; 
-use crate::transport_unicast_inner::TransportUnicastTrait; -use crate::unicast::universal::link::TransportLinkUnicast; -use crate::TransportConfigUnicast; -use crate::{TransportExecutor, TransportManager, TransportPeerEventHandler}; +use crate::{ + common::priority::{TransportPriorityRx, TransportPriorityTx}, + unicast::{ + link::{TransportLinkUnicast, TransportLinkUnicastDirection}, + transport_unicast_inner::TransportUnicastTrait, + universal::link::TransportLinkUnicastUniversal, + TransportConfigUnicast, + }, + TransportExecutor, TransportManager, TransportPeerEventHandler, +}; use async_std::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use async_trait::async_trait; use std::fmt::DebugStruct; use std::sync::{Arc, RwLock}; use std::time::Duration; use zenoh_core::{zasynclock, zcondfeat, zread, zwrite}; -use zenoh_link::{Link, LinkUnicast, LinkUnicastDirection}; -use zenoh_protocol::network::NetworkMessage; +use zenoh_link::Link; use zenoh_protocol::{ core::{Priority, WhatAmI, ZenohId}, + network::NetworkMessage, transport::{Close, PrioritySn, TransportMessage, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; macro_rules! zlinkget { ($guard:expr, $link:expr) => { - $guard.iter().find(|tl| &tl.link == $link) + // Compare LinkUnicast link to not compare TransportLinkUnicast direction + $guard.iter().find(|tl| &tl.link.link == &$link.link) }; } macro_rules! zlinkgetmut { ($guard:expr, $link:expr) => { - $guard.iter_mut().find(|tl| &tl.link == $link) + // Compare LinkUnicast link to not compare TransportLinkUnicast direction + $guard.iter_mut().find(|tl| &tl.link.link == &$link.link) }; } macro_rules! 
zlinkindex { ($guard:expr, $link:expr) => { - $guard.iter().position(|tl| &tl.link == $link) + // Compare LinkUnicast link to not compare TransportLinkUnicast direction + $guard.iter().position(|tl| &tl.link.link == &$link.link) }; } @@ -64,7 +72,7 @@ pub(crate) struct TransportUnicastUniversal { // Rx priorities pub(super) priority_rx: Arc<[TransportPriorityRx]>, // The links associated to the channel - pub(super) links: Arc>>, + pub(super) links: Arc>>, // The callback pub(super) callback: Arc>>>, // Mutex for notification @@ -162,10 +170,10 @@ impl TransportUnicastUniversal { Ok(()) } - pub(crate) async fn del_link(&self, link: &LinkUnicast) -> ZResult<()> { + pub(crate) async fn del_link(&self, link: &TransportLinkUnicast) -> ZResult<()> { enum Target { Transport, - Link(Box), + Link(Box), } // Try to remove the link @@ -206,7 +214,7 @@ impl TransportUnicastUniversal { } } - pub(crate) fn stop_tx(&self, link: &LinkUnicast) -> ZResult<()> { + pub(crate) fn stop_tx(&self, link: &TransportLinkUnicast) -> ZResult<()> { let mut guard = zwrite!(self.links); match zlinkgetmut!(guard, link) { Some(l) => { @@ -223,7 +231,7 @@ impl TransportUnicastUniversal { } } - pub(crate) fn stop_rx(&self, link: &LinkUnicast) -> ZResult<()> { + pub(crate) fn stop_rx(&self, link: &TransportLinkUnicast) -> ZResult<()> { let mut guard = zwrite!(self.links); match zlinkgetmut!(guard, link) { Some(l) => { @@ -246,13 +254,16 @@ impl TransportUnicastTrait for TransportUnicastUniversal { /*************************************/ /* LINK */ /*************************************/ - async fn add_link(&self, link: LinkUnicast, direction: LinkUnicastDirection) -> ZResult<()> { + async fn add_link(&self, link: TransportLinkUnicast) -> ZResult<()> { // Add the link to the channel let mut guard = zwrite!(self.links); // Check if we can add more inbound links - if let LinkUnicastDirection::Inbound = direction { - let count = guard.iter().filter(|l| l.direction == direction).count(); + if let 
TransportLinkUnicastDirection::Inbound = link.config.direction { + let count = guard + .iter() + .filter(|l| l.link.config.direction == link.config.direction) + .count(); let limit = zcondfeat!( "transport_multilink", @@ -275,8 +286,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { } } - // Create a channel link from a link - let link = TransportLinkUnicast::new(self.clone(), link, direction); + let link = TransportLinkUnicastUniversal::new(self.clone(), link); let mut links = Vec::with_capacity(guard.len() + 1); links.extend_from_slice(&guard); @@ -357,7 +367,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, link: &LinkUnicast, reason: u8) -> ZResult<()> { + async fn close_link(&self, link: &TransportLinkUnicast, reason: u8) -> ZResult<()> { log::trace!("Closing link {} with peer: {}", link, self.config.zid); let mut pipeline = zlinkget!(zread!(self.links), link) @@ -403,7 +413,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { self.delete().await } - fn get_links(&self) -> Vec { + fn get_links(&self) -> Vec { zread!(self.links).iter().map(|l| l.link.clone()).collect() } @@ -419,33 +429,32 @@ impl TransportUnicastTrait for TransportUnicastUniversal { fn start_tx( &self, - link: &LinkUnicast, + link: &TransportLinkUnicast, executor: &TransportExecutor, keep_alive: Duration, - batch_size: u16, ) -> ZResult<()> { let mut guard = zwrite!(self.links); match zlinkgetmut!(guard, link) { Some(l) => { assert!(!self.priority_tx.is_empty()); - l.start_tx(executor, keep_alive, batch_size, &self.priority_tx); + l.start_tx(executor, keep_alive, &self.priority_tx); Ok(()) } None => { bail!( - "Can not start Link TX {} with peer: {}", + "Can not start Link TX {} with ZID: {}", link, - self.config.zid + self.config.zid, ) } } } - fn start_rx(&self, link: &LinkUnicast, lease: Duration, batch_size: u16) -> ZResult<()> 
{ + fn start_rx(&self, link: &TransportLinkUnicast, lease: Duration) -> ZResult<()> { let mut guard = zwrite!(self.links); match zlinkgetmut!(guard, link) { Some(l) => { - l.start_rx(lease, batch_size); + l.start_rx(lease); Ok(()) } None => { diff --git a/io/zenoh-transport/src/unicast/universal/tx.rs b/io/zenoh-transport/src/unicast/universal/tx.rs index 7dbc5329e6..bf5be7e702 100644 --- a/io/zenoh-transport/src/unicast/universal/tx.rs +++ b/io/zenoh-transport/src/unicast/universal/tx.rs @@ -34,7 +34,7 @@ impl TransportUnicastUniversal { if let Some(pl) = guard .iter() .filter_map(|tl| { - if msg.is_reliable() == tl.link.is_reliable() { + if msg.is_reliable() == tl.link.link.is_reliable() { tl.pipeline.as_ref() } else { None diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index e372e9e013..2ac2084552 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -21,8 +21,8 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs new file mode 100644 index 0000000000..fafb28e642 --- /dev/null +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -0,0 +1,376 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at 
https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +// Restricting to macos by default because of no IPv6 support +// on GitHub CI actions on Linux and Windows. +#[cfg(all(target_family = "unix", feature = "transport_compression"))] +mod tests { + use async_std::{prelude::FutureExt, task}; + use std::{ + any::Any, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }; + use zenoh_core::zasync_executor_init; + use zenoh_link::Link; + use zenoh_protocol::{ + core::{ + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + }, + network::{ + push::{ + ext::{NodeIdType, QoSType}, + Push, + }, + NetworkMessage, + }, + zenoh::Put, + }; + use zenoh_result::ZResult; + use zenoh_transport::{ + multicast::{TransportManagerBuilderMulticast, TransportMulticast}, + unicast::TransportUnicast, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, + }; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const SLEEP_COUNT: Duration = Duration::from_millis(10); + + const MSG_COUNT: usize = 1_000; + const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; + + macro_rules! 
ztimeout { + ($f:expr) => { + $f.timeout(TIMEOUT).await.unwrap() + }; + } + + // Transport Handler for the peer02 + struct SHPeer { + count: Arc, + } + + impl Default for SHPeer { + fn default() -> Self { + Self { + count: Arc::new(AtomicUsize::new(0)), + } + } + } + + impl SHPeer { + fn get_count(&self) -> usize { + self.count.load(Ordering::Relaxed) + } + } + + impl TransportEventHandler for SHPeer { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + panic!(); + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + let arc = Arc::new(SCPeer::new(self.count.clone())); + Ok(arc) + } + } + + // Transport Callback for the peer02 + pub struct SCPeer { + count: Arc, + } + + impl SCPeer { + pub fn new(count: Arc) -> Self { + Self { count } + } + } + + impl TransportMulticastEventHandler for SCPeer { + fn new_peer(&self, peer: TransportPeer) -> ZResult> { + println!("\tNew peer: {:?}", peer); + Ok(Arc::new(SCPeer { + count: self.count.clone(), + })) + } + fn closing(&self) {} + fn closed(&self) {} + + fn as_any(&self) -> &dyn Any { + self + } + } + + impl TransportPeerEventHandler for SCPeer { + fn handle_message(&self, _msg: NetworkMessage) -> ZResult<()> { + self.count.fetch_add(1, Ordering::Relaxed); + Ok(()) + } + + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} + + fn as_any(&self) -> &dyn Any { + self + } + } + + struct TransportMulticastPeer { + manager: TransportManager, + handler: Arc, + transport: TransportMulticast, + } + + async fn open_transport( + endpoint: &EndPoint, + ) -> (TransportMulticastPeer, TransportMulticastPeer) { + // Define peer01 and peer02 IDs + let peer01_id = ZenohId::try_from([1]).unwrap(); + let peer02_id = ZenohId::try_from([2]).unwrap(); + + // Create the peer01 transport manager + let peer01_handler = Arc::new(SHPeer::default()); + let peer01_manager = TransportManager::builder() + 
.zid(peer01_id) + .whatami(WhatAmI::Peer) + .multicast(TransportManagerBuilderMulticast::default().compression(true)) + .build(peer01_handler.clone()) + .unwrap(); + + // Create the peer02 transport manager + let peer02_handler = Arc::new(SHPeer::default()); + let peer02_manager = TransportManager::builder() + .zid(peer02_id) + .whatami(WhatAmI::Peer) + .multicast(TransportManagerBuilderMulticast::default().compression(true)) + .build(peer02_handler.clone()) + .unwrap(); + + // Create an empty transport with the peer01 + // Open transport -> This should be accepted + println!("Opening transport with {endpoint}"); + let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap(); + assert!(!peer01_manager.get_transports_multicast().await.is_empty()); + println!("\t{:?}", peer01_manager.get_transports_multicast().await); + + println!("Opening transport with {endpoint}"); + let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap(); + assert!(!peer02_manager.get_transports_multicast().await.is_empty()); + println!("\t{:?}", peer02_manager.get_transports_multicast().await); + + // Wait to for peer 01 and 02 to join each other + ztimeout!(async { + while peer01_manager + .get_transport_multicast(&peer02_id) + .await + .is_none() + { + task::sleep(SLEEP_COUNT).await; + } + }); + let peer01_transport = peer01_manager + .get_transport_multicast(&peer02_id) + .await + .unwrap(); + println!( + "\tPeer01 peers: {:?}", + peer01_transport.get_peers().unwrap() + ); + + ztimeout!(async { + while peer02_manager + .get_transport_multicast(&peer01_id) + .await + .is_none() + { + task::sleep(SLEEP_COUNT).await; + } + }); + let peer02_transport = peer02_manager + .get_transport_multicast(&peer01_id) + .await + .unwrap(); + println!( + "\tPeer02 peers: {:?}", + peer02_transport.get_peers().unwrap() + ); + + ( + TransportMulticastPeer { + manager: peer01_manager, + handler: peer01_handler, + transport: peer01_transport, + }, + 
TransportMulticastPeer { + manager: peer02_manager, + handler: peer02_handler, + transport: peer02_transport, + }, + ) + } + + async fn close_transport( + peer01: TransportMulticastPeer, + peer02: TransportMulticastPeer, + endpoint: &EndPoint, + ) { + // Close the peer01 transport + println!("Closing transport with {endpoint}"); + ztimeout!(peer01.transport.close()).unwrap(); + assert!(peer01.manager.get_transports_multicast().await.is_empty()); + ztimeout!(async { + while !peer02.transport.get_peers().unwrap().is_empty() { + task::sleep(SLEEP_COUNT).await; + } + }); + + // Close the peer02 transport + println!("Closing transport with {endpoint}"); + ztimeout!(peer02.transport.close()).unwrap(); + assert!(peer02.manager.get_transports_multicast().await.is_empty()); + + // Wait a little bit + task::sleep(SLEEP).await; + } + + async fn test_transport( + peer01: &TransportMulticastPeer, + peer02: &TransportMulticastPeer, + channel: Channel, + msg_size: usize, + ) { + // Create the message to send + let message: NetworkMessage = Push { + wire_expr: "test".into(), + ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), + ext_tstamp: None, + ext_nodeid: NodeIdType::default(), + payload: Put { + payload: vec![0u8; msg_size].into(), + timestamp: None, + encoding: Encoding::default(), + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_unknown: vec![], + } + .into(), + } + .into(); + + println!("Sending {MSG_COUNT} messages... 
{channel:?} {msg_size}"); + for _ in 0..MSG_COUNT { + peer01.transport.schedule(message.clone()).unwrap(); + } + + match channel.reliability { + Reliability::Reliable => { + ztimeout!(async { + while peer02.handler.get_count() != MSG_COUNT { + task::sleep(SLEEP_COUNT).await; + } + }); + } + Reliability::BestEffort => { + ztimeout!(async { + while peer02.handler.get_count() == 0 { + task::sleep(SLEEP_COUNT).await; + } + }); + } + }; + + // Wait a little bit + task::sleep(SLEEP).await; + } + + async fn run_single(endpoint: &EndPoint, channel: Channel, msg_size: usize) { + let (peer01, peer02) = open_transport(endpoint).await; + test_transport(&peer01, &peer02, channel, msg_size).await; + + #[cfg(feature = "stats")] + { + let stats = peer01.transport.get_stats().unwrap().report(); + println!("\tPeer 01: {:?}", stats); + let stats = peer02.transport.get_stats().unwrap().report(); + println!("\tPeer 02: {:?}", stats); + } + + close_transport(peer01, peer02, endpoint).await; + } + + async fn run(endpoints: &[EndPoint], channel: &[Channel], msg_size: &[usize]) { + for e in endpoints.iter() { + for ch in channel.iter() { + for ms in msg_size.iter() { + run_single(e, *ch, *ms).await; + } + } + } + } + + #[cfg(feature = "transport_udp")] + #[test] + fn transport_multicast_compression_udp_only() { + env_logger::init(); + + task::block_on(async { + zasync_executor_init!(); + }); + + // Define the locator + let endpoints: Vec = vec![ + format!( + "udp/224.{}.{}.{}:21000", + rand::random::(), + rand::random::(), + rand::random::() + ) + .parse() + .unwrap(), + // Disabling by default because of no IPv6 support + // on GitHub CI actions. 
+ // format!("udp/{}", ZN_MULTICAST_IPV6_ADDRESS_DEFAULT) + // .parse() + // .unwrap(), + ]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + task::block_on(run(&endpoints, &channel, &MSG_SIZE_NOFRAG)); + } +} diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index 28f69ef3b7..0822d08f58 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -42,8 +42,8 @@ mod tests { }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); @@ -330,7 +330,7 @@ mod tests { } } - #[cfg(feature = "transport_udp")] + #[cfg(all(feature = "transport_compression", feature = "transport_udp"))] #[test] fn transport_multicast_udp_only() { env_logger::init(); @@ -342,7 +342,7 @@ mod tests { // Define the locator let endpoints: Vec = vec![ format!( - "udp/224.{}.{}.{}:7447", + "udp/224.{}.{}.{}:20000", rand::random::(), rand::random::(), rand::random::() diff --git a/io/zenoh-transport/tests/transport_whitelist.rs b/io/zenoh-transport/tests/transport_whitelist.rs index 5279dcff21..5a929ed18c 100644 --- a/io/zenoh-transport/tests/transport_whitelist.rs +++ b/io/zenoh-transport/tests/transport_whitelist.rs @@ -21,8 +21,8 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, 
TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index b22d7875fd..51e78d4ee8 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -21,11 +21,12 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - unicast::establishment::ext::auth::Auth, TransportMulticast, TransportMulticastEventHandler, + multicast::TransportMulticast, unicast::establishment::ext::auth::Auth, + TransportMulticastEventHandler, }; use zenoh_transport::{ - DummyTransportPeerEventHandler, TransportEventHandler, TransportPeer, - TransportPeerEventHandler, TransportUnicast, + unicast::TransportUnicast, DummyTransportPeerEventHandler, TransportEventHandler, + TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); @@ -109,9 +110,13 @@ impl TransportEventHandler for SHClientAuthenticator { #[cfg(feature = "auth_pubkey")] async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { use rsa::{BigUint, RsaPrivateKey, RsaPublicKey}; - use zenoh_transport::test_helpers::make_basic_transport_manager_builder; - use zenoh_transport::unicast::establishment::ext::auth::AuthPubKey; - use zenoh_transport::TransportManager; + use zenoh_transport::{ + unicast::{ + establishment::ext::auth::AuthPubKey, + test_helpers::make_basic_transport_manager_builder, + }, + TransportManager, + }; // Create the transport transport manager for the client 01 let client01_id = ZenohId::try_from([2]).unwrap(); @@ -411,9 +416,13 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { 
#[cfg(feature = "auth_usrpwd")] async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { - use zenoh_transport::test_helpers::make_basic_transport_manager_builder; - use zenoh_transport::unicast::establishment::ext::auth::AuthUsrPwd; - use zenoh_transport::TransportManager; + use zenoh_transport::{ + unicast::{ + establishment::ext::auth::AuthUsrPwd, + test_helpers::make_basic_transport_manager_builder, + }, + TransportManager, + }; /* [CLIENT] */ let client01_id = ZenohId::try_from([2]).unwrap(); diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs new file mode 100644 index 0000000000..be979fef23 --- /dev/null +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -0,0 +1,553 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +#[cfg(feature = "transport_compression")] +mod tests { + use async_std::{prelude::FutureExt, task}; + use std::fmt::Write as _; + use std::{ + any::Any, + convert::TryFrom, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }; + use zenoh_core::zasync_executor_init; + use zenoh_link::Link; + use zenoh_protocol::{ + core::{ + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + }, + network::{ + push::ext::{NodeIdType, QoSType}, + NetworkMessage, Push, + }, + zenoh::Put, + }; + use zenoh_result::ZResult; + use zenoh_transport::{ + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, + }; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const SLEEP_COUNT: Duration = Duration::from_millis(10); + + const MSG_COUNT: usize = 1_000; + const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; + const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; + const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; + + macro_rules! 
ztimeout { + ($f:expr) => { + $f.timeout(TIMEOUT).await.unwrap() + }; + } + + // Transport Handler for the router + struct SHRouter { + count: Arc, + } + + impl Default for SHRouter { + fn default() -> Self { + Self { + count: Arc::new(AtomicUsize::new(0)), + } + } + } + + impl SHRouter { + fn get_count(&self) -> usize { + self.count.load(Ordering::SeqCst) + } + } + + impl TransportEventHandler for SHRouter { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + let arc = Arc::new(SCRouter::new(self.count.clone())); + Ok(arc) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); + } + } + + // Transport Callback for the router + pub struct SCRouter { + count: Arc, + } + + impl SCRouter { + pub fn new(count: Arc) -> Self { + Self { count } + } + } + + impl TransportPeerEventHandler for SCRouter { + fn handle_message(&self, _message: NetworkMessage) -> ZResult<()> { + self.count.fetch_add(1, Ordering::SeqCst); + Ok(()) + } + + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} + + fn as_any(&self) -> &dyn Any { + self + } + } + + // Transport Handler for the client + #[derive(Default)] + struct SHClient; + + impl TransportEventHandler for SHClient { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + Ok(Arc::new(SCClient)) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); + } + } + + // Transport Callback for the client + #[derive(Default)] + pub struct SCClient; + + impl TransportPeerEventHandler for SCClient { + fn handle_message(&self, _message: NetworkMessage) -> ZResult<()> { + Ok(()) + } + + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} + + fn as_any(&self) -> &dyn Any { + self + } + } + + async fn open_transport_unicast( + 
client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + lowlatency_transport: bool, + ) -> ( + TransportManager, + Arc, + TransportManager, + TransportUnicast, + ) { + // Define client and router IDs + let client_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohId::try_from([2]).unwrap(); + + // Create the router transport manager + let router_handler = Arc::new(SHRouter::default()); + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + server_endpoints.len(), + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ); + let router_manager = TransportManager::builder() + .zid(router_id) + .whatami(WhatAmI::Router) + .unicast(unicast) + .build(router_handler.clone()) + .unwrap(); + + // Create the listener on the router + for e in server_endpoints.iter() { + println!("Add endpoint: {}", e); + let _ = ztimeout!(router_manager.add_listener(e.clone())).unwrap(); + } + + // Create the client transport manager + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + client_endpoints.len(), + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ) + .compression(true); + let client_manager = TransportManager::builder() + .whatami(WhatAmI::Client) + .zid(client_id) + .unicast(unicast) + .build(Arc::new(SHClient)) + .unwrap(); + + // Create an empty transport with the client + // Open transport -> This should be accepted + for e in client_endpoints.iter() { + println!("Opening transport with {}", e); + let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); + } + + let client_transport = client_manager + .get_transport_unicast(&router_id) + .await + .unwrap(); + + // Return the handlers + ( + router_manager, + router_handler, + client_manager, + client_transport, + ) + } + + async fn close_transport( + router_manager: TransportManager, + client_manager: TransportManager, + client_transport: TransportUnicast, + endpoints: 
&[EndPoint], + ) { + // Close the client transport + let mut ee = String::new(); + for e in endpoints.iter() { + let _ = write!(ee, "{e} "); + } + println!("Closing transport with {}", ee); + ztimeout!(client_transport.close()).unwrap(); + + ztimeout!(async { + while !router_manager.get_transports_unicast().await.is_empty() { + task::sleep(SLEEP).await; + } + }); + + // Stop the locators on the manager + for e in endpoints.iter() { + println!("Del locator: {}", e); + ztimeout!(router_manager.del_listener(e)).unwrap(); + } + + ztimeout!(async { + while !router_manager.get_listeners().is_empty() { + task::sleep(SLEEP).await; + } + }); + + // Wait a little bit + task::sleep(SLEEP).await; + + ztimeout!(router_manager.close()); + ztimeout!(client_manager.close()); + + // Wait a little bit + task::sleep(SLEEP).await; + } + + async fn test_transport( + router_handler: Arc, + client_transport: TransportUnicast, + channel: Channel, + msg_size: usize, + ) { + println!( + "Sending {} messages... {:?} {}", + MSG_COUNT, channel, msg_size + ); + let cctrl = match channel.reliability { + Reliability::Reliable => CongestionControl::Block, + Reliability::BestEffort => CongestionControl::Drop, + }; + // Create the message to send + let message: NetworkMessage = Push { + wire_expr: "test".into(), + ext_qos: QoSType::new(channel.priority, cctrl, false), + ext_tstamp: None, + ext_nodeid: NodeIdType::default(), + payload: Put { + payload: vec![0u8; msg_size].into(), + timestamp: None, + encoding: Encoding::default(), + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_unknown: vec![], + } + .into(), + } + .into(); + for _ in 0..MSG_COUNT { + let _ = client_transport.schedule(message.clone()); + } + + match channel.reliability { + Reliability::Reliable => { + ztimeout!(async { + while router_handler.get_count() != MSG_COUNT { + task::sleep(SLEEP_COUNT).await; + } + }); + } + Reliability::BestEffort => { + ztimeout!(async { + while router_handler.get_count() == 0 
{ + task::sleep(SLEEP_COUNT).await; + } + }); + } + }; + + // Wait a little bit + task::sleep(SLEEP).await; + } + + async fn run_single( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: Channel, + msg_size: usize, + lowlatency_transport: bool, + ) { + println!( + "\n>>> Running test for: {:?}, {:?}, {:?}, {}", + client_endpoints, server_endpoints, channel, msg_size + ); + + #[allow(unused_variables)] // Used when stats feature is enabled + let (router_manager, router_handler, client_manager, client_transport) = + open_transport_unicast(client_endpoints, server_endpoints, lowlatency_transport).await; + + test_transport( + router_handler.clone(), + client_transport.clone(), + channel, + msg_size, + ) + .await; + + #[cfg(feature = "stats")] + { + let c_stats = client_transport.get_stats().unwrap().report(); + println!("\tClient: {:?}", c_stats); + let r_stats = router_manager + .get_transport_unicast(&client_manager.config.zid) + .await + .unwrap() + .get_stats() + .map(|s| s.report()) + .unwrap(); + println!("\tRouter: {:?}", r_stats); + } + + close_transport( + router_manager, + client_manager, + client_transport, + client_endpoints, + ) + .await; + } + + async fn run_internal( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: &[Channel], + msg_size: &[usize], + lowlatency_transport: bool, + ) { + for ch in channel.iter() { + for ms in msg_size.iter() { + run_single( + client_endpoints, + server_endpoints, + *ch, + *ms, + lowlatency_transport, + ) + .await; + } + } + } + + async fn run_with_universal_transport( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: &[Channel], + msg_size: &[usize], + ) { + run_internal(client_endpoints, server_endpoints, channel, msg_size, false).await; + } + + async fn run_with_lowlatency_transport( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: &[Channel], + msg_size: &[usize], + ) { + if client_endpoints.len() > 1 || 
server_endpoints.len() > 1 { + println!("LowLatency transport doesn't support more than one link, so this test would produce MAX_LINKS error!"); + panic!(); + } + run_internal(client_endpoints, server_endpoints, channel, msg_size, true).await; + } + + #[cfg(feature = "transport_tcp")] + #[test] + fn transport_unicast_compression_tcp_only() { + let _ = env_logger::try_init(); + task::block_on(async { + zasync_executor_init!(); + }); + + // Define the locators + let endpoints: Vec = vec![ + format!("tcp/127.0.0.1:{}", 19000).parse().unwrap(), + format!("tcp/[::1]:{}", 19001).parse().unwrap(), + ]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::Reliable, + }, + ]; + // Run + task::block_on(run_with_universal_transport( + &endpoints, + &endpoints, + &channel, + &MSG_SIZE_ALL, + )); + } + + #[cfg(feature = "transport_tcp")] + #[test] + fn transport_unicast_compression_tcp_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + task::block_on(async { + zasync_executor_init!(); + }); + + // Define the locators + let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 19100).parse().unwrap()]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::Reliable, + }, + ]; + // Run + task::block_on(run_with_lowlatency_transport( + &endpoints, + &endpoints, + &channel, + &MSG_SIZE_LOWLATENCY, + )); + } + + #[cfg(feature = "transport_udp")] + #[test] + fn transport_unicast_compression_udp_only() { + let _ = env_logger::try_init(); + task::block_on(async { + zasync_executor_init!(); + }); + + // Define the locator + let endpoints: Vec = vec![ + format!("udp/127.0.0.1:{}", 19010).parse().unwrap(), + 
format!("udp/[::1]:{}", 19011).parse().unwrap(), + ]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + task::block_on(run_with_universal_transport( + &endpoints, + &endpoints, + &channel, + &MSG_SIZE_NOFRAG, + )); + } + + #[cfg(feature = "transport_udp")] + #[test] + fn transport_unicast_compression_udp_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + task::block_on(async { + zasync_executor_init!(); + }); + + // Define the locator + let endpoints: Vec = vec![format!("udp/127.0.0.1:{}", 19110).parse().unwrap()]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::default(), + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + task::block_on(run_with_lowlatency_transport( + &endpoints, + &endpoints, + &channel, + &MSG_SIZE_NOFRAG, + )); + } +} diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index 11f5e46ca7..64516f6f26 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -33,8 +33,8 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const MSG_COUNT: usize = 1_000; diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index 01ee0e3751..4c7934309b 100644 
--- a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -33,10 +33,11 @@ use zenoh_protocol::{ zenoh::Put, }; use zenoh_result::ZResult; -use zenoh_transport::test_helpers::make_transport_manager_builder; use zenoh_transport::{ - DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, + TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const MSG_SIZE: usize = 8; diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 182408f75b..cd8a48565a 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -20,9 +20,9 @@ mod tests { use zenoh_protocol::core::{WhatAmI, ZenohId}; use zenoh_result::ZResult; use zenoh_transport::{ - DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, - TransportMulticast, TransportMulticastEventHandler, TransportPeer, - TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, DummyTransportPeerEventHandler, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index f361f6f684..76a63cc6e0 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -18,9 +18,10 @@ use zenoh_link::EndPoint; use zenoh_protocol::core::{WhatAmI, ZenohId}; use zenoh_result::ZResult; use zenoh_transport::{ - 
test_helpers::make_transport_manager_builder, DummyTransportPeerEventHandler, - TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, + TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index 7d8b70b4d3..07f8e43bcb 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -35,8 +35,8 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); @@ -102,8 +102,8 @@ impl TransportEventHandler for SHRouter { fn new_multicast( &self, - _transport: zenoh_transport::TransportMulticast, - ) -> ZResult> { + _transport: TransportMulticast, + ) -> ZResult> { panic!(); } } diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 59fc1467cf..500a174daf 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -23,7 +23,7 @@ mod tests { }, time::Duration, }; - use zenoh_buffers::SplitBuffer; + use zenoh_buffers::buffer::SplitBuffer; use zenoh_core::zasync_executor_init; use zenoh_link::Link; use zenoh_protocol::{ @@ -37,8 +37,8 @@ mod tests { use zenoh_result::ZResult; use 
zenoh_shm::{SharedMemoryBuf, SharedMemoryManager}; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 3de47aba03..dad4b6f775 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -32,8 +32,8 @@ mod tests { }; use zenoh_result::ZResult; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index e01d9d0130..ac35090cdb 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -35,10 +35,11 @@ use zenoh_protocol::{ zenoh::Put, }; use zenoh_result::ZResult; -use zenoh_transport::test_helpers::make_transport_manager_builder; use zenoh_transport::{ - TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, - TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, 
}; // These keys and certificates below are purposedly generated to run TLS and mTLS tests. @@ -482,9 +483,6 @@ async fn test_transport( .into(); for _ in 0..MSG_COUNT { let _ = client_transport.schedule(message.clone()); - // print!("S-{i} "); - use std::io::Write; - std::io::stdout().flush().unwrap(); } match channel.reliability { @@ -1215,6 +1213,7 @@ fn transport_unicast_quic_only_server() { fn transport_unicast_tls_only_mutual_success() { use zenoh_link::tls::config::*; + let _ = env_logger::try_init(); task::block_on(async { zasync_executor_init!(); }); @@ -1282,24 +1281,13 @@ fn transport_unicast_tls_only_mutual_success() { )); } -// Constants replicating the alert descriptions thrown by the Rustls library. -// These alert descriptions are internal of the library and cannot be reached from these tests -// as to do a proper comparison. For the sake of simplicity we verify these constants are contained -// in the expected error messages from the tests below. -// -// See: https://docs.rs/rustls/latest/src/rustls/msgs/enums.rs.html#128 -#[cfg(all(feature = "transport_tls", target_family = "unix"))] -const RUSTLS_UNKNOWN_CA_ALERT_DESCRIPTION: &str = "UnknownCA"; -#[cfg(all(feature = "transport_tls", target_family = "unix"))] -const RUSTLS_CERTIFICATE_REQUIRED_ALERT_DESCRIPTION: &str = "CertificateRequired"; - #[cfg(all(feature = "transport_tls", target_family = "unix"))] #[test] fn transport_unicast_tls_only_mutual_no_client_certs_failure() { use std::vec; - use zenoh_link::tls::config::*; + let _ = env_logger::try_init(); task::block_on(async { zasync_executor_init!(); }); @@ -1361,9 +1349,6 @@ fn transport_unicast_tls_only_mutual_no_client_certs_failure() { )) }); assert!(result.is_err()); - let err = result.unwrap_err(); - let error_msg = panic_message::panic_message(&err); - assert!(error_msg.contains(RUSTLS_CERTIFICATE_REQUIRED_ALERT_DESCRIPTION)); } #[cfg(all(feature = "transport_tls", target_family = "unix"))] @@ -1371,6 +1356,7 @@ fn 
transport_unicast_tls_only_mutual_no_client_certs_failure() { fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { use zenoh_link::tls::config::*; + let _ = env_logger::try_init(); task::block_on(async { zasync_executor_init!(); }); @@ -1443,9 +1429,6 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { )) }); assert!(result.is_err()); - let err = result.unwrap_err(); - let error_msg = panic_message::panic_message(&err); - assert!(error_msg.contains(RUSTLS_UNKNOWN_CA_ALERT_DESCRIPTION)); } #[test] diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 053bb7e285..0177c2d454 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -39,6 +39,7 @@ shared-memory = [ ] stats = ["zenoh-transport/stats", "zenoh-protocol/stats"] transport_multilink = ["zenoh-transport/transport_multilink"] +transport_compression = ["zenoh-transport/transport_compression"] transport_quic = ["zenoh-transport/transport_quic"] transport_serial = ["zenoh-transport/transport_serial"] transport_unixpipe = ["zenoh-transport/transport_unixpipe"] @@ -52,6 +53,7 @@ default = [ "auth_pubkey", "auth_usrpwd", "transport_multilink", + "transport_compression", "transport_quic", "transport_tcp", "transport_tls", diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 56772797ce..a8aad9c809 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -122,14 +122,14 @@ impl TransportEventHandler for Handler { fn new_unicast( &self, peer: zenoh_transport::TransportPeer, - _transport: zenoh_transport::TransportUnicast, + _transport: zenoh_transport::unicast::TransportUnicast, ) -> ZResult> { self.new_peer(peer) } fn new_multicast( &self, - _transport: zenoh_transport::TransportMulticast, + _transport: zenoh_transport::multicast::TransportMulticast, ) -> ZResult> { Ok(Arc::new(self.clone())) } diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index b3e0c4b87c..9f6418974e 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -26,7 +26,7 @@ use 
zenoh_protocol::{ network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; -use zenoh_transport::Primitives; +use zenoh_transport::primitives::Primitives; use crate::{prelude::Selector, Session, Undeclarable}; diff --git a/zenoh/src/net/routing/face.rs b/zenoh/src/net/routing/face.rs index cb01f3ea6e..0d2ee926d1 100644 --- a/zenoh/src/net/routing/face.rs +++ b/zenoh/src/net/routing/face.rs @@ -25,7 +25,7 @@ use zenoh_protocol::{ }; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; -use zenoh_transport::{Primitives, TransportMulticast}; +use zenoh_transport::{multicast::TransportMulticast, primitives::Primitives}; pub struct FaceState { pub(super) id: usize, diff --git a/zenoh/src/net/routing/network.rs b/zenoh/src/net/routing/network.rs index 3af1e0a87c..0fb9f36120 100644 --- a/zenoh/src/net/routing/network.rs +++ b/zenoh/src/net/routing/network.rs @@ -27,7 +27,7 @@ use zenoh_protocol::common::ZExtBody; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::network::oam::id::OAM_LINKSTATE; use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; -use zenoh_transport::TransportUnicast; +use zenoh_transport::unicast::TransportUnicast; #[derive(Clone)] struct Details { diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 444730e24d..1ad5d93609 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -37,8 +37,10 @@ use zenoh_protocol::network::{Mapping, NetworkBody, NetworkMessage}; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; use zenoh_transport::{ - DeMux, DummyPrimitives, McastMux, Mux, Primitives, TransportMulticast, TransportPeer, - TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, + primitives::{DeMux, DummyPrimitives, McastMux, Mux, Primitives}, + unicast::TransportUnicast, + TransportPeer, TransportPeerEventHandler, }; // use zenoh_collections::Timer; use 
zenoh_core::zconfigurable; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 0eb099a098..08b00c5047 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -26,7 +26,7 @@ use std::convert::TryFrom; use std::convert::TryInto; use std::sync::Arc; use std::sync::Mutex; -use zenoh_buffers::SplitBuffer; +use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::ValidatedMap; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, ExprId, KnownEncoding, WireExpr, ZenohId, EMPTY_EXPR_ID}, @@ -38,7 +38,7 @@ use zenoh_protocol::{ zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; -use zenoh_transport::{Primitives, TransportUnicast}; +use zenoh_transport::{primitives::Primitives, unicast::TransportUnicast}; pub struct AdminContext { runtime: Runtime, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 92d369e998..f9486ea59c 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -42,8 +42,9 @@ use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_result::{bail, ZResult}; use zenoh_sync::get_mut_unchecked; use zenoh_transport::{ - DeMux, TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + multicast::TransportMulticast, primitives::DeMux, unicast::TransportUnicast, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, }; pub struct RuntimeState { diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 5dadf8d8a9..933a2e46a4 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -27,7 +27,7 @@ use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; use zenoh_protocol::network::declare::Mode; use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; use 
zenoh_protocol::zenoh::{PushBody, Put}; -use zenoh_transport::{DummyPrimitives, Primitives}; +use zenoh_transport::primitives::{DummyPrimitives, Primitives}; #[test] fn base_test() { diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 2f2e7650a0..36a841d1ef 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -25,7 +25,11 @@ pub use common::*; pub(crate) mod common { pub use crate::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; - pub use zenoh_buffers::{reader::HasReader, writer::HasWriter, SplitBuffer}; + pub use zenoh_buffers::{ + buffer::{Buffer, SplitBuffer}, + reader::HasReader, + writer::HasWriter, + }; pub use zenoh_core::Resolve; pub(crate) type Id = usize; diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index ac1d6bf55a..be439b6f2d 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -13,12 +13,11 @@ // //! Publishing primitives. - #[zenoh_macros::unstable] use crate::handlers::Callback; #[zenoh_macros::unstable] use crate::handlers::DefaultHandler; -use crate::net::transport::Primitives; +use crate::net::transport::primitives::Primitives; use crate::prelude::*; use crate::sample::DataInfo; use crate::Encoding; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 6156b0aa78..4881de6ec1 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -32,7 +32,7 @@ use zenoh_protocol::zenoh::ext::ValueType; use zenoh_protocol::zenoh::reply::ext::ConsolidationType; use zenoh_protocol::zenoh::{self, ResponseBody}; use zenoh_result::ZResult; -use zenoh_transport::Primitives; +use zenoh_transport::primitives::Primitives; pub(crate) struct QueryInner { /// The key expression of this Query. 
diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index e8314c9cc6..8c566a6640 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -22,7 +22,7 @@ use crate::key_expr::KeyExprInner; use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::routing::face::Face; use crate::net::runtime::Runtime; -use crate::net::transport::Primitives; +use crate::net::transport::primitives::Primitives; use crate::prelude::Locality; use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index f719fce3eb..96cca533df 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -32,9 +32,18 @@ fn zenoh_liveliness() { task::block_on(async { zasync_executor_init!(); - let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); - - let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let mut c1 = config::peer(); + c1.listen + .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) + .unwrap(); + c1.scouting.multicast.set_enabled(Some(false)).unwrap(); + let session1 = ztimeout!(zenoh::open(c1).res_async()).unwrap(); + let mut c2 = config::peer(); + c2.connect + .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) + .unwrap(); + c2.scouting.multicast.set_enabled(Some(false)).unwrap(); + let session2 = ztimeout!(zenoh::open(c2).res_async()).unwrap(); let replies = ztimeout!(session2 .liveliness() diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index ad8ea1d05a..7219bf5ff2 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -319,7 +319,7 @@ fn gossip() -> Result<()> { async_std::task::block_on(async { zasync_executor_init!(); - let locator = String::from("tcp/127.0.0.1:17448"); + let locator = String::from("tcp/127.0.0.1:17449"); let ke = String::from("testKeyExprGossip"); let msg_size = 8; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index c94cb36510..c2cec7c627 
100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -126,6 +126,9 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re } }); + // Wait for the messages to arrive + task::sleep(SLEEP).await; + println!("[PS][03b] Unsubscribing on peer01 session"); ztimeout!(sub.undeclare().res_async()).unwrap();